o1 hotfix for images
@@ -246,10 +246,13 @@ function _clientCreateAixMetaInReferenceToPart(items: DMetaReferenceItem[]): Aix
  * Hot fix for handling system messages with OpenAI O1 Preview models.
  * Converts System to User messages for compatibility.
  */
-export function clientHotFixSystemMessageForO1Preview(aixChatGenerate: AixAPIChatGenerate_Request): void {
+export function clientHotFixGenerateRequestForO1Preview(aixChatGenerate: AixAPIChatGenerate_Request): void {
 
+  let workaroundsCount = 0;
+
   // Convert the main system message if it exists
   if (aixChatGenerate.systemMessage) {
+    workaroundsCount++;
 
     // Convert system message to user message
     const systemAsUser: AixMessages_UserMessage = {
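The hunk boundary cuts off the body of the system-to-user conversion, so only its first line (const systemAsUser) is visible above. A rough sketch of what such a conversion generally involves follows; it is not the project's actual code, and the Sketch* type names and field shapes are invented for illustration only.

// Hypothetical sketch only: the field names ('role', 'parts', 'pt', 'text') and the
// Sketch* types are invented for illustration and are NOT big-AGI's actual types.
interface SketchTextPart { pt: 'text'; text: string; }
interface SketchUserMessage { role: 'user'; parts: SketchTextPart[]; }
interface SketchRequest {
  systemMessage?: { parts: SketchTextPart[] };
  chatSequence: SketchUserMessage[];
}

function sketchConvertSystemToUser(request: SketchRequest): void {
  if (!request.systemMessage) return;
  // Re-emit the system parts as the first user message, then clear the system slot,
  // mirroring the idea of the hotfix above (o1-preview rejected the 'system' role).
  const systemAsUser: SketchUserMessage = { role: 'user', parts: request.systemMessage.parts };
  request.chatSequence.unshift(systemAsUser);
  delete request.systemMessage;
}

The real AixMessages_UserMessage type, and the way the converted message is inserted into the chat, are defined elsewhere in the file and are not shown in this diff.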
@@ -267,4 +270,21 @@ export function clientHotFixSystemMessageForO1Preview(aixChatGenerate: AixAPICha
   // Note: other conversions that would translate to system inside the AIX Dispatch will be handled there, as we have a
   // higher level representation here, where the roles are 'user', 'model', and 'tool'.
 
+  // Remove any inline images from the entire chat sequence
+  for (let i = 0; i < aixChatGenerate.chatSequence.length; i++) {
+    const message = aixChatGenerate.chatSequence[i];
+
+    // Iterate over message parts and remove inline images
+    for (let j = message.parts.length - 1; j >= 0; j--) {
+      if (message.parts[j].pt === 'inline_image') {
+        workaroundsCount++;
+        message.parts.splice(j, 1);
+      }
+    }
+  }
+
+  // Log the number of workarounds applied
+  if (workaroundsCount > 0)
+    console.warn(`[DEV] Working around o1 models limitations: applied ${workaroundsCount} client-side workarounds`);
+
 }
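The new image-stripping loop iterates over message.parts backwards (j--) while calling splice. Below is a standalone illustration of why the reverse walk matters; removeMatching and the sample data are made up for this example and are not part of the commit.

// Standalone illustration of the remove-while-splicing pattern in the loop above:
// splicing during a forward walk would skip the element that shifts into the freed
// index, so the loop walks the array from the end instead.
function removeMatching<T>(items: T[], shouldRemove: (item: T) => boolean): number {
  let removed = 0;
  for (let j = items.length - 1; j >= 0; j--) {
    if (shouldRemove(items[j])) {
      items.splice(j, 1);
      removed++;
    }
  }
  return removed;
}

// Example: strip every 'image' entry in place, analogous to the pt === 'inline_image' check.
const parts = ['text', 'image', 'image', 'text'];
console.log(removeMatching(parts, (p) => p === 'image'), parts); // 2 [ 'text', 'text' ]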
@@ -15,7 +15,7 @@ import type { AixAPI_Access, AixAPI_Context, AixAPI_Context_ChatGenerateNS, AixA
 
 import { ContentReassembler } from './ContentReassembler';
 import { ThrottleFunctionCall } from './ThrottleFunctionCall';
-import { aixChatGenerateRequestFromDMessages, clientHotFixSystemMessageForO1Preview } from './aix.client.chatGenerateRequest';
+import { aixChatGenerateRequestFromDMessages, clientHotFixGenerateRequestForO1Preview } from './aix.client.chatGenerateRequest';
 
 
 // configuration
@@ -153,9 +153,8 @@ export async function aixLLMChatGenerateContent<TServiceSettings extends object
 
   // [OpenAI] Apply the hot fix for O1 Preview models; however this is a late-stage emergency hotfix as we expect the caller to be aware of this logic
   const isO1Preview = llm.interfaces.includes(LLM_IF_SPECIAL_OAI_O1Preview);
-  if (isO1Preview && aixChatGenerate.systemMessage) {
-    console.warn('[DEV] Working around o1 models limitations.');
-    clientHotFixSystemMessageForO1Preview(aixChatGenerate);
+  if (isO1Preview) {
+    clientHotFixGenerateRequestForO1Preview(aixChatGenerate);
+    aixStreaming = false;
   }
 
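At this call site the hotfix is now applied whenever the model advertises the o1-preview interface, and streaming is forced off (aixStreaming = false), presumably because these models did not support streaming responses at the time. A hedged usage sketch of the renamed export follows: the request literal uses only fields visible in this diff (systemMessage, chatSequence, parts, pt), it is typed as any because the full AixAPIChatGenerate_Request shape is not shown here, and the import path is the one that appears in the import hunk above.

// Hedged usage sketch of the renamed hotfix; field names come only from this diff,
// the 'any' typing and relative import location are assumptions for illustration.
import { clientHotFixGenerateRequestForO1Preview } from './aix.client.chatGenerateRequest';

const request: any = {
  systemMessage: { parts: [{ pt: 'text', text: 'You are a helpful assistant.' }] },
  chatSequence: [
    {
      role: 'user',
      parts: [
        { pt: 'text', text: 'Describe this image.' },
        { pt: 'inline_image' /* image payload elided */ },
      ],
    },
  ],
};

clientHotFixGenerateRequestForO1Preview(request);
// Per the hunks above: the 'inline_image' part is spliced out, the system message is
// re-expressed for compatibility, and a console warning reports how many client-side
// workarounds were applied.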