From 7df767119b503a2f508e094725d64e97d45409b7 Mon Sep 17 00:00:00 2001
From: Enrico Ros
Date: Fri, 27 Sep 2024 11:47:15 -0700
Subject: [PATCH] AIX: Temperature override

---
 src/modules/aifn/agicodefixup/agiFixupCode.ts |  4 ++--
 src/modules/aix/client/aix.client.ts          | 20 +++++++++++++++----
 2 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/src/modules/aifn/agicodefixup/agiFixupCode.ts b/src/modules/aifn/agicodefixup/agiFixupCode.ts
index d10f60a86..363bb4362 100644
--- a/src/modules/aifn/agicodefixup/agiFixupCode.ts
+++ b/src/modules/aifn/agicodefixup/agiFixupCode.ts
@@ -33,7 +33,7 @@ Respond only by calling the \`{{functionName}}\` function.`,
 \`\`\`
 
 {{errorMessageSection}}
-Please analyze the code, correct any errors, in particular remove functions if any, and provide a valid JSON configuration that can be parsed by ChartJS.
+Please analyze the JSON, correct any errors (of course remove functions if any), and provide a valid JSON configuration that can be parsed by ChartJS.
 Call the function \`{{functionName}}\` once, providing the corrected code.`,
   functionName: 'provide_corrected_chartjs_code',
   outputSchema: z.object({
@@ -86,7 +86,7 @@ export async function agiFixupCode(issueType: CodeFixType, codeToFix: string, er
     aixRequest,
     aixCreateChatGenerateStreamContext('DEV', 'DEV'),
     false,
-    { abortSignal },
+    { abortSignal, llmOptionsOverride: { llmTemperature: 0 /* chill the model for fixing code, we need valid json, not creativity */ } },
   );
 
   // Validate and parse the AI's response
diff --git a/src/modules/aix/client/aix.client.ts b/src/modules/aix/client/aix.client.ts
index f27fc1c45..d2dfbc265 100644
--- a/src/modules/aix/client/aix.client.ts
+++ b/src/modules/aix/client/aix.client.ts
@@ -30,11 +30,19 @@ export function aixCreateChatGenerateStreamContext(name: AixAPI_Context_ChatGene
   return { method: 'chat-stream', name, ref };
 }
 
-export function aixCreateModelFromLLMOptions(llmOptions: Record<string, any>, debugLlmId: string): AixAPI_Model {
+export function aixCreateModelFromLLMOptions(
+  llmOptions: Record<string, any> | undefined,
+  llmOptionsOverride: Record<string, any> | undefined,
+  debugLlmId: string
+): AixAPI_Model {
   // model params (llm)
-  const { llmRef, llmTemperature, llmResponseTokens } = llmOptions || {};
+  let { llmRef, llmTemperature, llmResponseTokens } = llmOptions || {};
   if (!llmRef || llmTemperature === undefined)
-    throw new Error(`Error in configuration for model ${debugLlmId}: ${JSON.stringify(llmOptions)}`);
+    throw new Error(`AIX: Error in configuration for model ${debugLlmId} (missing ref, temperature): ${JSON.stringify(llmOptions)}`);
+
+  // model params overrides
+  if (llmOptionsOverride?.llmTemperature !== undefined) llmTemperature = llmOptionsOverride.llmTemperature;
+  if (llmOptionsOverride?.llmResponseTokens !== undefined) llmResponseTokens = llmOptionsOverride.llmResponseTokens;
 
   return {
     id: llmRef,
@@ -64,6 +72,10 @@ type StreamMessageStatus = {
 interface AixClientOptions {
   abortSignal: AbortSignal,
   throttleParallelThreads?: number; // 0: disable, 1: default throttle (12Hz), 2+ reduce frequency with the square root
+  llmOptionsOverride?: Partial<{
+    llmTemperature: number,
+    llmResponseTokens: number,
+  }>;
 }
 
@@ -178,7 +190,7 @@ export async function aixChatGenerateContent_DMessage(
 ...(llm.sId);
 
   // Aix Model
-  const aixModel = aixCreateModelFromLLMOptions(llm.options, llmId);
+  const aixModel = aixCreateModelFromLLMOptions(llm.options, clientOptions?.llmOptionsOverride, llmId);
 
   // [OpenAI] Apply the hot fix for O1 Preview models; however this is a late-stage emergency hotfix as we expect the caller to be aware of this logic
   const isO1Preview = llm.interfaces.includes(LLM_IF_SPECIAL_OAI_O1Preview);
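A minimal sketch of the new override semantics, assuming only the aixCreateModelFromLLMOptions shown in this patch; the import path, llmRef value, debugLlmId, and numeric options below are illustrative placeholders, not real configuration:

import { aixCreateModelFromLLMOptions } from './src/modules/aix/client/aix.client';

// stored per-model options, shaped like the llm.options passed at the call site
const storedOptions = { llmRef: 'openai-gpt-4o', llmTemperature: 0.7, llmResponseTokens: 8192 };

// no override: the returned AixAPI_Model keeps the stored temperature (0.7)
const defaultModel = aixCreateModelFromLLMOptions(storedOptions, undefined, 'debug-llm');

// per-call override: llmTemperature 0 replaces the stored 0.7, while
// llmResponseTokens stays at 8192 because it is not overridden
const chilledModel = aixCreateModelFromLLMOptions(storedOptions, { llmTemperature: 0 }, 'debug-llm');

Callers reach this path through AixClientOptions, as agiFixupCode does above with { abortSignal, llmOptionsOverride: { llmTemperature: 0 } }, so the override applies to a single generation without mutating the model's stored options.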