LLMs: make maxTokens optional

Enrico Ros
2024-01-04 03:38:40 -08:00
parent e79449b38c
commit 122bbf0034
+2 -2
@@ -66,7 +66,7 @@ export async function unifiedStreamingClient<TSourceSetup = unknown, TLLMOptions
   // model params (llm)
   const { llmRef, llmTemperature, llmResponseTokens } = (llmOptions as any) || {};
-  if (!llmRef || llmTemperature === undefined || !llmResponseTokens)
+  if (!llmRef || llmTemperature === undefined)
     throw new Error(`Error in configuration for model ${llmId}: ${JSON.stringify(llmOptions)}`);
   // prepare the input, similarly to the tRPC openAI.chatGenerate
@@ -75,7 +75,7 @@ export async function unifiedStreamingClient<TSourceSetup = unknown, TLLMOptions
     model: {
       id: llmRef,
       temperature: llmTemperature,
-      maxTokens: llmResponseTokens,
+      ...(llmResponseTokens ? { maxTokens: llmResponseTokens } : {}),
     },
     history: messages,
   };
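
Note on the change: with the conditional object spread on the added line, the maxTokens key is included only when llmResponseTokens is set, so models without a configured response-token budget no longer fail the configuration check. A minimal TypeScript sketch of the pattern follows; the buildModelParams helper and its sample values are illustrative only, not part of the commit.

interface ModelParams {
  id: string;
  temperature: number;
  maxTokens?: number; // now optional: absent when no token budget is configured
}

// Hypothetical helper showing the conditional-spread pattern used in the diff.
function buildModelParams(llmRef: string, llmTemperature: number, llmResponseTokens?: number): ModelParams {
  return {
    id: llmRef,
    temperature: llmTemperature,
    // Spreads { maxTokens } only for a truthy value; otherwise spreads {} and the key is omitted.
    ...(llmResponseTokens ? { maxTokens: llmResponseTokens } : {}),
  };
}

buildModelParams('example-model', 0.7, 1024); // => { id: 'example-model', temperature: 0.7, maxTokens: 1024 }
buildModelParams('example-model', 0.7);       // => { id: 'example-model', temperature: 0.7 }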