Mirror of https://github.com/enricoros/big-AGI.git, synced 2026-05-10 21:50:14 -07:00.
LLMs: make maxTokens optional
This commit is contained in:
+2
-2
@@ -66,7 +66,7 @@ export async function unifiedStreamingClient<TSourceSetup = unknown, TLLMOptions
   // model params (llm)
   const { llmRef, llmTemperature, llmResponseTokens } = (llmOptions as any) || {};
-  if (!llmRef || llmTemperature === undefined || !llmResponseTokens)
+  if (!llmRef || llmTemperature === undefined)
     throw new Error(`Error in configuration for model ${llmId}: ${JSON.stringify(llmOptions)}`);

   // prepare the input, similarly to the tRPC openAI.chatGenerate
@@ -75,7 +75,7 @@ export async function unifiedStreamingClient<TSourceSetup = unknown, TLLMOptions
     model: {
       id: llmRef,
       temperature: llmTemperature,
-      maxTokens: llmResponseTokens,
+      ...(llmResponseTokens ? { maxTokens: llmResponseTokens } : {}),
     },
     history: messages,
   };
Reference in New Issue
Block a user