diff --git a/src/modules/llms/server/openai/models/openai.models.ts b/src/modules/llms/server/openai/models/openai.models.ts
index d6b68f545..75457dc7b 100644
--- a/src/modules/llms/server/openai/models/openai.models.ts
+++ b/src/modules/llms/server/openai/models/openai.models.ts
@@ -25,7 +25,10 @@ export const _knownOpenAIChatModels: ManualMappings = [
     maxCompletionTokens: 128000,
     trainingDataCutoff: 'Sep 30, 2024',
     interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Fn, LLM_IF_OAI_Json, LLM_IF_OAI_Reasoning, LLM_IF_OAI_Responses, LLM_IF_OAI_PromptCaching, LLM_IF_HOTFIX_NoTemperature],
-    parameterSpecs: [{ paramId: 'llmVndOaiReasoningEffort4' }, { paramId: 'llmVndOaiWebSearchContext' }],
+    parameterSpecs: [
+      { paramId: 'llmVndOaiReasoningEffort4' }, { paramId: 'llmVndOaiWebSearchContext' },
+      { paramId: 'llmVndOaiRestoreMarkdown' } // activate markdown restoration (true as initial value)
+    ],
     chatPrice: { input: 1.25, cache: { cType: 'oai-ac', read: 0.125 }, output: 10 },
     benchmark: { cbaElo: 1481 },
   },
@@ -40,7 +43,7 @@ export const _knownOpenAIChatModels: ManualMappings = [
     maxCompletionTokens: 128000,
     trainingDataCutoff: 'Sep 30, 2024',
     interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Fn, LLM_IF_OAI_Json, LLM_IF_OAI_Reasoning, LLM_IF_OAI_Responses, LLM_IF_OAI_PromptCaching, LLM_IF_HOTFIX_NoTemperature],
-    parameterSpecs: [{ paramId: 'llmVndOaiReasoningEffort4' }, { paramId: 'llmVndOaiWebSearchContext' }],
+    parameterSpecs: [{ paramId: 'llmVndOaiReasoningEffort4' }, { paramId: 'llmVndOaiWebSearchContext' }, { paramId: 'llmVndOaiRestoreMarkdown' }],
     chatPrice: { input: 1.25, cache: { cType: 'oai-ac', read: 0.125 }, output: 10 },
     benchmark: { cbaElo: 1481 },
   },
@@ -68,7 +71,7 @@ export const _knownOpenAIChatModels: ManualMappings = [
     maxCompletionTokens: 128000,
     trainingDataCutoff: 'May 30, 2024',
     interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Fn, LLM_IF_OAI_Json, LLM_IF_OAI_Reasoning, LLM_IF_OAI_Responses, LLM_IF_OAI_PromptCaching, LLM_IF_HOTFIX_NoTemperature],
-    parameterSpecs: [{ paramId: 'llmVndOaiReasoningEffort4' }, { paramId: 'llmVndOaiWebSearchContext' }],
+    parameterSpecs: [{ paramId: 'llmVndOaiReasoningEffort4' }, { paramId: 'llmVndOaiWebSearchContext' }, { paramId: 'llmVndOaiRestoreMarkdown' }],
     chatPrice: { input: 0.25, cache: { cType: 'oai-ac', read: 0.025 }, output: 2 },
     // benchmark: { cbaElo: TBD }, // Not yet available
   },
@@ -83,7 +86,7 @@ export const _knownOpenAIChatModels: ManualMappings = [
     maxCompletionTokens: 128000,
     trainingDataCutoff: 'May 30, 2024',
     interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Fn, LLM_IF_OAI_Json, LLM_IF_OAI_Reasoning, LLM_IF_OAI_Responses, LLM_IF_OAI_PromptCaching, LLM_IF_HOTFIX_NoTemperature],
-    parameterSpecs: [{ paramId: 'llmVndOaiReasoningEffort4' }, { paramId: 'llmVndOaiWebSearchContext' }],
+    parameterSpecs: [{ paramId: 'llmVndOaiReasoningEffort4' }, { paramId: 'llmVndOaiWebSearchContext' }, { paramId: 'llmVndOaiRestoreMarkdown' }],
    chatPrice: { input: 0.25, cache: { cType: 'oai-ac', read: 0.025 }, output: 2 },
     // benchmark: { cbaElo: TBD }, // Not yet available
   },