diff --git a/tools/develop/llm-parameter-sweep/sweep-config.template.json b/tools/develop/llm-parameter-sweep/sweep-config.template.json index d55ead941..6a2fe81e6 100644 --- a/tools/develop/llm-parameter-sweep/sweep-config.template.json +++ b/tools/develop/llm-parameter-sweep/sweep-config.template.json @@ -7,7 +7,8 @@ "openai": { "access": { "dialect": "openai", "oaiKey": "sk-...", "oaiOrg": "", "oaiHost": "", "heliKey": "" }, "modelFilter": ["gpt-5", "o"], - "sweeps": ["temperature", "oai-reasoning-effort", "oai-verbosity", "oai-image-generation", "oai-web-search"] + "sweeps": ["temperature", "oai-temperature-think-high", "oai-temperature-think-none", "oai-reasoning-effort", "oai-verbosity", "oai-image-generation", "oai-web-search"], + "baseModelOverrides": { "maxTokens": 4096 } }, "anthropic": { "access": { "dialect": "anthropic", "anthropicKey": "sk-ant-...", "anthropicHost": null, "heliconeKey": null }, diff --git a/tools/develop/llm-parameter-sweep/sweep.ts b/tools/develop/llm-parameter-sweep/sweep.ts index c06871dff..8b7dd40dc 100644 --- a/tools/develop/llm-parameter-sweep/sweep.ts +++ b/tools/develop/llm-parameter-sweep/sweep.ts @@ -37,6 +37,24 @@ const SWEEP_DEFINITIONS = [ mode: 'enumerate', }), + // OpenAI: temperature with/without reasoning + defineSweep({ + name: 'oai-temperature-think-high', + description: 'Temperature acceptance range with reasoning effort forced to high', + applicability: { type: 'all' }, + applyToModel: (value) => ({ temperature: value, reasoningEffort: 'high' }), + values: [0, 0.5, 1.0, 1.5, 2.0], + mode: 'enumerate', + }), + defineSweep({ + name: 'oai-temperature-think-none', + description: 'Temperature acceptance range with reasoning disabled (effort none)', + applicability: { type: 'all' }, + applyToModel: (value) => ({ temperature: value, reasoningEffort: 'none' }), + values: [0, 0.5, 1.0, 1.5, 2.0], + mode: 'enumerate', + }), + // OpenAI: reasoning effort (Chat Completions + Responses API) defineSweep({ name: 'oai-reasoning-effort',