This commit is contained in:
reanon
2025-10-06 21:01:39 +02:00
parent b16bb6d17d
commit 8b757bf89f
11 changed files with 63 additions and 1 deletions
+1 -1
View File
@@ -963,6 +963,6 @@ function parseCsv(val: string): string[] {
function getDefaultModelFamilies(): ModelFamily[] {
return MODEL_FAMILIES.filter(
(f) => !f.includes("o1-pro") && !f.includes("o3-pro")
(f) => !f.includes("o1-pro") && !f.includes("o3-pro") && !f.includes("gpt5-pro")
) as ModelFamily[];
}
+3
View File
@@ -44,6 +44,7 @@ const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
gpt5: "GPT-5",
"gpt5-mini": "GPT-5 Mini",
"gpt5-nano": "GPT-5 Nano",
"gpt5-pro": "GPT-5 Pro",
"gpt5-chat-latest": "GPT-5 Chat Latest",
gpt45: "GPT-4.5",
o1: "OpenAI o1",
@@ -85,6 +86,7 @@ const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
"azure-gpt5": "Azure GPT-5",
"azure-gpt5-mini": "Azure GPT-5 Mini",
"azure-gpt5-nano": "Azure GPT-5 Nano",
"azure-gpt5-pro": "GPT-5 Pro (Azure)",
"azure-gpt5-chat-latest": "Azure GPT-5 Chat Latest",
"azure-o1": "Azure o1",
"azure-o1-mini": "Azure o1 mini",
@@ -402,3 +404,4 @@ if (config.enableInfoPageLogin) {
/* ─── Removed the public /status route : simply not added ─── */
export { infoPageRouter };
+1
View File
@@ -16,6 +16,7 @@ export { transformOutboundPayload } from "./preprocessors/transform-outbound-pay
export { validateContextSize } from "./preprocessors/validate-context-size";
export { validateModelFamily } from "./preprocessors/validate-model-family";
export { validateVision } from "./preprocessors/validate-vision";
export { validateStreaming } from "./preprocessors/validate-streaming";
export { extractQwenExtraBody } from "./preprocessors/extract-qwen-extra-body";
// Proxy request mutators (runs every time request is dequeued, before proxying, usually for auth/signing)
@@ -12,6 +12,7 @@ import {
validateContextSize,
validateModelFamily,
validateVision,
validateStreaming,
applyQuotaLimits,
} from ".";
@@ -57,6 +58,7 @@ export const createPreprocessorMiddleware = (
...(afterTransform ?? []),
validateContextSize,
validateVision,
validateStreaming,
validateModelFamily,
applyQuotaLimits,
];
@@ -102,6 +102,19 @@ function applyOpenAIResponsesTransform(req: Request): void {
}
}
// Handle reasoning_effort for models that require it
const model = req.body.model || "";
const isO3Pro = model === "o3-pro" || model.startsWith("o3-pro-");
const isGpt5Pro = model === "gpt-5-pro" || model.startsWith("gpt-5-pro-");
// o3-pro and gpt-5-pro default to and only support "high" reasoning effort
if (isO3Pro || isGpt5Pro) {
if (!req.body.reasoning_effort || req.body.reasoning_effort !== "high") {
req.body.reasoning_effort = "high";
req.log.info({ model, reasoning_effort: "high" }, "Set reasoning_effort to 'high' (required for this model)");
}
}
req.log.info({
originalModel: originalBody.model,
newFormat: "openai-responses"
@@ -102,6 +102,8 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
} else if (model.match(/^o1-pro(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o3-pro(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 400000;
} else if (model.match(/^gpt-5-pro(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o1-preview(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 128000;
@@ -0,0 +1,30 @@
import { BadRequestError } from "../../../../shared/errors";
import { RequestPreprocessor } from "../index";
/**
* Throws an error if streaming is requested for models that don't support it
* (o3-pro, o1-pro, gpt-5-pro and their variants).
*/
/**
 * Rejects requests that ask for streaming on models that cannot stream
 * (o3-pro, o1-pro, gpt-5-pro and their dated/suffixed variants).
 *
 * @throws BadRequestError when `stream` is truthy-ish for a blocked model.
 */
export const validateStreaming: RequestPreprocessor = (req) => {
  const { model, stream } = req.body;

  // `stream` may arrive as a real boolean or the string "true".
  const wantsStream = stream === true || stream === "true";
  if (!wantsStream) {
    return;
  }

  // Substring match, case-insensitive, so dated variants are caught too.
  const normalizedModel = String(model).toLowerCase();
  const blockedFamilies = ["o3-pro", "o1-pro", "gpt-5-pro"];
  for (const family of blockedFamilies) {
    if (normalizedModel.includes(family)) {
      throw new BadRequestError(
        "Streaming is not supported for this model. The dev is too lazy to implement it. Please set stream=false."
      );
    }
  }
};
+1
View File
@@ -371,6 +371,7 @@ const setupChunkedTransfer: RequestHandler = (req, res, next) => {
/**
 * Decides whether a model must be routed through the OpenAI Responses API
 * instead of Chat Completions.
 *
 * Matches o1-pro, o3-pro, gpt-5-pro, codex-mini, and gpt-5-codex models,
 * including dated/suffixed variants, via prefix match.
 *
 * Fix: each original `model === X || model.startsWith(X)` pair was redundant —
 * `startsWith(X)` already covers exact equality — so the five dead equality
 * checks are removed with no behavior change.
 */
function shouldUseResponsesApi(model: string): boolean {
  const responsesApiPrefixes = [
    "o1-pro",
    "o3-pro",
    "gpt-5-pro",
    "codex-mini",
    "gpt-5-codex",
  ];
  return responsesApiPrefixes.some((prefix) => model.startsWith(prefix));
}
+2
View File
@@ -56,6 +56,7 @@ const MODEL_FAMILY_ORDER: ModelFamily[] = [
"gpt5-mini",
"gpt5-nano",
"gpt5-chat-latest",
"gpt5-pro",
"o1",
"o1-mini",
"o1-pro",
@@ -80,6 +81,7 @@ const MODEL_FAMILY_ORDER: ModelFamily[] = [
"azure-gpt5-mini",
"azure-gpt5-nano",
"azure-gpt5-chat-latest",
"azure-gpt5-pro",
"azure-o1",
"azure-o1-mini",
"azure-o1-pro",
+6
View File
@@ -36,6 +36,7 @@ export type OpenAIModelFamily =
| "gpt5-mini"
| "gpt5-nano"
| "gpt5-chat-latest"
| "gpt5-pro"
| "o1"
| "o1-mini"
| "o1-pro"
@@ -104,6 +105,7 @@ export const MODEL_FAMILIES = (<A extends readonly ModelFamily[]>(
"gpt5-mini",
"gpt5-nano",
"gpt5-chat-latest",
"gpt5-pro",
"o1",
"o1-mini",
"o1-pro",
@@ -144,6 +146,7 @@ export const MODEL_FAMILIES = (<A extends readonly ModelFamily[]>(
"azure-gpt5-mini",
"azure-gpt5-nano",
"azure-gpt5-chat-latest",
"azure-gpt5-pro",
"azure-dall-e",
"azure-o1",
"azure-o1-mini",
@@ -196,6 +199,7 @@ export const MODEL_FAMILY_SERVICE: {
"gpt5-mini": "openai",
"gpt5-nano": "openai",
"gpt5-chat-latest": "openai",
"gpt5-pro": "openai",
"o1": "openai",
"o1-mini": "openai",
"o1-pro": "openai",
@@ -229,6 +233,7 @@ export const MODEL_FAMILY_SERVICE: {
"azure-gpt5-mini": "azure",
"azure-gpt5-nano": "azure",
"azure-gpt5-chat-latest": "azure",
"azure-gpt5-pro": "azure",
"azure-dall-e": "azure",
"azure-o1": "azure",
"azure-o1-mini": "azure",
@@ -256,6 +261,7 @@ export const OPENAI_MODEL_FAMILY_MAP: { [regex: string]: OpenAIModelFamily } = {
"^gpt-5-mini(-\\d{4}-\\d{2}-\\d{2})?$": "gpt5-mini",
"^gpt-5-nano(-\\d{4}-\\d{2}-\\d{2})?$": "gpt5-nano",
"^gpt-5-chat-latest(-\\d{4}-\\d{2}-\\d{2})?$": "gpt5-chat-latest",
"^gpt-5-pro(-\\d{4}-\\d{2}-\\d{2})?$": "gpt5-pro",
"^gpt-4\\.5(-preview)?(-\\d{4}-\\d{2}-\\d{2})?$": "gpt45",
"^gpt-4\\.1(-\\d{4}-\\d{2}-\\d{2})?$": "gpt41",
"^gpt-4\\.1-mini(-\\d{4}-\\d{2}-\\d{2})?$": "gpt41-mini",
+2
View File
@@ -20,6 +20,8 @@ const MODEL_PRICING: Record<ModelFamily, { input: number; output: number } | und
"azure-gpt5-nano": { input: 0.05, output: 0.40 },
"gpt5-chat-latest": { input: 1.25, output: 10.00 },
"azure-gpt5-chat-latest": { input: 1.25, output: 10.00 },
"gpt5-pro": { input: 15.00, output: 120.00 },
"azure-gpt5-pro": { input: 15.00, output: 120.00 },
"gpt45": { input: 75.00, output: 150.00 }, // Example, needs verification if this model family is still current with this pricing
"azure-gpt45": { input: 75.00, output: 150.00 }, // Example, needs verification
"gpt4o": { input: 2.50, output: 10.00 },