diff --git a/src/modules/aix/server/dispatch/wiretypes/openai.wiretypes.ts b/src/modules/aix/server/dispatch/wiretypes/openai.wiretypes.ts index a450cb3cf..a4730d7b8 100644 --- a/src/modules/aix/server/dispatch/wiretypes/openai.wiretypes.ts +++ b/src/modules/aix/server/dispatch/wiretypes/openai.wiretypes.ts @@ -620,42 +620,27 @@ export namespace OpenAIWire_API_Images_Generations { export type Request = z.infer<typeof Request_schema>; const Request_schema = z.object({ - /** - * The maximum length is - * - 32000 characters for gpt-image-1 - * - 4000 for dall-e-3 - * - 1000 for dall-e-2 - */ + // 32,000 for gpt-image-1, 4,000 for dall-e-3, 1,000 for dall-e-2 prompt: z.string().max(32000), - /** - * The model to use for image generation. - * The default is `dall-e-2` unless a parameter specific to `gpt-image-1` is set. - */ model: z.enum([ 'gpt-image-1', 'dall-e-3', 'dall-e-2', // default ]).optional(), - /** - * The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported. - */ + // The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported. n: z.number().min(1).max(10).nullable().optional(), - /** - * The quality of the image that will be generated - */ + // Image quality quality: z.enum([ 'auto', // default 'high', 'medium', 'low', // gpt-image-1 'hd', 'standard', // dall-e-3: hd | standard, dall-e-2: only standard ]).optional(), - /** - * The format in which generated images with dall-e-2 and dall-e-3 are returned. - * `gpt-image-1` will always return base64-encoded images and does NOT support this parameter. - */ + // The format in which generated images with dall-e-2 and dall-e-3 are returned. + // `gpt-image-1` will always return base64-encoded images and does NOT support this parameter.
response_format: z.enum(['url', 'b64_json']).optional(), // size of the generated images @@ -684,12 +669,12 @@ export namespace OpenAIWire_API_Images_Generations { // Control the content-moderation level for images generated by gpt-image-1. moderation: z.enum(['low', 'auto' /* default */]).optional(), - // WEBP/JPEG compression level for gpt-image-1 - output_compression: z.number().min(0).max(100).int().optional(), - // The format in which the generated images are returned output_format: z.enum(['png' /* default */, 'jpeg', 'webp']).optional(), + // WEBP/JPEG compression level for gpt-image-1 + output_compression: z.number().min(0).max(100).int().optional(), + // -- Dall-E 3 Specific Parameters -- @@ -721,6 +706,62 @@ export namespace OpenAIWire_API_Images_Generations { } +// Images > Edit Image +export namespace OpenAIWire_API_Images_Edits { + + export type Request = z.infer<typeof Request_schema>; + + /** + * This API method only accepts 'multipart/form-data' requests. + * The request body must be a FormData object, which we build outside. + * The spec below represents the first part. + */ + export const Request_schema = z.object({ + + // 32,000 for gpt-image-1, 1,000 for dall-e-2 + prompt: z.string().max(32000), + + // image: file | file[] - REQUIRED - Handled as file uploads in FormData ('image' field) + + // mask: file - OPTIONAL - Handled as file upload in FormData ('mask' field) + + model: z.enum(['gpt-image-1', 'dall-e-2']).optional(), + + // Number of images to generate, between 1 and 10 + n: z.number().min(1).max(10).nullable().optional(), + + // Image quality + quality: z.enum([ + 'auto', // default + 'high', 'medium', 'low', // gpt-image-1 + 'standard', // dall-e-2: only standard + ]).optional(), + + // response_format: string - OPTIONAL - Defaults to 'url'. Only for DALL-E 2. gpt-image-1 always returns b64_json. + // OMITTED here as we'll enforce b64_json or handle it based on model if DALL-E 2 edit were supported.
+ + // size of the generated images + size: z.enum([ + 'auto', // GI (or default if omitted) + '256x256', // D2 + '512x512', // D2 + '1024x1024', // GI D2 + // landscape + '1536x1024', // GI + // portrait + '1024x1536', // GI + ]).optional(), + + // optional unique identifier representing your end-user + user: z.string().optional(), + + }); + + // The response schema is identical to OpenAIWire_API_Images_Generations.Response_schema + export type Response = OpenAIWire_API_Images_Generations.Response; + +} + // // Models > List Models