Compare commits

...

5 Commits

Author SHA1 Message Date
Enrico Ros 14724a864c OpenRouter: compensate for older typescript 2025-04-05 10:30:15 -07:00
Enrico Ros 5e2b196c4d OpenRouter: models list: prevent schema changes from breaking working models. Fixes #787 2025-04-05 10:26:44 -07:00
Enrico Ros e7686f60b1 OpenRouter: models list: ignore missing fields on 'openrouter/auto'. Fixes #787 2025-04-05 10:25:35 -07:00
Enrico Ros 380f666d35 Roll Gemini descriptions. Fixes #783 2025-03-29 12:35:29 -07:00
Enrico Ros 3e277b1a35 Optional desc. #783 2025-03-29 12:35:05 -07:00
6 changed files with 386 additions and 131 deletions
+366 -121
View File
@@ -1,6 +1,6 @@
import type { GeminiModelSchema } from './gemini.wiretypes';
import type { ModelDescriptionSchema } from '../llm.server.types';
import { LLM_IF_OAI_Chat, LLM_IF_OAI_Json, LLM_IF_OAI_Vision } from '../../store-llms';
import { LLM_IF_OAI_Chat, LLM_IF_OAI_Fn, LLM_IF_OAI_Json, LLM_IF_OAI_Vision } from '../../store-llms';
// dev options
@@ -12,7 +12,21 @@ const geminiChatInterfaces: GeminiModelSchema['supportedGenerationMethods'] = ['
// unsupported interfaces
const filterUnallowedNames = ['Legacy'];
const filterUnallowedInterfaces: GeminiModelSchema['supportedGenerationMethods'] = ['generateAnswer', 'embedContent', 'embedText'];
const filterUnallowedInterfaces: GeminiModelSchema['supportedGenerationMethods'] = [
'generateAnswer', // e.g. removes "models/aqa"
'embedContent', // e.g. removes "models/embedding-001"
'embedText', // e.g. removes "models/text-embedding-004"
'predict', // e.g. removes "models/imagen-3.0-generate-002" (appeared on 2025-02-09)
];
const filterLyingModelNames: GeminiModelSchema['name'][] = [
// 2025-02-27: verified, old model is no more
'models/gemini-2.0-flash-exp', // verified, replaced by gemini-2.0-flash, which is non-free anymore
// 2025-02-09 update: as of now they cleared the list, so we restart
// 2024-12-10: names of models that are not what they say they are (e.g. 1114 was actually serving 1121 as of 2024-12-10)
'models/gemini-1.5-flash-8b-exp-0924', // replaced by non-free
'models/gemini-1.5-flash-8b-exp-0827', // replaced by non-free
];
/* Manual models details
@@ -20,175 +34,405 @@ const filterUnallowedInterfaces: GeminiModelSchema['supportedGenerationMethods']
- Latest version gemini-1.0-pro-latest <model>-<generation>-<variation>-latest
- Latest stable version gemini-1.0-pro <model>-<generation>-<variation>
- Stable versions gemini-1.0-pro-001 <model>-<generation>-<variation>-<version>
Gemini capabilities chart (updated 2024-10-01):
- [table stakes] System instructions
- JSON Mode, with optional JSON Schema [NOTE: JSON Schema is poorly supported?]
- Adjustable Safety Settings
- Caching
- Tuning
- [good] Function calling, with configuration
- [great] Code execution
*/
// Shared pricing tables referenced by the hand-curated model list below.
// NOTE(review): figures are presumably USD per 1M tokens — confirm against Google's published price list.

// Experimental Gemini models are Free of charge
const geminiExpPricingFree: ModelDescriptionSchema['pricing'] = {
  // input: 'free', output: 'free',
};
const gemini20FlashPricing: ModelDescriptionSchema['pricing'] = {
  chatIn: 0.10, // inputAudio: 0.70,
  chatOut: 0.40,
};
const gemini20FlashLitePricing: ModelDescriptionSchema['pricing'] = {
  chatIn: 0.075,
  chatOut: 0.30,
};
// Note: 2.0 Flash Lite and 1.5 Flash carry the same price points in this table.
const gemini15FlashPricing: ModelDescriptionSchema['pricing'] = {
  chatIn: 0.075,
  chatOut: 0.30,
};
const gemini15Flash8BPricing: ModelDescriptionSchema['pricing'] = {
  chatIn: 0.0375,
  chatOut: 0.15,
};
const gemini15ProPricing: ModelDescriptionSchema['pricing'] = {
  chatIn: 1.25,
  chatOut: 5.00,
};
const _knownGeminiModels: ({
id: string,
labelOverride?: string,
isNewest?: boolean,
isPreview?: boolean
symLink?: string
} & Pick<ModelDescriptionSchema, 'interfaces' | 'pricing' | 'trainingDataCutoff' | 'hidden'>)[] = [
isPreview?: boolean,
symLink?: string,
deprecated?: string, // Gemini may provide deprecation dates
_delete?: boolean, // some gemini models are not acknowledged by Google Docs anymore, and leaving them in the list will confuse users
} & Pick<ModelDescriptionSchema, 'interfaces' | 'pricing' | 'hidden' | 'benchmark'>)[] = [
// Generation 1.5
/// Generation 2.5
// 2.5 Pro Experimental
{
id: 'models/gemini-2.5-pro-exp-03-25',
isPreview: true,
pricing: geminiExpPricingFree,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1443 },
},
/// Generation 2.0
// 2.0 Experimental - Pro
{
hidden: true, // showing the 2.5 instead
id: 'models/gemini-2.0-pro-exp-02-05', // Base model: Gemini 2.0 Pro
isPreview: true,
pricing: geminiExpPricingFree,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1380 },
},
{
hidden: true, // only keeping the latest
id: 'models/gemini-2.0-pro-exp',
symLink: 'models/gemini-2.0-pro-exp-02-05',
// copied from symlink
isPreview: true,
pricing: geminiExpPricingFree,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1380 },
},
{
_delete: true, // replaced by gemini-2.0-pro-exp-02-05, 2025-02-27: verified, old model is no more
id: 'models/gemini-exp-1206',
labelOverride: 'Gemini 2.0 Pro Experimental 1206',
isPreview: true,
pricing: geminiExpPricingFree,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1373 },
},
// 2.0 Experimental - Flash Thinking
{
hidden: true, // only keeping the latest
id: 'models/gemini-2.0-flash-thinking-exp', // alias to the latest Flash Thinking model
labelOverride: 'Gemini 2.0 Flash Thinking Experimental',
symLink: 'models/gemini-2.0-flash-thinking-exp-01-21',
// copied from symlink
pricing: geminiExpPricingFree,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision],
benchmark: { cbaElo: 1385 },
isPreview: true,
},
{
id: 'models/gemini-2.0-flash-thinking-exp-01-21',
pricing: geminiExpPricingFree,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision],
benchmark: { cbaElo: 1385 },
isPreview: true,
},
{
hidden: true, // replaced by gemini-2.0-flash-thinking-exp-01-21 - 2025-02-27: seems still different on the API, hence no deletion yet
id: 'models/gemini-2.0-flash-thinking-exp-1219',
labelOverride: 'Gemini 2.0 Flash Thinking Experimental 12-19',
pricing: geminiExpPricingFree,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision],
benchmark: { cbaElo: 1363 },
isPreview: true,
},
// 2.0 Experimental - Flash Image Generation
{
id: 'models/gemini-2.0-flash-exp-image-generation',
// labelOverride: 'Gemini 2.0 Flash Native Image Generation',
pricing: geminiExpPricingFree,
interfaces: [
LLM_IF_OAI_Chat, LLM_IF_OAI_Vision,
// LLM_IF_HOTFIX_StripSys0, // This first Gemini Image Generation model does not support the developer instruction
],
isPreview: true,
},
// 2.0 Flash
{
id: 'models/gemini-2.0-flash-001',
pricing: gemini20FlashPricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1358 },
},
{
id: 'models/gemini-2.0-flash',
symLink: 'models/gemini-2.0-flash-001',
// copied from symlink
pricing: gemini20FlashPricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1358 },
},
// 2.0 Flash Lite
{
id: 'models/gemini-2.0-flash-lite',
pricing: gemini20FlashLitePricing,
symLink: 'models/gemini-2.0-flash-lite-001',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1309 },
},
{
id: 'models/gemini-2.0-flash-lite-001',
pricing: gemini20FlashLitePricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1309 },
},
{
hidden: true, // discouraged, as the official is out
id: 'models/gemini-2.0-flash-lite-preview-02-05',
isPreview: true,
pricing: gemini20FlashLitePricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1309 },
},
{
id: 'models/gemini-2.0-flash-lite-preview',
symLink: 'models/gemini-2.0-flash-lite-preview-02-05',
// copied from symlink
isPreview: true,
pricing: gemini20FlashLitePricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1309 },
},
/// Generation 1.5
// Gemini 1.5 Flash Models
{
id: 'models/gemini-1.5-flash-latest', // updated regularly and might be a preview version
isNewest: true,
isPreview: true,
pricing: {
chatIn: 0.70, // 0.35 up to 128k tokens, 0.70 prompts > 128k tokens
chatOut: 2.10, // 1.05 up to 128k tokens, 2.10 prompts > 128k tokens
},
trainingDataCutoff: 'May 2024',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json], // input: audio, images and text
pricing: gemini15FlashPricing,
// symLink: '-002 or newer',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
},
{
id: 'models/gemini-1.5-flash',
// copied from above
pricing: {
chatIn: 0.70, // 0.35 up to 128k tokens, 0.70 prompts > 128k tokens
chatOut: 2.10, // 1.05 up to 128k tokens, 2.10 prompts > 128k tokens
},
trainingDataCutoff: 'Apr 2024',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json], // input: audio, images and text
// Defaults to version 002 on Oct 8, 2024
symLink: 'models/gemini-1.5-flash-002',
pricing: gemini15FlashPricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1271 },
},
{
id: 'models/gemini-1.5-flash-002', // new stable version
pricing: gemini15FlashPricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1271 },
hidden: true,
},
{
id: 'models/gemini-1.5-flash-001',
// copied from above
pricing: {
chatIn: 0.70, // 0.35 up to 128k tokens, 0.70 prompts > 128k tokens
chatOut: 2.10, // 1.05 up to 128k tokens, 2.10 prompts > 128k tokens
},
trainingDataCutoff: 'Apr 2024',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json], // input: audio, images and text
id: 'models/gemini-1.5-flash-001', // previous stable version
pricing: gemini15FlashPricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1227 },
hidden: true,
},
{
id: 'models/gemini-1.5-flash-001-tuning', // supports model tuning
pricing: gemini15FlashPricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn /* Tuning ... */],
hidden: true,
},
// Gemini 1.5 Flash-8B Models
{
id: 'models/gemini-1.5-pro-latest', // updated regularly and might be a preview version
isNewest: true,
isPreview: true,
pricing: {
chatIn: 7.00, // $3.50 / 1 million tokens (for prompts up to 128K tokens), $7.00 / 1 million tokens (for prompts longer than 128K)
chatOut: 21.00, // $10.50 / 1 million tokens (128K or less), $21.00 / 1 million tokens (128K+)
},
trainingDataCutoff: 'May 2024',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json], // input: audio, images and text
id: 'models/gemini-1.5-flash-8b-latest',
isPreview: false,
pricing: gemini15Flash8BPricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
},
{
id: 'models/gemini-1.5-pro', // latest stable -> 001
// copied from above
pricing: {
chatIn: 7.00, // $3.50 / 1 million tokens (for prompts up to 128K tokens), $7.00 / 1 million tokens (for prompts longer than 128K)
chatOut: 21.00, // $10.50 / 1 million tokens (128K or less), $21.00 / 1 million tokens (128K+)
},
trainingDataCutoff: 'Apr 2024',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json],
id: 'models/gemini-1.5-flash-8b',
symLink: 'models/gemini-1.5-flash-8b-001',
pricing: gemini15Flash8BPricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1213 },
},
{
id: 'models/gemini-1.5-flash-8b-001',
pricing: gemini15Flash8BPricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1213 },
hidden: true,
},
// Gemini 1.5 Pro Models
{
id: 'models/gemini-1.5-pro-latest', // updated to latest stable version
pricing: gemini15ProPricing,
// symLink: '-002 or newer',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
},
{
id: 'models/gemini-1.5-pro',
symLink: 'models/gemini-1.5-pro-002',
pricing: gemini15ProPricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1302 },
},
{
id: 'models/gemini-1.5-pro-002',
pricing: gemini15ProPricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1302 },
hidden: true,
},
{
id: 'models/gemini-1.5-pro-001', // stable snapshot
// copied from above
pricing: {
chatIn: 7.00, // $3.50 / 1 million tokens (for prompts up to 128K tokens), $7.00 / 1 million tokens (for prompts longer than 128K)
chatOut: 21.00, // $10.50 / 1 million tokens (128K or less), $21.00 / 1 million tokens (128K+)
},
trainingDataCutoff: 'Apr 2024',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json],
id: 'models/gemini-1.5-pro-001',
pricing: gemini15ProPricing,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json, LLM_IF_OAI_Fn],
benchmark: { cbaElo: 1260 },
hidden: true,
},
// Generation 1.0
{
id: 'models/gemini-1.0-pro-latest',
pricing: {
chatIn: 0.50,
chatOut: 1.50,
},
interfaces: [LLM_IF_OAI_Chat],
},
{
id: 'models/gemini-1.0-pro',
pricing: {
chatIn: 0.50,
chatOut: 1.50,
},
interfaces: [LLM_IF_OAI_Chat],
hidden: true,
},
{
id: 'models/gemini-1.0-pro-001',
pricing: {
chatIn: 0.50,
chatOut: 1.50,
},
interfaces: [LLM_IF_OAI_Chat],
hidden: true,
},
/// Generation 1.0
// Generation 1.0 + Vision
// Gemini 1.0 Pro Vision Model
{
id: 'models/gemini-1.0-pro-vision-latest',
pricing: {
chatIn: 0.50,
chatOut: 1.50,
},
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision], // Text and Images
hidden: true,
},
// Older symlinks
{
id: 'models/gemini-pro',
symLink: 'models/gemini-1.0-pro',
// copied from symlinked
pricing: {
chatIn: 0.50,
chatOut: 1.50,
},
interfaces: [LLM_IF_OAI_Chat],
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision],
hidden: true,
_delete: true, // confusing
},
{
id: 'models/gemini-pro-vision',
// copied from symlinked
symLink: 'models/gemini-1.0-pro-vision',
pricing: {
chatIn: 0.50,
chatOut: 1.50,
},
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision], // Text and Images
hidden: true,
_delete: true, // confusing
},
/// Experimental
// LearnLM Experimental Model
{
id: 'models/learnlm-1.5-pro-experimental',
isPreview: true,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision],
pricing: geminiExpPricingFree,
// hidden: true,
// _delete: true,
},
{
id: 'models/gemma-3-27b-it',
isPreview: true,
interfaces: [
LLM_IF_OAI_Chat,
// LLM_IF_HOTFIX_StripImages, /* "Image input modality is not enabled for models/gemma-3-27b-it" */
// LLM_IF_HOTFIX_Sys0ToUsr0, /* "Developer instruction is not enabled for models/gemma-3-27b-it" */
],
// pricing: geminiExpPricingFree,
// hidden: true,
// _delete: true,
},
];
/**
 * Returns true when a Gemini model should be kept in the models list.
 * Drops: models whose displayName contains a blocked word (filterUnallowedNames),
 * models supporting only unallowed generation methods (embeddings, answers,
 * prediction — filterUnallowedInterfaces), and models whose id is known to
 * serve a different model than advertised (filterLyingModelNames).
 *
 * Fix: the previous text contained a stale early `return isAllowed && isSupported;`
 * (a diff-merge artifact) that left the third predicate as dead code; all three
 * checks now participate in the single return.
 */
export function geminiFilterModels(geminiModel: GeminiModelSchema): boolean {
  const isAllowed = !filterUnallowedNames.some(name => geminiModel.displayName.includes(name));
  const isSupported = !filterUnallowedInterfaces.some(iface => geminiModel.supportedGenerationMethods.includes(iface));
  const isWhatItSaysItIs = !filterLyingModelNames.includes(geminiModel.name);
  return isAllowed && isSupported && isWhatItSaysItIs;
}
// Sort-priority table for geminiSortModels: earlier prefix => sorts higher.
// NOTE(review): findIndex takes the FIRST matching prefix, so ids like
// 'models/gemini-2.0-flash-lite-*' match 'models/gemini-2.0-flash' before ever
// reaching their own later entry (same for -1.5-flash-8b) — confirm grouping intent.
// Fix: removed the `as const` assertion — a `readonly` tuple is not assignable
// to the declared mutable `string[]` type, which is a compile error in strict TS.
const _sortOderIdPrefix: string[] = [
  'models/gemini-exp',
  'models/gemini-2.5-pro',
  'models/gemini-2.0-pro',
  'models/gemini-2.0-flash-exp-image-generation',
  'models/gemini-2.0-flash-thinking',
  'models/gemini-2.0-flash-0',
  'models/gemini-2.0-flash',
  'models/gemini-2.0-flash-lite',
  'models/gemini-1.5-pro',
  'models/gemini-1.5-flash',
  'models/gemini-1.5-flash-8b',
  'models/gemini-1.0-pro',
  'models/gemini-pro',
  'models/gemma',
  'models/learnlm',
];
/**
 * Comparator for the Gemini models list.
 * Order: symlink entries ('🔗'-prefixed labels) sink to the bottom, hidden
 * models sort after visible ones, then models group by the _sortOderIdPrefix
 * priority table, with ties broken by label descending (so later-dated labels
 * come first).
 */
export function geminiSortModels(a: ModelDescriptionSchema, b: ModelDescriptionSchema): number {
  // links to the bottom
  const aIsLink = a.label.startsWith('🔗');
  const bIsLink = b.label.startsWith('🔗');
  if (aIsLink && !bIsLink) return 1;
  if (!aIsLink && bIsLink) return -1;
  // hidden to the bottom, then names descending
  if (a.hidden && !b.hidden) return 1;
  if (!a.hidden && b.hidden) return -1;
  // NOTE(review): the commented-out pair below duplicates the active hidden
  // check above — looks like a leftover from toggling this rule in a diff;
  // confirm which state is intended.
  // if (a.hidden && !b.hidden) return 1;
  // if (!a.hidden && b.hidden) return -1;
  // models beginning with 'gemini-' to the top
  // const aGemini = a.label.startsWith('Gemini');
  // const bGemini = b.label.startsWith('Gemini');
  // if (aGemini && !bGemini) return -1;
  // if (!aGemini && bGemini) return 1;
  // sort by sortOrderIdPrefix (first matching prefix wins)
  const aSortIdx = _sortOderIdPrefix.findIndex(p => a.id.startsWith(p));
  const bSortIdx = _sortOderIdPrefix.findIndex(p => b.id.startsWith(p));
  if (aSortIdx !== -1 && bSortIdx !== -1) {
    if (aSortIdx < bSortIdx) return -1;
    if (aSortIdx > bSortIdx) return 1;
  }
  // sort by label descending
  return b.label.localeCompare(a.label);
}
export function geminiModelToModelDescription(geminiModel: GeminiModelSchema): ModelDescriptionSchema {
export function geminiModelToModelDescription(geminiModel: GeminiModelSchema): ModelDescriptionSchema | null {
const { description, displayName, name: modelId, supportedGenerationMethods } = geminiModel;
if (DEV_DEBUG_GEMINI_MODELS)
console.log('geminiModelToModelDescription', geminiModel);
// if (DEV_DEBUG_GEMINI_MODELS)
// console.log('geminiModelToModelDescription', geminiModel);
// find known manual mapping
const knownModel = _knownGeminiModels.find(m => m.id === modelId);
if (!knownModel && DEV_DEBUG_GEMINI_MODELS)
console.warn('geminiModelToModelDescription: unknown model', modelId, geminiModel);
// handle _delete
if (knownModel?._delete)
return null;
// handle symlinks
const label = knownModel?.symLink
? `🔗 ${displayName.replace('1.0', '')}${knownModel.symLink}`
: displayName;
let label = knownModel?.symLink
? `🔗 ${knownModel?.labelOverride || displayName}${knownModel.symLink}`
: knownModel?.labelOverride || displayName;
// FIX: the Gemini 1114 model now returns 1121 as the version.. highlight the issue
// if (geminiModel.name.endsWith('1114') && label.endsWith('1121'))
// label += ' (really: 1114)';
// handle hidden models
const hasChatInterfaces = supportedGenerationMethods.some(iface => geminiChatInterfaces.includes(iface));
@@ -200,14 +444,13 @@ export function geminiModelToModelDescription(geminiModel: GeminiModelSchema): M
// description
const { version, topK, topP, temperature } = geminiModel;
const descriptionLong = description + ` (Version: ${version}, Defaults: temperature=${temperature}, topP=${topP}, topK=${topK}, interfaces=[${supportedGenerationMethods.join(',')}])`;
const descriptionLong = (description || 'No description.') + ` (Version: ${version}, Defaults: temperature=${temperature}, topP=${topP}, topK=${topK}, interfaces=[${supportedGenerationMethods.join(',')}])`;
// use known interfaces, or add chat if this is a generateContent model
const interfaces: ModelDescriptionSchema['interfaces'] = knownModel?.interfaces || [];
if (!interfaces.length && hasChatInterfaces) {
interfaces.push(LLM_IF_OAI_Chat);
// if (geminiVisionNames.some(name => modelId.includes(name)))
// interfaces.push(LLM_IF_OAI_Vision);
// newer models get good capabilities by default
interfaces.push(LLM_IF_OAI_Chat, LLM_IF_OAI_Fn, LLM_IF_OAI_Vision, LLM_IF_OAI_Json);
}
return {
@@ -218,11 +461,13 @@ export function geminiModelToModelDescription(geminiModel: GeminiModelSchema): M
description: descriptionLong,
contextWindow: contextWindow,
maxCompletionTokens: outputTokenLimit,
trainingDataCutoff: knownModel?.trainingDataCutoff,
// trainingDataCutoff: knownModel?.trainingDataCutoff, // disabled as we don't get this from Gemini
interfaces,
// parameterSpecs: knownModel?.parameterSpecs,
// rateLimits: isGeminiPro ? { reqPerMinute: 60 } : undefined,
// benchmarks: ...
pricing: knownModel?.pricing, // TODO: needs <>128k, and per-character and per-image pricing
benchmark: knownModel?.benchmark,
pricing: knownModel?.pricing,
hidden,
// deprecated: knownModel?.deprecated,
};
}
@@ -8,7 +8,7 @@ import { createTRPCRouter, publicProcedure } from '~/server/api/trpc.server';
import { fetchJsonOrTRPCError } from '~/server/api/trpc.router.fetchers';
import { fixupHost } from '~/common/util/urlUtils';
import { llmsChatGenerateOutputSchema, llmsGenerateContextSchema, llmsListModelsOutputSchema } from '../llm.server.types';
import { llmsChatGenerateOutputSchema, llmsGenerateContextSchema, llmsListModelsOutputSchema, type ModelDescriptionSchema } from '../llm.server.types';
import { OpenAIHistorySchema, openAIHistorySchema, OpenAIModelSchema, openAIModelSchema } from '../openai/openai.router';
@@ -148,9 +148,10 @@ export const llmGeminiRouter = createTRPCRouter({
// as the List API already has all the info on all the models
// map to our output schema
const models = detailedModels
const models = (detailedModels
.filter(geminiFilterModels)
.map(geminiModel => geminiModelToModelDescription(geminiModel))
.filter(model => !!model) as ModelDescriptionSchema[])
.sort(geminiSortModels);
return {
@@ -29,7 +29,7 @@ const geminiModelSchema = z.object({
name: z.string(),
version: z.string(),
displayName: z.string(),
description: z.string(),
description: z.string().optional(),
inputTokenLimit: z.number().int().min(1),
outputTokenLimit: z.number().int().min(1),
supportedGenerationMethods: z.array(z.union([Methods_enum, z.string()])), // relaxed with z.union to not break on expansion
@@ -742,10 +742,14 @@ export function openRouterModelFamilySortFn(a: { id: string }, b: { id: string }
return aPrefixIndex !== -1 ? -1 : 1;
}
export function openRouterModelToModelDescription(wireModel: object): ModelDescriptionSchema {
export function openRouterModelToModelDescription(wireModel: object): ModelDescriptionSchema | null {
// parse the model
const model = wireOpenrouterModelsListOutputSchema.parse(wireModel);
const { data: model, error } = wireOpenrouterModelsListOutputSchema.safeParse(wireModel);
if (error) {
console.warn(`openrouterModelToModelDescription: Failed to parse model: ${error}`);
return null;
}
// parse pricing
const pricing: ModelDescriptionSchema['pricing'] = {
@@ -256,7 +256,8 @@ export const llmOpenAIRouter = createTRPCRouter({
case 'openrouter':
models = openAIModels
.sort(openRouterModelFamilySortFn)
.map(openRouterModelToModelDescription);
.map(openRouterModelToModelDescription)
.filter(desc => !!desc) as ModelDescriptionSchema[];
break;
}
@@ -5,16 +5,20 @@ export const wireOpenrouterModelsListOutputSchema = z.object({
id: z.string(),
name: z.string(),
description: z.string(),
// NOTE: for 'openrouter/auto', this is: {
// "prompt": "-1",
// "completion": "-1"
// }
pricing: z.object({
prompt: z.string(),
completion: z.string(),
image: z.string(),
request: z.string(),
image: z.string().optional(),
request: z.string().optional(),
}),
context_length: z.number(),
architecture: z.object({
modality: z.string(), // z.enum(['text', 'multimodal']),
tokenizer: z.string(), // e.g. 'Mistral'
modality: z.string(), // z.enum(['text', 'multimodal', 'text+image->text]),
tokenizer: z.string(), // e.g. 'Mistral', 'Claude'
instruct_type: z.string().nullable(),
}),
top_provider: z.object({