223 Commits

Author SHA1 Message Date
reanon 9c0a4fd3a7 401 is universal 2025-08-08 13:14:54 +02:00
reanon bc85a71c2a No more error user messages 2025-08-08 12:58:41 +02:00
reanon 1604246cf1 more stupid mistakes (auto reenqueue) 2025-08-08 12:45:32 +02:00
reanon 82028d14b7 stupid mistake 2025-08-08 11:47:48 +02:00
reanon f23315d233 deepseek quota - retry internally instead of throwing error 2025-08-08 11:42:59 +02:00
reanon 1bf6d6ac99 gemini wtf 2025-08-08 02:18:20 +02:00
reanon 09ce6a70d2 sam.. 2025-08-07 23:51:49 +02:00
reanon 0f8581d340 forgot o4 mini 2025-08-07 23:25:47 +02:00
reanon e8c5d06cd7 proper streaming filter 2025-08-07 23:22:34 +02:00
reanon 20c9920199 gpt5 streaming = use only verified orgs 2025-08-07 22:51:38 +02:00
reanon 253a2af13f juust in case 2025-08-07 22:23:11 +02:00
reanon 2af4a02b15 forgot some models in order listing 2025-08-07 21:54:04 +02:00
reanon c8dab8786a gpt5 2025-08-07 21:03:03 +02:00
reanon 9cc86c2d68 opus 4.1 2025-08-05 19:51:02 +02:00
reanon e974da8a58 gemini penalties 2025-07-27 07:36:48 +02:00
reanon f114469057 google is idiotic 2025-07-23 23:19:54 +02:00
reanon 6e02db4bd7 another firebase fix? 2025-07-20 04:03:11 +02:00
reanon 1f9af4374d Revert "grok update"
This reverts commit 79a7dee586
2025-07-14 15:00:34 -08:00
reanon 79a7dee586 grok update 2025-07-14 14:59:11 -08:00
reanon e1bd960bb7 moon blinded me 2025-07-15 00:08:04 +02:00
reanon 867fda430b Moon shot back 2025-07-14 21:21:41 +02:00
reanon bbd2b88503 Lets shoot the moon 2025-07-14 21:10:36 +02:00
reanon 08400db220 grok4 2025-07-10 08:18:20 +02:00
reanon 5249e1c904 gemini -exp whitelist 2025-07-09 20:14:01 +02:00
reanon c18df6a546 firebase fix? 2025-07-05 17:23:30 +02:00
reanon ceedb52478 less stupid -ultra detection 2025-06-24 22:24:20 -08:00
reanon fa13d06f45 imagegen ultra ISNT gemini-ultra 2025-06-25 08:12:15 +02:00
reanon 0c0dc09020 fix disabled users being able to get to info page 2025-06-22 10:17:24 -08:00
reanon 317ef03ab4 2.0 flash has better limits, lets continue using that 2025-06-21 07:46:34 -08:00
reanon 7def7c17e4 gemini pro base instead of preview 2025-06-21 07:44:12 -08:00
reanon e201c2cf5e Merge branch 'main' into 'main'
disable thinking budget validation on gemini

See merge request reanon/nonono!6
2025-06-18 12:25:24 -08:00
Nopm edbbf056a0 remove zod validation 2025-06-18 17:20:11 -03:00
Nopm 4c214305af disable thinking budget validation on gemini 2025-06-18 17:11:39 -03:00
reanon cb8f2669ac update dep. 2025-06-17 21:42:43 +02:00
reanon ed737e43a5 Merge branch 'main' into 'main'
more quota fixes

See merge request reanon/nonono!5
2025-06-16 10:42:43 -08:00
Nopm 2bc1a7dbea moar fixes 2025-06-16 15:24:24 -03:00
Nopm 5ad22145a0 Merge branch 'main' of https://gitgud.io/reanon/nonono 2025-06-16 15:05:28 -03:00
Nopm b7ad5f1dae more quota fixes 2025-06-16 15:05:21 -03:00
reanon 2405be71c1 Merge branch 'main' into 'main'
fix quota handling with new user schema

See merge request reanon/nonono!4
2025-06-16 09:42:39 -08:00
Nopm aec3927c94 fix quota handling with new user schema 2025-06-16 11:51:39 -03:00
reanon ec82599e24 re-enqueue amazon bedrock 503 failed requests 2025-06-13 04:33:18 -08:00
Nopm 21294abd8e re-enqueue amazon bedrock 503 failed requests 2025-06-13 09:27:47 -03:00
reanon ca4a1f3252 magistral 2025-06-11 11:17:13 +02:00
reanon e0270f99ee Edit stats.ts 2025-06-11 00:49:49 -08:00
reanon 38e2980419 o3-pro 2025-06-11 10:47:48 +02:00
reanon 0102c7a6a5 o3 pricing 2025-06-10 11:53:51 -08:00
reanon b89439287e refactor: simplify Google AI model fetching by using synthetic response from existing keys 2025-06-08 13:41:29 +02:00
reanon 508bb3e08b claude error messages 2025-06-07 08:41:32 +02:00
reanon a17d087928 fix user json imports 2025-06-06 21:56:42 -08:00
Nopm 3f32a9b14d fix user json imports 2025-06-07 02:32:49 -03:00
reanon 3e11b0bf49 Merge branch 'main' into 'main'
Big update

See merge request reanon/nonono!1
2025-06-03 18:25:40 -08:00
Nopm 64d26c5c6c doc update 2025-06-03 23:19:09 -03:00
Nopm 41bc4998fc model pricing update 2025-06-03 23:09:39 -03:00
Nopm 4e3fb9d152 stop sqlite from dumping queries to console 2025-06-03 22:42:51 -03:00
Nopm 8c98fca56d fix pointless alt text when logo is empty 2025-06-03 21:49:37 -03:00
Nopm 2389b30e68 doc update 2025-06-03 21:47:39 -03:00
Nopm c066a7d46b password based service info auth (better than the first one we had) 2025-06-03 21:44:43 -03:00
Nopm 7b3cf409e4 google is dumb 2025-06-03 21:05:12 -03:00
Nopm 74cbafbb3b pro-exp BEGONE 2025-06-03 20:57:51 -03:00
Nopm 0411b4c3a6 I should have made all these commits separately but oops 2025-06-03 20:14:07 -03:00
reanon 5988cd7e45 dont false revoke ratelimited gemini 2025-05-29 19:15:41 +02:00
reanon f80873ef8a fuck if I know (aws bs) 2025-05-25 21:31:41 +02:00
reanon 45c0b99f20 Fuckoff jew 2025-05-24 21:04:39 -08:00
reanon 692da2b457 I hate aws v2 2025-05-24 20:07:59 +02:00
reanon 32bc797216 Edit claude-models.ts 2025-05-22 23:12:03 -08:00
reanon 1a6ce7ea04 I hate aws v2 2025-05-23 08:48:36 +02:00
reanon fdba7cd7e4 I hate aws 2025-05-23 08:44:37 +02:00
reanon 64d2f78526 fix my fuckup 2025-05-23 05:39:19 +02:00
reanon 566d42da07 hopefully last edit 2025-05-23 05:30:38 +02:00
reanon 74bb88daa3 forgot 2025-05-23 05:18:32 +02:00
reanon 2ea5fdf902 fix 2025-05-23 05:06:05 +02:00
reanon d5ec6fe1f9 aws opus shown 2025-05-23 05:01:23 +02:00
reanon ce9c8ec8b6 aws claude show variant sonnet 2025-05-23 00:16:53 +02:00
reanon 29323fd7bf claude4 2025-05-23 00:00:15 +02:00
reanon af162e567a feat: add Google Search tool support and improve token counting safety 2025-05-20 22:50:55 +02:00
reanon 87c6dd90cb dunno 2025-05-20 12:24:51 +02:00
reanon 1d8b13ba70 scuffed codex-mini 2025-05-19 16:05:55 +02:00
reanon 8344fd2e2a Workaround for qwen3 think/nonthink auto ST compatibility 2025-05-19 10:54:17 +02:00
reanon 8c30088383 Check for gemini pro/invalid key fix 2025-05-19 08:14:48 +02:00
reanon dde0183d7d quick dirty qwen3 test 2025-05-17 14:03:29 -08:00
reanon d64edbb3b7 Gemini overquota handling 2025-05-15 04:39:05 +02:00
reanon 5f0b5cc4e5 fuck it, dont care about websearch anyways 2025-05-09 06:30:57 -08:00
reanon 99269b7cd6 Edit anthropic.ts 2025-05-09 06:27:23 -08:00
reanon 6870a36a6e actual last try 2025-05-09 16:18:42 +02:00
reanon 45535de6ae last try for websearch results 2025-05-09 16:08:28 +02:00
reanon 8ea6fe463b again 2025-05-09 16:02:07 +02:00
reanon 4fd5d08ed8 ui test 2025-05-09 15:55:49 +02:00
reanon 4496afe7a1 claude websearch, might redo/revert 2025-05-09 15:30:49 +02:00
reanon cc0ece32d0 Edit mistral-ai.ts 2025-05-07 09:54:40 -08:00
reanon be8accbc37 2505 2025-05-07 09:52:42 -08:00
reanon 1d6f3dbf10 strip models/ 2025-05-06 17:21:17 +02:00
reanon f2b55ebabb small cohere fix 2025-05-05 13:59:53 +02:00
reanon 6374bfdee1 no forced redirect 2025-05-05 07:54:35 +02:00
reanon eb66f6b149 turbo 2025-05-02 04:09:01 +02:00
reanon 551a13498b mega basic qwen 2025-05-02 04:06:44 +02:00
reanon 780b885aeb basic cohere 2025-05-02 01:41:20 +02:00
reanon d9645025c9 xai update 2025-04-30 21:26:03 +02:00
reanon c1cb395020 mistral update 2025-04-30 20:03:40 +02:00
reanon 80d09f470b Edit service-info.ts 2025-04-26 05:55:54 -08:00
reanon 44338652fd Update file add-key.ts 2025-04-26 04:36:12 -08:00
reanon 8ef272f8b3 Update 2 files
- /src/shared/key-management/openai/checker.ts
- /src/shared/key-management/openai/provider.ts
2025-04-26 04:28:37 -08:00
reanon 9c804c0560 Update file checker.ts 2025-04-26 04:15:47 -08:00
reanon 2dc7fda2dd I am a tard 2025-04-26 04:15:21 -08:00
reanon 68b199e712 Update file checker.ts 2025-04-26 04:12:29 -08:00
reanon 1b110d3269 Update file checker.ts 2025-04-26 04:11:24 -08:00
reanon abfde6f684 verification test again 2025-04-26 04:07:38 -08:00
reanon d2d6ff3d52 cute dog 2025-04-26 01:43:27 -08:00
reanon a5eda7685b LAST TRY 2025-04-26 01:37:01 -08:00
reanon cbca37dd77 last test for image verif 2025-04-26 01:26:50 -08:00
reanon fc55518cd1 dunno anymore 2025-04-26 11:21:04 +02:00
reanon 925a81de43 auto 2025-04-26 01:11:34 -08:00
reanon 989bfc0ca3 verif2 2025-04-26 01:08:42 -08:00
reanon a1c04234ab openai verification 2025-04-26 00:57:21 -08:00
reanon dc0e7498e8 Edit openai-image.ts 2025-04-26 00:42:09 -08:00
reanon 6628498d5e again 2025-04-26 00:37:12 -08:00
reanon 31f9b4d536 tt 2025-04-26 10:29:40 +02:00
reanon afe6ad8ac9 gpt-image fix maybe 2025-04-26 10:29:01 +02:00
reanon a16d66a45b prelim gpt-image (cant test, no access) 2025-04-25 10:38:23 +02:00
reanon 465b13e5fb Edit config.ts 2025-04-24 15:01:47 -08:00
reanon 6c8b19651d Edit config.ts 2025-04-24 14:21:44 -08:00
reanon d3292d8a76 Edit .env.example 2025-04-24 14:15:56 -08:00
reanon dab5c1bbf0 no limiting on models available, limit it in env 2025-04-24 14:13:31 -08:00
reanon 2ffce3eff8 .. 2025-04-23 20:14:36 +00:00
reanon 8197192223 yea fuck it 2025-04-23 19:49:21 +00:00
reanon 7e6857fcf5 last attempt, otherwise fuck thinking for now 2025-04-23 19:39:17 +00:00
reanon 719cbc3cfa tf is this 2025-04-23 19:36:36 +00:00
reanon 3beea5dcfc wtf 2025-04-23 19:22:46 +00:00
reanon 9213b7088b Cohee, when I catch you Cohee 2025-04-23 19:10:13 +00:00
reanon 86ed19af99 longer wait for openai check 2025-04-22 06:24:31 +02:00
reanon bb75cc668c deepseek update (automodels/checking) 2025-04-21 14:44:06 +02:00
reanon a6d095dcda xai edits 2025-04-21 10:38:09 +02:00
reanon 588aaae5d9 codebase 2025-04-21 08:24:46 +02:00
reanon 6eec7ff7e6 oops 2025-04-21 02:02:49 +00:00
reanon 272b812db3 Using weighted averages for pricing 2025-04-21 02:00:44 +00:00
reanon 0bcc0c1037 Edit stats.ts 2025-04-21 01:24:01 +00:00
reanon af58d25fb5 sloppytoppy 2025-04-21 01:17:37 +00:00
reanon 15dc2514ee Api scale is bad sometimes 2025-04-20 01:33:18 +00:00
reanon d650038f7e thinking budget according to ST 2025-04-19 02:24:33 +00:00
reanon 6efe09b62e claude checker to latest sonnet, checking interval from 6 to 24 hours 2025-04-18 22:41:02 +00:00
reanon 14a1203be7 New thinking budget for 2.5 flash 2025-04-17 21:13:36 +00:00
reanon 1e8f55f96d Revert "New 2.5 flash thinking budget parameter"
This reverts commit 2f8538519b
2025-04-17 21:12:39 +00:00
reanon 2f8538519b New 2.5 flash thinking budget parameter 2025-04-17 21:11:58 +00:00
reanon 1b7ce423a6 Edit google-ai.ts 2025-04-17 21:03:43 +00:00
reanon 799a73655c Edit google-ai.ts 2025-04-17 20:55:01 +00:00
reanon 96645ba529 Edit google-ai.ts 2025-04-17 20:15:22 +00:00
reanon de631d3d91 Edit google-ai.ts 2025-04-17 20:13:06 +00:00
reanon bf2c0dd3d9 Edit google-ai.ts 2025-04-17 20:08:01 +00:00
reanon 2415be7c51 Gemini 2.5 Flash Preview 04-17 2025-04-17 20:01:03 +00:00
reanon 4c9a3678ae Edit config.ts - o1-pro disabled by default 2025-04-17 12:09:39 +00:00
reanon 19df23f342 .. 2025-04-17 11:51:57 +00:00
reanon 85fafb8edb fix? 2025-04-17 11:47:25 +00:00
reanon 5eb4858c69 o1-pro test 2025-04-17 11:33:58 +00:00
reanon 8081d9516d Update 2 files
- /src/config.ts
- /.env.example
2025-04-17 08:13:31 +00:00
reanon 5473ef903e support -preview in the regex 2025-04-17 03:03:08 +00:00
reanon 568288c180 tookens 2025-04-17 02:40:26 +00:00
reanon 65f4e14d3b o4-mini and o3 I hope 2025-04-17 02:37:37 +00:00
reanon 6479cefe07 Update file block-zoomer-origins.ts 2025-04-15 00:37:50 +00:00
reanon 94e2c907b5 Update file block-zoomer-origins.ts 2025-04-15 00:36:16 +00:00
reanon af53fc9913 pricing 2025-04-15 00:26:08 +00:00
reanon e6cc393296 Update file block-zoomer-origins.ts 2025-04-14 23:14:07 +00:00
reanon a9811c2886 ... dumbo 2025-04-14 21:44:37 +00:00
reanon 64e07a0429 4.1 maybe 2025-04-14 21:07:38 +00:00
reanon 83676caa8b Edit xai.ts 2025-04-12 07:19:18 +00:00
reanon a76f8a3c87 Edit checker.ts 2025-04-11 10:09:13 +00:00
reanon ecae252df4 Edit checker.ts 2025-04-10 00:04:15 +00:00
reanon d951989a57 Edit xai.ts 2025-04-10 00:03:20 +00:00
reanon 9deafb445b Edit checker.ts 2025-04-08 00:27:07 +00:00
reanon ee1d8ab1a2 Revert "remove hardcoded model list"
This reverts commit c2bfcdc744
2025-04-07 23:47:47 +00:00
reanon c2bfcdc744 remove hardcoded model list 2025-04-07 23:37:43 +00:00
reanon 24b6a090d8 ... dude 2025-04-06 22:35:38 +00:00
reanon 758ccbf23b Edit README.md 2025-04-06 19:33:37 +00:00
reanon 4ad3c217a4 Update file google-ai.ts 2025-04-06 14:11:17 +00:00
reanon ab1fb89ab9 Update file key-pool.ts 2025-04-04 03:25:50 +00:00
reanon ac79935205 Update file checker.ts 2025-04-04 03:03:35 +00:00
reanon 2b7c901951 Update 3 files
- /src/shared/key-management/xai/checker.ts
- /src/shared/key-management/key-pool.ts
- /src/service-info.ts
2025-04-04 02:48:25 +00:00
reanon ad13928383 grok not xai 2025-04-04 01:32:44 +00:00
SternAnon a3869c2d67 Added gemini-2.5 2025-03-26 02:02:24 +00:00
SternAnon 6ebc2f5126 Revert "Added gemma-3-27b-it"
This reverts commit d551f86020
2025-03-14 20:22:55 +00:00
SternAnon d551f86020 Added gemma-3-27b-it 2025-03-14 19:56:58 +00:00
SternAnon 7cfaf5777e Update file checker.ts 2025-03-06 16:53:32 +00:00
SternAnon 4f6ef38222 fix xai 2 2025-03-06 16:36:36 +00:00
SternAnon d21b232a8e fixes xai 2025-03-06 16:33:12 +00:00
SternAnon 72c9516679 Update 13 files
- /src/config.ts
- /src/info-page.ts
- /src/proxy/xai.ts
- /src/proxy/middleware/request/mutators/add-key.ts
- /src/proxy/middleware/request/preprocessors/validate-context-size.ts
- /src/proxy/middleware/response/index.ts
- /src/proxy/routes.ts
- /src/service-info.ts
- /src/shared/key-management/xai/checker.ts
- /src/shared/key-management/xai/provider.ts
- /src/shared/key-management/index.ts
- /src/shared/key-management/key-pool.ts
- /src/shared/models.ts
2025-03-06 16:25:48 +00:00
user fcaad65ccb Slop AI code to unify Anthropic model list and give Anthropic-style IDs for AWS /v1/models requests, needed for e.g. big-AGI 2025-02-27 20:25:30 +00:00
user b3d4650275 Initial GPT 4.5 bringup, separate model family due to extreme price 2025-02-27 20:25:30 +00:00
user 70c7f2aae9 aws sign fix for 3.7 2025-02-25 20:59:49 +00:00
user aecc934fad untested 3.7 sonnet, treating it like another 3.5 sonnet model 2025-02-24 18:38:56 +00:00
user a8d36f832e Check tool_result images for vision 2025-02-12 14:09:22 +00:00
user c1db122016 Simplify model reassignment in GCP 2025-02-12 13:27:15 +00:00
user e9bd6127a4 merge 2025-02-12 13:27:15 +00:00
user e230e9acec Remove 3.5 Sonnet v1 from GCP checking 2025-02-12 13:27:15 +00:00
penurin 239f95e8a1 Merge branch 'patch-1' into 'main'
Fix anthropic content schema

See merge request penurin/oai-reverse-proxy!1
2025-02-12 13:18:21 +00:00
W92k6zuinOCClyWS 17475447a0 Fix anthropic content schema (penurin/oai-reverse-proxy!1) 2025-02-12 13:18:21 +00:00
user d2b37b8455 Fix Gemini key checking: old code didn't properly check for the error message. Swapped the check to use 2.0 Flash because it catches more 429 keys 2025-02-05 17:11:27 +00:00
user cec66cdc44 Newer Gemini 2.0 models in the list 2025-02-05 17:11:27 +00:00
user a5c9e95929 Add all o models to the OpenAI model list 2025-01-31 20:22:06 +00:00
user c5d4fe44e6 Fix for the timeout workaround for o1 2025-01-31 20:22:06 +00:00
user 8ed883eaff o3 mini 2025-01-31 20:22:06 +00:00
user 6de338c6ac Properly separate deepseek keys from the generic ones in service info 2025-01-25 11:06:38 +00:00
user 45576db441 [Deepseek] Properly handle over-quota keys 2025-01-25 11:59:41 +00:00
user bcc83f30d9 Properly count DS reasoning tokens and properly save them per-user 2025-01-25 11:06:38 +00:00
user e5a26215e1 Add native Gemini model list endpoint 2025-01-24 08:34:12 +00:00
user cd6cc76a46 Attempt to improve the o1 timeout hack 2025-01-22 15:23:17 +00:00
faggot 613bb789fb fix error checking o1 deployments 2025-01-22 09:59:11 +00:00
user f1c698388e [Gemini] Support the new thinking config for 2.0 Flash Thinking 2025-01-22 09:59:11 +00:00
user 75605a2bfb Add preliminary deepseek-reasoner support 2025-01-20 07:28:30 +00:00
user 58e67d40e2 Check logging for AWS keys (untested) 2025-01-20 11:28:53 +00:00
user 796b4eee47 Make the OpenAI checker properly clone orgs again, and fix the error with the liveness check 2025-01-06 06:55:41 +00:00
user 0f482e67d2 Fix OpenAI -> Google AI conversion 2025-01-05 14:02:17 +00:00
user 496ec09905 Add v1alpha support (needed for 2.0 flash thinking with the new 'thought' parameter), already used by ST 2025-01-04 19:11:02 +00:00
user f522dba6a3 Fix errors with o1-preview and o1-mini 2025-01-03 04:41:29 +00:00
user 25ba8447d9 And add display names as well 2025-01-03 04:41:29 +00:00
user 91b8c01a9d Do the same for the AWS endpoint 2025-01-03 04:41:29 +00:00
user 82b88764ba Fix Anthropic model list to be actually compatible with Anthropic API (required by some frontends), remove old models 2025-01-03 04:50:55 +00:00
user 6ea9235ff8 Actually camelCase is canon for Gemini, oops 2024-12-31 08:23:45 +00:00
user 372ad85283 Support camelCase Gemini params and validate vision 2024-12-31 10:16:04 +00:00
user c2f5d2fbf3 Add /v1/models to deepseek 2024-12-31 08:23:45 +00:00
user c264413495 Leave a comment about concurrency for "special" user tokens 2024-12-31 08:23:45 +00:00
user 8d27082ad0 Fix formatting changes with upstream 2024-12-31 08:23:45 +00:00
user e2b602fd52 Adjust chunked transfer to send 4KB (CF's buffer size) of data every 49 seconds 2024-12-31 08:23:45 +00:00
user b00fb88cab Don't overwrite the reasoning effort by default 2024-12-31 07:45:11 +00:00
user 1cc281f6fe Add automatic prefill for Deepseek - works the same way as with Claude 2024-12-31 07:45:11 +00:00
user 8f4d00ed26 Init commit, some things:
- 'Transfer-Encoding: chunked' for o1 requests to prevent CF's 100 second limit
- Better tool/function call support
- Deepseek support
- Handling system as an array for AWS Claude
- Image support for Gemini
- Better o1 support (reasoning effort, developer role, context size)
2024-12-31 00:00:00 +00:00
102 changed files with 9159 additions and 1195 deletions
+26 -7
@@ -17,6 +17,23 @@ NODE_ENV=production
# The title displayed on the info page.
# SERVER_TITLE=Coom Tunnel
# URL for the image displayed on the login page.
# If not set, no image will be displayed.
# LOGIN_IMAGE_URL=https://example.com/your-logo.png
# Whether to enable the token-based or password-based login for the main info page.
# Defaults to true. Set to false to disable login and make the info page public.
# ENABLE_INFO_PAGE_LOGIN=true
# Authentication mode for the service info page. (token | password)
# If 'token', any valid user token is used (requires GATEKEEPER='user_token' mode).
# If 'password', SERVICE_INFO_PASSWORD is used.
# Defaults to 'token' if ENABLE_INFO_PAGE_LOGIN is true.
# SERVICE_INFO_AUTH_MODE=token
# Password for the service info page if SERVICE_INFO_AUTH_MODE is 'password'.
# SERVICE_INFO_PASSWORD=your-service-info-password
# The route name used to proxy requests to APIs, relative to the Web site root.
# PROXY_ENDPOINT_ROUTE=/proxy
@@ -48,15 +65,14 @@ NODE_ENV=production
# | mistral-small | mistral-medium | mistral-large | aws-claude |
# | aws-claude-opus | gcp-claude | gcp-claude-opus | azure-turbo | azure-gpt4
# | azure-gpt4-32k | azure-gpt4-turbo | azure-gpt4o | azure-o1 | azure-dall-e
# By default, all models are allowed except for dall-e and o1.
# To allow DALL-E image generation, uncomment the line below and add 'dall-e' or
# 'azure-dall-e' to the list of allowed model families.
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,gpt4o,claude,claude-opus,gemini-flash,gemini-pro,gemini-ultra,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,gcp-claude,gcp-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o
# | azure-gpt45 | azure-o1-mini | azure-o3-mini | deepseek | xai | o3 | o4-mini | gpt41 | gpt41-mini | gpt41-nano
# By default, all models are allowed
# To disallow any, uncomment the line below and edit
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt45,gpt4-turbo,gpt4o,claude,claude-opus,gemini-flash,gemini-pro,gemini-ultra,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,gcp-claude,gcp-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o,azure-gpt45,azure-o1-mini,azure-o3-mini,deepseek
# Which services can be used to process prompts containing images via multimodal
# models. The following services are recognized:
# openai | anthropic | aws | gcp | azure | google-ai | mistral-ai
# openai | anthropic | aws | gcp | azure | google-ai | mistral-ai | xai
# Do not enable this feature unless all users are trusted, as you will be liable
# for any user-submitted images containing illegal content.
# By default, no image services are allowed and image prompts are rejected.
@@ -120,8 +136,11 @@ NODE_ENV=production
# Which access control method to use. (none | proxy_key | user_token)
# GATEKEEPER=none
# Which persistence method to use. (memory | firebase_rtdb)
# Which persistence method to use. (memory | firebase_rtdb | sqlite)
# GATEKEEPER_STORE=memory
# If using sqlite store, path to the SQLite database file for user data.
# Defaults to data/user-store.sqlite in the project directory.
# SQLITE_USER_STORE_PATH=data/user-store.sqlite3
# Maximum number of unique IPs a user can connect from. (0 for unlimited)
# MAX_IPS_PER_USER=0
+1 -1
@@ -7,5 +7,5 @@
build
greeting.md
node_modules
.windsurfrules
http-client.private.env.json
+33
@@ -0,0 +1,33 @@
You are a Senior Full Stack Developer and an Expert in ReactJS, NextJS, JavaScript, TypeScript, HTML, CSS and modern UI/UX frameworks (e.g., TailwindCSS, Shadcn, Radix). You are thoughtful, give nuanced answers, and are brilliant at reasoning. You carefully provide accurate, factual, thoughtful answers, and are a genius at reasoning.
- Follow the user's requirements carefully & to the letter.
- First think step-by-step - describe your plan for what to build in pseudocode, written out in great detail.
- Confirm, then write code!
- Always write correct, best-practice, DRY (Don't Repeat Yourself), bug-free, fully functional and working code, aligned to the rules listed below under Code Implementation Guidelines.
- Focus on easy, readable code over being performant.
- Fully implement all requested functionality.
- Leave NO todos, placeholders or missing pieces.
- Ensure code is complete! Verify it is thoroughly finalised.
- Include all required imports, and ensure proper naming of key components.
- Be concise. Minimize any other prose.
- If you think there might not be a correct answer, you say so.
- If you do not know the answer, say so, instead of guessing.
### Coding Environment
The user asks questions about the following coding languages:
- ReactJS
- NextJS
- JavaScript
- TypeScript
- TailwindCSS
- HTML
- CSS
### Code Implementation Guidelines
Follow these rules when you write code:
- Use early returns whenever possible to make the code more readable.
- Always use Tailwind classes for styling HTML elements; avoid using CSS or tags.
- Use “class:” instead of the ternary operator in class tags whenever possible.
- Use descriptive variable and function/const names. Also, event functions should be named with a “handle” prefix, like “handleClick” for onClick and “handleKeyDown” for onKeyDown.
- Implement accessibility features on elements. For example, a tag should have a tabindex=“0”, aria-label, on:click, and on:keydown, and similar attributes.
- Use consts instead of functions, for example, “const toggle = () =>”. Also, define a type if possible.
+321
@@ -0,0 +1,321 @@
# Project Codebase Guide
This document serves as a guide and index for the project codebase, designed to help developers and AI agents quickly understand its structure, components, and how to contribute.
## Table of Contents
1. [Project Overview](#project-overview)
2. [Directory Structure](#directory-structure)
3. [Core Components](#core-components)
* [Configuration (`src/config.ts`)](#configuration)
* [Server Entry Point (`src/server.ts`)](#server-entry-point)
* [Proxy Layer (`src/proxy/`)](#proxy-layer)
* [User Management (`src/user/`)](#user-management)
* [Admin Interface (`src/admin/`)](#admin-interface)
* [Shared Utilities (`src/shared/`)](#shared-utilities)
4. [Proxy Functionality](#proxy-functionality)
* [Routing (`src/proxy/routes.ts`)](#proxy-routing)
* [Supported Models & Providers](#supported-models--providers)
* [Middleware (`src/proxy/middleware/`)](#proxy-middleware)
* [Adding New Models](#adding-new-models)
* [Adding New APIs/Providers](#adding-new-apisproviders)
5. [Model Management](#model-management)
* [Model Family Definitions](#model-family-definitions)
* [Adding OpenAI Models](#adding-openai-models)
* [Model Mapping & Routing](#model-mapping--routing)
* [Service Information](#service-information)
* [Step-by-Step Guide for Adding a New Model](#step-by-step-guide-for-adding-a-new-model)
* [Model Patterns and Versioning](#model-patterns-and-versioning)
* [Response Format Handling](#response-format-handling)
6. [Key Management](#key-management)
* [Key Pool System](#key-pool-system)
* [Provider-Specific Key Management](#provider-specific-key-management)
* [Key Rotation and Health Checks](#key-rotation-and-health-checks)
7. [Data Management](#data-management)
* [Database (`src/shared/database/`)](#database)
* [File Storage (`src/shared/file-storage/`)](#file-storage)
8. [Authentication & Authorization](#authentication--authorization)
9. [Logging & Monitoring](#logging--monitoring)
10. [Deployment](#deployment)
11. [Contributing](#contributing)
## Project Overview
This project provides a proxy layer for various Large Language Models (LLMs) and potentially other AI APIs. It aims to offer a unified interface, manage API keys securely, handle rate limiting and usage tracking, and potentially add features like response caching or prompt modification.
## Directory Structure
```
.
├── .env.example # Example environment variables
├── .gitattributes # Git attributes
├── .gitignore # Git ignore rules
├── .husky/ # Git hooks
├── .prettierrc # Code formatting rules
├── CODEBASE_GUIDE.md # This file
├── README.md # Project README
├── data/ # Data files (e.g., SQLite DB)
├── docker/ # Docker configuration
├── docs/ # Documentation files
├── http-client.env.json # HTTP client environment
├── package-lock.json # NPM lock file
├── package.json # Project dependencies and scripts
├── patches/ # Patches for dependencies
├── public/ # Static assets served by the web server
├── render.yaml # Render deployment configuration
├── scripts/ # Utility scripts
├── src/ # Source code
│ ├── admin/ # Admin interface logic
│ ├── config.ts # Application configuration
│ ├── info-page.ts # Logic for the info page
│ ├── logger.ts # Logging setup
│ ├── proxy/ # Core proxy logic for different providers
│ ├── server.ts # Express server setup and main entry point
│ ├── service-info.ts # Service information logic
│ ├── shared/ # Shared utilities, types, and modules
│ └── user/ # User management logic
└── tsconfig.json # TypeScript configuration
```
## Core Components
### Configuration (`src/config.ts`)
* Loads environment variables and defines application settings.
* Contains configuration for database connections, API keys (placeholders/retrieval methods), logging levels, rate limits, etc.
* Uses `dotenv` and potentially a schema validation library (like Zod) to ensure required variables are present.
### Server Entry Point (`src/server.ts`)
* Initializes the Express application.
* Sets up core middleware (e.g., body parsing, CORS, logging).
* Mounts routers for different parts of the application (admin, user, proxy).
* Starts the HTTP server.
### Proxy Layer (`src/proxy/`)
* The heart of the application, handling requests to downstream AI APIs.
* Contains individual modules for each supported provider (e.g., `openai.ts`, `anthropic.ts`).
* Handles request transformation, authentication against the target API, and response handling.
* Uses middleware for common proxy tasks.
### User Management (`src/user/`)
* Handles user registration, login, session management, and potentially API key generation/management for end-users.
* Likely interacts with the database (`src/shared/database/`).
### Admin Interface (`src/admin/`)
* Provides an interface for administrators to manage users, monitor usage, configure settings, etc.
* May have its own set of routes and views.
### Shared Utilities (`src/shared/`)
* Contains reusable code across different modules.
* `api-schemas/`: Zod schemas for API request/response validation.
* `database/`: Database connection, schemas (e.g., Prisma), and query logic.
* `errors.ts`: Custom error classes.
* `key-management/`: Logic for managing API keys (if applicable).
* `models.ts`: Core data models/types used throughout the application.
* `prompt-logging/`: Logic for logging prompts and responses.
* `tokenization/`: Utilities for counting tokens.
* `utils.ts`: General utility functions.
## Proxy Functionality
### Proxy Routing (`src/proxy/routes.ts`)
* Defines the API endpoints for the proxy service (e.g., `/v1/chat/completions`).
* Maps incoming requests to the appropriate provider-specific handler based on the request path, headers, or body content (e.g., model requested).
* Applies relevant middleware (authentication, rate limiting, queuing, etc.).
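A rough sketch of that wiring (router and middleware names here are assumptions based on the files listed in this guide, not the exact code):
```typescript
// Hypothetical sketch of src/proxy/routes.ts wiring; names are illustrative.
import { Router } from "express";
import { openaiRouter } from "./openai";
import { anthropicRouter } from "./anthropic";
import { gatekeeper } from "./middleware/gatekeeper";
import { rateLimit } from "./middleware/rate-limit";

const proxyRouter = Router();

// Shared middleware runs before any provider-specific handler.
proxyRouter.use(gatekeeper);
proxyRouter.use(rateLimit);

// Each provider mounts its own sub-router under a stable prefix.
proxyRouter.use("/openai", openaiRouter);
proxyRouter.use("/anthropic", anthropicRouter);

export { proxyRouter };
```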
### Supported Models & Providers
* **OpenAI:** Handled in `src/proxy/openai.ts`. Supports models like GPT-4 and GPT-3.5 Turbo, as well as o-series models (o1, o1-mini, o1-pro, o3, o3-mini, o3-pro, o4-mini). Handles chat completions and potentially image generation (`src/proxy/openai-image.ts`).
* **Anthropic:** Handled in `src/proxy/anthropic.ts`. Supports Claude models. May use AWS Bedrock (`src/proxy/aws-claude.ts`) or Anthropic's direct API.
* **Google AI / Vertex AI:** Handled in `src/proxy/google-ai.ts` and `src/proxy/gcp.ts`. Supports Gemini models (gemini-flash, gemini-pro, gemini-ultra).
* **Mistral AI:** Handled in `src/proxy/mistral-ai.ts`. Supports Mistral models via their API or potentially AWS (`src/proxy/aws-mistral.ts`).
* **Azure OpenAI:** Handled in `src/proxy/azure.ts`. Provides an alternative endpoint for OpenAI models via Azure.
* **Deepseek:** Handled in `src/proxy/deepseek.ts`.
* **xAI (Grok):** Handled in `src/proxy/xai.ts`.
* **AWS (General):** `src/proxy/aws.ts` might contain shared AWS logic (e.g., authentication).
### Middleware (`src/proxy/middleware/`)
* **`gatekeeper.ts`:** Likely handles initial request validation, authentication, and authorization checks before hitting provider logic. Checks origin (`check-origin.ts`), potentially custom tokens (`check-risu-token.ts`).
* **`rate-limit.ts`:** Implements rate limiting logic, potentially per-user or per-key.
* **`queue.ts`:** Manages request queuing, possibly to handle concurrency limits or prioritize requests.
### Adding New Models
1. **Identify the Provider:** Determine if the new model belongs to an existing provider (e.g., a new OpenAI model) or a new one.
2. **Update Provider Logic (if existing):**
* Modify the relevant provider file (e.g., `src/proxy/openai.ts`).
* Update model lists or logic that selects/validates models.
* Adjust any request/response transformations if the new model has a different API schema.
* Update model information in shared files like `src/shared/models.ts` if necessary (a sketch follows this list).
3. **Update Routing (if necessary):** Modify `src/proxy/routes.ts` if the new model requires a different endpoint or routing logic.
4. **Configuration:** Add any new API keys or configuration parameters to `.env.example` and `src/config.ts`.
5. **Testing:** Add unit or integration tests for the new model.
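For example, steps 1–2 for a hypothetical new model might reduce to extending the family map in `src/shared/models.ts`. A minimal sketch, assuming `OPENAI_MODEL_FAMILY_MAP` maps family names to regex patterns (its exact shape may differ):
```typescript
// Hypothetical entry in src/shared/models.ts for an assumed "gpt6" family.
export const OPENAI_MODEL_FAMILY_MAP: Record<string, RegExp> = {
  // ...existing families...
  // Matches "gpt-6" and optional date-stamped variants like "gpt-6-2026-01-15".
  gpt6: /^gpt-6(-\d{4}-\d{2}-\d{2})?$/,
};
```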
### Adding New APIs/Providers
1. **Create Provider Module:** Create a new file in `src/proxy/` (e.g., `src/proxy/new-provider.ts`).
2. **Implement Handler:**
* Write the core logic to handle requests for this provider. This typically involves:
* Receiving the standardized request from the router.
* Transforming the request into the format expected by the new provider's API.
* Authenticating with the new provider's API (fetching keys from config).
* Making the API call (consider using a robust HTTP client like `axios` or `node-fetch`).
* Handling streaming responses if applicable (using helpers from `src/shared/streaming.ts`).
* Transforming the provider's response back into a standardized format.
* Handling errors gracefully.
3. **Add Routing:**
* Import the new handler in `src/proxy/routes.ts`.
* Add new routes or modify existing routing logic to direct requests to the new handler based on model name, path, or other criteria.
* Apply necessary middleware (gatekeeper, rate limiter, queue).
4. **Create Key Management:**
* Create a new directory in `src/shared/key-management/` for the provider.
* Implement provider-specific key management (key checkers, token counters).
5. **Configuration:**
* Add configuration variables (API keys, base URLs) to `.env.example` and `src/config.ts`.
* Update `src/config.ts` to load and validate the new variables.
6. **Model Information:** Add details about the new provider and its models to `src/shared/models.ts` or similar shared locations.
7. **Tokenization (if applicable):** If token counting is needed, add or update tokenization logic in `src/shared/tokenization/`.
8. **Testing:** Implement thorough tests for the new provider integration.
9. **Documentation:** Update this guide and any other relevant documentation.
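A skeleton provider module along the lines of steps 1–2 (the helper name, env var, and URL are assumptions; a real handler would also cover streaming and key-pool integration as described above):
```typescript
// Hypothetical src/proxy/new-provider.ts skeleton.
import type { Request, Response } from "express";

const BASE_URL = "https://api.new-provider.example/v1"; // would come from config

export async function handleNewProviderChat(req: Request, res: Response) {
  // 1. Transform the standardized request into the provider's format.
  const body = JSON.stringify({
    model: req.body.model,
    messages: req.body.messages,
  });

  // 2. Authenticate and call the upstream API (key pool in the real code).
  const upstream = await fetch(`${BASE_URL}/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${process.env.NEW_PROVIDER_KEY}`,
    },
    body,
  });

  // 3. Pass the (transformed) response back to the client.
  res.status(upstream.status).json(await upstream.json());
}
```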
## Model Management
### Model Family Definitions
* **Model Family Definitions:** The project uses a family-based approach to group similar models together. These are defined in `src/shared/models.ts`.
* Each model is part of a model family (e.g., "gpt4", "claude", "gemini-pro") which helps with routing, key management, and feature support.
* The `MODEL_FAMILIES` array contains all supported model families, and the `MODEL_FAMILY_SERVICE` mapping connects each family to its provider service.
### Adding OpenAI Models
When adding new OpenAI models to the codebase, there are several files that must be updated:
1. **Update Model Types (`src/shared/models.ts`):**
- Add the new model to the `OpenAIModelFamily` type
- Add the model to the `MODEL_FAMILIES` array
- Add the Azure variants for the model if applicable
- Add the model to `MODEL_FAMILY_SERVICE` mapping
- Update `OPENAI_MODEL_FAMILY_MAP` with regex patterns to match the model names
2. **Update Context Size Limits (`src/proxy/middleware/request/preprocessors/validate-context-size.ts`):**
- Add regex matching for the new model
- Set the appropriate context token limit for the model
3. **Update Token Cost Tracking (`src/shared/stats.ts`):**
- Add pricing information for the new model in the `getTokenCostUsd` function (a sketch follows this list)
- Include both input and output prices in the comments for clarity
4. **Update Feature Support Checks (`src/proxy/openai.ts`):**
- If the model supports special features like the reasoning API parameter (`isO1Model` function), update the appropriate function
- For model feature detection, prefer using regex patterns over explicit lists when possible, as this handles date-stamped versions better
5. **Update Display Names (`src/info-page.ts`):**
- Add friendly display names for the new models in the `MODEL_FAMILY_FRIENDLY_NAME` object
6. **Update Key Management Provider Files:**
- For OpenAI keys in `src/shared/key-management/openai/provider.ts`, add token counters for the new models
- For Azure OpenAI keys in `src/shared/key-management/azure/provider.ts`, add token counters for the Azure versions
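As an assumed illustration of step 3, a pricing branch in `getTokenCostUsd` (its three-argument signature appears in the `stats.ts` hunk later on this page) might look like:
```typescript
// Hypothetical pricing branch in src/shared/stats.ts; rates are placeholders.
export function getTokenCostUsd(
  model: string,
  inputTokens: number,
  outputTokens: number
): number {
  if (model === "gpt6") {
    // Input: $2.00 / 1M tokens, output: $8.00 / 1M tokens (illustrative only).
    return (inputTokens / 1_000_000) * 2.0 + (outputTokens / 1_000_000) * 8.0;
  }
  return 0; // other families omitted in this sketch
}
```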
### Model Patterns and Versioning
The codebase handles several patterns for model naming and versioning:
1. **Date-stamped Models:** Many models include date stamps (e.g., `gpt-4-0125-preview`). The regex patterns in `OPENAI_MODEL_FAMILY_MAP` account for these with patterns like `^gpt-4o(-\\d{4}-\\d{2}-\\d{2})?$`.
2. **O-Series Models:** OpenAI's o-series models (o1, o1-mini, o1-pro, o3, o3-mini, o3-pro, o4-mini) follow a different naming convention. The codebase handles these with dedicated model families and regex patterns.
3. **Preview/Non-Preview Variants:** Some models have preview variants (e.g., `gpt-4.5-preview`). The regex patterns in `OPENAI_MODEL_FAMILY_MAP` account for these with patterns like `^gpt-4\\.5(-preview)?(-\\d{4}-\\d{2}-\\d{2})?$`.
When adding new models, try to follow the existing patterns for consistency.
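These quoted patterns can be exercised directly; the expected results below follow from the regexes themselves:
```typescript
// The regex patterns quoted above, tested against sample model IDs.
const gpt4o = /^gpt-4o(-\d{4}-\d{2}-\d{2})?$/;
const gpt45 = /^gpt-4\.5(-preview)?(-\d{4}-\d{2}-\d{2})?$/;

console.log(gpt4o.test("gpt-4o"));                     // true
console.log(gpt4o.test("gpt-4o-2024-08-06"));          // true (date-stamped)
console.log(gpt4o.test("gpt-4o-mini"));                // false: a separate family
console.log(gpt45.test("gpt-4.5-preview"));            // true
console.log(gpt45.test("gpt-4.5-preview-2025-02-27")); // true (preview + date)
```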
### Response Format Handling
The codebase includes special handling for different API response formats:
1. **Chat vs. Text Completions:** There's transformation logic in `openai.ts` to convert between chat completions and text completions formats (`transformTurboInstructResponse`).
2. **Newer API Formats:** For newer APIs like the Responses API, there's transformation logic (`transformResponsesApiResponse`) to convert responses to a format compatible with existing clients.
When adding support for new models or APIs, consider whether transformation is needed to maintain compatibility with existing clients.
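A minimal sketch of what such a transformation involves, assuming a simplified upstream payload shape (the real `transformResponsesApiResponse` handles more fields):
```typescript
// Illustrative conversion from an assumed Responses-API-style payload to a
// chat-completions-style payload compatible with existing clients.
interface ResponsesApiPayload {
  model: string;
  output_text: string; // assumed field name for the generated text
}

function toChatCompletion(resp: ResponsesApiPayload) {
  return {
    object: "chat.completion",
    model: resp.model,
    choices: [
      {
        index: 0,
        message: { role: "assistant", content: resp.output_text },
        finish_reason: "stop",
      },
    ],
  };
}
```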
## Key Management
### Key Pool System
The project uses a sophisticated key pool system (`src/shared/key-management/key-pool.ts`) to manage API keys for different providers. Key features include:
* **Key Selection:** The system selects the appropriate key based on model family, region preferences, and other criteria.
* **Rotation:** Keys are rotated to distribute usage and avoid hitting rate limits.
* **Health Checks:** Keys are checked periodically to ensure they're still valid and within rate limits.
### Provider-Specific Key Management
Each provider has its own key management module in `src/shared/key-management/`:
* **Key Checkers:** Each provider implements key checkers to validate keys and check their status.
* **Token Counters:** Providers implement token counting logic specific to their pricing model.
* **Models Support:** Keys are associated with specific model families they support.
When adding a new model or provider, you'll need to update or create the appropriate key management files.
### Key Rotation and Health Checks
The key pool system includes logic for:
* **Rotation Strategy:** Keys are selected based on a prioritization strategy (`prioritize-keys.ts`), sketched below.
* **Disabling Unhealthy Keys:** Keys that fail health checks are temporarily disabled.
* **Rate Limit Awareness:** The system tracks usage to avoid hitting provider rate limits.
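A simplified version of that strategy, with assumed `Key` fields (the real logic lives in `prioritize-keys.ts`):
```typescript
// Hypothetical key selection: skip disabled or rate-limited keys, then pick
// the least-recently-used key to spread load across the pool.
interface Key {
  hash: string;
  isDisabled: boolean;
  rateLimitedUntil: number; // epoch ms; 0 if not rate limited
  lastUsed: number;         // epoch ms
}

function pickKey(keys: Key[], now = Date.now()): Key | undefined {
  return keys
    .filter((k) => !k.isDisabled && k.rateLimitedUntil <= now)
    .sort((a, b) => a.lastUsed - b.lastUsed)[0];
}
```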
## Data Management
### Database (`src/shared/database/`)
* Likely uses Prisma or a similar ORM.
* Defines database schemas (e.g., for users, API keys, usage logs).
* Provides functions for interacting with the database.
* Configuration is managed in `src/config.ts`.
### File Storage (`src/shared/file-storage/`)
* May be used for storing logs, cached data, or user-uploaded files.
* Could integrate with local storage or cloud providers (e.g., S3, GCS).
## Authentication & Authorization
* **User Auth:** Handled in `src/user/` potentially using sessions (`src/shared/with-session.ts`) or JWTs.
* **Proxy Auth:** The `gatekeeper.ts` middleware likely verifies incoming requests to the proxy endpoints. This could involve checking:
* Custom API keys stored in the database (`src/shared/database/`).
* Specific tokens (`check-risu-token.ts`).
* HMAC signatures (`src/shared/hmac-signing.ts`).
* Origin checks (`check-origin.ts`).
* **Downstream Auth:** Each provider module (`src/proxy/*.ts`) handles authentication with the actual AI service API using keys from the configuration.
## Logging & Monitoring
* **Logging:** Configured in `src/logger.ts`, likely using a library like `pino` or `winston`. Logs requests, errors, and important events.
* **Prompt Logging:** Specific logic for logging prompts and responses might exist in `src/shared/prompt-logging/`.
* **Stats/Monitoring:** `src/shared/stats.ts` might handle collecting and exposing application metrics.
## Deployment
* **Docker:** The project likely includes Docker configuration for containerized deployment.
* **Render:** The `render.yaml` file suggests the project is or can be deployed on Render.
* **Environment Variables:** The `.env.example` file provides a template for required environment variables in production.
## Contributing
When contributing to this project:
1. **Follow Coding Standards:** Use the established patterns and standards in the codebase. The `.prettierrc` file defines code formatting rules.
2. **Update Documentation:** Keep this guide updated when adding new components or changing existing ones.
3. **Add Tests:** Ensure your changes are tested appropriately.
4. **Update Configuration:** If your changes require new environment variables, update `.env.example`.
*This guide provides a high-level overview. For detailed information, refer to the specific source code files.*
+2 -2
@@ -1,4 +1,4 @@
# OAI Reverse Proxy
# OAI Reverse Proxy - just a shitty fork
Reverse proxy server for various LLM APIs.
### Table of Contents
@@ -23,7 +23,7 @@ This project allows you to run a reverse proxy server for various LLM APIs.
- [x] Support for multiple APIs
- [x] [OpenAI](https://openai.com/)
- [x] [Anthropic](https://www.anthropic.com/)
- [x] [AWS Bedrock](https://aws.amazon.com/bedrock/)
- [x] [AWS Bedrock](https://aws.amazon.com/bedrock/) (Claude4 is fucked, dont care)
- [x] [Vertex AI (GCP)](https://cloud.google.com/vertex-ai/)
- [x] [Google MakerSuite/Gemini API](https://ai.google.dev/)
- [x] [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
+12
@@ -12,6 +12,7 @@ Several of these features require you to set secrets in your environment. If usi
- [Memory](#memory)
- [Firebase Realtime Database](#firebase-realtime-database)
- [Firebase setup instructions](#firebase-setup-instructions)
- [SQLite Database](#sqlite-database)
- [Whitelisting admin IP addresses](#whitelisting-admin-ip-addresses)
## No user management (`GATEKEEPER=none`)
@@ -63,6 +64,17 @@ To use Firebase Realtime Database to persist user data, set the following enviro
The proxy server will attempt to connect to your Firebase Realtime Database at startup and will throw an error if it cannot connect. If you see this error, check that your `FIREBASE_RTDB_URL` and `FIREBASE_KEY` secrets are set correctly.
### SQLite Database
To use a local SQLite database file to persist user data, set the following environment variables:
- `GATEKEEPER_STORE`: Set this to `sqlite`.
- `SQLITE_USER_STORE_PATH` (Optional): Specifies the path to the SQLite database file.
- If not set, it defaults to `data/user-store.sqlite` within the project directory.
- Ensure that the directory where the SQLite file will be created (e.g., the `data/` directory) is writable by the application process.
Using SQLite provides a simple way to persist user data locally without relying on external services. User data will be saved to the specified file and will be available across server restarts.
## Whitelisting admin IP addresses
You can add your own IP ranges to the `ADMIN_WHITELIST` environment variable for additional security.
+499 -325
File diff suppressed because it is too large
+2 -2
@@ -53,7 +53,7 @@
"pino-http": "^8.3.3",
"proxy-agent": "^6.4.0",
"sanitize-html": "^2.13.0",
"sharp": "^0.32.6",
"sharp": "^0.34.2",
"showdown": "^2.1.0",
"source-map-support": "^0.5.21",
"stream-json": "^1.8.0",
@@ -78,7 +78,7 @@
"@types/stream-json": "^1.7.7",
"@types/uuid": "^9.0.1",
"concurrently": "^8.0.1",
"esbuild": "^0.17.16",
"esbuild": "^0.25.5",
"esbuild-register": "^3.4.2",
"husky": "^8.0.3",
"nodemon": "^3.0.1",
+38
@@ -13,6 +13,7 @@ import { eventsApiRouter } from "./api/events";
import { usersApiRouter } from "./api/users";
import { usersWebRouter as webRouter } from "./web/manage";
import { logger } from "../logger";
import { keyPool } from "../shared/key-management";
const adminRouter = Router();
@@ -36,6 +37,43 @@ adminRouter.use(injectCsrfToken);
adminRouter.use("/users", authorize({ via: "header" }), usersApiRouter);
adminRouter.use("/events", authorize({ via: "header" }), eventsApiRouter);
// Special endpoint to validate organization verification status for all OpenAI keys
// This checks both gpt-image-1 and o3 streaming access which require verified organizations
adminRouter.post("/validate-gpt-image-keys", authorize({ via: "header" }), async (req, res) => {
try {
logger.info("Manual validation of organization verification status initiated");
// Use the specialized validation function that tests each key's organization verification
// status using o3 streaming and waits for the results
const results = await keyPool.validateGptImageAccess();
logger.info({
total: results.total,
verified: results.verified.length,
removed: results.removed.length,
errors: results.errors.length
}, "Manual organization verification check completed");
return res.json({
success: true,
message: "Organization verification check completed",
results: {
total: results.total,
verified: results.verified.length,
removed: results.removed.length,
errors: results.errors.length,
// Only include hashes, not full keys
verified_keys: results.verified,
removed_keys: results.removed,
error_details: results.errors
}
});
} catch (error) {
logger.error({ error }, "Error validating organization verification status for OpenAI keys");
return res.status(500).json({ error: "Failed to validate keys", details: error.message });
}
});
adminRouter.use(checkCsrfToken);
adminRouter.use(injectLocals);
adminRouter.use("/", loginRouter);
+76 -7
@@ -132,10 +132,11 @@ router.post("/create-user", (req, res) => {
)
.transform((data: any) => {
const expiresAt = Date.now() + data.temporaryUserDuration * 60 * 1000;
const tokenLimits = MODEL_FAMILIES.reduce((limits, model) => {
limits[model] = data[`temporaryUserQuota_${model}`];
const tokenLimits = MODEL_FAMILIES.reduce((limits, modelFamily) => {
const quotaValue = data[`temporaryUserQuota_${modelFamily}`];
limits[modelFamily] = typeof quotaValue === 'number' ? quotaValue : 0;
return limits;
}, {} as UserTokenCounts);
}, {} as any);
return { ...data, expiresAt, tokenLimits };
});
@@ -189,7 +190,70 @@ router.post("/import-users", upload.single("users"), (req, res) => {
if (!req.file) throw new HttpError(400, "No file uploaded");
const data = JSON.parse(req.file.buffer.toString());
const result = z.array(UserPartialSchema).safeParse(data.users);
// Transform old token count format to new format
const transformedUsers = data.users.map((user: any) => {
if (user.tokenCounts) {
const transformedTokenCounts: any = {};
for (const [family, value] of Object.entries(user.tokenCounts)) {
if (typeof value === 'number') {
// Old format: just a number (legacy_total)
transformedTokenCounts[family] = {
input: 0,
output: 0,
legacy_total: value
};
} else if (typeof value === 'object' && value !== null) {
// New format or partially new format
const transformedCounts: { input: number; output: number; legacy_total?: number } = {
input: (value as any).input || 0,
output: (value as any).output || 0
};
if ((value as any).legacy_total !== undefined) {
transformedCounts.legacy_total = (value as any).legacy_total;
}
transformedTokenCounts[family] = transformedCounts;
}
}
user.tokenCounts = transformedTokenCounts;
}
// Handle tokenLimits - should be flat numbers
if (user.tokenLimits) {
const transformedTokenLimits: any = {};
for (const [family, value] of Object.entries(user.tokenLimits)) {
if (typeof value === 'number') {
// Already in correct format
transformedTokenLimits[family] = value;
} else if (typeof value === 'object' && value !== null) {
// Old format with input/output/legacy_total - sum them up
const val = value as any;
transformedTokenLimits[family] = (val.input ?? 0) + (val.output ?? 0) + (val.legacy_total ?? 0);
}
}
user.tokenLimits = transformedTokenLimits;
}
// Handle tokenRefresh - should be flat numbers
if (user.tokenRefresh) {
const transformedTokenRefresh: any = {};
for (const [family, value] of Object.entries(user.tokenRefresh)) {
if (typeof value === 'number') {
// Already in correct format
transformedTokenRefresh[family] = value;
} else if (typeof value === 'object' && value !== null) {
// Old format with input/output/legacy_total - sum them up
const val = value as any;
transformedTokenRefresh[family] = (val.input ?? 0) + (val.output ?? 0) + (val.legacy_total ?? 0);
}
}
user.tokenRefresh = transformedTokenRefresh;
}
return user;
});
const result = z.array(UserPartialSchema).safeParse(transformedUsers);
if (!result.success) throw new HttpError(400, result.error.toString());
const upserts = result.data.map((user) => userStore.upsertUser(user));
@@ -547,9 +611,14 @@ router.post("/generate-stats", (req, res) => {
function getSumsForUser(user: User) {
const sums = MODEL_FAMILIES.reduce(
(s, model) => {
const tokens = user.tokenCounts[model] ?? 0;
s.sumTokens += tokens;
s.sumCost += getTokenCostUsd(model, tokens);
const counts = user.tokenCounts[model] ?? { input: 0, output: 0 };
// Ensure inputTokens and outputTokens are numbers, defaulting to 0 if NaN or undefined
const inputTokens = Number(counts.input) || 0;
const outputTokens = Number(counts.output) || 0;
// We could also consider legacy_total here if input and output are 0
// For now, sumTokens and sumCost will be based on current input/output.
s.sumTokens += inputTokens + outputTokens;
s.sumCost += getTokenCostUsd(model, inputTokens, outputTokens);
return s;
},
{ sumTokens: 0, sumCost: 0, prettyUsage: "" }
+11 -5
@@ -18,13 +18,19 @@
</li>
<li>
<code>tokenCounts</code> (optional): the number of tokens the user has
consumed. This should be an object with keys <code>turbo</code>,
<code>gpt4</code>, and <code>claude</code>.
consumed. This should be an object with model family keys (e.g. <code>turbo</code>,
<code>gpt4</code>, <code>claude</code>), each containing an object with
<code>input</code> and <code>output</code> token counts.
</li>
<li>
<code>tokenLimits</code> (optional): the number of tokens the user can
consume. This should be an object with keys <code>turbo</code>,
<code>gpt4</code>, and <code>claude</code>.
<code>tokenLimits</code> (optional): the maximum number of tokens the user can
consume. This should be an object with model family keys (e.g. <code>turbo</code>,
<code>gpt4</code>, <code>claude</code>), each containing a single number
representing the total token quota.
</li>
<li>
<code>tokenRefresh</code> (optional): the amount of tokens to refresh when quotas
are reset. Same format as <code>tokenLimits</code>.
</li>
<li>
<code>createdAt</code> (optional): the timestamp when the user was created
+97 -11
@@ -29,10 +29,40 @@ type Config = {
* same but the APIs are different. Vertex is the GCP product for enterprise.
**/
googleAIKey?: string;
/**
* Comma-delimited list of Google AI experimental model names that are
* allowed to bypass the experimental model block. By default, all models
* containing "exp" are blocked, but specific models listed here will be
* permitted.
*
* @example "gemini-2.0-flash-exp,gemini-exp-1206"
*/
allowedExpModels?: string;
/**
* Comma-delimited list of Mistral AI API keys.
*/
mistralAIKey?: string;
/**
* Comma-delimited list of Deepseek API keys.
*/
deepseekKey?: string;
/**
* Comma-delimited list of Xai (Grok) API keys.
*/
xaiKey?: string;
/**
* Comma-delimited list of Cohere API keys.
*/
cohereKey?: string;
/**
* Comma-delimited list of Qwen API keys.
*/
qwenKey?: string;
/**
* Comma-delimited list of Moonshot API keys.
*/
moonshotKey?: string;
/**
* Comma-delimited list of AWS credentials. Each credential item should be a
* colon-delimited list of access key, secret key, and AWS region.
@@ -73,11 +103,6 @@ type Config = {
* management mode is set to 'user_token'.
*/
adminKey?: string;
/**
* The password required to view the service info/status page. If not set, the
* info page will be publicly accessible.
*/
serviceInfoPassword?: string;
/**
* Which user management mode to use.
* - `none`: No user management. Proxy is open to all requests with basic
@@ -94,10 +119,14 @@ type Config = {
* - `memory`: Users are stored in memory and are lost on restart (default)
* - `firebase_rtdb`: Users are stored in a Firebase Realtime Database;
* requires `firebaseKey` and `firebaseRtdbUrl` to be set.
* - `sqlite`: Users are stored in an SQLite database; requires
* `sqliteUserStorePath` to be set.
*/
gatekeeperStore: "memory" | "firebase_rtdb";
gatekeeperStore: "memory" | "firebase_rtdb" | "sqlite";
/** URL of the Firebase Realtime Database if using the Firebase RTDB store. */
firebaseRtdbUrl?: string;
/** Path to the SQLite database file for storing user data. */
sqliteUserStorePath?: string;
/**
* Base64-encoded Firebase service account key if using the Firebase RTDB
* store. Note that you should encode the *entire* JSON key file, not just the
@@ -356,7 +385,7 @@ type Config = {
*
* Defaults to no services, meaning image prompts are disabled. Use a comma-
* separated list. Available services are:
* openai,anthropic,google-ai,mistral-ai,aws,gcp,azure
* openai,anthropic,google-ai,mistral-ai,aws,gcp,azure,xai
*/
allowedVisionServices: LLMService[];
/**
@@ -415,6 +444,14 @@ type Config = {
*/
proxyUrl?: string;
};
/** URL for the image on the login page. Defaults to empty string (no image). */
loginImageUrl?: string;
/** Whether to enable the token-based login page for the service info page. Defaults to true. */
enableInfoPageLogin?: boolean;
/** Authentication mode for the service info page. (token | password) */
serviceInfoAuthMode: "token" | "password";
/** Password for the service info page if serviceInfoAuthMode is 'password'. */
serviceInfoPassword?: string;
};
// To change configs, create a file called .env in the root directory.
@@ -424,14 +461,19 @@ export const config: Config = {
bindAddress: getEnvWithDefault("BIND_ADDRESS", "0.0.0.0"),
openaiKey: getEnvWithDefault("OPENAI_KEY", ""),
anthropicKey: getEnvWithDefault("ANTHROPIC_KEY", ""),
qwenKey: getEnvWithDefault("QWEN_KEY", ""),
googleAIKey: getEnvWithDefault("GOOGLE_AI_KEY", ""),
allowedExpModels: getEnvWithDefault("ALLOWED_EXP_MODELS", ""),
mistralAIKey: getEnvWithDefault("MISTRAL_AI_KEY", ""),
deepseekKey: getEnvWithDefault("DEEPSEEK_KEY", ""),
xaiKey: getEnvWithDefault("XAI_KEY", ""),
cohereKey: getEnvWithDefault("COHERE_KEY", ""),
moonshotKey: getEnvWithDefault("MOONSHOT_KEY", ""),
awsCredentials: getEnvWithDefault("AWS_CREDENTIALS", ""),
gcpCredentials: getEnvWithDefault("GCP_CREDENTIALS", ""),
azureCredentials: getEnvWithDefault("AZURE_CREDENTIALS", ""),
proxyKey: getEnvWithDefault("PROXY_KEY", ""),
adminKey: getEnvWithDefault("ADMIN_KEY", ""),
serviceInfoPassword: getEnvWithDefault("SERVICE_INFO_PASSWORD", ""),
sqliteDataPath: getEnvWithDefault(
"SQLITE_DATA_PATH",
path.join(DATA_DIR, "database.sqlite")
@@ -439,7 +481,11 @@ export const config: Config = {
eventLogging: getEnvWithDefault("EVENT_LOGGING", false),
eventLoggingTrim: getEnvWithDefault("EVENT_LOGGING_TRIM", 5),
gatekeeper: getEnvWithDefault("GATEKEEPER", "none"),
gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory"),
gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory") as Config["gatekeeperStore"],
sqliteUserStorePath: getEnvWithDefault(
"SQLITE_USER_STORE_PATH",
path.join(DATA_DIR, "user-store.sqlite")
),
maxIpsPerUser: getEnvWithDefault("MAX_IPS_PER_USER", 0),
maxIpsAutoBan: getEnvWithDefault("MAX_IPS_AUTO_BAN", false),
captchaMode: getEnvWithDefault("CAPTCHA_MODE", "none"),
@@ -525,6 +571,10 @@ export const config: Config = {
interface: getEnvWithDefault("HTTP_AGENT_INTERFACE", undefined),
proxyUrl: getEnvWithDefault("HTTP_AGENT_PROXY_URL", undefined),
},
loginImageUrl: getEnvWithDefault("LOGIN_IMAGE_URL", ""),
enableInfoPageLogin: getEnvWithDefault("ENABLE_INFO_PAGE_LOGIN", true),
serviceInfoAuthMode: getEnvWithDefault("SERVICE_INFO_AUTH_MODE", "token") as Config["serviceInfoAuthMode"],
serviceInfoPassword: getEnvWithDefault("SERVICE_INFO_PASSWORD", undefined),
} as const;
function generateSigningKey() {
@@ -541,6 +591,8 @@ function generateSigningKey() {
config.anthropicKey,
config.googleAIKey,
config.mistralAIKey,
config.deepseekKey,
config.xaiKey,
config.awsCredentials,
config.gcpCredentials,
config.azureCredentials,
@@ -644,6 +696,12 @@ export async function assertConfigIsValid() {
);
}
if (config.gatekeeperStore === "sqlite" && !config.sqliteUserStorePath) {
throw new Error(
"SQLite user store requires `SQLITE_USER_STORE_PATH` to be set."
);
}
if (Object.values(config.httpAgent || {}).filter(Boolean).length === 0) {
delete config.httpAgent;
} else if (config.httpAgent) {
@@ -654,6 +712,25 @@ export async function assertConfigIsValid() {
}
}
if (config.enableInfoPageLogin) {
if (!["token", "password"].includes(config.serviceInfoAuthMode)) {
throw new Error(
`Invalid SERVICE_INFO_AUTH_MODE: ${config.serviceInfoAuthMode}. Must be 'token' or 'password'.`
);
}
if (config.serviceInfoAuthMode === "password" && !config.serviceInfoPassword) {
throw new Error(
"SERVICE_INFO_AUTH_MODE is 'password' but SERVICE_INFO_PASSWORD is not set."
);
}
// If service info login is token-based, gatekeeper must be 'user_token' mode for getUser() to be effective.
if (config.serviceInfoAuthMode === "token" && config.gatekeeper !== "user_token") {
throw new Error(
"SERVICE_INFO_AUTH_MODE is 'token' for info page login, but GATEKEEPER is not 'user_token'. User token authentication will not work."
);
}
}
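// Illustrative env settings that satisfy the checks above (values are
// examples, not from the diff): token mode requires the user-token gatekeeper,
// while password mode requires a password to be set.
//   GATEKEEPER=user_token  SERVICE_INFO_AUTH_MODE=token
//   -- or --
//   SERVICE_INFO_AUTH_MODE=password  SERVICE_INFO_PASSWORD=<some secret>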
// Ensure forks which add new secret-like config keys don't unwittingly expose
// them to users.
for (const key of getKeys(config)) {
@@ -689,13 +766,17 @@ export const OMITTED_KEYS = [
"openaiKey",
"anthropicKey",
"googleAIKey",
"deepseekKey",
"xaiKey",
"cohereKey",
"qwenKey",
"moonshotKey",
"mistralAIKey",
"awsCredentials",
"gcpCredentials",
"azureCredentials",
"proxyKey",
"adminKey",
"serviceInfoPassword",
"rejectPhrases",
"rejectMessage",
"showTokenCosts",
@@ -704,6 +785,7 @@ export const OMITTED_KEYS = [
"firebaseKey",
"firebaseRtdbUrl",
"sqliteDataPath",
"sqliteUserStorePath",
"eventLogging",
"eventLoggingTrim",
"gatekeeperStore",
@@ -722,6 +804,9 @@ export const OMITTED_KEYS = [
"adminWhitelist",
"ipBlacklist",
"powTokenPurgeHours",
"loginImageUrl",
"enableInfoPageLogin",
"serviceInfoPassword",
] satisfies (keyof Config)[];
type OmitKeys = (typeof OMITTED_KEYS)[number];
@@ -784,6 +869,7 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
"AWS_CREDENTIALS",
"GCP_CREDENTIALS",
"AZURE_CREDENTIALS",
"QWEN_KEY",
].includes(String(env))
) {
return value as unknown as T;
@@ -810,6 +896,6 @@ function parseCsv(val: string): string[] {
function getDefaultModelFamilies(): ModelFamily[] {
return MODEL_FAMILIES.filter(
(f) => !f.includes("dall-e") && !f.includes("o1")
(f) => !f.includes("o1-pro") && !f.includes("o3-pro")
) as ModelFamily[];
}
+239 -127
@@ -1,4 +1,8 @@
/** This whole module kinda sucks */
/* ──────────────────────────────────────────────────────────────
Login-gated info page
drop-in replacement for src/info-page.ts
──────────────────────────────────────────────────────────── */
import fs from "fs";
import express, { Router, Request, Response } from "express";
import showdown from "showdown";
@@ -8,18 +12,49 @@ import { getLastNImages } from "./shared/file-storage/image-history";
import { keyPool } from "./shared/key-management";
import { MODEL_FAMILY_SERVICE, ModelFamily } from "./shared/models";
import { withSession } from "./shared/with-session";
import { checkCsrfToken, injectCsrfToken } from "./shared/inject-csrf";
import { injectCsrfToken, checkCsrfToken } from "./shared/inject-csrf";
import { getUser } from "./shared/users/user-store";
/* ──────────────── TYPES: extend express-session ──────────── */
declare module "express-session" {
interface Session {
infoPageAuthed?: boolean;
}
}
/* ──────────────── misc constants ─────────────────────────── */
const INFO_PAGE_TTL = 2_000; // ms
const LOGIN_ROUTE = "/";
const INFO_PAGE_TTL = 2000;
const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
qwen: "Qwen",
cohere: "Cohere",
deepseek: "Deepseek",
xai: "Grok",
moonshot: "Moonshot",
turbo: "GPT-4o Mini / 3.5 Turbo",
gpt4: "GPT-4",
"gpt4-32k": "GPT-4 32k",
"gpt4-turbo": "GPT-4 Turbo",
gpt4o: "GPT-4o",
gpt41: "GPT-4.1",
"gpt41-mini": "GPT-4.1 Mini",
"gpt41-nano": "GPT-4.1 Nano",
gpt5: "GPT-5",
"gpt5-mini": "GPT-5 Mini",
"gpt5-nano": "GPT-5 Nano",
"gpt5-chat-latest": "GPT-5 Chat Latest",
gpt45: "GPT-4.5",
o1: "OpenAI o1",
"o1-mini": "OpenAI o1 mini",
"o1-pro": "OpenAI o1 pro",
"o3-pro": "OpenAI o3 pro",
"o3-mini": "OpenAI o3 mini",
"o3": "OpenAI o3",
"o4-mini": "OpenAI o4 mini",
"codex-mini": "OpenAI Codex Mini",
"dall-e": "DALL-E",
"gpt-image": "GPT Image",
claude: "Claude (Sonnet)",
"claude-opus": "Claude (Opus)",
"gemini-flash": "Gemini Flash",
@@ -42,19 +77,101 @@ const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
"azure-gpt4-32k": "Azure GPT-4 32k",
"azure-gpt4-turbo": "Azure GPT-4 Turbo",
"azure-gpt4o": "Azure GPT-4o",
"azure-gpt45": "Azure GPT-4.5",
"azure-gpt41": "Azure GPT-4.1",
"azure-gpt41-mini": "Azure GPT-4.1 Mini",
"azure-gpt41-nano": "Azure GPT-4.1 Nano",
"azure-gpt5": "Azure GPT-5",
"azure-gpt5-mini": "Azure GPT-5 Mini",
"azure-gpt5-nano": "Azure GPT-5 Nano",
"azure-gpt5-chat-latest": "Azure GPT-5 Chat Latest",
"azure-o1": "Azure o1",
"azure-o1-mini": "Azure o1 mini",
"azure-o1-pro": "Azure o1 pro",
"azure-o3-pro": "Azure o3 pro",
"azure-o3-mini": "Azure o3 mini",
"azure-o3": "Azure o3",
"azure-o4-mini": "Azure o4 mini",
"azure-codex-mini": "Azure Codex Mini",
"azure-dall-e": "Azure DALL-E",
"azure-gpt-image": "Azure GPT Image",
};
const converter = new showdown.Converter();
/* optional markdown greeting */
const customGreeting = fs.existsSync("greeting.md")
? `<div id="servergreeting">${fs.readFileSync("greeting.md", "utf8")}</div>`
: "";
/* ──────────────── Login page ──────────────────────── */
function renderLoginPage(csrf: string, error?: string) {
const errBlock = error
? `<div class="error-message">${escapeHtml(error)}</div>`
: "";
const pageTitle = getServerTitle();
return `<!DOCTYPE html>
<html>
<head>
<title>${pageTitle} Login</title>
<style>
body{font-family:Arial, sans-serif;display:flex;justify-content:center;
align-items:center;height:100vh;margin:0;padding:20px;background:#f5f5f5;}
.login-container{background:#fff;border-radius:8px;box-shadow:0 4px 8px rgba(0,0,0,.1);
padding:30px;width:100%;max-width:400px;text-align:center;}
.logo-image{max-width:200px;margin-bottom:20px;}
.form-group{margin-bottom:20px;}
input[type=text], input[type=password]{width:100%;padding:10px;border:1px solid #ddd;border-radius:4px;
box-sizing:border-box;font-size:16px;}
button{background:#4caf50;color:#fff;border:none;padding:12px 20px;border-radius:4px;
cursor:pointer;font-size:16px;width:100%;}
button:hover{background:#45a049;}
.error-message{color:#f44336;margin-bottom:15px;}
@media (prefers-color-scheme: dark) {
body { background: #2c2c2c; color: #e0e0e0; }
.login-container { background: #383838; box-shadow: 0 4px 12px rgba(0,0,0,0.4); border: 1px solid #4a4a4a; }
input[type=text], input[type=password] { background: #4a4a4a; color: #e0e0e0; border: 1px solid #5a5a5a; }
input[type=text]::placeholder, input[type=password]::placeholder { color: #999; }
button { background: #007bff; } /* Using a blue for dark mode button */
button:hover { background: #0056b3; }
.error-message { color: #ff8a80; } /* Lighter red for errors in dark mode */
}
</style>
</head>
<body>
<div class="login-container">
${config.loginImageUrl ? `<img src="${config.loginImageUrl}" alt="Logo" class="logo-image">` : ''}
${errBlock}
<form method="POST" action="${LOGIN_ROUTE}">
<div class="form-group">
${config.serviceInfoAuthMode === "password"
? `<input type="password" id="password" name="password" required placeholder="Service Password">`
: `<input type="text" id="token" name="token" required placeholder="Your token">`}
<input type="hidden" name="_csrf" value="${csrf}">
</div>
<button type="submit">Access Dashboard</button>
</form>
</div>
</body>
</html>`;
}
/* ──────────────── login-required middleware ──────────────── */
function requireLogin(
req: Request,
res: Response,
next: express.NextFunction
) {
if (req.session?.infoPageAuthed) return next();
return res.send(renderLoginPage(res.locals.csrfToken));
}
/* ──────────────── INFO PAGE CACHING ──────────────────────── */
let infoPageHtml: string | undefined;
let infoPageLastUpdated = 0;
export const handleInfoPage = (req: Request, res: Response) => {
export function handleInfoPage(req: Request, res: Response) {
if (infoPageLastUpdated + INFO_PAGE_TTL > Date.now()) {
return res.send(infoPageHtml);
}
@@ -69,60 +186,46 @@ export const handleInfoPage = (req: Request, res: Response) => {
infoPageLastUpdated = Date.now();
res.send(infoPageHtml);
};
}
/* ──────────────── RENDER FULL INFO PAGE ──────────────────── */
export function renderPage(info: ServiceInfo) {
const title = getServerTitle();
const headerHtml = buildInfoPageHeader(info);
return `<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="robots" content="noindex" />
<title>${title}</title>
<link rel="stylesheet" href="/res/css/reset.css" media="screen" />
<link rel="stylesheet" href="/res/css/sakura.css" media="screen" />
<link rel="stylesheet" href="/res/css/sakura-dark.css" media="screen and (prefers-color-scheme: dark)" />
<style>
body {
font-family: sans-serif;
padding: 1em;
max-width: 900px;
margin: 0;
}
.self-service-links {
display: flex;
justify-content: center;
margin-bottom: 1em;
padding: 0.5em;
font-size: 0.8em;
}
.self-service-links a {
margin: 0 0.5em;
}
</style>
</head>
<body>
${headerHtml}
<hr />
${getSelfServiceLinks()}
<h2>Service Info</h2>
<pre>${JSON.stringify(info, null, 2)}</pre>
</body>
<head>
<meta charset="utf-8" />
<meta name="robots" content="noindex" />
<title>${title}</title>
<link rel="stylesheet" href="/res/css/reset.css" />
<link rel="stylesheet" href="/res/css/sakura.css" />
<link rel="stylesheet" href="/res/css/sakura-dark.css"
media="screen and (prefers-color-scheme: dark)" />
<style>
body{font-family:sans-serif;padding:1em;max-width:900px;margin:0;}
.self-service-links{display:flex;justify-content:center;margin-bottom:1em;
padding:0.5em;font-size:0.8em;}
.self-service-links a{margin:0 0.5em;}
</style>
</head>
<body>
${headerHtml}
<hr/>
${getSelfServiceLinks()}
<h2>Service Info</h2>
<pre>${JSON.stringify(info, null, 2)}</pre>
</body>
</html>`;
}
/**
* If the server operator provides a `greeting.md` file, it will be included in
* the rendered info page.
**/
/* ──────────────── header & helper functions ──────────────── */
/* (all copied verbatim from original file) */
function buildInfoPageHeader(info: ServiceInfo) {
const title = getServerTitle();
// TODO: use some templating engine instead of this mess
let infoBody = `# ${title}`;
if (config.promptLogging) {
infoBody += `\n## Prompt Logging Enabled
This proxy keeps full logs of all prompts and AI responses. Prompt logs are anonymous and do not contain IP addresses or timestamps.
@@ -141,9 +244,9 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon
for (const modelFamily of config.allowedModelFamilies) {
const service = MODEL_FAMILY_SERVICE[modelFamily];
const hasKeys = keyPool.list().some((k) => {
return k.service === service && k.modelFamilies.includes(modelFamily);
});
const hasKeys = keyPool.list().some(
(k) => k.service === service && k.modelFamilies.includes(modelFamily)
);
const wait = info[modelFamily]?.estimatedQueueTime;
if (hasKeys && wait) {
@@ -154,9 +257,7 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon
}
infoBody += "\n\n" + waits.join(" / ");
infoBody += customGreeting;
infoBody += buildRecentImageSection();
return converter.makeHtml(infoBody);
@@ -164,63 +265,60 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon
function getSelfServiceLinks() {
if (config.gatekeeper !== "user_token") return "";
const links = [["Check your user token", "/user/lookup"]];
if (config.captchaMode !== "none") {
links.unshift(["Request a user token", "/user/captcha"]);
}
return `<div class="self-service-links">${links
.map(([text, link]) => `<a href="${link}">${text}</a>`)
.map(([t, l]) => `<a href="${l}">${t}</a>`)
.join(" | ")}</div>`;
}
function getServerTitle() {
// Use manually set title if available
if (process.env.SERVER_TITLE) {
return process.env.SERVER_TITLE;
}
// Huggingface
if (process.env.SPACE_ID) {
if (process.env.SERVER_TITLE) return process.env.SERVER_TITLE;
if (process.env.SPACE_ID)
return `${process.env.SPACE_AUTHOR_NAME} / ${process.env.SPACE_TITLE}`;
}
// Render
if (process.env.RENDER) {
if (process.env.RENDER)
return `Render / ${process.env.RENDER_SERVICE_NAME}`;
}
return "OAI Reverse Proxy";
return "Tunnel";
}
function buildRecentImageSection() {
const dalleModels: ModelFamily[] = ["azure-dall-e", "dall-e"];
const imageModels: ModelFamily[] = [
"azure-dall-e",
"dall-e",
"gpt-image",
"azure-gpt-image",
];
// Condition 1: Is the feature enabled via config?
// Condition 2: Is at least one relevant image model family allowed in config?
if (
!config.showRecentImages ||
dalleModels.every((f) => !config.allowedModelFamilies.includes(f))
imageModels.every((f) => !config.allowedModelFamilies.includes(f))
) {
return ""; // Exit if feature is disabled or no relevant models are allowed
}
// Condition 3: Are there any actual images to display?
const recentImages = getLastNImages(12).reverse();
if (recentImages.length === 0) {
// If the feature is enabled and models are allowed, but no images exist,
// do not render the section, including its title.
return "";
}
let html = `<h2>Recent DALL-E Generations</h2>`;
const recentImages = getLastNImages(12).reverse();
if (recentImages.length === 0) {
html += `<p>No images yet.</p>`;
return html;
}
html += `<div style="display: flex; flex-wrap: wrap;" id="recent-images">`;
// If all conditions pass (feature enabled, models allowed, images exist), build and return the HTML
let html = `<h2>Recent Image Generations</h2>`;
html += `<div style="display:flex;flex-wrap:wrap;" id="recent-images">`;
for (const { url, prompt } of recentImages) {
const thumbUrl = url.replace(/\.png$/, "_t.jpg");
const escapedPrompt = escapeHtml(prompt);
html += `<div style="margin: 0.5em;" class="recent-image">
<a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}" alt="${escapedPrompt}" style="max-width: 150px; max-height: 150px;" /></a>
</div>`;
html += `<div style="margin:0.5em" class="recent-image">
<a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}"
alt="${escapedPrompt}" style="max-width:150px;max-height:150px;"/></a></div>`;
}
html += `</div>`;
html += `<p style="clear: both; text-align: center;"><a href="/user/image-history">View all recent images</a></p>`;
html += `</div><p style="clear:both;text-align:center;">
<a href="/user/image-history">View all recent images</a></p>`;
return html;
}
@@ -235,57 +333,71 @@ function escapeHtml(unsafe: string) {
.replace(/]/g, "&#93;");
}
function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
try {
const [username, spacename] = spaceId.split("/");
return `https://${username}-${spacename.replace(/_/g, "-")}.hf.space`;
} catch (e) {
const [u, s] = spaceId.split("/");
return `https://${u}-${s.replace(/_/g, "-")}.hf.space`;
} catch {
return "";
}
}
function checkIfUnlocked(
req: Request,
res: Response,
next: express.NextFunction
) {
if (config.serviceInfoPassword?.length && !req.session?.unlocked) {
return res.redirect("/unlock-info");
}
next();
}
/* ──────────────── ROUTER ─────────────────────────────────── */
const infoPageRouter = Router();
if (config.serviceInfoPassword?.length) {
infoPageRouter.use(
express.json({ limit: "1mb" }),
express.urlencoded({ extended: true, limit: "1mb" })
);
infoPageRouter.use(withSession);
infoPageRouter.use(injectCsrfToken, checkCsrfToken);
infoPageRouter.post("/unlock-info", (req, res) => {
if (req.body.password !== config.serviceInfoPassword) {
return res.status(403).send("Incorrect password");
}
req.session!.unlocked = true;
res.redirect("/");
});
infoPageRouter.get("/unlock-info", (_req, res) => {
if (_req.session?.unlocked) return res.redirect("/");
res.send(`
<form method="post" action="/unlock-info">
<h1>Unlock Service Info</h1>
<input type="hidden" name="_csrf" value="${res.locals.csrfToken}" />
<input type="password" name="password" placeholder="Password" />
<button type="submit">Unlock</button>
</form>
`);
});
infoPageRouter.use(checkIfUnlocked);
}
infoPageRouter.get("/", handleInfoPage);
infoPageRouter.get("/status", (req, res) => {
res.json(buildInfo(req.protocol + "://" + req.get("host"), false));
infoPageRouter.use(
express.json({ limit: "1mb" }),
express.urlencoded({ extended: true, limit: "1mb" }),
withSession,
injectCsrfToken,
checkCsrfToken
);
/* login attempt */
infoPageRouter.post(LOGIN_ROUTE, (req, res) => {
if (config.serviceInfoAuthMode === "password") {
const password = (req.body.password || "").trim();
// Simple string comparison; for production, consider a timing-safe comparison library
if (config.serviceInfoPassword && password === config.serviceInfoPassword) {
req.session!.infoPageAuthed = true;
return res.redirect("/");
} else {
return res
.status(401)
.send(renderLoginPage(res.locals.csrfToken, "Invalid password. Please try again."));
}
} else {
// Token-based authentication (using any valid user token)
const token = (req.body.token || "").trim();
const user = getUser(token); // returns undefined if invalid
if (user && !user.disabledAt) {
// Only allow access if user exists AND is not disabled
req.session!.infoPageAuthed = true;
return res.redirect("/");
} else if (user && user.disabledAt) {
// User exists but is disabled
const reason = user.disabledReason || "Your account has been disabled";
return res
.status(401)
.send(renderLoginPage(res.locals.csrfToken, `Access denied: ${reason}`));
} else {
// User doesn't exist
return res
.status(401)
.send(renderLoginPage(res.locals.csrfToken, "Invalid token. Please try again."));
}
}
});
/* GET / either login form or info page */
if (config.enableInfoPageLogin) {
infoPageRouter.get(LOGIN_ROUTE, requireLogin, handleInfoPage);
} else {
infoPageRouter.get(LOGIN_ROUTE, handleInfoPage);
}
/* ─── Removed the public /status route: simply not added ─── */
export { infoPageRouter };
+1 -1
@@ -2,7 +2,7 @@ import { NextFunction, Request, Response } from "express";
export function addV1(req: Request, res: Response, next: NextFunction) {
// Clients don't consistently use the /v1 prefix so we'll add it for them.
if (!req.path.startsWith("/v1/") && !req.path.startsWith("/v1beta/")) {
if (!req.path.startsWith("/v1/") && !req.path.match(/^\/(v1alpha|v1beta)\//)) {
req.url = `/v1${req.url}`;
}
next();
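// Illustrative behaviour (example paths, not from the diff): "/chat/completions"
// is rewritten to "/v1/chat/completions", while paths already under "/v1/",
// "/v1alpha/" or "/v1beta/" now pass through unchanged.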
+144 -60
@@ -9,6 +9,8 @@ import {
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { claudeModels } from "../shared/claude-models";
import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";
let modelsCache: any = null;
let modelsCacheTime = 0;
@@ -18,45 +20,32 @@ const getModelsResponse = () => {
return modelsCache;
}
if (!config.anthropicKey) return { object: "list", data: [] };
if (!config.anthropicKey) return { object: "list", data: [], has_more: false, first_id: null, last_id: null };
const claudeVariants = [
"claude-v1",
"claude-v1-100k",
"claude-instant-v1",
"claude-instant-v1-100k",
"claude-v1.3",
"claude-v1.3-100k",
"claude-v1.2",
"claude-v1.0",
"claude-instant-v1.1",
"claude-instant-v1.1-100k",
"claude-instant-v1.0",
"claude-2",
"claude-2.0",
"claude-2.1",
"claude-3-haiku-20240307",
"claude-3-5-haiku-20241022",
"claude-3-opus-20240229",
"claude-3-opus-latest",
"claude-3-sonnet-20240229",
"claude-3-5-sonnet-20240620",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-latest",
];
const models = claudeVariants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
const date = new Date()
const models = claudeModels.map(model => ({
// Common
id: model.anthropicId,
owned_by: "anthropic",
permission: [],
root: "claude",
parent: null,
}));
// Anthropic
type: "model",
display_name: model.displayName,
created_at: date.toISOString(),
// OpenAI
object: "model",
created: date.getTime(),
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
modelsCache = {
// Common
object: "list",
data: models,
// Anthropic
has_more: false,
first_id: models[0]?.id,
last_id: models[models.length - 1]?.id,
};
modelsCacheTime = date.getTime();
return modelsCache;
};
@@ -182,12 +171,91 @@ function maybeReassignModel(req: Request) {
* If client requests more than 4096 output tokens the request must have a
* particular version header.
* https://docs.anthropic.com/en/release-notes/api#july-15th-2024
*
* Also adds the required beta header for 1-hour cache duration if requested.
* Also validates Claude 4.1 Opus parameters (temperature/top_p).
*/
function setAnthropicBetaHeader(req: Request) {
// Validate Claude 4.1 Opus parameters before processing
validateClaude41OpusParameters(req);
const { max_tokens_to_sample } = req.body;
// Initialize beta headers array
const betaHeaders: string[] = [];
// Add max tokens beta header if needed
if (max_tokens_to_sample > 4096) {
req.headers["anthropic-beta"] = "max-tokens-3-5-sonnet-2024-07-15";
betaHeaders.push("max-tokens-3-5-sonnet-2024-07-15");
}
// Add extended cache TTL beta header if 1h cache is requested
if (req.body.cache_control?.ttl === "1h") {
betaHeaders.push("extended-cache-ttl-2025-04-11");
}
// Set the combined beta headers if any were added
if (betaHeaders.length > 0) {
req.headers["anthropic-beta"] = betaHeaders.join(",");
}
}
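// Illustrative result (hypothetical request): max_tokens_to_sample = 8192 plus
// cache_control.ttl = "1h" yields the combined header
//   anthropic-beta: max-tokens-3-5-sonnet-2024-07-15,extended-cache-ttl-2025-04-11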
/**
* Adds web search tool for Claude-3.5 and Claude-3.7 models when enable_web_search is true
*
* Supports all optional parameters documented in the Claude API:
* - max_uses: Limit the number of searches per request
* - allowed_domains: Only include results from these domains
* - blocked_domains: Never include results from these domains
* - user_location: Localize search results
*/
function addWebSearchTool(req: Request) {
// Check if this is a Claude model that supports web search and if web search is enabled
const isClaude35 = req.body.model?.includes("claude-3-5") || req.body.model?.includes("claude-3.5");
const isClaude37 = req.body.model?.includes("claude-3-7") || req.body.model?.includes("claude-3.7");
const isClaude4 = req.body.model?.includes("claude-sonnet-4") || req.body.model?.includes("claude-opus-4");
const useWebSearch = (isClaude35 || isClaude37 || isClaude4) && Boolean(req.body.enable_web_search);
if (useWebSearch) {
// Create the base web search tool
const webSearchTool: any = {
'type': 'web_search_20250305',
'name': 'web_search',
};
// Add optional parameters if provided by the client
// max_uses: Limit the number of searches per request
if (typeof req.body.web_search_max_uses === 'number') {
webSearchTool.max_uses = req.body.web_search_max_uses;
delete req.body.web_search_max_uses;
}
// allowed_domains: Only include results from these domains
if (Array.isArray(req.body.web_search_allowed_domains)) {
webSearchTool.allowed_domains = req.body.web_search_allowed_domains;
delete req.body.web_search_allowed_domains;
}
// blocked_domains: Never include results from these domains
if (Array.isArray(req.body.web_search_blocked_domains)) {
webSearchTool.blocked_domains = req.body.web_search_blocked_domains;
delete req.body.web_search_blocked_domains;
}
// user_location: Localize search results
if (req.body.web_search_user_location) {
webSearchTool.user_location = req.body.web_search_user_location;
delete req.body.web_search_user_location;
}
// Add the web search tool to the tools array
req.body.tools = [...(req.body.tools || []), webSearchTool];
}
// Delete custom parameters as they're not standard Claude API parameters
delete req.body.enable_web_search;
delete req.body.reasoning_effort;
}
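// Illustrative transformation (example body, not from the diff):
//   { model: "claude-3-7-sonnet-latest", enable_web_search: true, web_search_max_uses: 3 }
// becomes
//   { model: "claude-3-7-sonnet-latest",
//     tools: [{ type: "web_search_20250305", name: "web_search", max_uses: 3 }] }
// with the non-standard enable_web_search / web_search_* keys stripped.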
function selectUpstreamPath(manager: ProxyReqManager) {
@@ -218,44 +286,58 @@ const anthropicProxy = createQueuedProxyMiddleware({
const nativeAnthropicChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "anthropic" },
{ afterTransform: [setAnthropicBetaHeader] }
{ afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);
const nativeTextPreprocessor = createPreprocessorMiddleware({
inApi: "anthropic-text",
outApi: "anthropic-text",
service: "anthropic",
});
const nativeTextPreprocessor = createPreprocessorMiddleware(
{
inApi: "anthropic-text",
outApi: "anthropic-text",
service: "anthropic",
},
{ afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);
const textToChatPreprocessor = createPreprocessorMiddleware({
inApi: "anthropic-text",
outApi: "anthropic-chat",
service: "anthropic",
});
const textToChatPreprocessor = createPreprocessorMiddleware(
{
inApi: "anthropic-text",
outApi: "anthropic-chat",
service: "anthropic",
},
{ afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);
/**
* Routes text completion prompts to anthropic-chat if they need translation
* (claude-3 based models do not support the old text completion endpoint).
*/
const preprocessAnthropicTextRequest: RequestHandler = (req, res, next) => {
if (req.body.model?.startsWith("claude-3")) {
const model = req.body.model;
const isClaude4Model = model?.includes("claude-sonnet-4") || model?.includes("claude-opus-4");
if (model?.startsWith("claude-3") || isClaude4Model) {
textToChatPreprocessor(req, res, next);
} else {
nativeTextPreprocessor(req, res, next);
}
};
const oaiToTextPreprocessor = createPreprocessorMiddleware({
inApi: "openai",
outApi: "anthropic-text",
service: "anthropic",
});
const oaiToTextPreprocessor = createPreprocessorMiddleware(
{
inApi: "openai",
outApi: "anthropic-text",
service: "anthropic",
},
{ afterTransform: [setAnthropicBetaHeader] }
);
const oaiToChatPreprocessor = createPreprocessorMiddleware({
inApi: "openai",
outApi: "anthropic-chat",
service: "anthropic",
});
const oaiToChatPreprocessor = createPreprocessorMiddleware(
{
inApi: "openai",
outApi: "anthropic-chat",
service: "anthropic",
},
{ afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);
/**
* Routes an OpenAI prompt to either the legacy Claude text completion endpoint
@@ -263,7 +345,9 @@ const oaiToChatPreprocessor = createPreprocessorMiddleware({
*/
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
maybeReassignModel(req);
if (req.body.model?.includes("claude-3")) {
const model = req.body.model;
const isClaude4 = model?.includes("claude-sonnet-4") || model?.includes("claude-opus-4");
if (model?.includes("claude-3") || isClaude4) {
oaiToChatPreprocessor(req, res, next);
} else {
oaiToTextPreprocessor(req, res, next);
+93 -9
@@ -12,6 +12,8 @@ import {
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";
const awsBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
@@ -167,6 +169,9 @@ awsClaudeRouter.post(
* strategies are used to try to map a non-AWS model name to AWS model ID.
*/
function maybeReassignModel(req: Request) {
// Validate Claude 4.1 Opus parameters before processing
validateClaude41OpusParameters(req);
const model = req.body.model;
// If it looks like an AWS model, use it as-is
@@ -177,25 +182,72 @@ function maybeReassignModel(req: Request) {
// Anthropic model names can look like:
// - claude-v1
// - claude-2.1
// - claude-3-5-sonnet-20240620
// - claude-3-opus-latest
// - claude-3-5-sonnet-20240620 (old format: number-model)
// - claude-3-opus-latest (old format: number-model)
// - claude-sonnet-4-20250514 (new format: model-number)
// - claude-opus-4-latest (new format: model-number)
// - anthropic.claude-3-sonnet-20240229-v1:0 (AWS format with old naming)
// - anthropic.claude-sonnet-4-20250514-v1:0 (AWS format with new naming)
const pattern =
/^(claude-)?(instant-)?(v)?(\d+)([.-](\d))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(latest|\d*)/i;
/^(?:anthropic\.)?claude-(?:(?:(instant-)?(v)?(\d+)([.-](\d))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(latest|\d*))|(?:(sonnet-|opus-|haiku-)(\d+)([.-](\d))?(-\d+k)?-(latest|\d+)))(?:-v\d+(?::\d+)?)?$/i;
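// Illustrative inputs the pattern accepts (derived from the groups above):
//   "claude-3-5-sonnet-20240620"               old format: major=3, minor=5, sonnet
//   "claude-opus-4-1"                          new format: opus, major=4, rev=1 (promoted to minor below)
//   "anthropic.claude-sonnet-4-20250514-v1:0"  AWS format with prefix and "-v1:0" suffix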
const match = model.match(pattern);
if (!match) {
throw new Error(`Provided model name (${model}) doesn't resemble a Claude model ID.`);
}
const [_, _cl, instant, _v, major, _sep, minor, _ctx, rawName, rev] = match;
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
// Check which format matched (old or new)
// New format: claude-sonnet-4-20250514 or anthropic.claude-sonnet-4-20250514-v1:0
// Old format: claude-3-sonnet-20240229 or anthropic.claude-3-sonnet-20240229-v1:0
const isNewFormat = !!match[9];
let major, minor, name, rev;
if (isNewFormat) {
// New format: claude-sonnet-4-20250514
// match[9] = sonnet-/opus-/haiku-
// match[10] = 4 (major version)
// match[12] = minor version (if any, from [.-](\d) pattern)
// match[14] = revision (latest or date)
const modelType = match[9]?.match(/([a-z]+)/)?.[1] || "";
name = modelType;
major = match[10];
minor = match[12];
rev = match[14];
// Special case: if revision is a single digit and no minor version,
// treat revision as minor version (e.g., claude-opus-4-1 -> version 4.1)
if (!minor && rev && /^\d$/.test(rev)) {
minor = rev;
rev = undefined;
}
// Handle instant case for completeness
const instant = match[1];
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
}
} else {
// Old format: claude-3-sonnet-20240229
// match[1] = instant- (if any)
// match[3] = 3 (major version)
// match[5] = minor version (if any)
// match[7] = -sonnet-/-opus-/-haiku- (if any)
// match[8] = revision (latest or date)
const instant = match[1];
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
}
major = match[3];
minor = match[5];
name = match[7]?.match(/([a-z]+)/)?.[1] || "";
rev = match[8];
}
const ver = minor ? `${major}.${minor}` : major;
const name = rawName?.match(/([a-z]+)/)?.[1] || "";
switch (ver) {
case "1":
@@ -249,6 +301,38 @@ function maybeReassignModel(req: Request) {
// Never announced; no model ID to add here
break;
}
case "3.7":
switch (name) {
case "sonnet":
req.body.model = "anthropic.claude-3-7-sonnet-20250219-v1:0";
return;
}
break;
case "4":
case "4.0":
// Mapping "claude-4-..." variants to their actual AWS Bedrock IDs
// as defined in src/shared/claude-models.ts.
switch (name) {
case "sonnet":
req.body.model = "anthropic.claude-sonnet-4-20250514-v1:0";
return;
case "opus":
req.body.model = "anthropic.claude-opus-4-20250514-v1:0";
return;
// No case for "haiku" here, as "claude-4-haiku" is not defined
// in claude-models.ts. It will fall through and throw an error.
}
break;
case "4.1":
// Mapping "claude-4.1-..." variants to their actual AWS Bedrock IDs
// as defined in src/shared/claude-models.ts.
switch (name) {
case "opus":
req.body.model = "anthropic.claude-opus-4-1-20250805-v1:0";
return;
// No sonnet or haiku variants for 4.1 yet
}
break;
}
throw new Error(`Provided model name (${model}) could not be mapped to a known AWS Claude model ID.`);
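// Illustrative mappings from the cases visible above (inputs are examples):
//   "claude-3-7-sonnet-latest" -> "anthropic.claude-3-7-sonnet-20250219-v1:0"
//   "claude-opus-4-1"          -> "anthropic.claude-opus-4-1-20250805-v1:0"
//   "claude-haiku-4-20250514"  -> throws; no Bedrock ID is defined for a 4.x Haiku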
+47 -26
@@ -6,6 +6,7 @@ import { addV1 } from "./add-v1";
import { awsClaude } from "./aws-claude";
import { awsMistral } from "./aws-mistral";
import { AwsBedrockKey, keyPool } from "../shared/key-management";
import { claudeModels, findByAwsId } from "../shared/claude-models";
const awsRouter = Router();
awsRouter.get(["/:vendor?/v1/models", "/:vendor?/models"], handleModelsRequest);
@@ -29,47 +30,67 @@ function handleModelsRequest(req: Request, res: Response) {
return res.json(modelsCache[vendor]);
}
const availableModelIds = new Set<string>();
const availableAwsModelIds = new Set<string>();
for (const key of keyPool.list()) {
if (key.isDisabled || key.service !== "aws") continue;
(key as AwsBedrockKey).modelIds.forEach((id) => availableModelIds.add(id));
(key as AwsBedrockKey).modelIds.forEach((id) => availableAwsModelIds.add(id));
}
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
const models = [
"anthropic.claude-v2",
"anthropic.claude-v2:1",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-5-haiku-20241022-v1:0",
"anthropic.claude-3-sonnet-20240229-v1:0",
"anthropic.claude-3-5-sonnet-20240620-v1:0",
"anthropic.claude-3-5-sonnet-20241022-v2:0",
"anthropic.claude-3-opus-20240229-v1:0",
"mistral.mistral-7b-instruct-v0:2",
"mistral.mixtral-8x7b-instruct-v0:1",
"mistral.mistral-large-2402-v1:0",
"mistral.mistral-large-2407-v1:0",
"mistral.mistral-small-2402-v1:0",
]
.filter((id) => availableModelIds.has(id))
.map((id) => {
const vendor = id.match(/^(.*)\./)?.[1];
const mistralMappings = new Map([
["mistral.mistral-7b-instruct-v0:2", "Mistral 7B Instruct"],
["mistral.mixtral-8x7b-instruct-v0:1", "Mixtral 8x7B Instruct"],
["mistral.mistral-large-2402-v1:0", "Mistral Large 2402"],
["mistral.mistral-large-2407-v1:0", "Mistral Large 2407"],
["mistral.mistral-small-2402-v1:0", "Mistral Small 2402"],
]);
const date = new Date();
const claudeModelsList = claudeModels
.filter(model => availableAwsModelIds.has(model.awsId))
.map(model => ({
id: model.anthropicId,
owned_by: "anthropic",
type: "model",
display_name: model.displayName,
created_at: date.toISOString(),
object: "model",
created: date.getTime(),
permission: [],
root: "anthropic",
parent: null,
}));
const mistralModelsList = Array.from(mistralMappings.keys())
.filter(id => availableAwsModelIds.has(id))
.map(id => {
return {
id,
owned_by: "mistral",
type: "model",
display_name: mistralMappings.get(id) || id.split('.')[1],
created_at: date.toISOString(),
object: "model",
created: new Date().getTime(),
owned_by: vendor,
created: date.getTime(),
permission: [],
root: vendor,
root: "mistral",
parent: null,
};
});
const allModels = [...claudeModelsList, ...mistralModelsList];
const filteredModels = vendor === "all"
? allModels
: allModels.filter(m => m.root === vendor);
modelsCache[vendor] = {
object: "list",
data: models.filter((m) => vendor === "all" || m.root === vendor),
data: filteredModels,
has_more: false,
first_id: filteredModels[0]?.id,
last_id: filteredModels[filteredModels.length - 1]?.id,
};
modelsCacheTime[vendor] = new Date().getTime();
modelsCacheTime[vendor] = date.getTime();
return res.json(modelsCache[vendor]);
}
+222
@@ -0,0 +1,222 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { CohereKey, keyPool } from "../shared/key-management";
import { isCohereModel, normalizeMessages } from "../shared/api-schemas/cohere";
import { logger } from "../logger";
const log = logger.child({ module: "proxy", service: "cohere" });
let modelsCache: any = null;
let modelsCacheTime = 0;
const cohereResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
res.status(200).json({ ...body, proxy: body.proxy });
};
const getModelsResponse = async () => {
// Return cache if less than 1 minute old
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
try {
// Get a Cohere key directly
const modelToUse = "command"; // Use any Cohere model here - just for key selection
const cohereKey = keyPool.get(modelToUse, "cohere") as CohereKey;
if (!cohereKey || !cohereKey.key) {
log.warn("No valid Cohere key available for model listing");
throw new Error("No valid Cohere API key available");
}
// Fetch models directly from Cohere API
const response = await axios.get("https://api.cohere.com/v1/models", {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${cohereKey.key}`,
"Cohere-Version": "2022-12-06"
},
});
if (!response.data || !response.data.models) {
throw new Error("Unexpected response format from Cohere API");
}
// Extract models and filter by those that support the chat endpoint
const filteredModels = response.data.models
.filter((model: any) => {
return model.endpoints && model.endpoints.includes("chat");
})
.map((model: any) => ({
id: model.name,
name: model.name,
// Adding additional OpenAI-compatible fields
context_window: model.context_window_size || 4096,
max_tokens: model.max_tokens || 4096
}));
log.debug({ modelCount: filteredModels.length, models: filteredModels.map((m: any) => m.id) }, "Filtered models from Cohere API");
// Format response to ensure OpenAI compatibility
const models = {
object: "list",
data: filteredModels.map((model: any) => ({
id: model.id,
object: "model",
created: Math.floor(Date.now() / 1000),
owned_by: "cohere",
permission: [],
root: model.id,
parent: null,
context_length: model.context_window,
})),
};
log.debug({ modelCount: filteredModels.length }, "Retrieved models from Cohere API");
// Cache the response
modelsCache = models;
modelsCacheTime = new Date().getTime();
return models;
} catch (error) {
// Provide detailed logging for better troubleshooting
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error fetching Cohere models"
);
} else {
log.error({ error }, "Unknown error fetching Cohere models");
}
// Return empty list as fallback
return {
object: "list",
data: [],
};
}
};
const handleModelRequest: RequestHandler = async (_req, res) => {
try {
const models = await getModelsResponse();
res.status(200).json(models);
} catch (error) {
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error handling model request"
);
} else {
log.error({ error }, "Unknown error handling model request");
}
res.status(500).json({ error: "Failed to fetch models" });
}
};
// Function to prepare messages for Cohere API
function prepareMessages(req: Request) {
if (req.body.messages && Array.isArray(req.body.messages)) {
req.body.messages = normalizeMessages(req.body.messages);
}
}
// Function to remove parameters not supported by Cohere models
function removeUnsupportedParameters(req: Request) {
const model = req.body.model;
// Remove parameters that Cohere doesn't support
if (req.body.logit_bias !== undefined) {
delete req.body.logit_bias;
}
if (req.body.top_logprobs !== undefined) {
delete req.body.top_logprobs;
}
if (req.body.max_completion_tokens !== undefined) {
delete req.body.max_completion_tokens;
}
// Handle structured output format
if (req.body.response_format && req.body.response_format.schema) {
// Transform to Cohere's format if needed
const jsonSchema = req.body.response_format.schema;
req.body.response_format = {
type: "json_object",
schema: jsonSchema
};
}
// Logging for debugging
if (process.env.NODE_ENV !== 'production') {
log.debug({ body: req.body }, "Request after parameter cleanup");
}
}
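// Illustrative structured-output rewrite (hypothetical schema):
//   { response_format: { schema: { type: "object" } } }
// becomes
//   { response_format: { type: "json_object", schema: { type: "object" } } }
// while logit_bias, top_logprobs and max_completion_tokens are dropped entirely.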
// Set up count token functionality for Cohere models
function countCohereTokens(req: Request) {
const model = req.body.model;
if (isCohereModel(model)) {
// Count tokens using prompt tokens (simplified)
if (req.promptTokens) {
req.log.debug(
{ tokens: req.promptTokens },
"Estimated token count for Cohere prompt"
);
}
}
}
const cohereProxy = createQueuedProxyMiddleware({
mutations: [
addKey,
// Add Cohere-Version header to every request
(manager) => {
manager.setHeader("Cohere-Version", "2022-12-06");
},
finalizeBody
],
target: "https://api.cohere.ai/compatibility",
blockingResponseHandler: cohereResponseHandler,
});
const cohereRouter = Router();
cohereRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "cohere" },
{ afterTransform: [ prepareMessages, removeUnsupportedParameters, countCohereTokens ] }
),
cohereProxy
);
cohereRouter.post(
"/v1/embeddings",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "cohere" },
{ afterTransform: [] }
),
cohereProxy
);
cohereRouter.get("/v1/models", handleModelRequest);
export const cohere = cohereRouter;
+135
@@ -0,0 +1,135 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { DeepseekKey, keyPool } from "../shared/key-management";
let modelsCache: any = null;
let modelsCacheTime = 0;
const deepseekResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const getModelsResponse = async () => {
// Return cache if less than 1 minute old
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
try {
// Get a Deepseek key directly using keyPool.get()
const modelToUse = "deepseek-chat"; // Use any Deepseek model here - just for key selection
const deepseekKey = keyPool.get(modelToUse, "deepseek") as DeepseekKey;
if (!deepseekKey || !deepseekKey.key) {
throw new Error("Failed to get valid Deepseek key");
}
// Fetch models from Deepseek API with authorization
const response = await axios.get("https://api.deepseek.com/models", {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${deepseekKey.key}`
},
});
// If successful, update the cache
if (response.data && response.data.data) {
modelsCache = {
object: "list",
data: response.data.data.map((model: any) => ({
id: model.id,
object: "model",
owned_by: "deepseek",
})),
};
} else {
throw new Error("Unexpected response format from Deepseek API");
}
} catch (error) {
console.error("Error fetching Deepseek models:", error);
throw error; // No fallback - error will be passed to caller
}
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = async (_req, res) => {
try {
const modelsResponse = await getModelsResponse();
res.status(200).json(modelsResponse);
} catch (error) {
console.error("Error in handleModelRequest:", error);
res.status(500).json({ error: "Failed to fetch models" });
}
};
const deepseekProxy = createQueuedProxyMiddleware({
mutations: [addKey, finalizeBody],
target: "https://api.deepseek.com/beta",
blockingResponseHandler: deepseekResponseHandler,
});
const deepseekRouter = Router();
// Combines the trailing run of assistant messages into one and adds the beta
// 'prefix' option; this makes prefills work the same way they do for Claude.
function enablePrefill(req: Request) {
// Set NO_DEEPSEEK_PREFILL to disable this behavior.
if (process.env.NO_DEEPSEEK_PREFILL) return
const msgs = req.body.messages;
if (msgs.at(-1)?.role !== 'assistant') return;
let i = msgs.length - 1;
let content = '';
while (i >= 0 && msgs[i].role === 'assistant') {
// maybe we should also add a newline between messages? no for now.
content = msgs[i--].content + content;
}
msgs.splice(i + 1, msgs.length, { role: 'assistant', content, prefix: true });
}
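// Illustrative effect (example messages): with NO_DEEPSEEK_PREFILL unset,
//   [{ role: "user", content: "Hi" },
//    { role: "assistant", content: "Sure, " },
//    { role: "assistant", content: "here is" }]
// collapses the trailing assistant run into a single prefill message:
//   [{ role: "user", content: "Hi" },
//    { role: "assistant", content: "Sure, here is", prefix: true }]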
function removeReasonerStuff(req: Request) {
if (req.body.model === "deepseek-reasoner") {
// https://api-docs.deepseek.com/guides/reasoning_model
delete req.body.presence_penalty;
delete req.body.frequency_penalty;
delete req.body.temperature;
delete req.body.top_p;
delete req.body.logprobs;
delete req.body.top_logprobs;
}
}
deepseekRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "deepseek" },
{ afterTransform: [ enablePrefill, removeReasonerStuff ] }
),
deepseekProxy
);
deepseekRouter.get("/v1/models", handleModelRequest);
export const deepseek = deepseekRouter;
+6
@@ -25,6 +25,12 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
delete req.headers["x-api-key"];
return token;
}
if (req.headers["x-goog-api-key"]) {
const token = req.headers["x-goog-api-key"]?.toString();
delete req.headers["x-goog-api-key"];
return token;
}
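// Illustrative client usage (hypothetical value): sending
//   x-goog-api-key: <proxy key or user token>
// authenticates against the proxy; the header is consumed here and stripped
// before the request is forwarded upstream.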
if (req.query.key) {
const token = req.query.key?.toString();
+100 -39
@@ -9,6 +9,7 @@ import {
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";
const LATEST_GCP_SONNET_MINOR_VERSION = "20240229";
@@ -26,10 +27,12 @@ const getModelsResponse = () => {
const variants = [
"claude-3-haiku@20240307",
"claude-3-5-haiku@20241022",
"claude-3-sonnet@20240229",
"claude-3-5-sonnet@20240620",
"claude-3-5-sonnet-v2@20241022",
"claude-3-opus@20240229",
"claude-3-7-sonnet@20250219",
"claude-sonnet-4@20250514",
"claude-opus-4@20250514",
"claude-opus-4-1@20250805",
];
const models = variants.map((id) => ({
@@ -128,69 +131,127 @@ gcpRouter.post(
* strategies are used to try to map a non-GCP model name to GCP model ID.
*/
function maybeReassignModel(req: Request) {
// Validate Claude 4.1 Opus parameters before processing
validateClaude41OpusParameters(req);
const model = req.body.model;
const DEFAULT_MODEL = "claude-3-5-sonnet-v2@20241022";
// If it looks like a GCP model, use it as-is
// if (model.includes("anthropic.claude")) {
if (model.startsWith("claude-") && model.includes("@")) {
return;
}
// Anthropic model names can look like:
// - claude-v1
// - claude-2.1
// - claude-3-5-sonnet-20240620-v1:0
const pattern =
/^(claude-)?(instant-)?(v)?(\d+)([.-](\d{1}))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(\d*)/i;
// - claude-3-sonnet
// - claude-3.5-sonnet
// - claude-3-5-haiku
// - claude-3-5-haiku-latest
// - claude-3-5-sonnet-20240620
// - claude-opus-4-1 (new format)
// - claude-4.1-opus (alternative format)
const pattern = /^claude-(?:(\d+)[.-]?(\d)?-(sonnet|opus|haiku)(?:-(latest|\d+))?|(opus|sonnet|haiku)-(\d+)[.-]?(\d)?(?:-(latest|\d+))?)/i;
const match = model.match(pattern);
// If there's no match, fallback to Claude3 Sonnet as it is most likely to be
// available on GCP.
if (!match) {
req.body.model = `claude-3-sonnet@${LATEST_GCP_SONNET_MINOR_VERSION}`;
req.body.model = DEFAULT_MODEL;
return;
}
const [_, _cl, instant, _v, major, _sep, minor, _ctx, name, rev] = match;
// TODO: rework this to function similarly to aws-claude.ts maybeReassignModel
// Handle both formats: claude-3-5-sonnet and claude-opus-4-1
const [_, major1, minor1, flavor1, rev1, flavor2, major2, minor2, rev2] = match;
let major, minor, flavor, rev;
if (major1) {
// Old format: claude-3-5-sonnet
major = major1;
minor = minor1;
flavor = flavor1;
rev = rev1;
} else {
// New format: claude-opus-4-1
major = major2;
minor = minor2;
flavor = flavor2;
rev = rev2;
}
const ver = minor ? `${major}.${minor}` : major;
switch (ver) {
case "3":
case "3.0":
if (name.includes("opus")) {
req.body.model = "claude-3-opus@20240229";
} else if (name.includes("haiku")) {
req.body.model = "claude-3-haiku@20240307";
} else {
req.body.model = "claude-3-sonnet@20240229";
switch (flavor) {
case "haiku":
req.body.model = "claude-3-haiku@20240307";
break;
case "opus":
req.body.model = "claude-3-opus@20240229";
break;
case "sonnet":
req.body.model = "claude-3-sonnet@20240229";
break;
default:
req.body.model = "claude-3-sonnet@20240229";
}
return;
case "3.5":
switch (name) {
case "sonnet":
switch (rev) {
case "20241022":
case "latest":
req.body.model = "claude-3-5-sonnet-v2@20241022";
return;
case "20240620":
req.body.model = "claude-3-5-sonnet@20240620";
return;
}
break;
switch (flavor) {
case "haiku":
req.body.model = "claude-3-5-haiku@20241022";
return;
case "opus":
// Add after model ids are announced late 2024
break;
// no 3.5 opus yet
req.body.model = DEFAULT_MODEL;
return;
case "sonnet":
if (rev === "20240620") {
req.body.model = "claude-3-5-sonnet@20240620";
} else {
// includes -latest, edit if anthropic actually releases 3.5 sonnet v3
req.body.model = DEFAULT_MODEL;
}
return;
default:
req.body.model = DEFAULT_MODEL;
}
}
return;
case "3.7":
switch (flavor) {
case "sonnet":
req.body.model = "claude-3-7-sonnet@20250219";
return;
}
break;
// Fallback to Claude3 Sonnet
req.body.model = `claude-3-sonnet@${LATEST_GCP_SONNET_MINOR_VERSION}`;
return;
case "4":
case "4.0":
switch (flavor) {
case "opus":
req.body.model = "claude-opus-4@20250514";
return;
case "sonnet":
req.body.model = "claude-sonnet-4@20250514";
return;
default:
req.body.model = DEFAULT_MODEL;
}
break;
case "4.1":
switch (flavor) {
case "opus":
req.body.model = "claude-opus-4-1@20250805";
return;
default:
req.body.model = DEFAULT_MODEL;
}
break;
default:
req.body.model = DEFAULT_MODEL;
}
}
export const gcp = gcpRouter;
+161 -41
@@ -1,4 +1,4 @@
import { Request, RequestHandler, Router } from "express";
import { Request, RequestHandler, Router, Response, NextFunction } from "express";
import { v4 } from "uuid";
import { GoogleAIKey, keyPool } from "../shared/key-management";
import { config } from "../config";
@@ -10,10 +10,15 @@ import {
import { ProxyResHandlerWithBody } from "./middleware/response";
import { addGoogleAIKey } from "./middleware/request/mutators/add-google-ai-key";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import axios from "axios";
let modelsCache: any = null;
let modelsCacheTime = 0;
// Cache for native Google AI models
let nativeModelsCache: any = null;
let nativeModelsCacheTime = 0;
// https://ai.google.dev/models/gemini
// TODO: list models https://ai.google.dev/tutorials/rest_quickstart#list_models
@@ -33,11 +38,15 @@ const getModelsResponse = () => {
return modelsCache;
}
// Get all model IDs from keys, excluding any with "bard" in the name
const modelIds = Array.from(
new Set(keys.map((k) => k.modelIds).flat())
).filter((id) => id.startsWith("models/gemini"));
).filter((id) => id.startsWith("models/") && !id.includes("bard"));
// Strip "models/" prefix from IDs before creating model objects
const models = modelIds.map((id) => ({
id,
// Strip "models/" prefix from ID for consistency with request processing
id: id.startsWith("models/") ? id.slice("models/".length) : id,
object: "model",
created: new Date().getTime(),
owned_by: "google",
@@ -52,10 +61,50 @@ const getModelsResponse = () => {
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
// Function to fetch native models from Google AI API
const getNativeModelsResponse = async () => {
// Return cached value if it was refreshed in the last minute
if (new Date().getTime() - nativeModelsCacheTime < 1000 * 60) {
return nativeModelsCache;
}
/*
* The official Google API requires an API key. However, SillyTavern only needs
* a list of model IDs and does not care about any other model metadata. We
* can therefore generate a **synthetic** response from the keys already
* loaded into the proxy (same source we use for the OpenAI-compatible
* endpoint) and completely avoid the outbound request. This removes the
* need for the frontend to supply the proxy password as an API key and
* prevents 4xx/5xx errors when the real Google API is unreachable or the key
* is missing.
*/
const openaiStyle = getModelsResponse();
const models = (openaiStyle.data || []).map((m: any) => ({
// Google AI Studio returns names in the format "models/<id>"
name: `models/${m.id}`,
supportedGenerationMethods: ["generateContent"],
}));
nativeModelsCache = { models };
nativeModelsCacheTime = new Date().getTime();
return nativeModelsCache;
};
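// Illustrative synthetic response (model IDs are examples, not from the diff):
//   { models: [
//       { name: "models/gemini-1.5-pro", supportedGenerationMethods: ["generateContent"] },
//       { name: "models/gemini-2.0-flash", supportedGenerationMethods: ["generateContent"] } ] }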
const handleModelRequest: RequestHandler = (_req: Request, res: any) => {
res.status(200).json(getModelsResponse());
};
// Native Gemini API model list request
const handleNativeModelRequest: RequestHandler = async (_req: Request, res: any) => {
try {
const modelsResponse = await getNativeModelsResponse();
res.status(200).json(modelsResponse);
} catch (error) {
console.error("Error in handleNativeModelRequest:", error);
res.status(500).json({ error: "Failed to fetch models" });
}
};
const googleAIBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
@@ -80,8 +129,30 @@ function transformGoogleAIResponse(
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
const parts = resBody.candidates[0].content?.parts ?? [{ text: "" }];
const content = parts[0].text.replace(/^(.{0,50}?): /, () => "");
// Handle the case where content might have different structures
let content = "";
// Check if the response has the expected structure
if (resBody.candidates && resBody.candidates[0]) {
const candidate = resBody.candidates[0];
// Extract content text with multiple fallbacks
if (candidate.content?.parts && candidate.content.parts[0]?.text) {
// Regular format with parts array containing text
content = candidate.content.parts[0].text;
} else if (candidate.content?.text) {
// Alternate format with direct text property
content = candidate.content.text;
} else if (typeof candidate.content?.parts?.[0] === 'string') {
// Some formats might have string parts
content = candidate.content.parts[0];
}
// Apply cleanup to the content if needed
content = content.replace(/^(.{0,50}?): /, () => "");
}
return {
id: "goo-" + v4(),
object: "chat.completion",
@@ -95,7 +166,7 @@ function transformGoogleAIResponse(
choices: [
{
message: { role: "assistant", content },
finish_reason: resBody.candidates[0].finishReason,
finish_reason: resBody.candidates?.[0]?.finishReason || "STOP",
index: 0,
},
],
@@ -103,7 +174,7 @@ function transformGoogleAIResponse(
}
const googleAIProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
target: ({ signedRequest }: { signedRequest: any }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
const { protocol, hostname} = signedRequest;
return `${protocol}//${hostname}`;
@@ -114,28 +185,16 @@ const googleAIProxy = createQueuedProxyMiddleware({
const googleAIRouter = Router();
googleAIRouter.get("/v1/models", handleModelRequest);
googleAIRouter.get("/:apiVersion(v1alpha|v1beta)/models", handleNativeModelRequest);
// Native Google AI chat completion endpoint
googleAIRouter.post(
"/v1beta/models/:modelId:(generateContent|streamGenerateContent)",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "google-ai", outApi: "google-ai", service: "google-ai" },
{ beforeTransform: [maybeReassignModel], afterTransform: [setStreamFlag] }
),
googleAIProxy
);
// OpenAI-to-Google AI compatibility endpoint.
googleAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "google-ai", service: "google-ai" },
{ afterTransform: [maybeReassignModel] }
),
googleAIProxy
);
/**
* Processes the thinking budget for Gemini 2.5 Flash model.
* Validation has been disabled - budget is passed through without limits.
*/
function processThinkingBudget(req: Request) {
// Validation disabled - budget is passed through without any range limits
// Previously enforced 0-24576 token limit
}
function setStreamFlag(req: Request) {
const isStreaming = req.url.includes("streamGenerateContent");
@@ -149,8 +208,8 @@ function setStreamFlag(req: Request) {
}
/**
* Replaces requests for non-Google AI models with gemini-1.5-pro-latest.
* Also strips models/ from the beginning of the model IDs.
* Strips 'models/' prefix from the beginning of model IDs if present.
* No longer forces redirection to gemini-1.5-pro-latest for non-Gemini models.
**/
function maybeReassignModel(req: Request) {
// Ensure model is on body as a lot of middleware will expect it.
@@ -160,17 +219,78 @@ function maybeReassignModel(req: Request) {
}
req.body.model = model;
const requested = model;
if (requested.startsWith("models/")) {
req.body.model = requested.slice("models/".length);
// Only strip the 'models/' prefix if present
if (model.startsWith("models/")) {
req.body.model = model.slice("models/".length);
req.log.info({ originalModel: model, updatedModel: req.body.model }, "Stripped 'models/' prefix from model ID");
}
if (requested.includes("gemini")) {
return;
}
req.log.info({ requested }, "Reassigning model to gemini-1.5-pro-latest");
req.body.model = "gemini-1.5-pro-latest";
// No longer redirecting non-Gemini models to gemini-1.5-pro-latest
// This allows the original model to be passed through to the API
// If it's an invalid model, the Google AI API will return the appropriate error
}
/**
* Middleware to check for and block requests to experimental models.
* This function is intended to be used as a RequestPreprocessor.
* It throws an error if an experimental model is detected, which should be
* caught by the proxy's onError handler.
*
* Models can be allowed through the ALLOWED_EXP_MODELS environment variable.
*/
function checkAndBlockExperimentalModels(req: Request) { // Changed signature
const modelId = req.body.model as string | undefined;
// Check if the model ID contains "exp" (case-insensitive)
if (modelId && modelId.toLowerCase().includes("exp")) {
// Check if this specific model is in the allowlist
const allowedModels = config.allowedExpModels
?.split(",")
.map(model => model.trim())
.filter(model => model.length > 0) || [];
const isAllowed = allowedModels.some(allowedModel =>
modelId.toLowerCase() === allowedModel.toLowerCase()
);
if (isAllowed) {
req.log.info({ modelId }, "Allowing experimental Google AI model via allowlist.");
return; // Allow the request to proceed
}
req.log.warn({ modelId }, "Blocking request to experimental Google AI model.");
const err: any = new Error("Experimental models are too unstable to be supported in proxy code. Please use preview models instead.");
err.statusCode = 400;
throw err;
}
// If no experimental model, do nothing, allowing request to proceed.
}
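// Example (hypothetical values): with ALLOWED_EXP_MODELS="gemini-exp-1206",
// a request for "gemini-exp-1206" passes the allowlist check above, while
// "gemini-2.0-pro-exp" is rejected with a 400 before reaching upstream.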
// Native Google AI chat completion endpoint
googleAIRouter.post(
"/:apiVersion(v1alpha|v1beta)/models/:modelId:(generateContent|streamGenerateContent)",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "google-ai", outApi: "google-ai", service: "google-ai" },
{
beforeTransform: [maybeReassignModel],
afterTransform: [checkAndBlockExperimentalModels, setStreamFlag, processThinkingBudget]
}
),
googleAIProxy
);
// OpenAI-to-Google AI compatibility endpoint.
googleAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "google-ai", service: "google-ai" },
{
afterTransform: [maybeReassignModel, checkAndBlockExperimentalModels, processThinkingBudget]
}
),
googleAIProxy
);
export const googleAI = googleAIRouter;
@@ -12,11 +12,13 @@ const OPENAI_CHAT_COMPLETION_ENDPOINT = "/v1/chat/completions";
const OPENAI_TEXT_COMPLETION_ENDPOINT = "/v1/completions";
const OPENAI_EMBEDDINGS_ENDPOINT = "/v1/embeddings";
const OPENAI_IMAGE_COMPLETION_ENDPOINT = "/v1/images/generations";
const OPENAI_RESPONSES_ENDPOINT = "/v1/responses";
const ANTHROPIC_COMPLETION_ENDPOINT = "/v1/complete";
const ANTHROPIC_MESSAGES_ENDPOINT = "/v1/messages";
const ANTHROPIC_SONNET_COMPAT_ENDPOINT = "/v1/sonnet";
const ANTHROPIC_OPUS_COMPAT_ENDPOINT = "/v1/opus";
const GOOGLE_AI_ALPHA_COMPLETION_ENDPOINT = "/v1alpha/models";
const GOOGLE_AI_BETA_COMPLETION_ENDPOINT = "/v1beta/models";
export function isTextGenerationRequest(req: Request) {
return (
@@ -24,11 +26,13 @@ export function isTextGenerationRequest(req: Request) {
[
OPENAI_CHAT_COMPLETION_ENDPOINT,
OPENAI_TEXT_COMPLETION_ENDPOINT,
OPENAI_RESPONSES_ENDPOINT,
ANTHROPIC_COMPLETION_ENDPOINT,
ANTHROPIC_MESSAGES_ENDPOINT,
ANTHROPIC_SONNET_COMPAT_ENDPOINT,
ANTHROPIC_OPUS_COMPAT_ENDPOINT,
GOOGLE_AI_ALPHA_COMPLETION_ENDPOINT,
GOOGLE_AI_BETA_COMPLETION_ENDPOINT,
].some((endpoint) => req.path.startsWith(endpoint))
);
}
@@ -234,6 +238,22 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
// - choices[0].message.content
// - choices[0].message with no content if model is invoking a tool
return body.choices?.[0]?.message?.content || "";
case "openai-responses":
// Handle the original Responses API format
if (body.output && Array.isArray(body.output)) {
// Look for a message type in the output array
for (const item of body.output) {
if (item.type === "message" && item.content && Array.isArray(item.content)) {
// Extract text content from each content item
return item.content
.filter((contentItem: any) => contentItem.type === "output_text")
.map((contentItem: any) => contentItem.text)
.join("");
}
}
}
// If we've been transformed to chat completion format already
return body.choices?.[0]?.message?.content || "";
case "mistral-text":
return body.outputs?.[0]?.text || "";
case "openai-text":
@@ -285,6 +305,7 @@ export function getModelFromBody(req: Request, resBody: Record<string, any>) {
switch (format) {
case "openai":
case "openai-text":
case "openai-responses":
return resBody.model;
case "mistral-ai":
case "mistral-text":
@@ -25,6 +25,9 @@ export const addGoogleAIKey: ProxyReqMutator = (manager) => {
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}
const payload = { ...req.body, stream: undefined, model: undefined };
// For OpenAI -> Google conversion we don't actually have the API version
const apiVersion = req.params.apiVersion || "v1beta";
// TODO: this isn't actually signed, so the manager api is a little unclear
// with the ProxyReqManager refactor, it's probably no longer necessary to
// do this because we can modify the path using Manager.setPath.
@@ -32,7 +35,7 @@ export const addGoogleAIKey: ProxyReqMutator = (manager) => {
method: "POST",
protocol: "https:",
hostname: "generativelanguage.googleapis.com",
path: `/${apiVersion}/models/${model}:${
req.isStreaming ? "streamGenerateContent?alt=sse&" : "generateContent?"
}key=${key.key}`,
headers: {
@@ -31,7 +31,9 @@ export const addKey: ProxyReqMutator = (manager) => {
}
if (inboundApi === outboundApi) {
// Pass streaming information for GPT-5 models that require verified keys for streaming
const isStreaming = body.stream === true;
assignedKey = keyPool.get(body.model, service, needsMultimodal, isStreaming);
} else {
switch (outboundApi) {
// If we are translating between API formats we may need to select a model
@@ -49,7 +51,12 @@ export const addKey: ProxyReqMutator = (manager) => {
assignedKey = keyPool.get("gpt-3.5-turbo-instruct", service);
break;
case "openai-image":
assignedKey = keyPool.get("dall-e-3", service);
// Use the actual model from the request body instead of defaulting to dall-e-3
// This ensures that gpt-image-1 requests get keys that are verified for gpt-image-1
assignedKey = keyPool.get(body.model, service);
break;
case "openai-responses":
assignedKey = keyPool.get(body.model, service);
break;
case "openai":
throw new Error(
@@ -88,6 +95,21 @@ export const addKey: ProxyReqMutator = (manager) => {
const azureKey = assignedKey.key;
manager.setHeader("api-key", azureKey);
break;
case "deepseek":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "xai":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "cohere":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "qwen":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "moonshot":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "aws":
case "gcp":
case "google-ai":
@@ -13,6 +13,51 @@ export const finalizeBody: ProxyReqMutator = (manager) => {
if (req.outboundApi === "anthropic-chat") {
delete req.body.prompt;
}
// For OpenAI Responses API, ensure messages is in the correct format
if (req.outboundApi === "openai-responses") {
// Format messages for the Responses API
if (req.body.messages) {
req.log.info("Formatting messages for Responses API in finalizeBody");
// The Responses API expects input to be an array, not an object
req.body.input = req.body.messages;
delete req.body.messages;
} else if (req.body.input && req.body.input.messages) {
req.log.info("Reformatting input.messages for Responses API in finalizeBody");
// If input already exists but contains a messages object, replace input with the messages array
req.body.input = req.body.input.messages;
}
// Final check to ensure max_completion_tokens is converted to max_output_tokens
if (req.body.max_completion_tokens) {
req.log.info("Converting max_completion_tokens to max_output_tokens in finalizeBody");
if (!req.body.max_output_tokens) {
req.body.max_output_tokens = req.body.max_completion_tokens;
}
delete req.body.max_completion_tokens;
}
// Final check to ensure max_tokens is converted to max_output_tokens
if (req.body.max_tokens) {
req.log.info("Converting max_tokens to max_output_tokens in finalizeBody");
if (!req.body.max_output_tokens) {
req.body.max_output_tokens = req.body.max_tokens;
}
delete req.body.max_tokens;
}
// Remove all parameters not supported by Responses API
const unsupportedParams = [
'frequency_penalty',
'presence_penalty',
];
for (const param of unsupportedParams) {
if (req.body[param] !== undefined) {
req.log.info(`Removing unsupported parameter for Responses API: ${param}`);
delete req.body[param];
}
}
}
const serialized =
typeof req.body === "string" ? req.body : JSON.stringify(req.body);
@@ -27,6 +27,14 @@ export const signAwsRequest: ProxyReqMutator = async (manager) => {
const key = keyPool.get(model, "aws") as AwsBedrockKey;
manager.setKey(key);
let system = req.body.system ?? "";
if (Array.isArray(system)) {
system = system
.map((m: { type: string; text: string }) => m.text)
.join("\n");
req.body.system = system;
}
const credential = getCredentialParts(req);
const host = AMZ_HOST.replace("%REGION%", credential.region);
@@ -130,6 +138,9 @@ function getStrictlyValidatedBodyForAws(req: Readonly<Request>): unknown {
temperature: true,
top_k: true,
top_p: true,
tools: true,
tool_choice: true,
thinking: true
})
.strip()
.parse(req.body);
@@ -48,6 +48,9 @@ export const signGcpRequest: ProxyReqMutator = async (manager) => {
top_k: true,
top_p: true,
stream: true,
tools: true,
tool_choice: true,
thinking: true
})
.strip()
.parse(req.body);
@@ -34,4 +34,4 @@ export const applyQuotaLimits: RequestPreprocessor = (req) => {
}
);
}
};
@@ -1,6 +1,6 @@
import { RequestPreprocessor } from "../index";
const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai".split(",");
const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai,vip.jewproxy.tech,jewproxy.tech".split(",");
class ZoomerForbiddenError extends Error {
constructor(message: string) {
@@ -14,7 +14,7 @@ class ZoomerForbiddenError extends Error {
* stop getting emails asking for tech support.
*/
export const blockZoomerOrigins: RequestPreprocessor = (req) => {
const origin = req.headers.origin || req.headers.referer || req.headers.host;
if (origin && DISALLOWED_ORIGIN_SUBSTRINGS.some((s) => origin.includes(s))) {
// Venus-derivatives send a test prompt to check if the proxy is working.
// We don't want to block that just yet.
@@ -1,11 +1,18 @@
import { RequestPreprocessor } from "../index";
import { countTokens } from "../../../../shared/tokenization";
import { assertNever } from "../../../../shared/utils";
import { OpenAIChatMessage } from "../../../../shared/api-schemas";
import { GoogleAIChatMessage } from "../../../../shared/api-schemas/google-ai";
import {
GoogleAIChatMessage,
MistralAIChatMessage,
OpenAIChatMessage,
} from "../../../../shared/api-schemas";
AnthropicChatMessage,
flattenAnthropicMessages,
} from "../../../../shared/api-schemas/anthropic";
import {
MistralAIChatMessage,
ContentItem,
isMistralVisionModel
} from "../../../../shared/api-schemas/mistral-ai";
import { isGrokVisionModel } from "../../../../shared/api-schemas/xai";
/**
* Given a request with an already-transformed body, counts the number of
@@ -22,6 +29,12 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
result = await countTokens({ req, prompt, service });
break;
}
case "openai-responses": {
req.outputTokens = req.body.max_completion_tokens || req.body.max_tokens;
const prompt: OpenAIChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
case "openai-text": {
req.outputTokens = req.body.max_tokens;
const prompt: string = req.body.prompt;
@@ -55,9 +68,47 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
case "mistral-ai":
case "mistral-text": {
req.outputTokens = req.body.max_tokens;
// Handle multimodal content (vision) in Mistral models
const isVisionModel = isMistralVisionModel(req.body.model);
const messages = req.body.messages;
// Check if this is a vision request with images
const hasImageContent = Array.isArray(messages) && messages.some(
(msg: MistralAIChatMessage) => Array.isArray(msg.content) &&
msg.content.some((item: ContentItem) => item.type === "image_url")
);
// For vision content, we add a fixed token count per image
// This is an estimate as the actual token count depends on image size and complexity
const TOKENS_PER_IMAGE = 1200; // Conservative estimate
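// Worked example: a prompt whose messages carry 1 and 2 image_url items
// respectively adds (1 + 2) * 1200 = 3600 tokens to the counted text tokens.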
let imageTokens = 0;
if (hasImageContent && Array.isArray(messages)) {
// Count images in the request
for (const msg of messages) {
if (Array.isArray(msg.content)) {
const imageCount = msg.content.filter(
(item: ContentItem) => item.type === "image_url"
).length;
imageTokens += imageCount * TOKENS_PER_IMAGE;
}
}
req.log.debug(
{ imageCount: imageTokens / TOKENS_PER_IMAGE, tokenEstimate: imageTokens },
"Estimated token count for Mistral vision images"
);
}
const prompt: string | MistralAIChatMessage[] = messages ?? req.body.prompt;
result = await countTokens({ req, prompt, service });
// Add the image tokens to the total count
if (imageTokens > 0) {
result.token_count += imageTokens;
}
break;
}
case "openai-image": {
@@ -65,6 +116,10 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
result = await countTokens({ req, service });
break;
}
// Handle XAI (Grok) vision models
// Since it uses the OpenAI API format, it's caught in the "openai" case,
// but we need to add additional handling for image tokens after that
default:
assertNever(service);
}
@@ -78,14 +78,15 @@ function getPromptFromRequest(req: Request) {
.join("\n\n");
case "anthropic-text":
case "openai-text":
case "openai-responses":
case "openai-image":
case "mistral-text":
return body.prompt;
case "google-ai": {
const b = body as z.infer<typeof GoogleAIV1GenerateContentSchema>;
return [
b.systemInstruction?.parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text),
...b.contents.flatMap((c) => c.parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text)),
].join("\n");
}
default:
@@ -4,7 +4,7 @@ import {
API_REQUEST_TRANSFORMERS,
} from "../../../../shared/api-schemas";
import { BadRequestError } from "../../../../shared/errors";
import { fixMistralPrompt, isMistralVisionModel } from "../../../../shared/api-schemas/mistral-ai";
import {
isImageGenerationRequest,
isTextGenerationRequest,
@@ -30,6 +30,8 @@ export const transformOutboundPayload: RequestPreprocessor = async (req) => {
}
applyMistralPromptFixes(req);
applyGoogleAIKeyTransforms(req);
applyOpenAIResponsesTransform(req);
// Native prompts are those which were already provided by the client in the
// target API format. We don't need to transform them.
@@ -55,6 +57,58 @@ export const transformOutboundPayload: RequestPreprocessor = async (req) => {
);
};
// Handle OpenAI Responses API transformation
function applyOpenAIResponsesTransform(req: Request): void {
if (req.outboundApi === "openai-responses") {
req.log.info("Transforming request to OpenAI Responses API format");
// Store the original body for reference if needed
const originalBody = { ...req.body };
// Map standard OpenAI chat completions format to Responses API format
// The main differences are:
// 1. Endpoint is /v1/responses instead of /v1/chat/completions
// 2. 'messages' field moves to 'input.messages'
// Move messages to input.messages
if (req.body.messages && !req.body.input) {
req.body.input = {
messages: req.body.messages
};
delete req.body.messages;
}
// Keep all the original properties of the request but ensure compatibility
// with Responses API specifics
if (!req.body.previousResponseId && req.body.conversation_id) {
req.body.previousResponseId = req.body.conversation_id;
delete req.body.conversation_id;
}
// Convert max_tokens to max_output_tokens if present and not already set
if (req.body.max_tokens && !req.body.max_output_tokens) {
req.body.max_output_tokens = req.body.max_tokens;
delete req.body.max_tokens;
}
// Set the correct tools format if needed
if (req.body.tools) {
// Tools structure is maintained but might need conversion if non-standard
if (!req.body.tools.some((tool: any) => tool.type === "function" || tool.type === "web_search")) {
req.body.tools = req.body.tools.map((tool: any) => ({
...tool,
type: tool.type || "function"
}));
}
}
req.log.info({
originalModel: originalBody.model,
newFormat: "openai-responses"
}, "Successfully transformed request to Responses API format");
}
}
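// Rough before/after sketch of the two-step transform (values hypothetical):
//   inbound chat body:   { model: "o1-pro", messages: [{ role: "user", content: "hi" }], max_tokens: 256 }
//   after this function: messages moves under input.messages
//   after finalizeBody:  { model: "o1-pro", input: [{ role: "user", content: "hi" }], max_output_tokens: 256 }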
// handles weird cases that don't fit into our abstractions
function applyMistralPromptFixes(req: Request): void {
if (req.inboundApi === "mistral-ai") {
@@ -63,12 +117,66 @@ function applyMistralPromptFixes(req: Request): void {
// mistral prompt and try to fix it if it fails. It will be re-validated
// after this function returns.
const result = API_REQUEST_VALIDATORS["mistral-ai"].parse(req.body);
// Check if this is a vision model request
const isVisionModel = isMistralVisionModel(req.body.model);
// Check if the request contains image content
const hasImageContent = result.messages?.some((msg: {content: string | any[]}) =>
Array.isArray(msg.content) &&
msg.content.some((item: any) => item.type === "image_url")
);
// For vision requests, normalize the image_url format
if (hasImageContent && Array.isArray(result.messages)) {
// Process each message with image content
result.messages.forEach((msg: any) => {
if (Array.isArray(msg.content)) {
// Process each content item
msg.content.forEach((item: any) => {
if (item.type === "image_url") {
// Normalize the image_url field to a string format that Mistral expects
if (typeof item.image_url === "object") {
// If it's an object, extract the URL or base64 data
if (item.image_url.url) {
item.image_url = item.image_url.url;
} else if (item.image_url.data) {
item.image_url = item.image_url.data;
}
req.log.info(
{ model: req.body.model },
"Normalized object-format image_url to string format"
);
}
}
});
}
});
}
// Apply Mistral prompt fixes while preserving multimodal content
req.body.messages = fixMistralPrompt(result.messages);
req.log.info(
{
n: req.body.messages.length,
prev: result.messages.length,
isVisionModel,
hasImageContent
},
"Applied Mistral chat prompt fixes."
);
// If this is a vision model with image content, it MUST use the chat API
// and cannot be converted to text completions
if (hasImageContent) {
req.log.info(
{ model: req.body.model },
"Detected Mistral vision request with image content. Keeping as chat format."
);
return;
}
// If the prompt relies on `prefix: true` for the last message, we need to
// convert it to a text completions request because AWS Mistral support for
// this feature is broken.
@@ -87,3 +195,43 @@ function applyMistralPromptFixes(req: Request): void {
}
}
}
function toCamelCase(str: string): string {
return str.replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
}
function transformKeysToCamelCase(obj: any, hasTransformed = { value: false }): any {
if (Array.isArray(obj)) {
return obj.map(item => transformKeysToCamelCase(item, hasTransformed));
}
if (obj !== null && typeof obj === 'object') {
return Object.fromEntries(
Object.entries(obj).map(([key, value]) => {
const camelKey = toCamelCase(key);
if (camelKey !== key) {
hasTransformed.value = true;
}
return [
camelKey,
transformKeysToCamelCase(value, hasTransformed)
];
})
);
}
return obj;
}
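// Example (hypothetical input):
//   transformKeysToCamelCase({ safety_settings: [...], topP: 0.9 }, flag)
// returns { safetySettings: [...], topP: 0.9 } and sets flag.value = true
// because a key was renamed.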
function applyGoogleAIKeyTransforms(req: Request): void {
// Google (Gemini) API in their infinite wisdom accepts both snake_case and camelCase
// for some params even though in the docs they use snake_case.
// Some frontends (e.g. ST) use snake_case and camelCase so we normalize all keys to camelCase
if (req.outboundApi === "google-ai") {
const hasTransformed = { value: false };
req.body = transformKeysToCamelCase(req.body, hasTransformed);
if (hasTransformed.value) {
req.log.info("Applied Gemini camelCase -> snake_case transform");
}
}
}
@@ -28,6 +28,7 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
switch (req.outboundApi) {
case "openai":
case "openai-text":
case "openai-responses":
proxyMax = OPENAI_MAX_CONTEXT;
break;
case "anthropic-chat":
@@ -58,6 +59,22 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
modelMax = 16384;
} else if (model.match(/^gpt-4o/)) {
modelMax = 128000;
} else if (model.match(/^gpt-4.5/)) {
modelMax = 128000;
} else if (model.match(/^gpt-4\.1(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 1000000;
} else if (model.match(/^gpt-4\.1-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 1000000;
} else if (model.match(/^gpt-4\.1-nano(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 1000000;
} else if (model.match(/^gpt-5(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 400000;
} else if (model.match(/^gpt-5-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 400000;
} else if (model.match(/^gpt-5-nano(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 400000;
} else if (model.match(/^gpt-5-chat-latest$/)) {
modelMax = 400000;
} else if (model.match(/^chatgpt-4o/)) {
modelMax = 128000;
} else if (model.match(/gpt-4-turbo(-\d{4}-\d{2}-\d{2})?$/)) {
@@ -68,9 +85,23 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
modelMax = 131072;
} else if (model.match(/^gpt-4(-\d{4})?-vision(-preview)?$/)) {
modelMax = 131072;
} else if (model.match(/^o3-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o3(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o4-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^codex-mini(-latest|-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000; // 200k context window for codex-mini-latest
} else if (model.match(/^o1(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o1-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 128000;
} else if (model.match(/^o1-pro(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o3-pro(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o1-preview(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 128000;
} else if (model.match(/gpt-3.5-turbo/)) {
modelMax = 16384;
@@ -88,14 +119,38 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
modelMax = 200000;
} else if (model.match(/^claude-3/)) {
modelMax = 200000;
} else if (model.match(/^claude-(?:sonnet|opus)-4/)) {
modelMax = 200000;
} else if (model.match(/^gemini-/)) {
modelMax = 1024000;
} else if (model.match(/^anthropic\.claude-3/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude-(?:sonnet|opus)-4/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude-v2:\d/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude/)) {
modelMax = 100000;
} else if (model.match(/^deepseek/)) {
modelMax = 64000;
} else if (model.match(/^kimi-k2/)) {
// Kimi K2 models have 131k context window
modelMax = 131000;
} else if (model.match(/moonshot/)) {
// Moonshot models typically have 200k context window
modelMax = 200000;
} else if (model.match(/command[\w-]*-03-202[0-9]/)) {
// Cohere's command-a-03 models have 256k context window
modelMax = 256000;
} else if (model.match(/command/) || model.match(/cohere/)) {
// Default for all other Cohere models
modelMax = 128000;
} else if (model.match(/^grok-4/)) {
modelMax = 256000;
} else if (model.match(/^grok/)) {
modelMax = 128000;
} else if (model.match(/^magistral/)) {
modelMax = 40000;
} else if (model.match(/tral/)) {
// catches mistral, mixtral, codestral, mathstral, etc. mistral models have
// no name convention and wildly different context windows so this is a
@@ -136,4 +191,4 @@ function assertRequestHasTokenCounts(
})
.nonstrict()
.parse({ promptTokens: req.promptTokens, outputTokens: req.outputTokens });
}
@@ -3,6 +3,7 @@ import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
import { containsImageContent as containsImageContentOpenAI } from "../../../../shared/api-schemas/openai";
import { containsImageContent as containsImageContentAnthropic } from "../../../../shared/api-schemas/anthropic";
import { containsImageContent as containsImageContentGoogleAI } from "../../../../shared/api-schemas/google-ai";
import { ForbiddenError } from "../../../../shared/errors";
/**
@@ -22,11 +23,16 @@ export const validateVision: RequestPreprocessor = async (req) => {
case "openai":
hasImage = containsImageContentOpenAI(req.body.messages);
break;
case "openai-responses":
hasImage = containsImageContentOpenAI(req.body.messages);
break;
case "anthropic-chat":
hasImage = containsImageContentAnthropic(req.body.messages);
break;
case "anthropic-text":
case "google-ai":
hasImage = containsImageContentGoogleAI(req.body.contents);
break;
case "anthropic-text":
case "mistral-ai":
case "mistral-text":
case "openai-image":
@@ -194,6 +194,21 @@ export function buildSpoofedCompletion({
switch (format) {
case "openai":
case "openai-responses":
return {
id: "error-" + id,
object: "chat.completion",
created: Date.now(),
model,
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
choices: [
{
message: { role: "assistant", content },
finish_reason: title,
index: 0,
},
],
};
case "mistral-ai":
return {
id: "error-" + id,
@@ -283,6 +298,15 @@ export function buildSpoofedSSE({
switch (format) {
case "openai":
case "openai-responses":
event = {
id: "chatcmpl-" + id,
object: "chat.completion.chunk",
created: Date.now(),
model,
choices: [{ delta: { content }, index: 0, finish_reason: title }],
};
break;
case "mistral-ai":
event = {
id: "chatcmpl-" + id,
@@ -4,8 +4,9 @@ import { Request, Response } from "express";
import * as http from "http";
import { config } from "../../../config";
import { HttpError, RetryableError } from "../../../shared/errors";
import { keyPool, GoogleAIKey } from "../../../shared/key-management";
import { logger } from "../../../logger";
import { getOpenAIModelFamily, GoogleAIModelFamily } from "../../../shared/models";
import { countTokens } from "../../../shared/tokenization";
import {
incrementPromptCount,
@@ -246,6 +247,12 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
errorPayload.proxy_note = `The upstream API rejected the request. Check the error message for details.`;
}
break;
case "deepseek":
await handleDeepseekBadRequestError(req, errorPayload);
break;
case "xai":
await handleXaiBadRequestError(req, errorPayload);
break;
case "anthropic":
case "aws":
case "gcp":
@@ -254,13 +261,37 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
case "google-ai":
await handleGoogleAIBadRequestError(req, errorPayload);
break;
case "cohere":
errorPayload.proxy_note = `The upstream Cohere API rejected the request. Check the error message for details.`;
break;
case "qwen":
// No special handling yet
break;
case "moonshot":
errorPayload.proxy_note = `The Moonshot API rejected the request. Check the error message for details.`;
break;
default:
assertNever(service);
}
} else if (statusCode === 401) {
// Universal 401 handling - authentication failed, retry with different key
keyPool.disable(req.key!, "revoked");
await reenqueueRequest(req);
throw new RetryableError(`${service} key authentication failed, retrying with different key.`);
} else if (statusCode === 402) {
// Deepseek specific - insufficient balance
if (service === "deepseek") {
keyPool.disable(req.key!, "quota");
await reenqueueRequest(req);
throw new RetryableError("Deepseek key has insufficient balance, retrying with different key.");
}
} else if (statusCode === 405) {
// Xai specific - insufficient balance
if (service === "xai") {
keyPool.disable(req.key!, "quota");
await reenqueueRequest(req);
throw new RetryableError("XAI key has insufficient balance, retrying with different key.");
}
} else if (statusCode === 403) {
switch (service) {
case "anthropic":
@@ -283,7 +314,8 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
case "UnrecognizedClientException":
// Key is invalid.
keyPool.disable(req.key!, "revoked");
await reenqueueRequest(req);
throw new RetryableError("AWS key is invalid, retrying with different key.");
break;
case "AccessDeniedException":
const isModelAccessError =
@@ -304,8 +336,12 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
case "mistral-ai":
case "gcp":
keyPool.disable(req.key!, "revoked");
await reenqueueRequest(req);
throw new RetryableError("GCP key is invalid, retrying with different key.");
case "moonshot":
keyPool.disable(req.key!, "revoked");
await reenqueueRequest(req);
throw new RetryableError("Moonshot key is invalid, retrying with different key.");
}
} else if (statusCode === 429) {
switch (service) {
@@ -328,8 +364,24 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
case "google-ai":
await handleGoogleAIRateLimitError(req, errorPayload);
break;
case "deepseek":
await handleDeepseekRateLimitError(req, errorPayload);
break;
case "xai":
await handleXaiRateLimitError(req, errorPayload);
break;
case "cohere":
await handleCohereRateLimitError(req, errorPayload);
break;
case "qwen":
// Similar handling to OpenAI for rate limits
await handleOpenAIRateLimitError(req, errorPayload);
break;
case "moonshot":
await handleMoonshotRateLimitError(req, errorPayload);
break;
default:
assertNever(service as never);
}
} else if (statusCode === 404) {
// Most likely model not found
@@ -351,21 +403,27 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
case "aws":
case "gcp":
case "azure":
case "deepseek":
case "xai":
case "cohere":
case "qwen":
errorPayload.proxy_note = `The key assigned to your prompt does not support the requested model.`;
break;
default:
assertNever(service as never);
}
} else if (statusCode === 503) {
switch (service) {
case "aws":
// Re-enqueue on any 503 from AWS Bedrock
req.log.warn(
{ key: req.key?.hash, errorType, errorPayload },
`AWS Bedrock service unavailable (503). Re-enqueueing request.`
);
await reenqueueRequest(req);
throw new RetryableError(
"AWS Bedrock service unavailable (503), re-enqueued request."
);
default:
errorPayload.proxy_note = `Upstream service unavailable. Try again later.`;
break;
@@ -413,27 +471,32 @@ async function handleAnthropicAwsBadRequestError(
// {"type":"error","error":{"type":"invalid_request_error","message":"Your credit balance is too low to access the Claude API. Please go to Plans & Billing to upgrade or purchase credits."}}
const isOverQuota =
error?.message?.match(/usage blocked until/i) ||
error?.message?.match(/credit balance is too low/i) ||
error?.message?.match(/You will regain access on/i) ||
error?.message?.match(/reached your specified API usage limits/i);
if (isOverQuota) {
req.log.warn(
{ key: req.key?.hash, message: error?.message },
"Anthropic key has hit spending limit and will be disabled."
);
keyPool.disable(req.key!, "quota");
await reenqueueRequest(req);
throw new RetryableError("Claude key hit spending limit, retrying with different key.");
}
const isDisabled =
error?.message?.match(/organization has been disabled/i) ||
error?.message?.match(/^operation not allowed/i) ||
error?.message?.match(/credential is only authorized for use with Claude Code/i);
if (isDisabled) {
req.log.warn(
{ key: req.key?.hash, message: error?.message },
"Anthropic/AWS key has been disabled."
);
keyPool.disable(req.key!, "revoked");
await reenqueueRequest(req);
throw new RetryableError("Claude key has been disabled, retrying with different key.");
}
@@ -484,6 +547,106 @@ async function handleGcpRateLimitError(
}
}
async function handleDeepseekRateLimitError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
keyPool.markRateLimited(req.key!);
await reenqueueRequest(req);
throw new RetryableError("Deepseek rate-limited request re-enqueued.");
}
async function handleDeepseekBadRequestError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
// Based on the checker code, a 400 response means the key is valid but there was some other error
errorPayload.proxy_note = `The API rejected the request. Check the error message for details.`;
}
async function handleXaiRateLimitError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
keyPool.markRateLimited(req.key!);
await reenqueueRequest(req);
throw new RetryableError("Xai rate-limited request re-enqueued.");
}
async function handleXaiBadRequestError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
// Based on the checker code, a 400 response means the key is valid but there was some other error
errorPayload.proxy_note = `The API rejected the request. Check the error message for details.`;
}
async function handleCohereRateLimitError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
// Mark the current key as rate limited
keyPool.markRateLimited(req.key!);
// Store the original request attempt count or initialize it
req.retryCount = (req.retryCount || 0) + 1;
// Only retry up to 3 times
if (req.retryCount <= 3) {
try {
// Add a small delay before retrying (1-5 seconds)
const delayMs = 1000 + Math.floor(Math.random() * 4000);
await new Promise(resolve => setTimeout(resolve, delayMs));
// Re-enqueue the request to try with a different key
await reenqueueRequest(req);
req.log.info({ attempt: req.retryCount }, "Cohere rate-limited request re-enqueued");
throw new RetryableError(`Cohere rate-limited request re-enqueued (attempt ${req.retryCount}/3).`);
} catch (error) {
if (error instanceof RetryableError) {
throw error; // Rethrow RetryableError to continue the flow
}
req.log.error({ error }, "Failed to re-enqueue rate-limited Cohere request");
}
}
// If we've already retried 3 times, show the error to the user
errorPayload.proxy_note = "Too many requests to the Cohere API. Please try again later.";
}
async function handleMoonshotRateLimitError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
// Mark the current key as rate limited
keyPool.markRateLimited(req.key!);
// Store the original request attempt count or initialize it
req.retryCount = (req.retryCount || 0) + 1;
// Only retry up to 3 times with different keys
if (req.retryCount <= 3) {
try {
// Add a small delay before retrying (2-6 seconds for Moonshot)
const delayMs = 2000 + Math.floor(Math.random() * 4000);
await new Promise(resolve => setTimeout(resolve, delayMs));
// Re-enqueue the request to try with a different key
await reenqueueRequest(req);
req.log.info({ attempt: req.retryCount }, "Moonshot rate-limited request re-enqueued");
throw new RetryableError(`Moonshot rate-limited request re-enqueued (attempt ${req.retryCount}/3).`);
} catch (error) {
if (error instanceof RetryableError) {
throw error; // Rethrow RetryableError to continue the flow
}
req.log.error({ error }, "Failed to re-enqueue rate-limited Moonshot request");
}
}
// If we've already retried 3 times, show the error to the user
errorPayload.proxy_note = "Too many requests to the Moonshot API. Please try again later.";
}
async function handleOpenAIRateLimitError(
req: Request,
errorPayload: ProxiedErrorPayload
@@ -494,17 +657,20 @@ async function handleOpenAIRateLimitError(
case "invalid_request_error": // this is the billing_hard_limit_reached error seen in some cases
// Billing quota exceeded (key is dead, disable it)
keyPool.disable(req.key!, "quota");
errorPayload.proxy_note = `Assigned key's quota has been exceeded. Please try again.`;
await reenqueueRequest(req);
throw new RetryableError("Google AI key quota exceeded, retrying with different key.");
break;
case "access_terminated":
// Account banned (key is dead, disable it)
keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `Assigned key has been banned by OpenAI for policy violations. Please try again.`;
await reenqueueRequest(req);
throw new RetryableError("Google AI key banned for policy violations, retrying with different key.");
break;
case "billing_not_active":
// Key valid but account billing is delinquent
keyPool.disable(req.key!, "quota");
errorPayload.proxy_note = `Assigned key has been disabled due to delinquent billing. Please try again.`;
await reenqueueRequest(req);
throw new RetryableError("Google AI key billing not active, retrying with different key.");
break;
case "requests":
case "tokens":
@@ -563,7 +729,8 @@ async function handleGoogleAIBadRequestError(
"Google API key appears to be inoperative."
);
keyPool.disable(req.key!, "revoked");
await reenqueueRequest(req);
throw new RetryableError("Google API key inoperative, retrying with different key.");
} else {
req.log.warn(
{ key: req.key?.hash, error: text },
@@ -609,6 +776,7 @@ async function handleGoogleAIRateLimitError(
) {
const status = errorPayload.error?.status;
const text = JSON.stringify(errorPayload.error);
const errorMessage = errorPayload.error?.message?.toLowerCase() || '';
// sometimes they block keys by rate limiting them to 0 requests per minute
// for some indefinite period of time
@@ -617,19 +785,112 @@ async function handleGoogleAIRateLimitError(
/"quota_limit_value":"0"/i,
];
// Quota exhaustion indicators in error messages
const quotaExhaustedMsgs = [
/quota exceeded/i,
/free tier|free_tier/i,
/quota limit/i
];
// If we don't have a key in the request, we can't process rate limits
if (!req.key) {
errorPayload.proxy_note = `Rate limit error but no key was found in the request.`;
return;
}
switch (status) {
case "RESOURCE_EXHAUSTED": {
if (keyDeadMsgs.every((msg) => text.match(msg))) {
// Hard disabled keys - these are completely blocked
if (keyDeadMsgs.some((msg) => msg.test(text))) {
req.log.warn(
{ key: req.key?.hash, error: text },
"Google API key appears to be temporarily inoperative and will be disabled."
{ key: req.key.hash, error: text },
"Google API key appears to be completely disabled and will be removed from rotation."
);
keyPool.disable(req.key!, "revoked");
keyPool.disable(req.key, "revoked");
errorPayload.proxy_note = `Assigned API key cannot be used.`;
return;
}
keyPool.markRateLimited(req.key!);
// Check if this is a quota exhaustion error rather than just a rate limit
const isQuotaExhausted = quotaExhaustedMsgs.some(pattern => pattern.test(text) || pattern.test(errorMessage));
if (isQuotaExhausted && req.body?.model) {
// Get model family for the current request
const modelName = req.body.model;
const isPro = modelName.includes('pro');
const isFlash = modelName.includes('flash');
const isUltra = modelName.includes('ultra');
req.log.warn(
{ key: req.key.hash, model: modelName, error: text },
"Google API key has exhausted its quota for this model family and will be marked as overquota."
);
// Create a filtered list of model families that excludes the over-quota family
let familyToRemove: GoogleAIModelFamily | null = null;
if (isPro) {
familyToRemove = 'gemini-pro';
errorPayload.proxy_note = `Assigned API key has exhausted quota for Gemini Pro models.`;
} else if (isFlash) {
familyToRemove = 'gemini-flash';
errorPayload.proxy_note = `Assigned API key has exhausted quota for Gemini Flash models.`;
} else if (isUltra) {
familyToRemove = 'gemini-ultra';
errorPayload.proxy_note = `Assigned API key has exhausted quota for Gemini Ultra models.`;
} else {
// If model family can't be determined, just mark as rate limited
keyPool.markRateLimited(req.key);
errorPayload.proxy_note = `Assigned API key has exhausted quota but model family couldn't be determined.`;
}
// Update the modelFamilies in the key if we identified a family to remove
if (familyToRemove) {
// Get current model families, filter out the one that's over quota
const updatedFamilies = [...req.key.modelFamilies].filter(f => f !== familyToRemove);
// Cast the key to GoogleAIKey type to access its specific properties
const googleKey = req.key as GoogleAIKey;
// Track which families are over quota for future rechecking
const overQuotaFamilies = googleKey.overQuotaFamilies || [];
if (!overQuotaFamilies.includes(familyToRemove)) {
overQuotaFamilies.push(familyToRemove);
}
// Mark the key as over quota but still usable for other model families
req.log.info(
{ key: req.key.hash, family: familyToRemove },
"Marking Google AI key as over quota for specific model family"
);
// First make a typed update object that includes only the properties we want to update
interface GoogleAIPartialUpdate {
modelFamilies: GoogleAIModelFamily[];
isOverQuota: boolean;
overQuotaFamilies: GoogleAIModelFamily[];
}
// Create a properly typed update
const update: GoogleAIPartialUpdate = {
modelFamilies: updatedFamilies as GoogleAIModelFamily[],
isOverQuota: true,
overQuotaFamilies
};
// Use the standard KeyPool interface
// This gets around the TypeScript issues by letting KeyPool handle routing
const clonedKey = { ...req.key }; // Make a clone since we'll be modifying it
keyPool.update(clonedKey, update as any);
}
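// e.g. a key with modelFamilies ["gemini-pro", "gemini-flash"] that exhausts
// its Pro quota ends up with modelFamilies ["gemini-flash"], isOverQuota =
// true, and overQuotaFamilies ["gemini-pro"], so Flash traffic can still use it.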
// Re-enqueue with a different key
await reenqueueRequest(req);
throw new RetryableError("Quota-exhausted request re-enqueued with a different key.");
}
// Standard rate limiting - just mark as rate limited temporarily
req.log.debug({ key: req.key.hash, error: text }, "Google API request rate limited, will retry.");
keyPool.markRateLimited(req.key);
await reenqueueRequest(req);
throw new RetryableError("Rate-limited request re-enqueued.");
}
@@ -652,10 +913,12 @@ const incrementUsage: ProxyResHandlerWithBody = async (_proxyRes, req) => {
},
`Incrementing usage for model`
);
// Get modelFamily for the key usage log
const modelFamilyForKeyPool = req.modelFamily!; // Should be set by getModelFamilyForRequest earlier
keyPool.incrementUsage(req.key!, modelFamilyForKeyPool, { input: req.promptTokens!, output: req.outputTokens! });
if (req.user) {
incrementPromptCount(req.user.token);
incrementTokenCount(req.user.token, model, req.outboundApi, { input: req.promptTokens!, output: req.outputTokens! });
}
}
};
@@ -681,8 +944,8 @@ const countResponseTokens: ProxyResHandlerWithBody = async (
const service = req.outboundApi;
const completion = getCompletionFromBody(req, body);
const tokens = await countTokens({ req, completion, service });
if (req.service === "openai" || req.service === "azure") {
if (req.service === "openai" || req.service === "azure" || req.service === "deepseek" || req.service === "cohere" || req.service === "qwen") {
// O1 consumes (a significant amount of) invisible tokens for the chain-
// of-thought reasoning. We have no way to count these other than to check
// the response body.
@@ -723,6 +986,8 @@ const omittedHeaders = new Set<string>([
"set-cookie",
"openai-organization",
"x-request-id",
"x-ds-request-id",
"x-ds-trace-id",
"cf-ray",
]);
const copyHttpHeaders: ProxyResHandlerWithBody = async (
@@ -730,6 +995,9 @@ const copyHttpHeaders: ProxyResHandlerWithBody = async (
_req,
res
) => {
// Hack: we don't copy headers since with chunked transfer we've already sent them.
if (_req.isChunkedTransfer) return;
Object.keys(proxyRes.headers).forEach((key) => {
if (omittedHeaders.has(key)) return;
res.setHeader(key, proxyRes.headers[key] as string);
@@ -72,6 +72,8 @@ const getPromptForRequest = (
// format.
switch (req.outboundApi) {
case "openai":
case "openai-responses":
return req.body.messages;
case "mistral-ai":
return req.body.messages;
case "anthropic-chat":
@@ -120,7 +122,7 @@ const flattenMessages = (
if (isGoogleAIChatPrompt(val)) {
return val.contents
.map(({ parts, role }) => {
const text = parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text).join("\n");
return `${role}: ${text}`;
})
.join("\n");
@@ -84,7 +84,8 @@ export class EventAggregator {
getFinalResponse() {
switch (this.responseFormat) {
case "openai":
case "google-ai": // TODO: this is probably wrong now that we support native Google Makersuite prompts
case "openai-responses":
case "google-ai":
return mergeEventsForOpenAIChat(this.events);
case "openai-text":
return mergeEventsForOpenAIText(this.events);
@@ -158,6 +158,8 @@ function getTransformer(
: mistralAIToOpenAI;
case "openai-image":
throw new Error(`SSE transformation not supported for ${responseApi}`);
case "openai-responses":
return passthroughToOpenAI;
default:
assertNever(responseApi);
}
@@ -20,40 +20,66 @@ import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middlewa
// months of releasing them so this list is hard to keep up to date. 2024-07-28
// https://docs.mistral.ai/platform/endpoints
export const KNOWN_MISTRAL_AI_MODELS = [
/* Premier models */
// Mistral Large (top-tier reasoning model)
"mistral-large-latest",
"mistral-large-2411",
"mistral-large-2407",
"mistral-large-2402", // older version
// Pixtral Large (multimodal/vision model)
"pixtral-large-latest",
"pixtral-large-2411",
// Mistral Saba (language-specialized model)
"mistral-saba-latest",
"mistral-saba-2502",
// Codestral (code model)
"codestral-latest",
"codestral-2501",
"codestral-2405",
// Ministral models (edge models)
"ministral-8b-latest",
"ministral-8b-2410",
"ministral-3b-latest",
"ministral-3b-2410",
// Embedding & Moderation
"mistral-embed",
"mistral-embed-2312",
"mistral-moderation-latest",
"mistral-moderation-2411",
/* Free models */
// Mistral Small (with vision in latest version)
"mistral-small-latest",
"mistral-small-2503", // v3.1 with vision
"mistral-small-2402", // older version
"magistral-small-latest",
// Pixtral 12B (vision model)
"pixtral-12b-latest",
"pixtral-12b-2409",
/* Research & Open Models */
// Mistral Nemo
"open-mistral-nemo",
"open-mistral-nemo-2407",
// Earlier Mixtral & Mistral models
"open-mistral-7b",
"open-mixtral-8x7b",
"open-mixtral-8x22b",
"open-codestral-mamba",
"mathstral",
/* Other, too lazy to do it properly now */
"mistral-medium-latest",
"mistral-medium-2312",
"mistral-medium-2505",
"magistral-medium-latest",
"mistral-tiny",
"mistral-tiny-2312",
];
@@ -0,0 +1,219 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { MoonshotKey, keyPool } from "../shared/key-management";
import { isMoonshotModel, isMoonshotVisionModel } from "../shared/api-schemas/moonshot";
import { logger } from "../logger";
const log = logger.child({ module: "proxy", service: "moonshot" });
let modelsCache: any = null;
let modelsCacheTime = 0;
const moonshotResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
res.status(200).json({ ...body, proxy: body.proxy });
};
const getModelsResponse = async () => {
// Return cache if less than 1 minute old
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
try {
const modelToUse = "moonshot-v1-8k";
const moonshotKey = keyPool.get(modelToUse, "moonshot") as MoonshotKey;
if (!moonshotKey || !moonshotKey.key) {
log.warn("No valid Moonshot key available for model listing");
throw new Error("No valid Moonshot API key available");
}
// Fetch models from Moonshot API
const response = await axios.get("https://api.moonshot.cn/v1/models", {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${moonshotKey.key}`
},
});
if (!response.data || !response.data.data) {
throw new Error("Unexpected response format from Moonshot API");
}
// Format response to ensure OpenAI compatibility
const models = {
object: "list",
data: response.data.data.map((model: any) => ({
id: model.id,
object: "model",
created: model.created || Math.floor(Date.now() / 1000),
owned_by: model.owned_by || "moonshot",
permission: model.permission || [],
root: model.root || model.id,
parent: model.parent || null,
})),
};
log.debug({ modelCount: models.data.length }, "Retrieved models from Moonshot API");
// Cache the response
modelsCache = models;
modelsCacheTime = new Date().getTime();
return models;
} catch (error) {
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error fetching Moonshot models"
);
} else {
log.error({ error }, "Unknown error fetching Moonshot models");
}
// Return a default list of known Moonshot models as fallback
return {
object: "list",
data: [
{ id: "moonshot-v1-8k", object: "model", created: 1678888000, owned_by: "moonshot" },
{ id: "moonshot-v1-32k", object: "model", created: 1678888000, owned_by: "moonshot" },
{ id: "moonshot-v1-128k", object: "model", created: 1678888000, owned_by: "moonshot" },
],
};
}
};
const handleModelRequest: RequestHandler = async (_req, res) => {
try {
const models = await getModelsResponse();
res.status(200).json(models);
} catch (error) {
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error handling model request"
);
} else {
log.error({ error }, "Unknown error handling model request");
}
res.status(500).json({ error: "Failed to fetch models" });
}
};
// Function to handle partial mode for Moonshot
function handlePartialMode(req: Request) {
if (!process.env.NO_MOONSHOT_PARTIAL && req.body.messages && Array.isArray(req.body.messages)) {
const msgs = req.body.messages;
if (msgs.at(-1)?.role !== 'assistant') return;
let i = msgs.length - 1;
let content = '';
while (i >= 0 && msgs[i].role === 'assistant') {
// Consolidate consecutive assistant messages
content = msgs[i--].content + content;
}
// Replace consecutive assistant messages with single message with partial: true
msgs.splice(i + 1, msgs.length, { role: 'assistant', content, partial: true });
log.debug("Consolidated assistant messages and enabled partial mode for Moonshot request");
}
}
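// Example (hypothetical messages): a trailing run of assistant turns
//   [{ role: "user", ... }, { role: "assistant", content: "Once" }, { role: "assistant", content: " upon" }]
// collapses to
//   [{ role: "user", ... }, { role: "assistant", content: "Once upon", partial: true }]
// so Moonshot continues the draft instead of starting a fresh reply.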
// Function to handle vision model content transformation
function handleVisionContent(req: Request) {
const model = req.body.model;
if (isMoonshotVisionModel(model) && req.body.messages) {
// Currently a pass-through: both string and structured content are
// returned unchanged. Kept as a hook for future vision normalization.
req.body.messages = req.body.messages.map((msg: any) => msg);
}
}
// Function to count tokens for Moonshot models
function countMoonshotTokens(req: Request) {
const model = req.body.model;
if (isMoonshotModel(model)) {
if (req.promptTokens) {
log.debug(
{ tokens: req.promptTokens, model },
"Estimated token count for Moonshot prompt"
);
}
}
}
// Handle rate limit errors for Moonshot
async function handleMoonshotRateLimitError(req: Request, error: any) {
if (error.response?.status === 429) {
log.warn({ model: req.body.model }, "Moonshot rate limit hit, rotating key");
const currentKey = req.key as MoonshotKey;
keyPool.markRateLimited(currentKey);
// Try to get a new key
const newKey = keyPool.get(req.body.model, "moonshot") as MoonshotKey;
if (newKey.hash !== currentKey.hash) {
req.key = newKey;
return true; // Retry with new key
}
}
return false;
}
const moonshotProxy = createQueuedProxyMiddleware({
mutations: [
addKey,
finalizeBody
],
target: "https://api.moonshot.cn",
blockingResponseHandler: moonshotResponseHandler,
});
const moonshotRouter = Router();
// Chat completions endpoint
moonshotRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "moonshot" },
{ afterTransform: [ handlePartialMode, handleVisionContent, countMoonshotTokens ] }
),
moonshotProxy
);
// Embeddings endpoint
moonshotRouter.post(
"/v1/embeddings",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "moonshot" },
{ afterTransform: [ countMoonshotTokens ] }
),
moonshotProxy
);
// Models endpoint
moonshotRouter.get("/v1/models", handleModelRequest);
export const moonshot = moonshotRouter;
@@ -11,7 +11,7 @@ import { ProxyResHandlerWithBody } from "./middleware/response";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
const KNOWN_MODELS = ["dall-e-2", "dall-e-3"];
const KNOWN_MODELS = ["dall-e-2", "dall-e-3", "gpt-image-1"];
let modelListCache: any = null;
let modelListValid = 0;
@@ -58,27 +58,46 @@ function transformResponseForChat(
req: Request
): Record<string, any> {
const prompt = imageBody.data[0].revised_prompt ?? req.body.prompt;
const isGptImage = req.body.model?.includes("gpt-image") || false;
const content = imageBody.data
.map((item) => {
const { url, b64_json } = item;
// The gpt-image-1 model always returns b64_json
// Format will depend on output_format parameter (defaults to png)
// For simplicity, we'll assume png if not specified
const format = req.body.output_format || "png";
if (b64_json) {
return `![${prompt}](data:image/${format};base64,${b64_json})`;
} else {
return `![${prompt}](${url})`;
}
})
.join("\n\n");
// Prepare the usage information - gpt-image-1 includes detailed token usage
let usage = {
prompt_tokens: 0,
completion_tokens: req.outputTokens,
total_tokens: req.outputTokens,
};
// If this is a gpt-image-1 response, it includes detailed usage info
if (imageBody.usage) {
usage = {
prompt_tokens: imageBody.usage.input_tokens || 0,
completion_tokens: imageBody.usage.output_tokens || 0,
total_tokens: imageBody.usage.total_tokens || 0,
};
}
return {
id: "dalle-" + req.id,
id: req.body.model?.includes("gpt-image") ? "gptimage-" + req.id : "dalle-" + req.id,
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage,
choices: [
{
message: { role: "assistant", content },
@@ -89,6 +108,69 @@ function transformResponseForChat(
};
}
// Filter parameters based on the model being used to avoid sending unsupported parameters
function filterModelParameters(manager: ProxyReqManager) {
const req = manager.request;
const originalBody = req.body;
const modelName = originalBody?.model || "";
// Skip if no body or it's not an object
if (!originalBody || typeof originalBody !== 'object') return;
// Create a deep copy of the body to filter
const filteredBody = { ...originalBody };
// Define allowed parameters for each model
if (modelName.includes('dall-e-2')) {
// DALL-E 2 parameters
const allowedParams = [
'model', 'prompt', 'n', 'size', 'response_format', 'user'
];
// Remove any parameter not in the allowed list
Object.keys(filteredBody).forEach(key => {
if (!allowedParams.includes(key)) {
delete filteredBody[key];
}
});
req.log.info({ model: 'dall-e-2', params: Object.keys(filteredBody) }, "Filtered parameters for DALL-E 2");
} else if (modelName.includes('dall-e-3')) {
// DALL-E 3 parameters
const allowedParams = [
'model', 'prompt', 'n', 'quality', 'size', 'style', 'response_format', 'user'
];
// Remove any parameter not in the allowed list
Object.keys(filteredBody).forEach(key => {
if (!allowedParams.includes(key)) {
delete filteredBody[key];
}
});
req.log.info({ model: 'dall-e-3', params: Object.keys(filteredBody) }, "Filtered parameters for DALL-E 3");
} else if (modelName.includes('gpt-image')) {
// Define allowed parameters for gpt-image-1
const allowedParams = [
'model', 'prompt', 'background', 'moderation', 'n', 'output_compression',
'output_format', 'quality', 'size', 'user', 'image', 'mask'
];
// Remove any parameter not in the allowed list, especially 'style' which is only for DALL-E 3
Object.keys(filteredBody).forEach(key => {
if (!allowedParams.includes(key)) {
req.log.info({ model: 'gpt-image-1', removedParam: key }, "Removing unsupported parameter for GPT Image");
delete filteredBody[key];
}
});
req.log.info({ model: 'gpt-image-1', params: Object.keys(filteredBody) }, "Filtered parameters for GPT Image");
}
// Use the proper method to update the body
manager.setBody(filteredBody);
}
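// Illustrative sketch: the allow-list behavior above, reduced to a standalone
// helper. The request body below is hypothetical; 'style' appears only in the
// DALL-E 3 list, so it is dropped for gpt-image-1.
function allowOnly(body: Record<string, any>, allowed: string[]) {
const out = { ...body };
Object.keys(out).forEach((key) => {
if (!allowed.includes(key)) delete out[key];
});
return out;
}
// allowOnly({ model: "gpt-image-1", prompt: "a cat", style: "vivid" },
//   ["model", "prompt", "size"]) => { model: "gpt-image-1", prompt: "a cat" }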
function replacePath(manager: ProxyReqManager) {
const req = manager.request;
const pathname = req.url.split("?")[0];
@@ -100,7 +182,7 @@ function replacePath(manager: ProxyReqManager) {
const openaiImagesProxy = createQueuedProxyMiddleware({
target: "https://api.openai.com",
mutations: [replacePath, filterModelParameters, addKey, finalizeBody],
blockingResponseHandler: openaiImagesResponseHandler,
});
@@ -116,6 +198,17 @@ openaiImagesRouter.post(
}),
openaiImagesProxy
);
// Add support for the /v1/images/edits endpoint (used by gpt-image-1 for image editing)
openaiImagesRouter.post(
"/v1/images/edits",
ipLimiter,
createPreprocessorMiddleware({
inApi: "openai-image",
outApi: "openai-image",
service: "openai",
}),
openaiImagesProxy
);
openaiImagesRouter.post(
"/v1/chat/completions",
ipLimiter,
+299 -5
@@ -1,5 +1,6 @@
import { Request, RequestHandler, Router } from "express";
import { config } from "../config";
import { BadRequestError } from "../shared/errors";
import { AzureOpenAIKey, keyPool, OpenAIKey } from "../shared/key-management";
import { getOpenAIModelFamily } from "../shared/models";
import { ipLimiter } from "./rate-limit";
@@ -38,7 +39,7 @@ export function generateModelList(service: "openai" | "azure") {
.flatMap((k) => k.modelIds)
.filter((id) => {
const allowed = modelFamilies.has(getOpenAIModelFamily(id));
const known = ["gpt", "o1", "dall-e", "chatgpt", "text-embedding"].some(
const known = ["gpt", "o", "dall-e", "chatgpt", "text-embedding", "codex"].some(
(prefix) => id.startsWith(prefix)
);
const isFinetune = id.includes("ft");
@@ -109,10 +110,21 @@ const openaiResponseHandler: ProxyResHandlerWithBody = async (
throw new Error("Expected body to be an object");
}
const interval = (req as any)._keepAliveInterval;
if (interval) {
clearInterval(interval);
res.write(JSON.stringify(body));
res.end();
return;
}
let newBody = body;
if (req.outboundApi === "openai-text" && req.inboundApi === "openai") {
req.log.info("Transforming Turbo-Instruct response to Chat format");
newBody = transformTurboInstructResponse(body);
} else if (req.outboundApi === "openai-responses" && req.inboundApi === "openai") {
req.log.info("Transforming Responses API response to Chat format");
newBody = transformResponsesApiResponse(body);
}
res.status(200).json({ ...newBody, proxy: body.proxy });
@@ -135,6 +147,135 @@ function transformTurboInstructResponse(
return transformed;
}
function transformResponsesApiResponse(
responsesBody: Record<string, any>
): Record<string, any> {
// If the response is already in chat completion format, return it as is
if (responsesBody.choices && responsesBody.choices[0]?.message) {
return responsesBody;
}
// Create a compatible format for clients expecting chat completions format
const transformed: Record<string, any> = {
id: responsesBody.id || `chatcmpl-${Date.now()}`,
object: "chat.completion",
created: responsesBody.created_at || Math.floor(Date.now() / 1000),
model: responsesBody.model || "o1-pro",
choices: [],
usage: responsesBody.usage || {
prompt_tokens: 0,
completion_tokens: 0,
total_tokens: 0
}
};
// Extract content from the Responses API format - multiple possible structures
// Structure 1: output array with message objects
if (responsesBody.output && Array.isArray(responsesBody.output)) {
// Look for a message type in the output array
let messageOutput = null;
for (const output of responsesBody.output) {
if (output.type === "message") {
messageOutput = output;
break;
}
}
if (messageOutput) {
if (messageOutput.content && Array.isArray(messageOutput.content) && messageOutput.content.length > 0) {
// Handle text content
let content = "";
const toolCalls: any[] = [];
for (const contentItem of messageOutput.content) {
if (contentItem.type === "output_text") {
content += contentItem.text;
} else if (contentItem.type === "tool_calls" && Array.isArray(contentItem.tool_calls)) {
toolCalls.push(...contentItem.tool_calls);
}
}
const message: Record<string, any> = {
role: messageOutput.role || "assistant",
content: content
};
if (toolCalls.length > 0) {
message.tool_calls = toolCalls;
}
transformed.choices.push({
index: 0,
message,
finish_reason: "stop"
});
} else if (typeof messageOutput.content === 'string') {
// Simple string content
transformed.choices.push({
index: 0,
message: {
role: messageOutput.role || "assistant",
content: messageOutput.content
},
finish_reason: "stop"
});
}
}
}
// Structure 2: response object with content
else if (responsesBody.response && responsesBody.response.content) {
transformed.choices.push({
index: 0,
message: {
role: "assistant",
content: typeof responsesBody.response.content === 'string'
? responsesBody.response.content
: JSON.stringify(responsesBody.response.content)
},
finish_reason: responsesBody.response.finish_reason || "stop"
});
}
// Structure 3: look for 'content' field directly
else if (responsesBody.content) {
transformed.choices.push({
index: 0,
message: {
role: "assistant",
content: typeof responsesBody.content === 'string'
? responsesBody.content
: JSON.stringify(responsesBody.content)
},
finish_reason: "stop"
});
}
// If we couldn't extract content, create a basic response
if (transformed.choices.length === 0) {
transformed.choices.push({
index: 0,
message: {
role: "assistant",
content: ""
},
finish_reason: "stop"
});
}
// Copy usage information if available
if (responsesBody.usage) {
transformed.usage = {
prompt_tokens: responsesBody.usage.input_tokens || 0,
completion_tokens: responsesBody.usage.output_tokens || 0,
total_tokens: responsesBody.usage.total_tokens || 0
};
}
return transformed;
}
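// Illustrative sketch: a minimal Responses API body using the same field names
// the transformer above reads (values are hypothetical), and roughly what it
// yields in chat-completion form.
const sampleResponsesBody = {
id: "resp_123",
model: "o1-pro",
output: [
{ type: "message", role: "assistant", content: [{ type: "output_text", text: "Hello!" }] },
],
usage: { input_tokens: 10, output_tokens: 3, total_tokens: 13 },
};
// transformResponsesApiResponse(sampleResponsesBody) =>
// { id: "resp_123", object: "chat.completion", model: "o1-pro", created: <now>,
//   choices: [{ index: 0, message: { role: "assistant", content: "Hello!" }, finish_reason: "stop" }],
//   usage: { prompt_tokens: 10, completion_tokens: 3, total_tokens: 13 } }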
const openaiProxy = createQueuedProxyMiddleware({
mutations: [addKey, finalizeBody],
target: "https://api.openai.com",
@@ -146,6 +287,13 @@ const openaiEmbeddingsProxy = createQueuedProxyMiddleware({
target: "https://api.openai.com",
});
// New proxy middleware for the Responses API
const openaiResponsesProxy = createQueuedProxyMiddleware({
mutations: [addKey, finalizeBody],
target: "https://api.openai.com",
blockingResponseHandler: openaiResponseHandler,
});
const openaiRouter = Router();
openaiRouter.get("/v1/models", handleModelRequest);
// Native text completion endpoint, only for turbo-instruct.
@@ -172,16 +320,120 @@ openaiRouter.post(
),
openaiProxy
);
const setupChunkedTransfer: RequestHandler = (req, res, next) => {
// Check if user is trying to use streaming with codex-mini models
if (req.body.model?.startsWith("codex-mini") && req.body.stream === true) {
return res.status(400).json({
error: {
message: "The codex-mini models do not support streaming. Please set 'stream: false' in your request.",
type: "invalid_request_error",
param: "stream",
code: "streaming_not_supported"
}
});
}
// Only o1 lacks streaming support; hold the connection open with chunked
// keep-alives so its long responses survive Cloudflare's timeout.
if (req.body.model === "o1" || req.body.model === "o1-2024-12-17") {
req.log.info("Setting chunked transfer for o1 to prevent Cloudflare timeouts");
req.isChunkedTransfer = true;
res.writeHead(200, {
'Content-Type': 'application/json',
'Transfer-Encoding': 'chunked'
});
// Large padding writes are required; otherwise Cloudflare buffers the chunks
// instead of forwarding them, and a >100s response gets terminated anyway.
const keepAlive = setInterval(() => {
res.write(' '.repeat(4096));
}, 48_000);
(req as any)._keepAliveInterval = keepAlive;
}
next();
};
// Functions to handle model-specific API routing
function shouldUseResponsesApi(model: string): boolean {
return model === "o1-pro" || model.startsWith("o1-pro-") ||
model === "o3-pro" || model.startsWith("o3-pro-") ||
model === "codex-mini-latest" || model.startsWith("codex-mini-");
}
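// Illustrative sketch of the routing decision:
// shouldUseResponsesApi("o1-pro")            => true
// shouldUseResponsesApi("o3-pro-2025-06-10") => true
// shouldUseResponsesApi("codex-mini-latest") => true
// shouldUseResponsesApi("o1")                => false (stays on chat completions)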
// Preprocessor to redirect requests to the responses API
const routeToResponsesApi: RequestPreprocessor = (req) => {
if (shouldUseResponsesApi(req.body.model)) {
req.log.info(`Routing ${req.body.model} to OpenAI Responses API`);
req.url = "/v1/responses";
req.outboundApi = "openai-responses";
}
};
// General chat completion endpoint. Turbo-instruct is not supported here.
openaiRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "openai" },
{
afterTransform: [
fixupMaxTokens,
filterGPT5UnsupportedParams,
routeToResponsesApi
]
}
),
setupChunkedTransfer,
(req, _res, next) => {
// Route to the responses endpoint if needed
if (req.outboundApi === "openai-responses") {
// Ensure messages is moved to input properly
req.log.info("Final check for Responses API format in chat completions");
if (req.body.messages) {
req.log.info("Moving 'messages' to 'input' for Responses API");
req.body.input = req.body.messages;
delete req.body.messages;
} else if (req.body.input && req.body.input.messages) {
req.log.info("Reformatting input.messages for Responses API");
req.body.input = req.body.input.messages;
}
return openaiResponsesProxy(req, _res, next);
}
next();
},
openaiProxy
);
// New endpoint for OpenAI Responses API
openaiRouter.post(
"/v1/responses",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai-responses", service: "openai" },
{ afterTransform: [fixupMaxTokens, filterGPT5UnsupportedParams] }
),
// Add final check to ensure the body is in the correct format for Responses API
(req, _res, next) => {
req.log.info("Final check for Responses API format");
// Ensure messages is properly formatted for input
if (req.body.messages) {
req.log.info("Moving 'messages' to 'input' for Responses API");
req.body.input = req.body.messages;
delete req.body.messages;
} else if (req.body.input && req.body.input.messages) {
req.log.info("Reformatting input.messages for Responses API");
req.body.input = req.body.input.messages;
}
next();
},
openaiResponsesProxy
);
// Embeddings endpoint.
openaiRouter.post(
"/v1/embeddings",
@@ -195,10 +447,52 @@ function forceModel(model: string): RequestPreprocessor {
}
function fixupMaxTokens(req: Request) {
// For Responses API, use max_output_tokens instead of max_completion_tokens
if (req.outboundApi === "openai-responses") {
if (!req.body.max_output_tokens) {
req.body.max_output_tokens = req.body.max_tokens || req.body.max_completion_tokens;
}
// Remove the other token params to avoid API errors
delete req.body.max_tokens;
delete req.body.max_completion_tokens;
// Remove other parameters not supported by Responses API
const unsupportedParams = ['frequency_penalty', 'presence_penalty'];
for (const param of unsupportedParams) {
if (req.body[param] !== undefined) {
req.log.info(`Removing unsupported parameter for Responses API: ${param}`);
delete req.body[param];
}
}
} else {
// Original behavior for other APIs
if (!req.body.max_completion_tokens) {
req.body.max_completion_tokens = req.body.max_tokens;
}
delete req.body.max_tokens;
}
}
// GPT-5, GPT-5-mini, and GPT-5-nano don't support certain parameters
// Remove them if present to prevent API errors
function filterGPT5UnsupportedParams(req: Request) {
const model = req.body.model;
// Only apply filtering to these specific models (gpt-5-chat-latest supports all params)
const restrictedModels = /^gpt-5(-mini|-nano)?(-\d{4}-\d{2}-\d{2})?$/;
if (!restrictedModels.test(model)) {
return; // Not a restricted model, no filtering needed
}
// Remove unsupported parameters if they exist
const unsupportedParams = ['temperature', 'top_p', 'presence_penalty', 'frequency_penalty'];
for (const param of unsupportedParams) {
if (req.body[param] !== undefined) {
delete req.body[param];
}
}
delete req.body.max_tokens;
}
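// Illustrative sketch: what the restricted-model regex above accepts. Dated
// snapshots match; "gpt-5-chat-latest" deliberately falls through unfiltered.
const gpt5Restricted = /^gpt-5(-mini|-nano)?(-\d{4}-\d{2}-\d{2})?$/;
console.assert(gpt5Restricted.test("gpt-5"));
console.assert(gpt5Restricted.test("gpt-5-mini"));
console.assert(gpt5Restricted.test("gpt-5-nano-2025-08-07"));
console.assert(!gpt5Restricted.test("gpt-5-chat-latest"));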
export const openai = openaiRouter;
+2 -2
@@ -77,8 +77,8 @@ async function enqueue(req: Request) {
}
const enqueuedRequestCount = queue.filter(sharesIdentifierWith(req)).length;
// Do not apply concurrency limit to "special" users
if (enqueuedRequestCount >= USER_CONCURRENCY_LIMIT && req.user?.type !== "special") {
throw new TooManyRequestsError(
"Your IP or user token already has another request in the queue."
);
+361
@@ -0,0 +1,361 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { QwenKey, keyPool } from "../shared/key-management";
import {
isQwenModel,
isQwenThinkingModel,
normalizeMessages,
isQwen3Model,
isThinkingVariant,
isNonThinkingVariant,
getBaseModelName
} from "../shared/api-schemas/qwen";
import { logger } from "../logger";
const log = logger.child({ module: "proxy", service: "qwen" });
let modelsCache: any = null;
let modelsCacheTime = 0;
const qwenResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
res.status(200).json({ ...body, proxy: body.proxy });
};
const getModelsResponse = async () => {
// Return cache if less than 1 minute old
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
try {
// Get a Qwen key directly
const modelToUse = "qwen-plus"; // Use any Qwen model here - just for key selection
const qwenKey = keyPool.get(modelToUse, "qwen") as QwenKey;
if (!qwenKey || !qwenKey.key) {
log.warn("No valid Qwen key available for model listing");
throw new Error("No valid Qwen API key available");
}
// Fetch models directly from Qwen API
const response = await axios.get("https://dashscope-intl.aliyuncs.com/compatible-mode/v1/models", {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${qwenKey.key}`
},
});
if (!response.data || !response.data.data) {
throw new Error("Unexpected response format from Qwen API");
}
// Extract models
const models = response.data;
// Ensure we have all known Qwen models in the list
const knownQwenModels = [
"qwen-max",
"qwen-max-latest",
"qwen-max-2025-01-25",
"qwen-plus",
"qwen-plus-latest",
"qwen-plus-2025-01-25",
"qwen-turbo",
"qwen-turbo-latest",
"qwen-turbo-2024-11-01",
"qwen3-235b-a22b",
"qwen3-32b",
"qwen3-30b-a3b"
];
// Add thinking capability flag to models that support it
if (models.data && Array.isArray(models.data)) {
// Create a set of existing model IDs for quick lookup
const existingModelIds = new Set(models.data.map((model: any) => model.id));
// Filter out base Qwen3 models since we'll add variants instead
models.data = models.data.filter((model: any) => {
return !isQwen3Model(model.id) || isThinkingVariant(model.id) || isNonThinkingVariant(model.id);
});
// Add any missing models from our known list
knownQwenModels.forEach(modelId => {
if (!existingModelIds.has(modelId)) {
models.data.push({
id: modelId,
object: "model",
created: Date.now(),
owned_by: "qwen",
capabilities: isQwenThinkingModel(modelId) ? { thinking: true } : {}
});
}
});
// Flag thinking-capable models and expand Qwen3 base models into variants
const processedModelIds = new Set();
const originalModelsData = [...models.data];
models.data = originalModelsData.flatMap((model: any) => {
const modelId = model.id;
processedModelIds.add(modelId);
// Apply capabilities to all models
if (isQwenThinkingModel(modelId)) {
model.capabilities = model.capabilities || {};
model.capabilities.thinking = true;
}
// For Qwen3 models, add thinking and non-thinking variants, but not the original
if (isQwen3Model(modelId) &&
!isThinkingVariant(modelId) &&
!isNonThinkingVariant(modelId)) {
// Create thinking variant
const thinkingModel = {
id: `${modelId}-thinking`,
object: "model",
created: model.created || Date.now(),
owned_by: model.owned_by || "qwen",
capabilities: { thinking: true },
proxy_managed: true,
display_name: `${model.display_name || modelId} (Thinking Mode)`
};
// Create non-thinking variant
const nonThinkingModel = {
id: `${modelId}-nonthinking`,
object: "model",
created: model.created || Date.now(),
owned_by: model.owned_by || "qwen",
capabilities: { thinking: true },
proxy_managed: true,
display_name: `${model.display_name || modelId} (Standard Mode)`
};
// Only add variants, not the original model
return [thinkingModel, nonThinkingModel];
}
return [model];
});
} else {
// If the API response didn't include models, create our own list
models.data = knownQwenModels.flatMap(modelId => {
// For Qwen3 models, add only thinking and non-thinking variants (not the base model)
if (isQwen3Model(modelId) &&
!isThinkingVariant(modelId) &&
!isNonThinkingVariant(modelId)) {
return [
{
id: `${modelId}-thinking`,
object: "model",
created: Date.now(),
owned_by: "qwen",
capabilities: { thinking: true },
proxy_managed: true,
display_name: `${modelId} (Thinking Mode)`
},
{
id: `${modelId}-nonthinking`,
object: "model",
created: Date.now(),
owned_by: "qwen",
capabilities: { thinking: true },
proxy_managed: true,
display_name: `${modelId} (Standard Mode)`
}
];
}
// For non-Qwen3 models, return the base model
const baseModel = {
id: modelId,
object: "model",
created: Date.now(),
owned_by: "qwen",
capabilities: isQwenThinkingModel(modelId) ? { thinking: true } : {}
};
return [baseModel];
});
}
log.debug({ modelCount: models.data?.length }, "Retrieved models from Qwen API");
// Cache the response
modelsCache = models;
modelsCacheTime = new Date().getTime();
return models;
} catch (error) {
// Provide detailed logging for better troubleshooting
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error fetching Qwen models"
);
} else {
log.error({ error }, "Unknown error fetching Qwen models");
}
// Return empty list as fallback
return {
object: "list",
data: [],
};
}
};
const handleModelRequest: RequestHandler = async (_req, res) => {
try {
const models = await getModelsResponse();
res.status(200).json(models);
} catch (error) {
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error handling model request"
);
} else {
log.error({ error }, "Unknown error handling model request");
}
res.status(500).json({ error: "Failed to fetch models" });
}
};
// Function to prepare messages for Qwen API
function prepareMessages(req: Request) {
if (req.body.messages && Array.isArray(req.body.messages)) {
req.body.messages = normalizeMessages(req.body.messages);
}
}
// Function to handle thinking capability for Qwen models
function handleThinkingCapability(req: Request) {
const model = req.body.model;
// Special handling for our proxy-managed variants
if (isThinkingVariant(model)) {
// Set the base model name without the suffix
req.body.model = getBaseModelName(model);
// Force enable thinking for the -thinking variant
req.body.enable_thinking = true;
// Log the transformation
log.debug(
{ originalModel: model, transformedModel: req.body.model, enableThinking: true },
"Transformed request for thinking variant"
);
return;
}
if (isNonThinkingVariant(model)) {
// Set the base model name without the suffix
req.body.model = getBaseModelName(model);
// Force disable thinking for the -nonthinking variant
req.body.enable_thinking = false;
// Log the transformation
log.debug(
{ originalModel: model, transformedModel: req.body.model, enableThinking: false },
"Transformed request for non-thinking variant"
);
return;
}
// For standard models with thinking capability
if (isQwenThinkingModel(model) && req.body.stream === true) {
// Only add enable_thinking if it's not already set
if (req.body.enable_thinking === undefined) {
req.body.enable_thinking = false; // Default to false, let users explicitly enable it
}
// If thinking_budget is provided but enable_thinking is false, enable thinking
if (req.body.thinking_budget !== undefined && req.body.enable_thinking === false) {
req.body.enable_thinking = true;
}
} else if (isQwenThinkingModel(model) && req.body.stream !== true) {
// For non-streaming requests with thinking-capable models, always disable thinking
req.body.enable_thinking = false;
}
}
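// Illustrative sketch of the variant mapping (model names are hypothetical but
// consistent with the helpers above):
//   { model: "qwen3-32b-thinking" }    => { model: "qwen3-32b", enable_thinking: true }
//   { model: "qwen3-32b-nonthinking" } => { model: "qwen3-32b", enable_thinking: false }
// Thinking-capable base models default to enable_thinking: false on streaming
// requests unless the client opts in or supplies a thinking_budget.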
// Function to remove parameters not supported by Qwen models
function removeUnsupportedParameters(req: Request) {
// Remove parameters that Qwen doesn't support
if (req.body.logit_bias !== undefined) {
delete req.body.logit_bias;
}
if (req.body.top_logprobs !== undefined) {
delete req.body.top_logprobs;
}
// Logging for debugging
if (process.env.NODE_ENV !== 'production') {
log.debug({ body: req.body }, "Request after parameter cleanup");
}
}
// Set up count token functionality for Qwen models
function countQwenTokens(req: Request) {
const model = req.body.model;
if (isQwenModel(model)) {
// Count tokens using prompt tokens (simplified)
if (req.promptTokens) {
req.log.debug(
{ tokens: req.promptTokens },
"Estimated token count for Qwen prompt"
);
}
}
}
const qwenProxy = createQueuedProxyMiddleware({
mutations: [
addKey,
finalizeBody
],
target: "https://dashscope-intl.aliyuncs.com/compatible-mode",
blockingResponseHandler: qwenResponseHandler,
});
const qwenRouter = Router();
qwenRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "qwen" },
{ afterTransform: [ prepareMessages, handleThinkingCapability, removeUnsupportedParameters, countQwenTokens ] }
),
qwenProxy
);
qwenRouter.post(
"/v1/embeddings",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "qwen" },
{ afterTransform: [] }
),
qwenProxy
);
qwenRouter.get("/v1/models", handleModelRequest);
export const qwen = qwenRouter;
+10
@@ -10,6 +10,11 @@ import { googleAI } from "./google-ai";
import { mistralAI } from "./mistral-ai";
import { openai } from "./openai";
import { openaiImage } from "./openai-image";
import { deepseek } from "./deepseek";
import { xai } from "./xai";
import { cohere } from "./cohere";
import { qwen } from "./qwen";
import { moonshot } from "./moonshot";
import { sendErrorToClient } from "./middleware/response/error-generator";
const proxyRouter = express.Router();
@@ -49,6 +54,11 @@ proxyRouter.use("/mistral-ai", addV1, mistralAI);
proxyRouter.use("/aws", aws);
proxyRouter.use("/gcp/claude", addV1, gcp);
proxyRouter.use("/azure/openai", addV1, azure);
proxyRouter.use("/deepseek", addV1, deepseek);
proxyRouter.use("/xai", addV1, xai);
proxyRouter.use("/cohere", addV1, cohere);
proxyRouter.use("/qwen", addV1, qwen);
proxyRouter.use("/moonshot", addV1, moonshot);
// Redirect browser requests to the homepage.
proxyRouter.get("*", (req, res, next) => {
+394
@@ -0,0 +1,394 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { XaiKey, keyPool } from "../shared/key-management";
import { isGrokVisionModel, isGrokImageGenModel, isGrokReasoningModel, isGrokReasoningEffortModel, isGrokReasoningContentModel } from "../shared/api-schemas/xai";
let modelsCache: any = null;
let modelsCacheTime = 0;
const xaiResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
// Preserve the original body (including potential reasoning_content) for grok-3-mini models
// which support the reasoning feature
let newBody = body;
// Check if this is an image generation response (data array with url or b64_json)
if (body.data && Array.isArray(body.data)) {
req.log.debug(
{ imageCount: body.data.length },
"Grok image generation response detected"
);
// Transform the image generation response into a chat completion format
// that SillyTavern can display
const images = body.data;
// Create a chat completion style response
newBody = {
id: `grok-image-${Date.now()}`,
object: "chat.completion",
created: Math.floor(Date.now() / 1000),
model: req.body.model,
choices: images.map((image, index) => {
// Create markdown image content for each generated image
let content = '';
// Add the image using data URL for b64_json
if (image.b64_json) {
// If it doesn't start with data:image/, add the prefix
const imgData = image.b64_json.startsWith('data:image/')
? image.b64_json
: `data:image/jpeg;base64,${image.b64_json}`;
content = `![Generated Image](${imgData})`;
}
// Fall back to URL if b64_json isn't available
else if (image.url) {
content = `![Generated Image](${image.url})`;
}
return {
index,
message: {
role: "assistant",
content
},
finish_reason: "stop"
};
}),
usage: body.usage || { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 }
};
req.log.debug("Transformed image generation response to chat format");
}
// Check if this is a chat completion response with choices
else if (body.choices && Array.isArray(body.choices) && body.choices.length > 0) {
// Make sure each choice's message is preserved, especially reasoning_content
// Only grok-3-mini models return reasoning_content
const model = req.body.model;
if (isGrokReasoningContentModel(model)) {
body.choices.forEach(choice => {
if (choice.message && choice.message.reasoning_content) {
req.log.debug(
{ reasoning_length: choice.message.reasoning_content.length },
"Grok reasoning content detected"
);
}
});
}
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const getModelsResponse = async () => {
// Return cache if less than 1 minute old
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
try {
// Get an XAI key directly using keyPool.get()
const modelToUse = "grok-3"; // Use any XAI model here - just for key selection
const xaiKey = keyPool.get(modelToUse, "xai") as XaiKey;
if (!xaiKey || !xaiKey.key) {
throw new Error("Failed to get valid XAI key");
}
// Fetch models from XAI API with authorization
const response = await axios.get("https://api.x.ai/v1/models", {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${xaiKey.key}`
},
});
// If successful, update the cache
if (response.data && response.data.data) {
modelsCache = {
object: "list",
data: response.data.data.map((model: any) => ({
id: model.id,
object: "model",
owned_by: "xai",
})),
};
} else {
throw new Error("Unexpected response format from XAI API");
}
} catch (error) {
console.error("Error fetching XAI models:", error);
throw error; // No fallback - error will be passed to caller
}
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = async (_req, res) => {
try {
const modelsResponse = await getModelsResponse();
res.status(200).json(modelsResponse);
} catch (error) {
console.error("Error in handleModelRequest:", error);
res.status(500).json({ error: "Failed to fetch models" });
}
};
const xaiProxy = createQueuedProxyMiddleware({
mutations: [addKey, finalizeBody],
target: "https://api.x.ai",
blockingResponseHandler: xaiResponseHandler,
});
const xaiRouter = Router();
// Combines all assistant messages at the end of the context and adds the
// beta 'prefix' option, making prefills work the same way they do for Claude.
function enablePrefill(req: Request) {
// Set NO_XAI_PREFILL to disable this behavior.
if (process.env.NO_XAI_PREFILL) return;
// Skip if no messages (e.g., for image generation requests)
if (!req.body.messages || !Array.isArray(req.body.messages)) return;
const msgs = req.body.messages;
if (msgs.length === 0 || msgs.at(-1)?.role !== 'assistant') return;
let i = msgs.length - 1;
let content = '';
while (i >= 0 && msgs[i].role === 'assistant') {
// maybe we should also add a newline between messages? no for now.
content = msgs[i--].content + content;
}
msgs.splice(i + 1, msgs.length, { role: 'assistant', content, prefix: true });
}
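// Illustrative sketch of the prefill collapse:
//   [{ role: "user", content: "Hi" },
//    { role: "assistant", content: "Sure, " },
//    { role: "assistant", content: "here goes:" }]
// becomes
//   [{ role: "user", content: "Hi" },
//    { role: "assistant", content: "Sure, here goes:", prefix: true }]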
// Function to redirect image model requests to the image generations endpoint
function redirectImageRequests(req: Request) {
const model = req.body.model;
// If this is an image generation model but the endpoint is chat/completions,
// we need to transform the request to match the image generations endpoint format
if (isGrokImageGenModel(model) && req.path === "/v1/chat/completions") {
req.log.info(`Redirecting ${model} request to /v1/images/generations endpoint`);
// Save original URL and path for later
const originalUrl = req.url;
const originalPath = req.path;
// Change the request URL and path to the images endpoint
req.url = req.url.replace("/v1/chat/completions", "/v1/images/generations");
Object.defineProperty(req, 'path', { value: "/v1/images/generations" });
// Extract the prompt from the messages if present
if (req.body.messages && Array.isArray(req.body.messages)) {
// Find the last user message and use its content as the prompt
for (let i = req.body.messages.length - 1; i >= 0; i--) {
const msg = req.body.messages[i];
if (msg.role === 'user') {
// Extract text content
let prompt = "";
if (typeof msg.content === 'string') {
prompt = msg.content;
} else if (Array.isArray(msg.content)) {
// Collect all text content items
prompt = msg.content
.filter((item: any) => item.type === 'text')
.map((item: any) => item.text)
.join(" ");
}
if (prompt) {
// Create a new request body for image generation
req.body = {
model: model,
prompt: prompt,
n: req.body.n || 1,
response_format: "b64_json", // Always use b64_json for better client compatibility
user: req.body.user
};
req.log.debug({ newBody: req.body }, "Transformed request for image generation");
break;
}
}
}
}
// Log transformation
req.log.info(`Request transformed from ${originalUrl} to ${req.url}`);
}
}
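// Illustrative sketch ("grok-2-image" is an example ID; anything matching
// isGrokImageGenModel applies): a chat request such as
//   POST /v1/chat/completions
//   { model: "grok-2-image", messages: [{ role: "user", content: "a red fox" }] }
// is rewritten and forwarded as
//   POST /v1/images/generations
//   { model: "grok-2-image", prompt: "a red fox", n: 1, response_format: "b64_json" }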
// Function to remove parameters not supported by X.AI/Grok models and handle special cases
function removeUnsupportedParameters(req: Request) {
const model = req.body.model;
// Check if this is a reasoning model (grok-3-mini or grok-4-0709)
const isReasoningModel = isGrokReasoningModel(model);
const isReasoningEffortModel = isGrokReasoningEffortModel(model);
if (isReasoningModel) {
// List of parameters not supported by reasoning models
const unsupportedParams = [
'presence_penalty',
'frequency_penalty',
'stop' // stop parameter is not supported by reasoning models
];
for (const param of unsupportedParams) {
if (req.body[param] !== undefined) {
req.log.info(`Removing unsupported parameter for reasoning model ${model}: ${param}`);
delete req.body[param];
}
}
// Handle reasoning_effort parameter - only supported by grok-3-mini
if (isReasoningEffortModel) {
// This is grok-3-mini, handle reasoning_effort
if (req.body.reasoning_effort) {
// If reasoning_effort is already present in the request, validate it
if (!['low', 'medium', 'high'].includes(req.body.reasoning_effort)) {
req.log.warn(`Invalid reasoning_effort value: ${req.body.reasoning_effort}, removing it`);
delete req.body.reasoning_effort;
}
} else {
// Default to low reasoning effort if not specified
req.body.reasoning_effort = 'low';
req.log.debug(`Setting default reasoning_effort=low for Grok-3-mini model`);
}
} else {
// This is grok-4-0709 or other reasoning model that doesn't support reasoning_effort
if (req.body.reasoning_effort !== undefined) {
req.log.info(`Removing unsupported reasoning_effort parameter for model ${model}`);
delete req.body.reasoning_effort;
}
}
}
// Special handling for vision models
if (isGrokVisionModel(model)) {
req.log.debug(`Detected Grok vision model: ${model}`);
// Check that messages have proper format for vision models
if (req.body.messages && Array.isArray(req.body.messages)) {
req.body.messages.forEach((msg: { content: string | any[] }) => {
// If content is a string but the model is vision-capable,
// convert it to an array with a single text item for consistency
if (typeof msg.content === 'string') {
req.log.debug('Converting string content to array format for vision model');
msg.content = [{ type: 'text', text: msg.content }];
}
});
}
}
// Image generation models are handled separately by the dedicated endpoint below
}
// Handler for image generation requests
const handleImageGenerationRequest: RequestHandler = async (req, res) => {
try {
// Get an XAI key directly for image generation
const modelToUse = req.body.model || "grok-2-image"; // Default model
const xaiKey = keyPool.get(modelToUse, "xai") as XaiKey;
if (!xaiKey || !xaiKey.key) {
throw new Error("Failed to get valid XAI key for image generation");
}
// Forward the request to XAI API
const response = await axios.post("https://api.x.ai/v1/images/generations", req.body, {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${xaiKey.key}`
},
});
// Return the response directly
res.status(200).json(response.data);
} catch (error) {
req.log.error({ error }, "Error in image generation request");
// Pass through the error response if available
if (error.response && error.response.data) {
res.status(error.response.status || 500).json(error.response.data);
} else {
res.status(500).json({ error: "Failed to generate image", message: error.message });
}
}
};
// Set up count token functionality for XAI models
function countXaiTokens(req: Request) {
const model = req.body.model;
// For vision models, estimate image token usage
if (isGrokVisionModel(model) && req.body.messages && Array.isArray(req.body.messages)) {
// Initialize image count
let imageCount = 0;
// Count images in the request
for (const msg of req.body.messages) {
if (Array.isArray(msg.content)) {
const imagesInMessage = msg.content.filter(
(item: any) => item.type === "image_url"
).length;
imageCount += imagesInMessage;
}
}
// Apply token estimations for images
// Each image is approximately 1500 tokens based on documentation
const TOKENS_PER_IMAGE = 1500;
const imageTokens = imageCount * TOKENS_PER_IMAGE;
if (imageTokens > 0) {
req.log.debug(
{ imageCount, tokenEstimate: imageTokens },
"Estimated token count for Grok vision images"
);
// Add the image tokens to the existing token count if available
if (req.promptTokens) {
req.promptTokens += imageTokens;
}
}
}
}
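// Illustrative sketch: with the 1500-token-per-image estimate above, a request
// carrying two image_url parts adds 2 * 1500 = 3000 tokens to req.promptTokens
// (only when a prompt token count has already been computed).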
xaiRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "xai" },
{ afterTransform: [ redirectImageRequests, enablePrefill, removeUnsupportedParameters, countXaiTokens ] }
),
xaiProxy
);
// Add endpoint for image generation
xaiRouter.post(
"/v1/images/generations",
ipLimiter,
handleImageGenerationRequest
);
xaiRouter.get("/v1/models", handleModelRequest);
export const xai = xaiRouter;
+1 -1
@@ -92,7 +92,7 @@ app.use("/admin", adminRouter);
app.use((req, _, next) => {
// For whatever reason SillyTavern just ignores the path a user provides
// when using Google AI with reverse proxy. We'll fix it here.
if (req.path.startsWith("/v1beta/models/")) {
if (req.path.match(/^\/v1(alpha|beta)\/models(\/|$)/)) {
req.url = `${config.proxyEndpointRoute}/google-ai${req.url}`;
return next();
}
+328 -32
@@ -2,9 +2,14 @@ import { config, listConfig } from "./config";
import {
AnthropicKey,
AwsBedrockKey,
DeepseekKey,
GcpKey,
keyPool,
OpenAIKey,
XaiKey,
CohereKey,
QwenKey,
MoonshotKey,
} from "./shared/key-management";
import {
AnthropicModelFamily,
@@ -19,6 +24,11 @@ import {
MODEL_FAMILY_SERVICE,
ModelFamily,
OpenAIModelFamily,
DeepseekModelFamily,
XaiModelFamily,
CohereModelFamily,
QwenModelFamily,
MoonshotModelFamily,
} from "./shared/models";
import { getCostSuffix, getTokenCostUsd, prettyTokens } from "./shared/stats";
import { getUniqueIps } from "./proxy/rate-limit";
@@ -27,6 +37,87 @@ import { getEstimatedWaitTime, getQueueLength } from "./proxy/queue";
const CACHE_TTL = 2000;
// Define the preferred order for model families in the service info display
// This ensures logical grouping (GPT-4 models together, then GPT-4.1, then GPT-5, etc.)
const MODEL_FAMILY_ORDER: ModelFamily[] = [
// OpenAI models in logical order
"turbo",
"gpt4",
"gpt4-32k",
"gpt4-turbo",
"gpt4o",
"gpt41",
"gpt41-mini",
"gpt41-nano",
"gpt45",
"gpt5",
"gpt5-mini",
"gpt5-nano",
"gpt5-chat-latest",
"o1",
"o1-mini",
"o1-pro",
"o3",
"o3-mini",
"o3-pro",
"o4-mini",
"codex-mini",
"dall-e",
"gpt-image",
// Azure OpenAI models (same order as OpenAI)
"azure-turbo",
"azure-gpt4",
"azure-gpt4-32k",
"azure-gpt4-turbo",
"azure-gpt4o",
"azure-gpt41",
"azure-gpt41-mini",
"azure-gpt41-nano",
"azure-gpt45",
"azure-gpt5",
"azure-gpt5-mini",
"azure-gpt5-nano",
"azure-gpt5-chat-latest",
"azure-o1",
"azure-o1-mini",
"azure-o1-pro",
"azure-o3",
"azure-o3-mini",
"azure-o3-pro",
"azure-o4-mini",
"azure-codex-mini",
"azure-dall-e",
"azure-gpt-image",
// Anthropic models
"claude",
"claude-opus",
// Google AI models
"gemini-flash",
"gemini-pro",
"gemini-ultra",
// Mistral AI models
"mistral-tiny",
"mistral-small",
"mistral-medium",
"mistral-large",
// AWS Bedrock models
"aws-claude",
"aws-claude-opus",
"aws-mistral-tiny",
"aws-mistral-small",
"aws-mistral-medium",
"aws-mistral-large",
// GCP models
"gcp-claude",
"gcp-claude-opus",
// Other services
"deepseek",
"xai",
"cohere",
"qwen",
"moonshot"
];
type KeyPoolKey = ReturnType<typeof keyPool.list>[0];
const keyIsOpenAIKey = (k: KeyPoolKey): k is OpenAIKey =>
k.service === "openai";
@@ -34,6 +125,16 @@ const keyIsAnthropicKey = (k: KeyPoolKey): k is AnthropicKey =>
k.service === "anthropic";
const keyIsAwsKey = (k: KeyPoolKey): k is AwsBedrockKey => k.service === "aws";
const keyIsGcpKey = (k: KeyPoolKey): k is GcpKey => k.service === "gcp";
const keyIsDeepseekKey = (k: KeyPoolKey): k is DeepseekKey =>
k.service === "deepseek";
const keyIsXaiKey = (k: KeyPoolKey): k is XaiKey =>
k.service === "xai";
const keyIsCohereKey = (k: KeyPoolKey): k is CohereKey =>
k.service === "cohere";
const keyIsQwenKey = (k: KeyPoolKey): k is QwenKey =>
k.service === "qwen";
const keyIsMoonshotKey = (k: KeyPoolKey): k is MoonshotKey =>
k.service === "moonshot";
/** Stats aggregated across all keys for a given service. */
type ServiceAggregate = "keys" | "uncheckedKeys" | "orgs";
@@ -49,19 +150,27 @@ type ModelAggregates = {
awsClaude2?: number;
awsSonnet3?: number;
awsSonnet3_5?: number;
awsSonnet3_7?: number;
awsSonnet4?: number;
awsOpus3?: number;
awsOpus4?: number;
awsHaiku: number;
gcpSonnet?: number;
gcpSonnet35?: number;
gcpHaiku?: number;
queued: number;
tokens: number;
inputTokens: number; // Changed from tokens
outputTokens: number; // Added
legacyTokens?: number; // Added for migrated totals
};
/** All possible combinations of model family and aggregate type. */
type ModelAggregateKey = `${ModelFamily}__${keyof ModelAggregates}`;
type AllStats = {
proompts: number;
tokens: number;
inputTokens: number; // Changed from tokens
outputTokens: number; // Added
legacyTokens?: number; // Added
tokenCost: number;
} & { [modelFamily in ModelFamily]?: ModelAggregates } & {
[service in LLMService as `${service}__${ServiceAggregate}`]?: number;
@@ -96,6 +205,8 @@ export type ServiceInfo = {
uptime: number;
endpoints: {
openai?: string;
deepseek?: string;
xai?: string;
anthropic?: string;
"google-ai"?: string;
"mistral-ai"?: string;
@@ -116,8 +227,13 @@ export type ServiceInfo = {
& { [f in AwsBedrockModelFamily]?: AwsInfo }
& { [f in GcpModelFamily]?: GcpInfo }
& { [f in AzureOpenAIModelFamily]?: BaseFamilyInfo; }
& { [f in GoogleAIModelFamily]?: BaseFamilyInfo }
& { [f in MistralAIModelFamily]?: BaseFamilyInfo };
& { [f in GoogleAIModelFamily]?: BaseFamilyInfo & { overQuotaKeys?: number } }
& { [f in MistralAIModelFamily]?: BaseFamilyInfo }
& { [f in DeepseekModelFamily]?: BaseFamilyInfo }
& { [f in XaiModelFamily]?: BaseFamilyInfo }
& { [f in CohereModelFamily]?: BaseFamilyInfo }
& { [f in QwenModelFamily]?: BaseFamilyInfo }
& { [f in MoonshotModelFamily]?: BaseFamilyInfo };
// https://stackoverflow.com/a/66661477
// type DeepKeyOf<T> = (
@@ -159,6 +275,21 @@ const SERVICE_ENDPOINTS: { [s in LLMService]: Record<string, string> } = {
azure: `%BASE%/azure/openai`,
"azure-image": `%BASE%/azure/openai`,
},
deepseek: {
deepseek: `%BASE%/deepseek`,
},
xai: {
xai: `%BASE%/xai`,
},
cohere: {
cohere: `%BASE%/cohere`,
},
qwen: {
qwen: `%BASE%/qwen`,
},
moonshot: {
moonshot: `%BASE%/moonshot`,
},
};
const familyStats = new Map<ModelAggregateKey, number>();
@@ -250,11 +381,14 @@ function getEndpoints(baseUrl: string, accessibleFamilies: Set<ModelFamily>) {
type TrafficStats = Pick<ServiceInfo, "proompts" | "tookens" | "proomptersNow">;
function getTrafficStats(): TrafficStats {
const tokens = serviceStats.get("tokens") || 0;
const inputTokens = serviceStats.get("inputTokens") || 0;
const outputTokens = serviceStats.get("outputTokens") || 0;
// const legacyTokens = serviceStats.get("legacyTokens") || 0; // Optional: include in total if desired
const totalTokens = inputTokens + outputTokens; // + legacyTokens;
const tokenCost = serviceStats.get("tokenCost") || 0;
return {
proompts: serviceStats.get("proompts") || 0,
tookens: `${prettyTokens(totalTokens)}${getCostSuffix(tokenCost)}`, // Simplified to show aggregate and cost
...(config.textModelRateLimit ? { proomptersNow: getUniqueIps() } : {}),
};
}
@@ -270,16 +404,18 @@ function getServiceModelStats(accessibleFamilies: Set<ModelFamily>) {
if (!hasKeys) continue;
serviceInfo[`${service}Keys`] = hasKeys;
if (service === "openai" && config.checkKeys) {
serviceInfo.openaiOrgs = getUniqueOpenAIOrgs(keyPool.list());
}
}
// Build model family info in the defined order for logical grouping
for (const family of MODEL_FAMILY_ORDER) {
if (accessibleFamilies.has(family)) {
modelFamilyInfo[family] = getInfoForFamily(family);
}
}
return { serviceInfo, modelFamilyInfo };
}
@@ -309,15 +445,45 @@ function addKeyToAggregates(k: KeyPoolKey) {
addToService("aws__keys", k.service === "aws" ? 1 : 0);
addToService("gcp__keys", k.service === "gcp" ? 1 : 0);
addToService("azure__keys", k.service === "azure" ? 1 : 0);
addToService("deepseek__keys", k.service === "deepseek" ? 1 : 0);
addToService("xai__keys", k.service === "xai" ? 1 : 0);
addToService("cohere__keys", k.service === "cohere" ? 1 : 0);
addToService("qwen__keys", k.service === "qwen" ? 1 : 0);
addToService("moonshot__keys", k.service === "moonshot" ? 1 : 0);
let sumInputTokens = 0;
let sumOutputTokens = 0;
let sumLegacyTokens = 0; // Optional
let sumCost = 0;
const incrementGenericFamilyStats = (f: ModelFamily) => {
const usage = k.tokenUsage?.[f];
let familyInputTokens = 0;
let familyOutputTokens = 0;
let familyLegacyTokens = 0;
if (usage) {
familyInputTokens = usage.input || 0;
familyOutputTokens = usage.output || 0;
if (usage.legacy_total && familyInputTokens === 0 && familyOutputTokens === 0) {
// This is a migrated key with no new usage, use legacy_total as input for cost
familyLegacyTokens = usage.legacy_total;
sumCost += getTokenCostUsd(f, usage.legacy_total, 0);
} else {
sumCost += getTokenCostUsd(f, familyInputTokens, familyOutputTokens);
}
}
// If no k.tokenUsage[f], tokens are 0, cost is 0.
sumInputTokens += familyInputTokens;
sumOutputTokens += familyOutputTokens;
sumLegacyTokens += familyLegacyTokens; // Optional
addToFamily(`${f}__inputTokens`, familyInputTokens);
addToFamily(`${f}__outputTokens`, familyOutputTokens);
if (familyLegacyTokens > 0) {
addToFamily(`${f}__legacyTokens`, familyLegacyTokens); // Optional
}
addToFamily(`${f}__revoked`, k.isRevoked ? 1 : 0);
addToFamily(`${f}__active`, k.isDisabled ? 0 : 1);
};
@@ -351,10 +517,21 @@ function addKeyToAggregates(k: KeyPoolKey) {
k.modelIds.forEach((id) => {
if (id.includes("claude-3-sonnet")) {
addToFamily(`aws-claude__awsSonnet3`, 1);
// not ideal but whatever
} else if (id.includes("claude-3-5-sonnet")) {
addToFamily(`aws-claude__awsSonnet3_5`, 1);
} else if (id.includes("claude-3-7-sonnet")) {
addToFamily(`aws-claude__awsSonnet3_7`, 1);
} else if (id.includes("claude-3-haiku")) {
addToFamily(`aws-claude__awsHaiku`, 1);
} else if (id.includes("sonnet-4")) {
addToFamily(`aws-claude__awsSonnet4`, 1);
} else if (id.includes("claude-3-opus")) {
addToFamily(`aws-claude__awsOpus3`, 1);
addToFamily(`aws-claude-opus__awsOpus3`, 1);
} else if (id.includes("opus-4")) {
addToFamily(`aws-claude__awsOpus4`, 1);
addToFamily(`aws-claude-opus__awsOpus4`, 1);
} else if (id.includes("claude-v2")) {
addToFamily(`aws-claude__awsClaude2`, 1);
}
@@ -372,25 +549,111 @@ function addKeyToAggregates(k: KeyPoolKey) {
k.modelFamilies.forEach(incrementGenericFamilyStats);
// TODO: add modelIds to GcpKey
break;
case "deepseek":
if (!keyIsDeepseekKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
incrementGenericFamilyStats(f);
addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
});
break;
case "xai":
if (!keyIsXaiKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
incrementGenericFamilyStats(f);
if ('isOverQuota' in k) {
addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
}
});
break;
case "cohere":
if (!keyIsCohereKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
incrementGenericFamilyStats(f);
if ('isOverQuota' in k) {
addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
}
});
break;
// These services don't have any additional stats to track.
case "azure":
case "google-ai":
case "mistral-ai":
k.modelFamilies.forEach(incrementGenericFamilyStats);
break;
case "google-ai":
// Structural cast to access the GoogleAI-specific overQuotaFamilies property
const googleKey = k as unknown as { overQuotaFamilies?: string[] };
// First handle general stats for all model families
k.modelFamilies.forEach((f) => {
incrementGenericFamilyStats(f);
});
// Create a set of model families that are over quota for this key
let overQuotaModelFamilies = new Set<string>();
// Add any model family that's listed in overQuotaFamilies
if (googleKey.overQuotaFamilies && Array.isArray(googleKey.overQuotaFamilies)) {
googleKey.overQuotaFamilies.forEach(family => {
overQuotaModelFamilies.add(family);
});
}
// If key is generally over quota and we don't have specific families, add all families
else if ('isOverQuota' in k && k.isOverQuota) {
k.modelFamilies.forEach(family => {
overQuotaModelFamilies.add(family);
});
}
// Now increment the over-quota counter for each affected family
// These model families are valid and already defined in the enum
overQuotaModelFamilies.forEach(family => {
if (family === 'gemini-pro' || family === 'gemini-flash' || family === 'gemini-ultra') {
addToFamily(`${family}__overQuota` as any, 1);
}
});
break;
case "qwen":
k.modelFamilies.forEach(incrementGenericFamilyStats);
break;
case "moonshot":
k.modelFamilies.forEach(incrementGenericFamilyStats);
break;
default:
assertNever(k.service);
}
addToService("tokens", sumTokens);
addToService("inputTokens", sumInputTokens);
addToService("outputTokens", sumOutputTokens);
if (sumLegacyTokens > 0) { // Optional
addToService("legacyTokens", sumLegacyTokens);
}
addToService("tokenCost", sumCost);
}
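// Illustrative sketch of the legacy-total fallback in
// incrementGenericFamilyStats (tokenUsage shapes are hypothetical):
//   { input: 0, output: 0, legacy_total: 1_000_000 }
//     => counted as legacyTokens; cost = getTokenCostUsd(f, 1_000_000, 0),
//        i.e. the migrated total is priced as if it were all input tokens.
//   { input: 400, output: 100, legacy_total: 1_000_000 }
//     => legacy_total is ignored; cost = getTokenCostUsd(f, 400, 100).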
function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
const inputTokens = familyStats.get(`${family}__inputTokens`) || 0;
const outputTokens = familyStats.get(`${family}__outputTokens`) || 0;
const legacyTokens = familyStats.get(`${family}__legacyTokens`) || 0; // Optional
let cost = 0;
let displayTokens = 0;
let usageString = "";
if (inputTokens > 0 || outputTokens > 0) {
cost = getTokenCostUsd(family, inputTokens, outputTokens);
displayTokens = inputTokens + outputTokens;
usageString = `${prettyTokens(displayTokens)} (In: ${prettyTokens(inputTokens)}, Out: ${prettyTokens(outputTokens)})${getCostSuffix(cost)}`;
} else if (legacyTokens > 0) {
// Only show legacy if no new input/output has been recorded for this family aggregate
cost = getTokenCostUsd(family, legacyTokens, 0); // Cost legacy as all input
displayTokens = legacyTokens;
usageString = `${prettyTokens(displayTokens)} tokens (legacy total)${getCostSuffix(cost)}`;
} else {
usageString = `${prettyTokens(0)} tokens${getCostSuffix(0)}`;
}
let info: BaseFamilyInfo & OpenAIInfo & AnthropicInfo & AwsInfo & GcpInfo = {
usage: usageString,
activeKeys: familyStats.get(`${family}__active`) || 0,
revokedKeys: familyStats.get(`${family}__revoked`) || 0,
};
@@ -418,25 +681,40 @@ function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
break;
case "aws":
if (family === "aws-claude") {
// Original behavior: get logged count from the same family
const logged = familyStats.get(`${family}__awsLogged`) || 0;
const variants = new Set<string>();
if (familyStats.get(`${family}__awsClaude2`) || 0) variants.add("claude2");
if (familyStats.get(`${family}__awsSonnet3`) || 0) variants.add("sonnet3");
if (familyStats.get(`${family}__awsSonnet3_5`) || 0) variants.add("sonnet3.5");
if (familyStats.get(`${family}__awsSonnet3_7`) || 0) variants.add("sonnet3.7");
if (familyStats.get(`${family}__awsHaiku`) || 0) variants.add("haiku");
if (familyStats.get(`${family}__awsSonnet4`) || 0) variants.add("sonnet4");
info.enabledVariants = variants.size ? Array.from(variants).join(",") : undefined;
if (logged > 0) {
info.privacy = config.allowAwsLogging
? `AWS logging verification inactive. Prompts could be logged.`
: `${logged} active keys are potentially logged and can't be used. Set ALLOW_AWS_LOGGING=true to override.`;
}
} else if (family === "aws-claude-opus") {
// Get logging info from aws-claude family since that's where it's collected
const awsLogged = familyStats.get(`aws-claude__awsLogged`) || 0;
const variants = new Set<string>();
if (familyStats.get(`${family}__awsOpus3`) || 0) variants.add("opus3");
if (familyStats.get(`${family}__awsOpus4`) || 0) variants.add("opus4");
info.enabledVariants = variants.size ? Array.from(variants).join(",") : undefined;
// Show privacy warning for Opus if there are active Opus keys AND some AWS keys are logged
if (awsLogged > 0 && info.activeKeys > 0) {
info.privacy = config.allowAwsLogging
? `AWS logging verification inactive. Prompts could be logged.`
: `Some AWS keys are potentially logged. Set ALLOW_AWS_LOGGING=true to override.`;
}
}
// TODO: Consider if aws-mistral-* families need similar enabledVariant listings
break;
case "gcp":
if (family === "gcp-claude") {
@@ -444,6 +722,24 @@ function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
info.enabledVariants = "not implemented";
}
break;
case "deepseek":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
break;
case "xai":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
break;
case "cohere":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
break;
case "google-ai":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
break;
case "qwen":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
break;
case "moonshot":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
break;
}
}
+66 -16
@@ -19,6 +19,13 @@ const AnthropicV1BaseSchema = z
top_k: z.coerce.number().optional(),
top_p: z.coerce.number().optional(),
metadata: z.object({ user_id: z.string().optional() }).optional(),
tools: z.array(z.any()).optional(),
tool_choice: z.any().optional(),
service_tier: z.enum(["auto", "standard_only"]).optional(),
cache_control: z.object({
type: z.literal("ephemeral"),
ttl: z.enum(["5m", "1h"]).optional()
}).optional(),
})
.strip();
@@ -33,16 +40,35 @@ export const AnthropicV1TextSchema = AnthropicV1BaseSchema.merge(
})
);
const AnthropicV1BaseContentSchema = z.union([
z.object({ type: z.literal("text"), text: z.string() }),
z.object({
type: z.literal("image"),
source: z.object({
type: z.literal("base64"),
media_type: z.string().max(100),
data: z.string(),
}),
})
]);
const AnthropicV1MessageMultimodalContentSchema = z.array(
z.union([
z.object({ type: z.literal("text"), text: z.string() }),
AnthropicV1BaseContentSchema,
z.object({
type: z.literal("image"),
source: z.object({
type: z.literal("base64"),
media_type: z.string().max(100),
data: z.string(),
}),
type: z.literal("tool_use"),
id: z.string(),
name: z.string(),
input: z.object({}).passthrough(),
}),
z.object({
type: z.literal("tool_result"),
tool_use_id: z.string(),
is_error: z.boolean().optional(),
content: z.union([
z.string(),
z.array(AnthropicV1BaseContentSchema)
]).optional(),
}),
])
);
@@ -69,6 +95,10 @@ export const AnthropicV1MessagesSchema = AnthropicV1BaseSchema.merge(
z.array(z.object({ type: z.literal("text"), text: z.string() })),
])
.optional(),
thinking: z.object({
type: z.literal("enabled"),
budget_tokens: z.number().min(1024),
}).optional(),
})
);
export type AnthropicChatMessage = z.infer<
@@ -82,7 +112,7 @@ function openAIMessagesToClaudeTextPrompt(messages: OpenAIChatMessage[]) {
let role: string = m.role;
if (role === "assistant") {
role = "Assistant";
} else if (role === "system") {
} else if (role === "system" || role === "developer") {
role = "System";
} else if (role === "user") {
role = "Human";
@@ -109,6 +139,10 @@ export const transformOpenAIToAnthropicChat: APIFormatTransformer<
);
throw result.error;
}
if (result.data.max_tokens > 8192) {
result.data.max_tokens = 4096;
}
const { messages, ...rest } = result.data;
const { messages: newMessages, system } =
@@ -365,7 +399,7 @@ function openAIMessagesToClaudeChatPrompt(messages: OpenAIChatMessage[]): {
// Here we will lose the original name if it was a system message, but that
// is generally okay because the system message is usually a prompt and not
// a character in the chat.
const name = msg.role === "system" ? "System" : msg.name?.trim();
const name = (msg.role === "system" || msg.role === "developer") ? "System" : msg.name?.trim();
const content = convertOpenAIContent(msg.content);
// Prepend the display name to the first text content in the current message
@@ -395,8 +429,8 @@ function openAIMessagesToClaudeChatPrompt(messages: OpenAIChatMessage[]): {
function isSystemOpenAIRole(
role: OpenAIChatMessage["role"]
): role is "system" | "function" | "tool" {
return ["system", "function", "tool"].includes(role);
): role is "developer" | "system" | "function" | "tool" {
return ["developer", "system", "function", "tool"].includes(role);
}
function getFirstTextContent(content: OpenAIChatMessage["content"]) {
@@ -439,9 +473,25 @@ function convertOpenAIContent(
});
}
export function containsImageContent(messages: AnthropicChatMessage[]): boolean {
const isImage = (item: any) => item?.type === 'image';
return messages.some(msg => {
if (typeof msg.content === 'string') return false;
return msg.content.some(item => {
if (isImage(item)) return true;
if (item.type === 'tool_result') {
const content = item.content;
if (!content) return false;
if (typeof content === 'string') return false;
if (Array.isArray(content)) return content.some(isImage);
return isImage(content);
}
return false;
});
});
}
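// Illustrative sketch: the new traversal also catches images nested inside
// tool results, which the old one-liner missed.
const sampleMessages: any[] = [
{ role: "user", content: [
{ type: "tool_result", tool_use_id: "tu_1", content: [
{ type: "image", source: { type: "base64", media_type: "image/png", data: "..." } },
] },
] },
];
// containsImageContent(sampleMessages) => true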
+69
@@ -0,0 +1,69 @@
import { z } from "zod";
import { OPENAI_OUTPUT_MAX } from "./openai";
/**
* Helper function to check if a model is from Cohere
*/
export function isCohereModel(model: string): boolean {
// Cohere's command model family
return model.includes("command") || model.includes("cohere");
}
// Basic chat message schema
const CohereChatMessageSchema = z.object({
role: z.enum(["user", "assistant", "system", "developer"]),
content: z.string().nullable(),
name: z.string().optional(),
});
const CohereMessagesSchema = z.array(CohereChatMessageSchema);
// Schema for Cohere chat completions
export const CohereV1ChatCompletionsSchema = z.object({
model: z.string(),
messages: CohereMessagesSchema,
temperature: z.number().optional().default(1),
top_p: z.number().optional().default(1),
max_tokens: z.coerce
.number()
.int()
.nullish()
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
stream: z.boolean().optional().default(false),
stop: z
.union([z.string(), z.array(z.string())])
.optional()
.default([])
.transform((v) => (Array.isArray(v) ? v : [v])),
seed: z.number().int().min(0).optional(),
response_format: z
.object({
type: z.enum(["text", "json_object"]),
schema: z.any().optional()
})
.optional(),
// Structured output with schema
tools: z.array(z.any()).optional(),
frequency_penalty: z.number().optional().default(0),
presence_penalty: z.number().optional().default(0),
});
// Schema for Cohere embeddings
export const CohereV1EmbeddingsSchema = z.object({
model: z.string(),
input: z.union([z.string(), z.array(z.string())]),
encoding_format: z.enum(["float", "base64"]).optional()
});
// Helper function to convert between different message formats if needed
export function normalizeMessages(messages: any[]): any[] {
// From documentation, Cohere supports roles: developer, user, assistant
// The 'developer' role is equivalent to 'system' in OpenAI API
return messages.map((msg) => {
// Convert system role to developer role for Cohere compatibility
if (msg.role === "system") {
return { ...msg, role: "developer" };
}
return msg;
});
}
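A short sketch of the role mapping, assuming normalizeMessages is imported from this module (path illustrative):

import { normalizeMessages } from "./cohere";

// Cohere's chat API uses "developer" where OpenAI uses "system".
normalizeMessages([
  { role: "system", content: "You are terse." },
  { role: "user", content: "Hi" },
]);
// => [{ role: "developer", content: "You are terse." }, { role: "user", content: "Hi" }]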
+71 -30
@@ -5,19 +5,28 @@ import {
} from "./openai";
import { APIFormatTransformer } from "./index";
const TextPartSchema = z.object({
text: z.string(),
thought: z.boolean().optional()
});
const InlineDataPartSchema = z.object({
inlineData: z.object({
mimeType: z.string(),
data: z.string(),
}),
});
const PartSchema = z.union([TextPartSchema, InlineDataPartSchema]);
const GoogleAIV1ContentSchema = z.object({
parts: z
.union([
z.array(z.object({ text: z.string() })),
z.object({ text: z.string() }),
])
// Google allows parts to be an array or a single object, which is really
// annoying for downstream code. We will coerce it to an array here.
.union([PartSchema, z.array(PartSchema)])
.transform((val) => (Array.isArray(val) ? val : [val])),
// TODO: add other media types
role: z.enum(["user", "model"]).optional(),
});
const SafetySettingsSchema = z
.array(
z.object({
@@ -40,18 +49,21 @@ const SafetySettingsSchema = z
)
.optional();
// https://developers.generativeai.google/api/rest/generativelanguage/models/generateContent
const GoogleSearchToolSchema = z.object({
googleSearch: z.object({}),
});
// Corrected: Directly assign the schema since there's only one tool type for now
const ToolSchema = GoogleSearchToolSchema;
export const GoogleAIV1GenerateContentSchema = z
.object({
model: z.string().max(100), //actually specified in path but we need it for the router
stream: z.boolean().optional().default(false), // also used for router
model: z.string().max(100),
stream: z.boolean().optional().default(false),
contents: z.array(GoogleAIV1ContentSchema),
tools: z.array(z.object({})).max(0).optional(),
tools: z.array(ToolSchema).optional(), // Uses the corrected ToolSchema
safetySettings: SafetySettingsSchema,
systemInstruction: GoogleAIV1ContentSchema.optional(),
// quick fix for SillyTavern, which uses camel case field names for everything
// except for system_instruction where it randomly uses snake case.
// google api evidently accepts either case.
system_instruction: GoogleAIV1ContentSchema.optional(),
generationConfig: z
.object({
@@ -61,11 +73,22 @@ export const GoogleAIV1GenerateContentSchema = z
.int()
.optional()
.default(16)
.transform((v) => Math.min(v, 4096)), // TODO: Add config
.transform((v) => Math.min(v, 65536)),
candidateCount: z.literal(1).optional(),
topP: z.number().min(0).max(1).optional(),
topK: z.number().min(1).max(40).optional(),
topK: z.number().min(0).max(500).optional(),
stopSequences: z.array(z.string().max(500)).max(5).optional(),
seed: z.number().int().optional(),
frequencyPenalty: z.number().optional().default(0),
presencePenalty: z.number().optional().default(0),
thinkingConfig: z.object({
includeThoughts: z.boolean().optional(),
thinkingBudget: z.union([
z.literal("auto"),
z.number().int()
]).optional()
}).optional(),
responseModalities: z.any().optional(), // responseModalities: z.array(z.enum(["TEXT"])).optional()
})
.default({}),
})
@@ -91,15 +114,11 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
}
const { messages, ...rest } = result.data;
const foundNames = new Set<string>();
const contents = messages
.map((m) => {
const role = m.role === "assistant" ? "model" : "user";
// Detects character names so we can set stop sequences for them as Gemini
// is prone to continuing as the next character.
// If names are not available, we'll still try to prefix the message
// with generic names so we can set stops for them but they don't work
// as well as real names.
const text = flattenOpenAIMessageContent(m.content);
const propName = m.name?.trim();
const textName =
@@ -109,12 +128,6 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
foundNames.add(name);
// Prefixing messages with their character name seems to help avoid
// Gemini trying to continue as the next character, or at the very least
// ensures it will hit the stop sequence. Otherwise it will start a new
// paragraph and switch perspectives.
// The response will be very likely to include this prefix so frontends
// will need to strip it out.
const textPrefix = textName ? "" : `${name}: `;
return {
parts: [{ text: textPrefix + text }],
@@ -123,7 +136,7 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
})
.reduce<GoogleAIChatMessage[]>((acc, msg) => {
const last = acc[acc.length - 1];
if (last?.role === msg.role) {
if (last?.role === msg.role && 'text' in last.parts[0] && 'text' in msg.parts[0]) {
last.parts[0].text += "\n\n" + msg.parts[0].text;
} else {
acc.push(msg);
@@ -139,17 +152,36 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
stops.push(...Array.from(foundNames).map((name) => `\n${name}:`));
stops = [...new Set(stops)].slice(0, 5);
let tools: z.infer<typeof ToolSchema>[] | undefined = undefined;
let responseModalities: string[] | undefined = undefined;
if (req.body.use_google_search === true) {
req.log.info("Google Search tool requested.");
tools = [{ googleSearch: {} }];
responseModalities = ["TEXT"];
}
let thinkingConfig = undefined;
if (body.generationConfig?.thinkingConfig || body.thinkingConfig) {
thinkingConfig = body.generationConfig?.thinkingConfig || body.thinkingConfig;
}
return {
model: req.body.model,
stream: rest.stream,
contents,
tools: [],
tools: tools,
generationConfig: {
maxOutputTokens: rest.max_tokens,
stopSequences: stops,
topP: rest.top_p,
topK: 40, // openai schema doesn't have this, google ai defaults to 40
topK: 40,
temperature: rest.temperature,
seed: rest.seed,
frequencyPenalty: rest.frequency_penalty,
presencePenalty: rest.presence_penalty,
responseModalities: responseModalities,
...(thinkingConfig ? { thinkingConfig } : {})
},
safetySettings: [
{ category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_NONE" },
@@ -158,5 +190,14 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
{ category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_CIVIC_INTEGRITY", threshold: "BLOCK_NONE" },
],
...(req.body.system_instruction && { system_instruction: req.body.system_instruction }),
...(req.body.systemInstruction && { systemInstruction: req.body.systemInstruction }),
};
};
export function containsImageContent(contents: GoogleAIChatMessage[]): boolean {
return contents.some(content => {
const parts = Array.isArray(content.parts) ? content.parts : [content.parts];
return parts.some(part => 'inlineData' in part);
});
}
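A quick sketch of the detector, which also exercises the single-part coercion noted above (values illustrative):

const contents: any[] = [
  { role: "user", parts: [{ text: "what is in this picture?" }] },
  // A single (non-array) part is handled by the Array.isArray normalization.
  { role: "user", parts: { inlineData: { mimeType: "image/jpeg", data: "<base64>" } } },
];

containsImageContent(contents); // => true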
+6
@@ -17,6 +17,10 @@ import {
OpenAIV1ImagesGenerationSchema,
transformOpenAIToOpenAIImage,
} from "./openai-image";
import {
OpenAIV1ResponsesSchema,
transformOpenAIToOpenAIResponses,
} from "./openai-responses";
import {
GoogleAIV1GenerateContentSchema,
transformOpenAIToGoogleAI,
@@ -52,6 +56,7 @@ export const API_REQUEST_TRANSFORMERS: TransformerMap = {
"openai->anthropic-text": transformOpenAIToAnthropicText,
"openai->openai-text": transformOpenAIToOpenAIText,
"openai->openai-image": transformOpenAIToOpenAIImage,
"openai->openai-responses": transformOpenAIToOpenAIResponses,
"openai->google-ai": transformOpenAIToGoogleAI,
"mistral-ai->mistral-text": transformMistralChatToText,
};
@@ -62,6 +67,7 @@ export const API_REQUEST_VALIDATORS: Record<APIFormat, z.ZodSchema<any>> = {
openai: OpenAIV1ChatCompletionSchema,
"openai-text": OpenAIV1TextCompletionSchema,
"openai-image": OpenAIV1ImagesGenerationSchema,
"openai-responses": OpenAIV1ResponsesSchema,
"google-ai": GoogleAIV1GenerateContentSchema,
"mistral-ai": MistralAIV1ChatCompletionsSchema,
"mistral-text": MistralAIV1TextCompletionsSchema,
+121 -8
@@ -4,9 +4,61 @@ import { Template } from "@huggingface/jinja";
import { APIFormatTransformer } from "./index";
import { logger } from "../../logger";
// Define the content types for multimodal messages
export const TextContentSchema = z.object({
type: z.literal("text"),
text: z.string()
});
export const ImageUrlContentSchema = z.object({
type: z.literal("image_url"),
image_url: z.union([
// URL format (https://...)
z.string().url(),
// Base64 format (data:image/jpeg;base64,...)
z.string().regex(/^data:image\/(jpeg|png|gif|webp);base64,/),
// Object format (might contain detail or url properties)
z.record(z.any()),
// Allow any string for maximum compatibility
z.string()
])
});
export const ContentItemSchema = z.union([TextContentSchema, ImageUrlContentSchema]);
// Export types for the content schemas
export type TextContent = z.infer<typeof TextContentSchema>;
export type ImageUrlContent = z.infer<typeof ImageUrlContentSchema>;
export type ContentItem = z.infer<typeof ContentItemSchema>;
// List of Mistral models with vision capabilities
export const MISTRAL_VISION_MODELS = [
"pixtral-12b-2409",
"pixtral-12b-latest",
"pixtral-large-2411",
"pixtral-large-latest",
"mistral-small-2503",
"mistral-small-latest",
"mistral-medium-latest",
"mistral-medium-2505"
];
// Helper function to check if a model supports vision
export function isMistralVisionModel(model: string): boolean {
return MISTRAL_VISION_MODELS.some(visionModel =>
model === visionModel ||
model.startsWith(`${visionModel}-`)
);
}
// Main Mistral chat message schema
const MistralChatMessageSchema = z.object({
role: z.enum(["system", "user", "assistant", "tool"]), // TODO: implement tools
content: z.string(),
// Support both string content (for backwards compatibility) and array of content items (for multimodal)
content: z.union([
z.string(),
z.array(ContentItemSchema)
]),
prefix: z.boolean().optional(),
});
@@ -107,7 +159,26 @@ export function fixMistralPrompt(
// Consolidate multiple messages from the same role
const last = acc[acc.length - 1];
if (last.role === copy.role) {
last.content += "\n\n" + copy.content;
// Handle different content types for consolidation
if (typeof last.content === "string" && typeof copy.content === "string") {
// Both are strings, concatenate them
last.content += "\n\n" + copy.content;
} else if (Array.isArray(last.content) && typeof copy.content === "string") {
// Add the string content as a new text content item
last.content.push({
type: "text",
text: copy.content
});
} else if (typeof last.content === "string" && Array.isArray(copy.content)) {
// Convert last.content to array and append copy.content items
last.content = [
{ type: "text", text: last.content },
...copy.content
];
} else if (Array.isArray(last.content) && Array.isArray(copy.content)) {
// Both are arrays, concatenate them
last.content = [...last.content, ...copy.content];
}
} else {
acc.push(copy);
}
@@ -125,18 +196,41 @@ export function fixMistralPrompt(
let jinjaTemplate: Template;
let renderTemplate: (messages: MistralAIChatMessage[]) => string;
// Helper function to convert multimodal content to string format for text-only models
function contentToString(content: string | any[]): string {
if (typeof content === "string") {
return content;
} else if (Array.isArray(content)) {
// For multimodal content, extract only the text parts
// Images are not supported in text-only templates
return content
.filter(item => item.type === "text")
.map(item => (item as any).text)
.join("\n\n");
}
return "";
}
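A sketch of how contentToString flattens multimodal content for the text-only template (inputs illustrative):

contentToString([
  { type: "text", text: "Part one." },
  { type: "image_url", image_url: "data:image/png;base64,..." }, // images are dropped
  { type: "text", text: "Part two." },
]);
// => "Part one.\n\nPart two."

contentToString("plain string"); // => "plain string"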
function renderMistralPrompt(messages: MistralAIChatMessage[]) {
if (!jinjaTemplate) {
logger.warn("Lazy loading mistral chat template...");
const { chatTemplate, bosToken, eosToken } =
require("./templates/mistral-template").MISTRAL_TEMPLATE;
jinjaTemplate = new Template(chatTemplate);
renderTemplate = (messages) =>
jinjaTemplate.render({
messages,
renderTemplate = (messages) => {
// We need to convert any multimodal content to string format for the template
const textOnlyMessages = messages.map(msg => ({
...msg,
content: contentToString(msg.content)
}));
return jinjaTemplate.render({
messages: textOnlyMessages,
bos_token: bosToken,
eos_token: eosToken,
});
};
}
return renderTemplate(messages);
@@ -145,6 +239,9 @@ function renderMistralPrompt(messages: MistralAIChatMessage[]) {
/**
* Attempts to convert a Mistral chat completions request to a text completions,
* using the official prompt template published by Mistral.
*
* Note: This transformation is only applicable for text-only models.
* Multimodal/vision models (Pixtral, etc.) cannot use this transformation.
*/
export const transformMistralChatToText: APIFormatTransformer<
typeof MistralAIV1TextCompletionsSchema
@@ -159,8 +256,24 @@ export const transformMistralChatToText: APIFormatTransformer<
throw result.error;
}
const { messages, ...rest } = result.data;
const prompt = renderMistralPrompt(messages);
// Check if this is a vision request (contains any image_url content items)
const { messages, model, ...rest } = result.data;
const hasVisionContent = messages.some(msg =>
Array.isArray(msg.content) &&
msg.content.some(item => item.type === "image_url")
);
return { ...rest, prompt, messages: undefined };
// Cannot transform vision requests to text completions
if (hasVisionContent) {
req.log.warn(
{ model },
"Cannot transform Mistral vision request to text completions format"
);
throw new Error(
"Vision requests (with image_url content) cannot be transformed to text completions format"
);
}
const prompt = renderMistralPrompt(messages);
return { ...rest, model, prompt, messages: undefined };
};
+87
@@ -0,0 +1,87 @@
import { z } from "zod";
import { OPENAI_OUTPUT_MAX } from "./openai";
/**
* Helper function to check if a model is from Moonshot
*/
export function isMoonshotModel(model: string): boolean {
return model.includes("moonshot");
}
/**
* Helper function to check if a model is a Moonshot vision model
*/
export function isMoonshotVisionModel(model: string): boolean {
return model.includes("moonshot") && model.includes("vision");
}
// Content schema for vision models
const MoonshotVisionContentSchema = z.union([
z.string(),
z.array(
z.union([
z.object({
type: z.literal("text"),
text: z.string(),
}),
z.object({
type: z.literal("image_url"),
image_url: z.object({
url: z.string(),
detail: z.enum(["low", "high", "auto"]).optional(),
}),
}),
])
),
]);
// Basic chat message schema
const MoonshotChatMessageSchema = z.object({
role: z.enum(["user", "assistant", "system"]),
content: z.union([z.string(), MoonshotVisionContentSchema]).nullable(),
name: z.string().optional(),
// Support for partial mode
partial: z.boolean().optional(),
});
const MoonshotMessagesSchema = z.array(MoonshotChatMessageSchema);
// Schema for Moonshot chat completions
export const MoonshotV1ChatCompletionsSchema = z.object({
model: z.string(),
messages: MoonshotMessagesSchema,
temperature: z.number().optional().default(0.3),
top_p: z.number().optional().default(1),
max_tokens: z.coerce
.number()
.int()
.nullish()
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
stream: z.boolean().optional().default(false),
stop: z
.union([z.string(), z.array(z.string()).max(5)])
.optional()
.default([])
.transform((v) => (Array.isArray(v) ? v : [v])),
seed: z.number().int().min(0).optional(),
response_format: z
.object({
type: z.enum(["text", "json_object"])
})
.optional(),
tools: z.array(z.any()).optional(),
tool_choice: z.any().optional(),
frequency_penalty: z.number().min(-2).max(2).optional().default(0),
presence_penalty: z.number().min(-2).max(2).optional().default(0),
n: z.number().int().min(1).max(5).optional().default(1),
});
// Schema for Moonshot embeddings
export const MoonshotV1EmbeddingsSchema = z.object({
model: z.string(),
input: z.union([z.string(), z.array(z.string())]),
encoding_format: z.enum(["float", "base64"]).optional()
});
// Note: Partial mode handling is implemented directly in the proxy middleware
// to follow the Deepseek-style consolidation pattern
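A sketch of the helpers and schema defaults above; the model names follow Moonshot's public naming and are used here illustratively:

isMoonshotModel("moonshot-v1-32k"); // => true
isMoonshotVisionModel("moonshot-v1-8k-vision-preview"); // => true
isMoonshotVisionModel("moonshot-v1-8k"); // => false

const parsed = MoonshotV1ChatCompletionsSchema.parse({
  model: "moonshot-v1-8k",
  messages: [{ role: "user", content: "hello" }],
  stop: "###",
});
// parsed.stop => ["###"] (string coerced to array), parsed.temperature => 0.3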
+277 -21
@@ -1,20 +1,58 @@
import { z } from "zod";
import { Request } from "express";
import { OpenAIV1ChatCompletionSchema } from "./openai";
import { APIFormatTransformer } from "./index";
// Extend the Express Request type to include multimodal content
declare global {
namespace Express {
interface Request {
multimodalContent?: {
prompt?: string;
images?: string[];
};
}
}
}
// https://platform.openai.com/docs/api-reference/images/create
export const OpenAIV1ImagesGenerationSchema = z
.object({
prompt: z.string().max(4000),
prompt: z.string().max(32000), // gpt-image-1 supports up to 32000 chars
model: z.string().max(100).optional(),
quality: z.enum(["standard", "hd"]).optional().default("standard"),
n: z.number().int().min(1).max(4).optional().default(1),
response_format: z.enum(["url", "b64_json"]).optional(),
// Support for image inputs (multimodal capability of gpt-image-1)
image: z.union([
z.string(), // single image (base64 or URL)
z.array(z.string()) // array of images
]).optional(),
mask: z.string().optional(), // mask image for editing
// Different quality options based on model
quality: z
.union([
z.enum(["standard", "hd"]), // dall-e-3 options
z.enum(["high", "medium", "low"]), // gpt-image-1 options
z.literal("auto") // default for gpt-image-1
])
.optional()
.default("standard"),
n: z.number().int().min(1).max(10).optional().default(1), // gpt-image-1 supports up to 10
response_format: z.enum(["url", "b64_json"]).optional(), // Note: gpt-image-1 always returns b64_json
// Enhanced size options for gpt-image-1
size: z
.enum(["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"])
.union([
// dalle models
z.enum(["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]),
// gpt-image-1 models (adds landscape, portrait, auto)
z.enum(["1024x1024", "1536x1024", "1024x1536", "auto"])
])
.optional()
.default("1024x1024"),
style: z.enum(["vivid", "natural"]).optional().default("vivid"),
style: z.enum(["vivid", "natural"]).optional().default("vivid"), // dall-e-3 only
// New gpt-image-1 specific parameters
background: z.enum(["transparent", "opaque", "auto"]).optional(), // gpt-image-1 only
moderation: z.enum(["low", "auto"]).optional(), // gpt-image-1 only
output_compression: z.number().int().min(0).max(100).optional(), // gpt-image-1 only
output_format: z.enum(["png", "jpeg", "webp"]).optional(), // gpt-image-1 only
user: z.string().max(500).optional(),
})
.strip();
@@ -34,9 +72,41 @@ export const transformOpenAIToOpenAIImage: APIFormatTransformer<
}
const { messages } = result.data;
const prompt = messages.filter((m) => m.role === "user").pop()?.content;
if (Array.isArray(prompt)) {
throw new Error("Image generation prompt must be a text message.");
const userMessage = messages.filter((m) => m.role === "user").pop();
if (!userMessage) {
throw new Error("No user message found in the request.");
}
const content = userMessage.content;
// Handle array content (multimodal content with text and images)
if (Array.isArray(content)) {
const textParts: string[] = [];
const imageParts: string[] = [];
// Process content parts, extracting text and images
content.forEach(part => {
if (typeof part === 'string') {
textParts.push(part);
} else if (part.type === 'image_url') {
// Extract image URL or base64 data from the content
const imageUrl = typeof part.image_url === 'string'
? part.image_url
: part.image_url.url;
imageParts.push(imageUrl);
}
});
// Join all text parts to form the prompt
const prompt = textParts.join('\n');
// For gpt-image-1, we'll pass both the text prompt and image(s)
req.multimodalContent = {
prompt,
images: imageParts
};
} else if (typeof content !== 'string') {
throw new Error("Image generation prompt must be a text message or multimodal content.");
}
if (body.stream) {
@@ -49,20 +119,206 @@ export const transformOpenAIToOpenAIImage: APIFormatTransformer<
// character name or wrapping the entire thing in quotes. We will look for
// the index of "Image:" and use everything after that as the prompt.
const index = prompt?.toLowerCase().indexOf("image:");
if (index === -1 || !prompt) {
throw new Error(
`Start your prompt with 'Image:' followed by a description of the image you want to generate (received: ${prompt}).`
);
// Determine if this is a multimodal request (with images)
const isMultimodalRequest = Array.isArray(content) && req.multimodalContent?.images && req.multimodalContent.images.length > 0;
// Check if this is a request for gpt-image-1
const isGptImageRequest = body.model?.includes("gpt-image") || false;
// Only enforce the "Image:" prefix for non-multimodal, non-gpt-image-1 requests
if (!isMultimodalRequest && !isGptImageRequest && typeof content === 'string') {
const textIndex = content.toLowerCase().indexOf("image:");
if (textIndex === -1) {
throw new Error(
`Start your prompt with 'Image:' followed by a description of the image you want to generate (received: ${content}).`
);
}
}
// TODO: Add some way to specify parameters via chat message
// Determine which model to use (gpt-image-1 or dall-e-3)
const isGptImage = body.model?.includes("gpt-image") || false;
// For gpt-image-1, add the 'Image:' prefix if it's missing but only for string content
let modifiedStringContent = typeof content === 'string' ? content : '';
if (isGptImageRequest && typeof content === 'string' && !content.toLowerCase().includes("image:")) {
req.log.info("Adding 'Image:' prefix to gpt-image-1 prompt");
modifiedStringContent = `Image: ${content}`;
// Store this in the request object for later use
req.multimodalContent = req.multimodalContent || {};
req.multimodalContent.prompt = modifiedStringContent;
}
// TODO: Add some way to specify parameters via chat message
const transformed = {
model: body.model.includes("dall-e") ? body.model : "dall-e-3",
quality: "standard",
size: "1024x1024",
response_format: "url",
prompt: prompt.slice(index! + 6).trim(),
// Get the correct text prompt either from multimodal content or plain string content
let textPrompt: string | undefined;
let index = -1;
if (Array.isArray(content)) {
// For array content, use the prompt from multimodal content if available
textPrompt = req.multimodalContent?.prompt;
} else if (typeof content === 'string') {
// For string content, use the modified content which might have the Image: prefix for gpt-image-1
const contentToProcess = isGptImageRequest ? modifiedStringContent : content;
// Find the "Image:" prefix in the content
index = contentToProcess.toLowerCase().indexOf("image:");
// For gpt-image-1, we might have just added the prefix, so we need to handle both cases
if (index !== -1) {
textPrompt = contentToProcess.slice(index + 6).trim();
} else if (isGptImageRequest) {
// For gpt-image-1, use the whole content if no prefix is found
textPrompt = content; // Use the original content without prefix
} else {
// For other models, default to the content as-is
textPrompt = contentToProcess;
}
}
// Validate that we have a text prompt
if (!textPrompt) {
throw new Error("No text prompt found in the request.");
}
// Determine the exact model being used
let modelName = "dall-e-2"; // Default
if (isGptImage) {
modelName = "gpt-image-1";
} else if (body.model?.includes("dall-e-3")) {
modelName = "dall-e-3";
} else if (body.model?.includes("dall-e-2")) {
modelName = "dall-e-2";
} else {
// If no specific model requested, default to dall-e-3
modelName = "dall-e-3";
}
// Start with basic parameters common to all models
const transformed: any = {
model: modelName,
prompt: textPrompt,
};
// Add model-specific parameters
if (modelName === "gpt-image-1") {
// GPT Image specific parameters - Ensure we only include parameters that are valid for gpt-image-1
transformed.quality = "auto"; // Default quality for gpt-image-1
transformed.size = "1024x1024"; // Default size (square)
transformed.moderation = "low"; // Always set moderation to low for gpt-image-1
// Optional GPT Image parameters
if (body.background) transformed.background = body.background;
if (body.output_format) transformed.output_format = body.output_format;
if (body.output_compression) transformed.output_compression = body.output_compression;
// Handle specific quality settings for gpt-image-1
if (body.quality && ["high", "medium", "low", "auto"].includes(body.quality)) {
transformed.quality = body.quality;
}
// Handle specific size settings for gpt-image-1
if (body.size && ["1024x1024", "1536x1024", "1024x1536", "auto"].includes(body.size)) {
transformed.size = body.size;
}
// IMPORTANT: Remove any style parameter as it's not supported by gpt-image-1
delete transformed.style;
// Log what we're sending for debugging
req.log.info({ model: "gpt-image-1", allowedParams: Object.keys(transformed) }, "Filtered parameters for gpt-image-1");
// No response_format for gpt-image-1 as it always returns b64_json
} else if (modelName === "dall-e-3") {
// DALL-E 3 specific parameters
transformed.size = "1024x1024"; // Default size
transformed.response_format = "url"; // Default format
transformed.quality = "standard"; // Default quality
// Handle DALL-E 3 style parameter
if (body.style && ["vivid", "natural"].includes(body.style)) {
transformed.style = body.style;
} else {
transformed.style = "vivid"; // Default style
}
// Handle specific quality settings for dall-e-3
if (body.quality && ["standard", "hd"].includes(body.quality)) {
transformed.quality = body.quality;
}
// Handle specific size settings for dall-e-3
if (body.size && ["1024x1024", "1792x1024", "1024x1792"].includes(body.size)) {
transformed.size = body.size;
}
} else {
// DALL-E 2 specific parameters
transformed.size = "1024x1024"; // Default size
transformed.response_format = "url"; // Default format
// NO quality parameter for dall-e-2
// Explicitly remove the quality parameter before sending
delete transformed.quality;
// Handle specific size settings for dall-e-2
if (body.size && ["256x256", "512x512", "1024x1024"].includes(body.size)) {
transformed.size = body.size;
}
}
// Handle common parameters
if (body.n && !isNaN(parseInt(body.n))) {
// For dall-e-3, only n=1 is supported
if (modelName === "dall-e-3" && parseInt(body.n) > 1) {
transformed.n = 1;
} else {
transformed.n = parseInt(body.n);
}
}
// Handle response_format for non-gpt-image models
if (!isGptImage && body.response_format && ["url", "b64_json"].includes(body.response_format)) {
transformed.response_format = body.response_format;
}
// If this is gpt-image-1 and we have image content, add it to the transformed request
if (isGptImage && req.multimodalContent?.images && req.multimodalContent.images.length > 0) {
// For the edit endpoint, we need to format the images properly
transformed.image = req.multimodalContent.images.length === 1
? req.multimodalContent.images[0]
: req.multimodalContent.images;
// Any request with images for gpt-image-1 should use the edits endpoint
req.log.info(`${req.multimodalContent.images.length} image(s) detected for gpt-image-1, using images/edits endpoint`);
if (req.path.startsWith("/v1/chat/completions")) {
req.url = req.url.replace("/v1/chat/completions", "/v1/images/edits");
}
}
// For dall-e-2, we need to make sure we don't introduce unsupported parameters
// due to default values in the schema. Let's bypass Zod schema validation here
// for dall-e-2 and only include the supported parameters.
if (modelName === "dall-e-2") {
// Only include parameters that dall-e-2 supports
const filteredTransformed: any = {};
// List of parameters supported by dall-e-2
const supportedParams = [
"model", "prompt", "n", "size", "response_format", "user"
];
// Copy only supported parameters
for (const param of supportedParams) {
if (transformed[param] !== undefined) {
filteredTransformed[param] = transformed[param];
}
}
// Log what we're sending
req.log.info({ model: "dall-e-2", params: Object.keys(filteredTransformed) }, "Filtered parameters for dall-e-2");
return filteredTransformed;
}
// gpt-image-1 must also bypass the final schema parse, otherwise schema
// defaults (notably style) would be re-added after being deleted above.
if (modelName === "gpt-image-1") {
return transformed;
}
// For other models, use the schema as normal
return OpenAIV1ImagesGenerationSchema.parse(transformed);
};
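A rough harness showing the filtering above; the request stub and log shim are assumptions for illustration, and the transformer is assumed to be async like its Responses counterpart:

const fakeReq: any = {
  path: "/v1/chat/completions",
  url: "/v1/chat/completions",
  log: { info: () => {}, warn: () => {} },
  body: {
    model: "gpt-image-1",
    messages: [{ role: "user", content: "Image: a red fox in the snow" }],
  },
};

Promise.resolve(transformOpenAIToOpenAIImage(fakeReq)).then((out) => {
  // Expected: gpt-image-1-safe params only, i.e. prompt "a red fox in the
  // snow", quality "auto", size "1024x1024", moderation "low",
  // no style and no response_format.
  console.log(out);
});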
@@ -0,0 +1,61 @@
import { z } from "zod";
import { Request } from "express";
import { OpenAIChatMessage, OpenAIV1ChatCompletionSchema } from "./openai";
// Schema for the OpenAI Responses API based on the chat completion schema
// with some additional fields specific to the Responses API
export const OpenAIV1ResponsesSchema = z.object({
model: z.string(),
input: z.object({
messages: z.array(z.any())
}).optional(),
previous_response_id: z.string().optional(), // Responses API uses snake_case, matching max_output_tokens
max_output_tokens: z.number().int().positive().optional(),
temperature: z.number().min(0).max(2).optional(),
top_p: z.number().min(0).max(1).optional(),
n: z.number().int().positive().optional(),
stream: z.boolean().optional(),
stop: z.union([z.string(), z.array(z.string())]).optional(),
presence_penalty: z.number().min(-2).max(2).optional(),
frequency_penalty: z.number().min(-2).max(2).optional(),
user: z.string().optional(),
tools: z.array(z.any()).optional(),
reasoning_effort: z.enum(["low", "medium", "high"]).optional(),
});
// Allow transforming from OpenAI Chat to Responses format
export async function transformOpenAIToOpenAIResponses(
req: Request
): Promise<z.infer<typeof OpenAIV1ResponsesSchema>> {
const body = { ...req.body };
// Move 'messages' to 'input.messages' as required by the Responses API
if (body.messages && !body.input) {
body.input = {
messages: body.messages
};
delete body.messages;
}
// Convert max_tokens to max_output_tokens if present and not set
if (body.max_tokens && !body.max_output_tokens) {
body.max_output_tokens = body.max_tokens;
delete body.max_tokens;
}
// Map conversation_id to previous_response_id if present (the Responses
// API expects snake_case here, like max_output_tokens above)
if (body.conversation_id && !body.previous_response_id) {
body.previous_response_id = body.conversation_id;
delete body.conversation_id;
}
// Ensure tools have the right format if present
if (body.tools) {
body.tools = body.tools.map((tool: any) => ({
...tool,
type: tool.type || "function"
}));
}
return body;
}
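A sketch of the field remapping, assuming only req.body is consulted (as in the function above); the body values are illustrative:

const req: any = {
  body: {
    model: "gpt-5",
    messages: [{ role: "user", content: "hi" }],
    max_tokens: 256,
    conversation_id: "resp_abc123",
  },
};

transformOpenAIToOpenAIResponses(req).then((out) => {
  // messages moved under input.messages, max_tokens renamed to
  // max_output_tokens, conversation_id mapped to previous_response_id.
  console.log(out.input?.messages.length, out.max_output_tokens, out.previous_response_id);
});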
+5 -3
@@ -21,11 +21,11 @@ export const OpenAIV1ChatCompletionSchema = z
model: z.string().max(100),
messages: z.array(
z.object({
role: z.enum(["system", "user", "assistant", "tool", "function"]),
role: z.enum(["system", "developer", "user", "assistant", "tool", "function"]),
content: z.union([z.string(), OpenAIV1ChatContentArraySchema]),
name: z.string().optional(),
tool_calls: z.array(z.any()).optional(),
function_call: z.array(z.any()).optional(),
function_call: z.any().optional(),
tool_call_id: z.string().optional(),
}),
{
@@ -77,12 +77,14 @@ export const OpenAIV1ChatCompletionSchema = z
functions: z.array(z.any()).optional(),
tool_choice: z.any().optional(),
function_choice: z.any().optional(),
reasoning_effort: z.enum(["minimal", "low", "medium", "high"]).optional(),
verbosity: z.enum(["low", "medium", "high"]).optional(),
response_format: z.any(),
})
// Tool usage must be enabled via config because we currently have no way to
// track quota usage for them or enforce limits.
.omit(
Boolean(config.allowOpenAIToolUsage) ? {} : { tools: true, functions: true }
!Boolean(config.allowOpenAIToolUsage) ? { tools: true, functions: true } : {}
)
.strip();
export type OpenAIChatMessage = z.infer<
+118
@@ -0,0 +1,118 @@
import { z } from "zod";
import { OPENAI_OUTPUT_MAX } from "./openai";
/**
* Helper function to check if a model is from Qwen
*/
export function isQwenModel(model: string): boolean {
// Remove any suffix like -thinking or -nonthinking for checking
const baseModel = model.replace(/-thinking$|-nonthinking$/, '');
return baseModel.includes("qwen"); // includes() already subsumes the startsWith() check
}
/**
* Helper function to check if a model supports thinking capability
*/
export function isQwenThinkingModel(model: string): boolean {
// Remove any suffix like -thinking or -nonthinking for checking
const baseModel = model.replace(/-thinking$|-nonthinking$/, '');
// All Qwen3 models support thinking
if (baseModel.startsWith("qwen3")) {
return true;
}
// Other models that support thinking
return (
baseModel === "qwen-plus-latest" ||
baseModel === "qwen-plus-2025-04-28" ||
baseModel === "qwen-turbo-latest" ||
baseModel === "qwen-turbo-2025-04-28"
);
}
// Basic chat message schema
const QwenChatMessageSchema = z.object({
role: z.enum(["user", "assistant", "system"]),
content: z.string().nullable(),
name: z.string().optional(),
});
const QwenMessagesSchema = z.array(QwenChatMessageSchema);
// Schema for Qwen chat completions
export const QwenV1ChatCompletionsSchema = z.object({
model: z.string(),
messages: QwenMessagesSchema,
temperature: z.number().optional().default(1),
top_p: z.number().optional().default(1),
max_tokens: z.coerce
.number()
.int()
.nullish()
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
stream: z.boolean().optional().default(false),
stop: z
.union([z.string(), z.array(z.string())])
.optional()
.default([])
.transform((v) => (Array.isArray(v) ? v : [v])),
seed: z.number().int().min(0).optional(),
response_format: z
.object({
type: z.enum(["text", "json_object"]),
schema: z.any().optional()
})
.optional(),
tools: z.array(z.any()).optional(),
frequency_penalty: z.number().optional().default(0),
presence_penalty: z.number().optional().default(0),
// Qwen-specific parameters
enable_thinking: z.boolean().optional(),
thinking_budget: z.number().optional(),
});
// Schema for Qwen embeddings
export const QwenV1EmbeddingsSchema = z.object({
model: z.string(),
input: z.union([z.string(), z.array(z.string())]),
encoding_format: z.enum(["float", "base64"]).optional()
});
/**
* Helper function to normalize messages for Qwen API
* Qwen uses the standard OpenAI message format, so no transformation is needed
*/
export function normalizeMessages(messages: any[]): any[] {
return messages;
}
/**
* Helper function to check if a model is a Qwen3 model
*/
export function isQwen3Model(model: string): boolean {
// Remove any suffix like -thinking or -nonthinking for checking
const baseModel = model.replace(/-thinking$|-nonthinking$/, '');
return baseModel.startsWith("qwen3");
}
/**
* Helper function to check if a model name has the thinking variant suffix
*/
export function isThinkingVariant(model: string): boolean {
return model.endsWith("-thinking");
}
/**
* Helper function to check if a model name has the non-thinking variant suffix
*/
export function isNonThinkingVariant(model: string): boolean {
return model.endsWith("-nonthinking");
}
/**
* Get the base model name without any thinking/nonthinking suffix
*/
export function getBaseModelName(model: string): string {
return model.replace(/-thinking$|-nonthinking$/, '');
}
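A sketch of the suffix-variant convention these helpers implement; the -thinking/-nonthinking suffix is presumably a proxy-side routing hint that getBaseModelName strips before the upstream call:

isQwenThinkingModel("qwen3-235b-a22b"); // => true (all Qwen3 models)
isQwenThinkingModel("qwen-plus-latest"); // => true (allow-listed)
isQwenThinkingModel("qwen-max"); // => false

isThinkingVariant("qwen3-32b-thinking"); // => true
isNonThinkingVariant("qwen3-32b-nonthinking"); // => true
getBaseModelName("qwen3-32b-thinking"); // => "qwen3-32b"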
+167
@@ -0,0 +1,167 @@
import { z } from "zod";
import { OPENAI_OUTPUT_MAX } from "./openai";
// Define the content types for multimodal messages
export const TextContentSchema = z.object({
type: z.literal("text"),
text: z.string()
});
export const ImageUrlContentSchema = z.object({
type: z.literal("image_url"),
image_url: z.union([
// URL format (https://...)
z.string().url(),
// Base64 format (data:image/jpeg;base64,...)
z.string().regex(/^data:image\/(jpeg|png|gif|webp);base64,/),
// Object format (might contain detail or url properties)
z.object({
url: z.string(),
detail: z.enum(["low", "high"]).optional()
}),
// Allow any string for maximum compatibility
z.string()
])
});
export const ContentItemSchema = z.union([TextContentSchema, ImageUrlContentSchema]);
// Export types for the content schemas
export type TextContent = z.infer<typeof TextContentSchema>;
export type ImageUrlContent = z.infer<typeof ImageUrlContentSchema>;
export type ContentItem = z.infer<typeof ContentItemSchema>;
// Helper function to check if a model supports vision
export function isGrokVisionModel(model: string): boolean {
// Check if the model name contains '-vision' anywhere in the name
// This makes it future-proof for new vision models
return model.toLowerCase().includes("-vision");
}
// Helper function to check if a model supports image generation
export function isGrokImageGenModel(model: string): boolean {
// Check if the model name contains '-image' anywhere in the name
// This makes it future-proof for new image generation models
return model.toLowerCase().includes("-image");
}
// Helper function to check if a model supports reasoning
export function isGrokReasoningModel(model: string): boolean {
// grok-3-mini variants and grok-4-0709 support reasoning
const modelLower = model.toLowerCase();
return (modelLower.includes("-mini") && modelLower.includes("grok-3")) ||
modelLower.includes("grok-4");
}
// Helper function to check if a model supports reasoning_effort parameter
export function isGrokReasoningEffortModel(model: string): boolean {
// Only grok-3-mini variants support reasoning_effort parameter
// grok-4-0709 does NOT support reasoning_effort
const modelLower = model.toLowerCase();
return modelLower.includes("-mini") && modelLower.includes("grok-3");
}
// Helper function to check if a model returns reasoning_content
export function isGrokReasoningContentModel(model: string): boolean {
// Only grok-3-mini variants return reasoning_content
// grok-4-0709 does NOT return reasoning_content
const modelLower = model.toLowerCase();
return modelLower.includes("-mini") && modelLower.includes("grok-3");
}
// Main Grok chat message schema
const XaiChatMessageSchema = z.object({
role: z.enum(["system", "user", "assistant", "tool", "function"]),
// Support both string content (for backwards compatibility) and array of content items (for multimodal)
content: z.union([
z.string().nullable(),
z.array(ContentItemSchema)
]),
// Reasoning content field (for grok-3-mini models)
reasoning_content: z.string().optional(),
// Tool call fields
tool_call_id: z.string().optional(),
name: z.string().optional(),
tool_calls: z.array(z.any()).optional(),
});
const XaiMessagesSchema = z.array(XaiChatMessageSchema);
// Basic chat completions schema
export const XaiV1ChatCompletionsSchema = z.object({
model: z.string(),
messages: XaiMessagesSchema,
temperature: z.number().optional().default(1),
top_p: z.number().optional().default(1),
max_completion_tokens: z.coerce
.number()
.int()
.nullish()
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
max_tokens: z.coerce // Deprecated parameter, but kept for backward compatibility
.number()
.int()
.nullish()
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
stream: z.boolean().optional().default(false),
// Grok docs say that `stop` can be a string or array
stop: z
.union([z.string(), z.array(z.string())])
.optional()
.default([])
.transform((v) => (Array.isArray(v) ? v : [v])),
seed: z.number().int().min(0).optional(),
response_format: z
.object({ type: z.enum(["text", "json_object", "json_schema"]), json_schema: z.any().optional() })
.optional(),
// reasoning_effort parameter for grok-3-mini models; no default, so it is
// only sent when the client supplies it (grok-4 does not accept it)
reasoning_effort: z.enum(["low", "medium", "high"]).optional(),
stream_options: z.object({
include_usage: z.boolean()
}).optional(),
user: z.string().optional(),
// Fields to support function calling
tools: z.array(z.any()).optional(),
tool_choice: z.union([
z.string(),
z.object({
type: z.literal("function"),
function: z.object({
name: z.string()
})
})
]).optional(),
// Advanced parameters
frequency_penalty: z.number().optional().default(0),
presence_penalty: z.number().optional().default(0),
logprobs: z.boolean().optional().default(false),
top_logprobs: z.number().int().min(0).max(8).optional(),
});
// Image Generation schema
export const XaiV1ImageGenerationsSchema = z.object({
model: z.string().optional(),
prompt: z.string(),
n: z.number().int().min(1).max(10).optional().default(1),
response_format: z.enum(["url", "b64_json"]).optional().default("url"),
user: z.string().optional(),
// These are marked as not supported in the documentation but included for compatibility
quality: z.string().optional(),
size: z.string().optional(),
style: z.string().optional(),
});
// Helper function to convert multimodal content to string format for text-only models
export function contentToString(content: string | any[] | null): string {
if (typeof content === "string") {
return content || "";
} else if (Array.isArray(content)) {
// For multimodal content, extract only the text parts
// Images are not supported in text-only templates
return content
.filter(item => item.type === "text")
.map(item => (item as any).text)
.join("\n\n");
}
return "";
}
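A sketch of the capability split the helpers encode: grok-4 reasons internally but rejects the knobs that grok-3-mini accepts:

isGrokReasoningModel("grok-4-0709"); // => true (reasons internally)
isGrokReasoningEffortModel("grok-4-0709"); // => false (no reasoning_effort param)
isGrokReasoningContentModel("grok-4-0709"); // => false (no reasoning_content in output)
isGrokReasoningEffortModel("grok-3-mini-fast"); // => true
isGrokReasoningContentModel("grok-3-mini-fast"); // => true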
+82
@@ -0,0 +1,82 @@
import { Request } from "express";
/**
* Claude Opus 4.1 has stricter API validation that doesn't allow both temperature
* and top_p parameters to be specified simultaneously. This function validates and
* adjusts the request parameters for Claude Opus 4.1 models ONLY.
*
* Rules:
* - If both parameters are at default values (1.0), omit top_p
* - If only one parameter is at default, omit the default one
* - If both are non-default, throw an error
*/
export function validateClaude41OpusParameters(req: Request): void {
const model = req.body.model;
// Only apply this validation to Claude Opus 4.1 models
if (!isClaude41OpusModel(model)) {
return;
}
const temperature = req.body.temperature;
const topP = req.body.top_p;
// If neither parameter is specified, no validation needed
if (temperature === undefined && topP === undefined) {
return;
}
// Default values for Claude API
const DEFAULT_TEMPERATURE = 1.0;
const DEFAULT_TOP_P = 1.0;
const tempIsDefault = temperature === undefined || temperature === DEFAULT_TEMPERATURE;
const topPIsDefault = topP === undefined || topP === DEFAULT_TOP_P;
// If both are at default values, omit top_p (keep temperature)
if (tempIsDefault && topPIsDefault) {
delete req.body.top_p;
req.log?.info("Claude Opus 4.1: Both temperature and top_p at default, omitting top_p");
return;
}
// If only one is at default, omit the default one
if (tempIsDefault && !topPIsDefault) {
delete req.body.temperature;
req.log?.info("Claude Opus 4.1: Temperature at default, omitting temperature");
return;
}
if (!tempIsDefault && topPIsDefault) {
delete req.body.top_p;
req.log?.info("Claude Opus 4.1: top_p at default, omitting top_p");
return;
}
// If both are non-default, throw an error
if (!tempIsDefault && !topPIsDefault) {
throw new Error(
"Claude Opus 4.1 does not support both temperature and top_p parameters being set to non-default values simultaneously. " +
"Please specify only one of these parameters or set one to its default value (1.0)."
);
}
}
/**
* Checks if the given model is a Claude Opus 4.1 model.
* This includes all provider formats for Claude Opus 4.1 ONLY.
*/
function isClaude41OpusModel(model: string): boolean {
if (!model) return false;
// Anthropic API format
if (model.includes("claude-opus-4-1")) return true;
// AWS Bedrock format
if (model.includes("anthropic.claude-opus-4-1")) return true;
// GCP Vertex AI format
if (model.includes("claude-opus-4-1@")) return true;
return false;
}
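A sketch of the omission rules, using a bare object in place of an Express Request:

const req: any = {
  body: { model: "claude-opus-4-1-20250805", temperature: 0.7, top_p: 1.0 },
  log: { info: console.log },
};

validateClaude41OpusParameters(req);
// top_p was at its default (1.0), so it is dropped and temperature kept:
// req.body => { model: "claude-opus-4-1-20250805", temperature: 0.7 }

// Both non-default would throw instead:
// validateClaude41OpusParameters({ body: { model: "claude-opus-4-1", temperature: 0.7, top_p: 0.9 } } as any);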
+40
@@ -0,0 +1,40 @@
export interface ClaudeModelMapping {
awsId: string;
anthropicId: string;
displayName: string;
}
export const claudeModels: ClaudeModelMapping[] = [
{ awsId: "anthropic.claude-v2", anthropicId: "claude-2", displayName: "Claude 2" },
{ awsId: "anthropic.claude-v2:1", anthropicId: "claude-2.1", displayName: "Claude 2.1" },
{ awsId: "anthropic.claude-3-haiku-20240307-v1:0", anthropicId: "claude-3-haiku-20240307", displayName: "Claude 3 Haiku" },
{ awsId: "anthropic.claude-3-5-haiku-20241022-v1:0", anthropicId: "claude-3-5-haiku-20241022", displayName: "Claude 3.5 Haiku" },
{ awsId: "anthropic.claude-3-sonnet-20240229-v1:0", anthropicId: "claude-3-sonnet-20240229", displayName: "Claude 3 Sonnet" },
{ awsId: "anthropic.claude-3-5-sonnet-20240620-v1:0", anthropicId: "claude-3-5-sonnet-20240620", displayName: "Claude 3.5 Sonnet (Old)" },
{ awsId: "anthropic.claude-3-5-sonnet-20241022-v2:0", anthropicId: "claude-3-5-sonnet-20241022", displayName: "Claude 3.5 Sonnet (New)" },
{ awsId: "anthropic.claude-3-5-sonnet-20241022-v2:0", anthropicId: "claude-3-5-sonnet-latest", displayName: "Claude 3.5 Sonnet (Latest)" },
{ awsId: "anthropic.claude-3-7-sonnet-20250219-v1:0", anthropicId: "claude-3-7-sonnet-20250219", displayName: "Claude 3.7 Sonnet" },
{ awsId: "anthropic.claude-3-7-sonnet-20250219-v1:0", anthropicId: "claude-3-7-sonnet-latest", displayName: "Claude 3.7 Sonnet (Latest)" },
{ awsId: "anthropic.claude-3-opus-20240229-v1:0", anthropicId: "claude-3-opus-20240229", displayName: "Claude 3 Opus" },
{ awsId: "anthropic.claude-3-opus-20240229-v1:0", anthropicId: "claude-3-opus-latest", displayName: "Claude 3 Opus (Latest)" },
{ awsId: "anthropic.claude-sonnet-4-20250514-v1:0", anthropicId: "claude-sonnet-4-20250514", displayName: "Claude 4 Sonnet" },
{ awsId: "anthropic.claude-sonnet-4-20250514-v1:0", anthropicId: "claude-sonnet-4-latest", displayName: "Claude 4 Sonnet (Latest)" },
{ awsId: "anthropic.claude-opus-4-20250514-v1:0", anthropicId: "claude-opus-4-20250514", displayName: "Claude 4.0 Opus" },
{ awsId: "anthropic.claude-opus-4-1-20250805-v1:0", anthropicId: "claude-opus-4-1-20250805", displayName: "Claude 4.1 Opus" },
{ awsId: "anthropic.claude-opus-4-1-20250805-v1:0", anthropicId: "claude-opus-4-latest", displayName: "Claude 4 Opus (Latest)" },
{ awsId: "anthropic.claude-opus-4-1-20250805-v1:0", anthropicId: "claude-opus-4-1", displayName: "Claude 4.1 Opus" },
{ awsId: "anthropic.claude-sonnet-4-20250514-v1:0", anthropicId: "claude-sonnet-4-0", displayName: "Claude 4 Sonnet" },
{ awsId: "anthropic.claude-opus-4-20250514-v1:0", anthropicId: "claude-opus-4-0", displayName: "Claude 4.0 Opus" },
];
export function findByAwsId(awsId: string): ClaudeModelMapping | undefined {
return claudeModels.find(model => model.awsId === awsId);
}
export function findByAnthropicId(anthropicId: string): ClaudeModelMapping | undefined {
return claudeModels.find(model => model.anthropicId === anthropicId);
}
export function getAllClaudeModels(): ClaudeModelMapping[] {
return claudeModels;
}
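A sketch of alias resolution against the table above:

findByAnthropicId("claude-opus-4-1")?.awsId;
// => "anthropic.claude-opus-4-1-20250805-v1:0"

// Several anthropicIds share one awsId, and find() returns the first hit,
// so the reverse lookup yields the dated ID rather than an alias:
findByAwsId("anthropic.claude-opus-4-1-20250805-v1:0")?.anthropicId;
// => "claude-opus-4-1-20250805"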
+1
@@ -33,6 +33,7 @@ declare global {
tokenizerInfo: Record<string, any>;
signedRequest: HttpRequest;
modelFamily?: ModelFamily;
isChunkedTransfer?: boolean;
}
}
}
@@ -13,9 +13,19 @@ export type OpenAIImageGenerationResult = {
created: number;
data: {
revised_prompt?: string;
url: string;
b64_json: string;
url?: string; // gpt-image-1 doesn't return URLs, only b64_json
b64_json?: string;
}[];
// Added for gpt-image-1 responses
usage?: {
total_tokens: number;
input_tokens: number;
output_tokens: number;
input_tokens_details?: {
text_tokens: number;
image_tokens: number;
};
};
};
async function downloadImage(url: string) {
@@ -65,11 +75,16 @@ export async function mirrorGeneratedImage(
let mirror: string;
if (item.b64_json) {
mirror = await saveB64Image(item.b64_json);
} else {
} else if (item.url) {
mirror = await downloadImage(item.url);
} else {
req.log.warn("No image data found in response");
continue;
}
// Set the URL to our mirrored version
item.url = `${host}/user_content/${path.basename(mirror)}`;
await createThumbnail(mirror);
// Add to image history with the local URL
addToImageHistory({
url: item.url,
prompt,
+3 -2
@@ -1,6 +1,6 @@
import { RequestHandler } from "express";
import { config } from "../config";
import { getTokenCostUsd, prettyTokens } from "./stats";
import { getTokenCostUsd, getTokenCostDetailsUsd, prettyTokens } from "./stats"; // Added getTokenCostDetailsUsd
import { redactIp } from "./utils";
import * as userStore from "./users/user-store";
@@ -30,7 +30,8 @@ export const injectLocals: RequestHandler = (req, res, next) => {
// view helpers
res.locals.prettyTokens = prettyTokens;
res.locals.tokenCost = getTokenCostUsd;
res.locals.tokenCost = getTokenCostUsd; // Returns total cost as a number
res.locals.tokenCostDetails = getTokenCostDetailsUsd; // Returns { inputCost, outputCost, totalCost }
res.locals.redactIp = redactIp;
next();
@@ -6,9 +6,9 @@ import type { AnthropicKey, AnthropicKeyProvider } from "./provider";
const axios = getAxiosInstance();
const MIN_CHECK_INTERVAL = 3 * 1000; // 3 seconds
const KEY_CHECK_PERIOD = 1000 * 60 * 60 * 6; // 6 hours
const KEY_CHECK_PERIOD = 1000 * 60 * 60 * 24; // 24 hours (no reason to do it every 6 hours)
const POST_MESSAGES_URL = "https://api.anthropic.com/v1/messages";
const TEST_MODEL = "claude-3-sonnet-20240229";
const TEST_MODEL = "claude-3-7-sonnet-latest";
const SYSTEM = "Obey all instructions from the user.";
const DETECTION_PROMPT = [
{
@@ -71,10 +71,13 @@ export class AnthropicKeyChecker extends KeyCheckerBase<AnthropicKey> {
// The type is always invalid_request_error, so we have to check the text.
const isOverQuota =
data.error?.message?.match(/usage blocked until/i) ||
data.error?.message?.match(/credit balance is too low/i);
data.error?.message?.match(/credit balance is too low/i) ||
data.error?.message?.match(/reached your specified API usage limits/i) ||
data.error?.message?.match(/You will regain access on/i);
const isDisabled = data.error?.message?.match(
/organization has been disabled/i
);
) ||
data.error?.message?.match(/credential is only authorized for use with Claude Code/i);
if (status === 400 && isOverQuota) {
this.log.warn(
{ key: key.hash, error: data },
+18 -10
@@ -16,11 +16,8 @@ export type AnthropicKeyUpdate = Omit<
| "rateLimitedUntil"
>;
type AnthropicKeyUsage = {
[K in AnthropicModelFamily as `${K}Tokens`]: number;
};
export interface AnthropicKey extends Key, AnthropicKeyUsage {
// AnthropicKeyUsage is removed, tokenUsage from base Key interface will be used.
export interface AnthropicKey extends Key {
readonly service: "anthropic";
readonly modelFamilies: AnthropicModelFamily[];
/**
@@ -120,8 +117,7 @@ export class AnthropicKeyProvider implements KeyProvider<AnthropicKey> {
.digest("hex")
.slice(0, 8)}`,
lastChecked: 0,
claudeTokens: 0,
"claude-opusTokens": 0,
tokenUsage: {}, // Initialize new tokenUsage field
tier: "unknown",
};
this.keys.push(newKey);
@@ -206,11 +202,23 @@ export class AnthropicKeyProvider implements KeyProvider<AnthropicKey> {
return this.keys.filter((k) => !k.isDisabled).length;
}
public incrementUsage(hash: string, model: string, tokens: number) {
const key = this.keys.find((k) => k.hash === hash);
public incrementUsage(keyHash: string, modelFamily: AnthropicModelFamily, usage: { input: number; output: number }) {
const key = this.keys.find((k) => k.hash === keyHash);
if (!key) return;
key.promptCount++;
key[`${getClaudeModelFamily(model)}Tokens`] += tokens;
if (!key.tokenUsage) {
key.tokenUsage = {};
}
// Ensure the specific family object exists
if (!key.tokenUsage[modelFamily]) {
key.tokenUsage[modelFamily] = { input: 0, output: 0 };
}
const currentFamilyUsage = key.tokenUsage[modelFamily]!;
currentFamilyUsage.input += usage.input;
currentFamilyUsage.output += usage.output;
}
getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
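A sketch of the new per-family accounting; provider and keyHash are assumed to exist in scope, and "claude-opus" stands in for a value of AnthropicModelFamily:

provider.incrementUsage(keyHash, "claude-opus", { input: 1000, output: 300 });
provider.incrementUsage(keyHash, "claude-opus", { input: 200, output: 50 });
// key.tokenUsage["claude-opus"] => { input: 1200, output: 350 }
// (the removed flat counter would have collapsed this to a single 1550 total)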
+6
@@ -24,6 +24,10 @@ const KNOWN_MODEL_IDS: ModuleAliasTuple[] = [
["anthropic.claude-3-opus-20240229-v1:0"],
["anthropic.claude-3-5-sonnet-20240620-v1:0"],
["anthropic.claude-3-5-sonnet-20241022-v2:0"],
["anthropic.claude-3-7-sonnet-20250219-v1:0"],
["anthropic.claude-sonnet-4-20250514-v1:0"],
["anthropic.claude-opus-4-20250514-v1:0"],
["anthropic.claude-opus-4-1-20250805-v1:0"],
["mistral.mistral-7b-instruct-v0:2"],
["mistral.mixtral-8x7b-instruct-v0:1"],
["mistral.mistral-large-2402-v1:0"],
@@ -92,6 +96,8 @@ export class AwsKeyChecker extends KeyCheckerBase<AwsBedrockKey> {
protected async testKeyOrFail(key: AwsBedrockKey) {
const isInitialCheck = !key.lastChecked;
// Keys with logging enabled will get rejected in the provider
await this.checkLoggingConfiguration(key);
if (isInitialCheck) {
try {
await this.checkInferenceProfiles(key);
+27 -14
@@ -3,15 +3,13 @@ import { config } from "../../../config";
import { logger } from "../../../logger";
import { PaymentRequiredError } from "../../errors";
import { AwsBedrockModelFamily, getAwsBedrockModelFamily } from "../../models";
import { findByAnthropicId } from "../../claude-models";
import { createGenericGetLockoutPeriod, Key, KeyProvider } from "..";
import { prioritizeKeys } from "../prioritize-keys";
import { AwsKeyChecker } from "./checker";
type AwsBedrockKeyUsage = {
[K in AwsBedrockModelFamily as `${K}Tokens`]: number;
};
export interface AwsBedrockKey extends Key, AwsBedrockKeyUsage {
// AwsBedrockKeyUsage is removed, tokenUsage from base Key interface will be used.
export interface AwsBedrockKey extends Key {
readonly service: "aws";
readonly modelFamilies: AwsBedrockModelFamily[];
/**
@@ -74,12 +72,7 @@ export class AwsBedrockKeyProvider implements KeyProvider<AwsBedrockKey> {
lastChecked: 0,
modelIds: ["anthropic.claude-3-sonnet-20240229-v1:0"],
inferenceProfileIds: [],
["aws-claudeTokens"]: 0,
["aws-claude-opusTokens"]: 0,
["aws-mistral-tinyTokens"]: 0,
["aws-mistral-smallTokens"]: 0,
["aws-mistral-mediumTokens"]: 0,
["aws-mistral-largeTokens"]: 0,
tokenUsage: {}, // Initialize new tokenUsage field
};
this.keys.push(newKey);
}
@@ -104,6 +97,15 @@ export class AwsBedrockKeyProvider implements KeyProvider<AwsBedrockKey> {
// Claude 2 is the only model that breaks this convention; Anthropic calls
// it claude-2 but AWS calls it claude-v2.
if (model.includes("claude-2")) neededVariantId = "claude-v2";
// For Claude models, try to resolve aliases to AWS model IDs
if (model.includes("claude") && !model.includes("anthropic.")) {
const claudeMapping = findByAnthropicId(model);
if (claudeMapping) {
neededVariantId = claudeMapping.awsId;
}
}
const neededFamily = getAwsBedrockModelFamily(model);
const availableKeys = this.keys.filter((k) => {
@@ -173,11 +175,22 @@ export class AwsBedrockKeyProvider implements KeyProvider<AwsBedrockKey> {
return this.keys.filter((k) => !k.isDisabled).length;
}
public incrementUsage(hash: string, model: string, tokens: number) {
const key = this.keys.find((k) => k.hash === hash);
public incrementUsage(keyHash: string, modelFamily: AwsBedrockModelFamily, usage: { input: number; output: number }) {
const key = this.keys.find((k) => k.hash === keyHash);
if (!key) return;
key.promptCount++;
key[`${getAwsBedrockModelFamily(model)}Tokens`] += tokens;
if (!key.tokenUsage) {
key.tokenUsage = {};
}
if (!key.tokenUsage[modelFamily]) {
key.tokenUsage[modelFamily] = { input: 0, output: 0 };
}
const currentFamilyUsage = key.tokenUsage[modelFamily]!;
currentFamilyUsage.input += usage.input;
currentFamilyUsage.output += usage.output;
}
getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
+2 -2
@@ -123,7 +123,7 @@ export class AzureOpenAIKeyChecker extends KeyCheckerBase<AzureOpenAIKey> {
AzureOpenAIKeyChecker.getCredentialsFromKey(key);
const url = POST_CHAT_COMPLETIONS(resourceName, deploymentId);
const testRequest = {
max_tokens: 1,
max_completion_tokens: 1,
stream: false,
messages: [{ role: "user", content: "" }],
};
@@ -159,7 +159,7 @@ export class AzureOpenAIKeyChecker extends KeyCheckerBase<AzureOpenAIKey> {
// Try to send an oversized prompt. GPT-4 Turbo can handle this but regular
// GPT-4 will return a Bad Request error.
const contextText = {
max_tokens: 9000,
max_completion_tokens: 9000,
stream: false,
temperature: 0,
seed: 0,
+17 -16
@@ -10,11 +10,8 @@ import { createGenericGetLockoutPeriod, Key, KeyProvider } from "..";
import { prioritizeKeys } from "../prioritize-keys";
import { AzureOpenAIKeyChecker } from "./checker";
type AzureOpenAIKeyUsage = {
[K in AzureOpenAIModelFamily as `${K}Tokens`]: number;
};
export interface AzureOpenAIKey extends Key, AzureOpenAIKeyUsage {
// AzureOpenAIKeyUsage is removed, tokenUsage from base Key interface will be used.
export interface AzureOpenAIKey extends Key {
readonly service: "azure";
readonly modelFamilies: AzureOpenAIModelFamily[];
contentFiltering: boolean;
@@ -68,14 +65,7 @@ export class AzureOpenAIKeyProvider implements KeyProvider<AzureOpenAIKey> {
.digest("hex")
.slice(0, 8)}`,
lastChecked: 0,
"azure-turboTokens": 0,
"azure-gpt4Tokens": 0,
"azure-gpt4-32kTokens": 0,
"azure-gpt4-turboTokens": 0,
"azure-gpt4oTokens": 0,
"azure-o1Tokens": 0,
"azure-o1-miniTokens": 0,
"azure-dall-eTokens": 0,
tokenUsage: {}, // Initialize new tokenUsage field
modelIds: [],
};
this.keys.push(newKey);
@@ -130,11 +120,22 @@ export class AzureOpenAIKeyProvider implements KeyProvider<AzureOpenAIKey> {
return this.keys.filter((k) => !k.isDisabled).length;
}
public incrementUsage(hash: string, model: string, tokens: number) {
const key = this.keys.find((k) => k.hash === hash);
public incrementUsage(keyHash: string, modelFamily: AzureOpenAIModelFamily, usage: { input: number; output: number }) {
const key = this.keys.find((k) => k.hash === keyHash);
if (!key) return;
key.promptCount++;
key[`${getAzureOpenAIModelFamily(model)}Tokens`] += tokens;
if (!key.tokenUsage) {
key.tokenUsage = {};
}
if (!key.tokenUsage[modelFamily]) {
key.tokenUsage[modelFamily] = { input: 0, output: 0 };
}
const currentFamilyUsage = key.tokenUsage[modelFamily]!;
currentFamilyUsage.input += usage.input;
currentFamilyUsage.output += usage.output;
}
getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
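The same tokenUsage bookkeeping is now repeated verbatim in each provider's incrementUsage. A sketch of a shared helper that could replace those bodies, assuming the Key and ModelFamily import paths mirror the providers above:

import type { Key } from "..";
import type { ModelFamily } from "../../models";

// Lazily create the per-family bucket, then accumulate input/output tokens.
export function addTokenUsage(
  key: Key,
  modelFamily: ModelFamily,
  usage: { input: number; output: number }
): void {
  key.promptCount++;
  const buckets = (key.tokenUsage ??= {});
  const bucket = (buckets[modelFamily] ??= { input: 0, output: 0 });
  bucket.input += usage.input;
  bucket.output += usage.output;
}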
+116
@@ -0,0 +1,116 @@
import { CohereKey } from "./provider";
import { logger } from "../../../logger";
import { assertNever } from "../../utils";
const CHECK_TIMEOUT = 10000;
const API_URL = "https://api.cohere.com/v1/check-api-key";
export class CohereKeyChecker {
private log = logger.child({ module: "key-checker", service: "cohere" });
constructor(private readonly update: (hash: string, key: Partial<CohereKey>) => void) {
this.log.info("CohereKeyChecker initialized");
}
public async checkKey(key: CohereKey): Promise<void> {
this.log.info({ hash: key.hash }, "Starting key validation check");
try {
const result = await this.validateKey(key);
this.handleCheckResult(key, result);
} catch (error) {
if (error instanceof Error) {
this.log.warn(
{ error: error.message, stack: error.stack, hash: key.hash },
"Failed to check key status"
);
} else {
this.log.warn(
{ error, hash: key.hash },
"Failed to check key status with unknown error"
);
}
}
}
private async validateKey(key: CohereKey): Promise<"valid" | "invalid" | "quota"> {
const controller = new AbortController();
const timeout = setTimeout(() => {
controller.abort();
this.log.warn({ hash: key.hash }, "Key validation timed out after " + CHECK_TIMEOUT + "ms");
}, CHECK_TIMEOUT);
try {
// Hit the check-api-key endpoint to verify that the key authenticates.
const headers = {
"Content-Type": "application/json",
"Authorization": `Bearer ${key.key}`,
"Cohere-Version": "2022-12-06"
};
const response = await fetch(API_URL, {
method: "POST",
headers,
signal: controller.signal,
});
// The endpoint reports validity via a boolean "valid" field in the response body.
const data = await response.json();
if (response.status === 200) {
if (data.valid === true) {
return "valid";
} else {
return "invalid";
}
} else if (response.status === 429) {
return "quota";
} else {
this.log.warn(
{ status: response.status, hash: key.hash },
"Unexpected status code while testing key validity"
);
return "invalid";
}
} catch (error) {
if (error instanceof Error && error.name === 'AbortError') {
this.log.warn({ hash: key.hash }, "Key validation aborted");
}
throw error;
} finally {
clearTimeout(timeout);
}
}
private handleCheckResult(
key: CohereKey,
result: "valid" | "invalid" | "quota"
): void {
switch (result) {
case "valid":
this.log.info({ hash: key.hash }, "Key is valid and enabled");
this.update(key.hash, {
isDisabled: false,
lastChecked: Date.now(),
});
break;
case "invalid":
this.log.warn({ hash: key.hash }, "Key is invalid, marking as revoked");
this.update(key.hash, {
isDisabled: true,
isRevoked: true,
lastChecked: Date.now(),
});
break;
case "quota":
this.log.warn({ hash: key.hash }, "Key has exceeded its quota, disabling");
this.update(key.hash, {
isDisabled: true,
isOverQuota: true,
lastChecked: Date.now(),
});
break;
default:
assertNever(result);
}
}
}
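The checker's only dependency is an update callback, which the provider below satisfies with this.update.bind(this); a stand-in sketch of that wiring:

import { CohereKeyChecker } from "./checker";
import type { CohereKey } from "./provider";

// Stand-in for CohereKeyProvider.update(); just records each patch.
const applied: Array<{ hash: string; patch: Partial<CohereKey> }> = [];
const checker = new CohereKeyChecker((hash, patch) => {
  applied.push({ hash, patch });
});
// void checker.checkKey(key) would now write check results into `applied`.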
@@ -0,0 +1,167 @@
import { Key, KeyProvider, createGenericGetLockoutPeriod } from "..";
import { CohereKeyChecker } from "./checker";
import { config } from "../../../config";
import { logger } from "../../../logger";
import { CohereModelFamily, ModelFamily } from "../../models"; // Added ModelFamily
// CohereKeyUsage is removed, tokenUsage from base Key interface will be used.
export interface CohereKey extends Key {
readonly service: "cohere";
readonly modelFamilies: CohereModelFamily[];
isOverQuota: boolean;
}
export class CohereKeyProvider implements KeyProvider<CohereKey> {
readonly service = "cohere";
private keys: CohereKey[] = [];
private checker?: CohereKeyChecker;
private log = logger.child({ module: "key-provider", service: this.service });
constructor() {
const keyConfig = config.cohereKey?.trim();
if (!keyConfig) {
return;
}
const keys = keyConfig.split(",").map((k) => k.trim());
for (const key of keys) {
if (!key) continue;
this.keys.push({
key,
service: this.service,
modelFamilies: ["cohere"],
isDisabled: false,
isRevoked: false,
promptCount: 0,
lastUsed: 0,
lastChecked: 0,
hash: this.hashKey(key),
rateLimitedAt: 0,
rateLimitedUntil: 0,
tokenUsage: {}, // Initialize new tokenUsage field
isOverQuota: false,
});
}
}
private hashKey(key: string): string {
return require("crypto").createHash("sha256").update(key).digest("hex");
}
public init() {
if (this.keys.length === 0) return;
if (!config.checkKeys) {
this.log.warn(
"Key checking is disabled. Keys will not be verified."
);
return;
}
this.checker = new CohereKeyChecker(this.update.bind(this));
for (const key of this.keys) {
void this.checker.checkKey(key);
}
}
public get(model: string): CohereKey {
const availableKeys = this.keys.filter((k) => !k.isDisabled);
if (availableKeys.length === 0) {
throw new Error("No Cohere keys available");
}
const key = availableKeys[Math.floor(Math.random() * availableKeys.length)];
key.lastUsed = Date.now();
this.throttle(key.hash);
return { ...key };
}
public list(): Omit<CohereKey, "key">[] {
return this.keys.map(({ key, ...rest }) => rest);
}
public disable(key: CohereKey): void {
const found = this.keys.find((k) => k.hash === key.hash);
if (found) {
found.isDisabled = true;
}
}
public update(hash: string, update: Partial<CohereKey>): void {
const key = this.keys.find((k) => k.hash === hash);
if (key) {
Object.assign(key, update);
}
}
public available(): number {
return this.keys.filter((k) => !k.isDisabled).length;
}
public incrementUsage(keyHash: string, modelFamily: CohereModelFamily, usage: { input: number; output: number }) {
const key = this.keys.find((k) => k.hash === keyHash);
if (!key) return;
key.promptCount++;
if (!key.tokenUsage) {
key.tokenUsage = {};
}
// Cohere only has one model family "cohere"
if (!key.tokenUsage[modelFamily]) {
key.tokenUsage[modelFamily] = { input: 0, output: 0 };
}
const currentFamilyUsage = key.tokenUsage[modelFamily]!;
currentFamilyUsage.input += usage.input;
currentFamilyUsage.output += usage.output;
}
/**
* Upon being rate limited, a key will be locked out for this many milliseconds
* while we wait for other concurrent requests to finish.
*/
private static readonly RATE_LIMIT_LOCKOUT = 2000;
/**
* Upon assigning a key, we will wait this many milliseconds before allowing it
* to be used again. This is to prevent the queue from flooding a key with too
* many requests while we wait to learn whether previous ones succeeded.
*/
private static readonly KEY_REUSE_DELAY = 500;
getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
public markRateLimited(keyHash: string) {
this.log.debug({ key: keyHash }, "Key rate limited");
const key = this.keys.find((k) => k.hash === keyHash)!;
const now = Date.now();
key.rateLimitedAt = now;
key.rateLimitedUntil = now + CohereKeyProvider.RATE_LIMIT_LOCKOUT;
}
public recheck(): void {
if (!this.checker || !config.checkKeys) return;
for (const key of this.keys) {
this.update(key.hash, {
isOverQuota: false,
isDisabled: false,
lastChecked: 0
});
void this.checker.checkKey(key);
}
}
/**
* Applies a short artificial delay to the key upon dequeueing, in order to
* prevent it from being immediately assigned to another request before the
* current one can be dispatched.
**/
private throttle(hash: string) {
const now = Date.now();
const key = this.keys.find((k) => k.hash === hash)!;
const currentRateLimit = key.rateLimitedUntil;
const nextRateLimit = now + CohereKeyProvider.KEY_REUSE_DELAY;
key.rateLimitedAt = now;
key.rateLimitedUntil = Math.max(currentRateLimit, nextRateLimit);
}
}
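To make the throttle arithmetic concrete, a worked example (timestamps illustrative) of why the Math.max in throttle() matters: the short KEY_REUSE_DELAY must never shorten a longer RATE_LIMIT_LOCKOUT already in effect.

const now = 1_000_000;
const RATE_LIMIT_LOCKOUT = 2000;
const KEY_REUSE_DELAY = 500;

// Key was just rate limited: locked out until now + 2000.
let rateLimitedUntil = now + RATE_LIMIT_LOCKOUT; // 1_002_000

// A dequeue 100ms later applies the reuse delay, but must not shorten the lockout.
const later = now + 100;
rateLimitedUntil = Math.max(rateLimitedUntil, later + KEY_REUSE_DELAY);
console.log(rateLimitedUntil); // still 1_002_000 — the longer lockout wins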
@@ -0,0 +1,213 @@
import { DeepseekKey } from "./provider";
import { logger } from "../../../logger";
import { assertNever } from "../../utils";
const CHECK_TIMEOUT = 10000;
const SERVER_ERROR_RETRY_DELAY = 5000; // 5 seconds
const MAX_SERVER_ERROR_RETRIES = 2;
const CONNECTION_ERROR_RETRY_DELAY = 10000; // 10 seconds
const MAX_CONNECTION_ERROR_RETRIES = 2; // 3 total attempts (initial + 2 retries)
// Track server error counts for each key
const serverErrorCounts: Record<string, number> = {};
// Track connection error counts for each key
const connectionErrorCounts: Record<string, number> = {};
export class DeepseekKeyChecker {
private log = logger.child({ module: "key-checker", service: "deepseek" });
constructor(private readonly update: (hash: string, key: Partial<DeepseekKey>) => void) {}
public async checkKey(key: DeepseekKey): Promise<void> {
try {
const result = await this.validateKey(key);
// If we get here, reset any connection error counters since the request succeeded
if (connectionErrorCounts[key.hash]) {
delete connectionErrorCounts[key.hash];
}
if (result === "server_error") {
// Increment server error count for this key
const currentCount = (serverErrorCounts[key.hash] || 0) + 1;
serverErrorCounts[key.hash] = currentCount;
if (currentCount <= MAX_SERVER_ERROR_RETRIES) {
// Schedule a retry after delay
this.log.info(
{ hash: key.hash, retryCount: currentCount },
`Server error detected, scheduling retry ${currentCount} of ${MAX_SERVER_ERROR_RETRIES} in ${SERVER_ERROR_RETRY_DELAY/1000} seconds`
);
setTimeout(() => {
this.log.info({ hash: key.hash }, "Retrying key check after server error");
this.checkKey(key);
}, SERVER_ERROR_RETRY_DELAY);
// Just mark as checked for now, but don't disable
this.update(key.hash, {
lastChecked: Date.now(),
});
return;
} else {
// Max retries reached, handle as invalid
this.log.warn(
{ hash: key.hash, retries: currentCount },
"Key failed server error checks multiple times, marking as invalid"
);
// Reset the counter since we're handling it now
delete serverErrorCounts[key.hash];
// Mark as invalid
this.handleCheckResult(key, "invalid");
return;
}
} else {
// If we get a non-server-error result, reset the server error count
if (serverErrorCounts[key.hash]) {
delete serverErrorCounts[key.hash];
}
// Handle the result normally
this.handleCheckResult(key, result);
}
} catch (error) {
// Increment connection error count for this key
const currentCount = (connectionErrorCounts[key.hash] || 0) + 1;
connectionErrorCounts[key.hash] = currentCount;
if (currentCount <= MAX_CONNECTION_ERROR_RETRIES) {
// Schedule a retry after delay
this.log.warn(
{ error, hash: key.hash, retryCount: currentCount },
`Failed to check key status, scheduling retry ${currentCount} of ${MAX_CONNECTION_ERROR_RETRIES} in ${CONNECTION_ERROR_RETRY_DELAY/1000} seconds`
);
setTimeout(() => {
this.log.info({ hash: key.hash }, "Retrying key check after connection error");
this.checkKey(key);
}, CONNECTION_ERROR_RETRY_DELAY);
// Just mark as checked for now, don't change status
this.update(key.hash, {
lastChecked: Date.now(),
});
} else {
// Max retries reached, log final warning
this.log.warn(
{ error, hash: key.hash, retries: currentCount },
"Key failed connection checks multiple times, marking as invalid"
);
// Reset the counter since we're handling it now
delete connectionErrorCounts[key.hash];
// Mark as invalid after exhausting retries
this.update(key.hash, {
isDisabled: true,
isRevoked: true, // Assuming connection failures after retries mean the key is invalid
lastChecked: Date.now(),
});
}
}
}
private async validateKey(key: DeepseekKey): Promise<"valid" | "invalid" | "quota" | "server_error"> {
const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), CHECK_TIMEOUT);
try {
const response = await fetch("https://api.deepseek.com/chat/completions", {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${key.key}`,
},
body: JSON.stringify({
model: "deepseek-chat",
messages: [{ role: "user", content: "hi" }],
max_tokens: 0, // Intentionally zero: a valid key answers with a 400 invalid_request error without spending any tokens.
}),
signal: controller.signal,
});
const rateLimit = {
limit: parseInt(response.headers.get("x-ratelimit-limit") || "200"),
remaining: parseInt(response.headers.get("x-ratelimit-remaining") || "199"),
};
switch (response.status) {
case 400:
this.log.debug(
{ key: key.hash, rateLimit },
"Key check successful, updating rate limit info"
);
return "valid";
case 401:
this.log.warn({ hash: key.hash }, "Key is invalid (authentication failed)");
return "invalid";
case 402:
this.log.warn({ hash: key.hash }, "Key has insufficient balance");
return "quota";
case 429:
this.log.warn({ key: key.hash }, "Key is rate limited");
return "valid";
case 500:
this.log.warn({ hash: key.hash }, "Server error when checking key");
return "server_error";
case 503:
this.log.warn({ hash: key.hash }, "Server overloaded when checking key");
return "server_error";
default:
this.log.warn(
{ status: response.status, hash: key.hash },
"Unexpected status code while checking key"
);
return "valid";
}
} finally {
clearTimeout(timeout);
}
}
private handleCheckResult(
key: DeepseekKey,
result: "valid" | "invalid" | "quota" | "server_error"
): void {
switch (result) {
case "valid":
this.update(key.hash, {
isDisabled: false,
lastChecked: Date.now(),
});
break;
case "invalid":
this.log.warn({ hash: key.hash }, "Key is invalid");
this.update(key.hash, {
isDisabled: true,
isRevoked: true,
lastChecked: Date.now(),
});
break;
case "quota":
this.log.warn({ hash: key.hash }, "Key has exceeded its quota");
this.update(key.hash, {
isDisabled: true,
isOverQuota: true,
lastChecked: Date.now(),
});
break;
case "server_error":
// This case is now handled in the checkKey method with retries
this.log.warn({ hash: key.hash }, "Server error when checking key");
this.update(key.hash, {
lastChecked: Date.now(),
});
break;
default:
assertNever(result);
}
}
}
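The server-error and connection-error branches above share a counter-plus-setTimeout shape; a generic sketch of that retry pattern (helper name hypothetical):

// Hypothetical helper generalizing the per-key retry bookkeeping above.
function scheduleRetry(
  counters: Record<string, number>,
  hash: string,
  maxRetries: number,
  delayMs: number,
  retry: () => void,
  giveUp: () => void
): void {
  const count = (counters[hash] = (counters[hash] ?? 0) + 1);
  if (count <= maxRetries) {
    setTimeout(retry, delayMs);
  } else {
    delete counters[hash]; // reset so a later recheck starts fresh
    giveUp();
  }
}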
@@ -0,0 +1,167 @@
import { Key, KeyProvider, createGenericGetLockoutPeriod } from "..";
import { DeepseekKeyChecker } from "./checker";
import { config } from "../../../config";
import { logger } from "../../../logger";
import { DeepseekModelFamily, ModelFamily } from "../../models"; // Added ModelFamily
// DeepseekKeyUsage is removed, tokenUsage from base Key interface will be used.
export interface DeepseekKey extends Key {
readonly service: "deepseek";
readonly modelFamilies: DeepseekModelFamily[];
isOverQuota: boolean;
}
export class DeepseekKeyProvider implements KeyProvider<DeepseekKey> {
readonly service = "deepseek";
private keys: DeepseekKey[] = [];
private checker?: DeepseekKeyChecker;
private log = logger.child({ module: "key-provider", service: this.service });
constructor() {
const keyConfig = config.deepseekKey?.trim();
if (!keyConfig) {
return;
}
const keys = keyConfig.split(",").map((k) => k.trim());
for (const key of keys) {
if (!key) continue;
this.keys.push({
key,
service: this.service,
modelFamilies: ["deepseek"],
isDisabled: false,
isRevoked: false,
promptCount: 0,
lastUsed: 0,
lastChecked: 0,
hash: this.hashKey(key),
rateLimitedAt: 0,
rateLimitedUntil: 0,
tokenUsage: {}, // Initialize new tokenUsage field
isOverQuota: false,
});
}
}
private hashKey(key: string): string {
return require("crypto").createHash("sha256").update(key).digest("hex");
}
public init() {
if (this.keys.length === 0) return;
if (!config.checkKeys) {
this.log.warn(
"Key checking is disabled. Keys will not be verified."
);
return;
}
this.checker = new DeepseekKeyChecker(this.update.bind(this));
for (const key of this.keys) {
void this.checker.checkKey(key);
}
}
public get(model: string): DeepseekKey {
const availableKeys = this.keys.filter((k) => !k.isDisabled);
if (availableKeys.length === 0) {
throw new Error("No Deepseek keys available");
}
const key = availableKeys[Math.floor(Math.random() * availableKeys.length)];
key.lastUsed = Date.now();
this.throttle(key.hash);
return { ...key };
}
public list(): Omit<DeepseekKey, "key">[] {
return this.keys.map(({ key, ...rest }) => rest);
}
public disable(key: DeepseekKey): void {
const found = this.keys.find((k) => k.hash === key.hash);
if (found) {
found.isDisabled = true;
}
}
public update(hash: string, update: Partial<DeepseekKey>): void {
const key = this.keys.find((k) => k.hash === hash);
if (key) {
Object.assign(key, update);
}
}
public available(): number {
return this.keys.filter((k) => !k.isDisabled).length;
}
public incrementUsage(keyHash: string, modelFamily: DeepseekModelFamily, usage: { input: number; output: number }) {
const key = this.keys.find((k) => k.hash === keyHash);
if (!key) return;
key.promptCount++;
if (!key.tokenUsage) {
key.tokenUsage = {};
}
// Deepseek only has one model family "deepseek"
if (!key.tokenUsage[modelFamily]) {
key.tokenUsage[modelFamily] = { input: 0, output: 0 };
}
const currentFamilyUsage = key.tokenUsage[modelFamily]!;
currentFamilyUsage.input += usage.input;
currentFamilyUsage.output += usage.output;
}
/**
* Upon being rate limited, a key will be locked out for this many milliseconds
* while we wait for other concurrent requests to finish.
*/
private static readonly RATE_LIMIT_LOCKOUT = 2000;
/**
* Upon assigning a key, we will wait this many milliseconds before allowing it
* to be used again. This is to prevent the queue from flooding a key with too
* many requests while we wait to learn whether previous ones succeeded.
*/
private static readonly KEY_REUSE_DELAY = 500;
getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
public markRateLimited(keyHash: string) {
this.log.debug({ key: keyHash }, "Key rate limited");
const key = this.keys.find((k) => k.hash === keyHash)!;
const now = Date.now();
key.rateLimitedAt = now;
key.rateLimitedUntil = now + DeepseekKeyProvider.RATE_LIMIT_LOCKOUT;
}
public recheck(): void {
if (!this.checker || !config.checkKeys) return;
for (const key of this.keys) {
this.update(key.hash, {
isOverQuota: false,
isDisabled: false,
lastChecked: 0
});
void this.checker.checkKey(key);
}
}
/**
* Applies a short artificial delay to the key upon dequeueing, in order to
* prevent it from being immediately assigned to another request before the
* current one can be dispatched.
**/
private throttle(hash: string) {
const now = Date.now();
const key = this.keys.find((k) => k.hash === hash)!;
const currentRateLimit = key.rateLimitedUntil;
const nextRateLimit = now + DeepseekKeyProvider.KEY_REUSE_DELAY;
key.rateLimitedAt = now;
key.rateLimitedUntil = Math.max(currentRateLimit, nextRateLimit);
}
}
+7 -4
@@ -42,19 +42,20 @@ export class GcpKeyChecker extends KeyCheckerBase<GcpKey> {
this.invokeModel("claude-3-haiku@20240307", key, true),
this.invokeModel("claude-3-sonnet@20240229", key, true),
this.invokeModel("claude-3-opus@20240229", key, true),
this.invokeModel("claude-3-5-sonnet@20240620", key, true),
this.invokeModel("claude-opus-4-1@20250805", key, true),
this.invokeModel("claude-3-5-sonnet-v2@20241022", key, true),
];
const [sonnet, haiku, opus, sonnet35] = await Promise.all(checks);
const [sonnet, haiku, opus3, opus41, sonnet35] = await Promise.all(checks);
this.log.debug(
{ key: key.hash, sonnet, haiku, opus, sonnet35 },
{ key: key.hash, sonnet, haiku, opus3, opus41, sonnet35 },
"GCP model initial tests complete."
);
const families: GcpModelFamily[] = [];
if (sonnet || sonnet35 || haiku) families.push("gcp-claude");
if (opus) families.push("gcp-claude-opus");
if (opus3 || opus41) families.push("gcp-claude-opus");
if (families.length === 0) {
this.log.warn(
@@ -78,8 +79,10 @@ export class GcpKeyChecker extends KeyCheckerBase<GcpKey> {
await this.invokeModel("claude-3-sonnet@20240229", key, false);
} else if (key.sonnet35Enabled) {
await this.invokeModel("claude-3-5-sonnet@20240620", key, false);
await this.invokeModel("claude-3-5-sonnet-v2@20241022", key, false);
} else {
await this.invokeModel("claude-3-opus@20240229", key, false);
await this.invokeModel("claude-opus-4-1@20250805", key, false);
}
this.updateKey(key.hash, { lastChecked: Date.now() });
+17 -10
@@ -7,11 +7,8 @@ import { createGenericGetLockoutPeriod, Key, KeyProvider } from "..";
import { prioritizeKeys } from "../prioritize-keys";
import { GcpKeyChecker } from "./checker";
type GcpKeyUsage = {
[K in GcpModelFamily as `${K}Tokens`]: number;
};
export interface GcpKey extends Key, GcpKeyUsage {
// GcpKeyUsage is removed, tokenUsage from base Key interface will be used.
export interface GcpKey extends Key {
readonly service: "gcp";
readonly modelFamilies: GcpModelFamily[];
sonnetEnabled: boolean;
@@ -75,8 +72,7 @@ export class GcpKeyProvider implements KeyProvider<GcpKey> {
sonnet35Enabled: false,
accessToken: "",
accessTokenExpiresAt: 0,
["gcp-claudeTokens"]: 0,
["gcp-claude-opusTokens"]: 0,
tokenUsage: {}, // Initialize new tokenUsage field
};
this.keys.push(newKey);
}
@@ -160,11 +156,22 @@ export class GcpKeyProvider implements KeyProvider<GcpKey> {
return this.keys.filter((k) => !k.isDisabled).length;
}
public incrementUsage(hash: string, model: string, tokens: number) {
const key = this.keys.find((k) => k.hash === hash);
public incrementUsage(keyHash: string, modelFamily: GcpModelFamily, usage: { input: number; output: number }) {
const key = this.keys.find((k) => k.hash === keyHash);
if (!key) return;
key.promptCount++;
key[`${getGcpModelFamily(model)}Tokens`] += tokens;
if (!key.tokenUsage) {
key.tokenUsage = {};
}
if (!key.tokenUsage[modelFamily]) {
key.tokenUsage[modelFamily] = { input: 0, output: 0 };
}
const currentFamilyUsage = key.tokenUsage[modelFamily]!;
currentFamilyUsage.input += usage.input;
currentFamilyUsage.output += usage.output;
}
getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
+132 -35
@@ -11,7 +11,12 @@ const KEY_CHECK_PERIOD = 6 * 60 * 60 * 1000; // 6 hours
const LIST_MODELS_URL =
"https://generativelanguage.googleapis.com/v1beta/models";
const GENERATE_CONTENT_URL =
"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=%KEY%";
"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=%KEY%";
const PRO_MODEL_ID = "gemini-2.5-pro";
const GENERATE_PRO_CONTENT_URL =
`https://generativelanguage.googleapis.com/v1beta/models/${PRO_MODEL_ID}:generateContent?key=%KEY%`;
const IMAGEN_BILLING_TEST_URL =
"https://generativelanguage.googleapis.com/v1beta/models/imagen-3.0-generate-002:predict?key=%KEY%";
type ListModelsResponse = {
models: {
@@ -46,12 +51,30 @@ export class GoogleAIKeyChecker extends KeyCheckerBase<GoogleAIKey> {
protected async testKeyOrFail(key: GoogleAIKey) {
const provisionedModels = await this.getProvisionedModels(key);
// Always test flash model access (existing behaviour)
await this.testGenerateContent(key);
const updates = { modelFamilies: provisionedModels };
// Test if billing is enabled for this key
const billingEnabled = await this.testBillingEnabled(key);
// If key claims to support gemini-pro, perform a second layer test with a pro model.
let effectiveFamilies = [...provisionedModels];
if (effectiveFamilies.includes("gemini-pro")) {
const proAccessible = await this.canAccessModel(
key,
GENERATE_PRO_CONTENT_URL
);
if (!proAccessible) {
// Remove pro access if invocation fails
effectiveFamilies = effectiveFamilies.filter((f) => f !== "gemini-pro");
}
}
const updates = { modelFamilies: effectiveFamilies, billingEnabled };
this.updateKey(key.hash, updates);
this.log.info(
{ key: key.hash, models: key.modelFamilies, ids: key.modelIds.length },
{ key: key.hash, models: effectiveFamilies, ids: key.modelIds?.length, billingEnabled },
"Checked key."
);
}
@@ -94,6 +117,57 @@ export class GoogleAIKeyChecker extends KeyCheckerBase<GoogleAIKey> {
);
}
private async canAccessModel(
key: GoogleAIKey,
modelGenerateUrlTemplate: string
): Promise<boolean> {
const payload = {
contents: [{ parts: [{ text: "hi" }], role: "user" }],
tools: [],
safetySettings: [],
generationConfig: { maxOutputTokens: 5 },
};
try {
await axios.post(
modelGenerateUrlTemplate.replace("%KEY%", key.key),
payload,
{ validateStatus: (status) => status === 200 }
);
return true;
} catch {
return false;
}
}
private async testBillingEnabled(key: GoogleAIKey): Promise<boolean> {
const payload = {
instances: [{ prompt: "" }]
};
try {
const response = await axios.post(
IMAGEN_BILLING_TEST_URL.replace("%KEY%", key.key),
payload,
{ validateStatus: () => true } // Accept all status codes
);
if (response.status === 400) {
const errorMessage = response.data?.error?.message || "";
// If the error message contains the billing requirement, billing is NOT enabled
if (errorMessage.includes("Imagen API is only accessible to billed users at this time")) {
return false;
}
// Other 400 errors indicate billing IS enabled (following Python logic)
return true;
}
// For other status codes, assume no billing (conservative approach)
return false;
} catch (error: any) {
// Network errors or other issues - assume no billing
return false;
}
}
protected handleAxiosError(key: GoogleAIKey, error: AxiosError): void {
if (error.response && GoogleAIKeyChecker.errorIsGoogleAIError(error)) {
const httpStatus = error.response.status;
@@ -103,69 +177,92 @@ export class GoogleAIKeyChecker extends KeyCheckerBase<GoogleAIKey> {
case 400: {
const keyDeadMsgs = [
/please enable billing/i,
/API key not valid/i,
/API key expired/i,
/pass a valid API/i,
/api key not valid/i,
/api key expired/i,
/pass a valid api/i, // This may also indicate an invalid key.
/api key not found/i, // Explicitly for "not found" keys
];
const text = JSON.stringify(error.response.data.error);
if (text.match(keyDeadMsgs.join("|"))) {
if (keyDeadMsgs.some((r) => r.test(text))) {
this.log.warn(
{ key: key.hash, error: text },
"Key check returned a non-transient 400 error. Disabling key."
{ key: key.hash, error: text, errorCode: code, httpStatus },
"Key check returned a 400 error indicating a permanent key issue (e.g., invalid, expired, billing). Disabling and revoking key."
);
this.updateKey(key.hash, { isDisabled: true, isRevoked: true });
return;
}
break;
}
case 401:
case 403:
// If it's a 400 but not a key-revoking message, treat as transient.
this.log.warn(
{ key: key.hash, status, code, message, details },
"Key check returned Forbidden/Unauthorized error. Disabling key."
{ key: key.hash, error: text, errorCode: code, httpStatus },
"Key check returned a generic 400 error. Treating as transient. Rechecking in 1 minute."
);
const recheckInOneMinute = Date.now() - (KEY_CHECK_PERIOD - 60 * 1000);
this.updateKey(key.hash, { lastChecked: recheckInOneMinute });
return;
}
case 401: // Unauthorized
case 403: // Forbidden / Permission Denied
this.log.warn(
{ key: key.hash, status, code, message, details, httpStatus },
"Key check returned Forbidden/Unauthorized error. Disabling and revoking key."
);
this.updateKey(key.hash, { isDisabled: true, isRevoked: true });
return;
case 429: {
case 429: { // Resource Exhausted (Rate Limit / Quota)
const text = JSON.stringify(error.response.data.error);
const keyDeadMsgs = [
/GenerateContentRequestsPerMinutePerProjectPerRegion/i,
/"quota_limit_value":"0"/i,
const hardQuotaMessages = [
/GenerateContentRequestsPerMinutePerProjectPerRegion/i, // Often indicates a hard limit or misconfiguration
/"quota_limit_value":"0"/i, // Explicitly out of quota
/billing account not found/i, // Billing issue presented as 429 sometimes
/project has been suspended/i, // Project level issue
];
if (text.match(keyDeadMsgs.join("|"))) {
if (hardQuotaMessages.some((r) => r.test(text))) {
this.log.warn(
{ key: key.hash, error: text },
"Key check returned a non-transient 429 error. Disabling key."
{ key: key.hash, error: text, errorCode: code, httpStatus },
"Key check returned a 429 error indicating a hard quota limit or billing issue. Disabling and marking as over quota, but not revoking."
);
this.updateKey(key.hash, { isDisabled: true, isRevoked: true });
this.updateKey(key.hash, { isDisabled: true, isRevoked: false, isOverQuota: true });
return;
}
// Transient 429 (e.g., TPM/RPM exceeded)
this.log.warn(
{ key: key.hash, status, code, message, details },
"Key is rate limited. Rechecking key in 1 minute."
{ key: key.hash, status, code, message, details, httpStatus },
"Key is temporarily rate limited (429). Rechecking key in 1 minute."
);
const next = Date.now() - (KEY_CHECK_PERIOD - 60 * 1000);
this.updateKey(key.hash, { lastChecked: next });
const nextTransient429 = Date.now() - (KEY_CHECK_PERIOD - 60 * 1000);
this.updateKey(key.hash, { lastChecked: nextTransient429 });
return;
}
case 500: // Internal Server Error
case 503: // Service Unavailable
case 504: // Deadline Exceeded
this.log.warn(
{ key: key.hash, status, code, message, details, httpStatus },
`Key check encountered a server-side error (${httpStatus}). Treating as transient. Rechecking in 1 minute.`
);
const recheck5xx = Date.now() - (KEY_CHECK_PERIOD - 60 * 1000);
this.updateKey(key.hash, { lastChecked: recheck5xx });
return;
}
// Fallthrough for other unexpected Google AI API errors
this.log.error(
{ key: key.hash, status, code, message, details },
"Encountered unexpected error status while checking key. This may indicate a change in the API; please report this."
{ key: key.hash, status, code, message, details, httpStatus },
"Encountered unexpected Google AI error status while checking key. This may indicate a change in the API. Rechecking in 1 minute."
);
return this.updateKey(key.hash, { lastChecked: Date.now() });
const recheckUnexpected = Date.now() - (KEY_CHECK_PERIOD - 60 * 1000);
this.updateKey(key.hash, { lastChecked: recheckUnexpected });
return;
}
// Network errors (not HTTP errors from Google AI)
this.log.error(
{ key: key.hash, error: error.message },
"Network error while checking key; trying this key again in a minute."
"Network error while checking key; trying this key again in 1 minute."
);
const oneMinute = 10 * 1000;
const next = Date.now() - (KEY_CHECK_PERIOD - oneMinute);
return this.updateKey(key.hash, { lastChecked: next });
const recheckNetworkError = Date.now() - (KEY_CHECK_PERIOD - 60 * 1000); // Corrected to 60 * 1000
return this.updateKey(key.hash, { lastChecked: recheckNetworkError });
}
static errorIsGoogleAIError(
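A note on the recurring Date.now() - (KEY_CHECK_PERIOD - 60 * 1000) expression: assuming the base checker re-tests any key whose lastChecked is older than KEY_CHECK_PERIOD, backdating lastChecked to one minute short of that threshold schedules a recheck roughly a minute out. A tiny helper capturing the idiom (name hypothetical):

const KEY_CHECK_PERIOD = 6 * 60 * 60 * 1000; // mirrors the checker's constant

// Returns a lastChecked value that crosses the staleness threshold in `inMs`.
function lastCheckedForRecheckIn(inMs: number, now = Date.now()): number {
  return now - (KEY_CHECK_PERIOD - inMs);
}

// lastCheckedForRecheckIn(60_000) => the key reads as stale again one minute from now.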
+94 -15
@@ -22,15 +22,18 @@ export type GoogleAIKeyUpdate = Omit<
| "rateLimitedUntil"
>;
type GoogleAIKeyUsage = {
[K in GoogleAIModelFamily as `${K}Tokens`]: number;
};
export interface GoogleAIKey extends Key, GoogleAIKeyUsage {
// GoogleAIKeyUsage is removed, tokenUsage from base Key interface will be used.
export interface GoogleAIKey extends Key {
readonly service: "google-ai";
readonly modelFamilies: GoogleAIModelFamily[];
/** All detected model IDs on this key. */
modelIds: string[];
/** Whether this key is over quota (for any model family). */
isOverQuota?: boolean;
/** Model families that are over quota and need to be excluded. */
overQuotaFamilies?: GoogleAIModelFamily[];
/** Whether this key has billing enabled (required for preview models). */
billingEnabled?: boolean;
}
/**
@@ -45,6 +48,13 @@ const RATE_LIMIT_LOCKOUT = 2000;
*/
const KEY_REUSE_DELAY = 500;
/**
* Determines if a model is a preview model that requires billing-enabled keys.
*/
function isPreviewModel(model: string): boolean {
return model.includes("-preview");
}
export class GoogleAIKeyProvider implements KeyProvider<GoogleAIKey> {
readonly service = "google-ai";
@@ -69,6 +79,7 @@ export class GoogleAIKeyProvider implements KeyProvider<GoogleAIKey> {
modelFamilies: ["gemini-pro"],
isDisabled: false,
isRevoked: false,
isOverQuota: false,
promptCount: 0,
lastUsed: 0,
rateLimitedAt: 0,
@@ -79,10 +90,10 @@ export class GoogleAIKeyProvider implements KeyProvider<GoogleAIKey> {
.digest("hex")
.slice(0, 8)}`,
lastChecked: 0,
"gemini-flashTokens": 0,
"gemini-proTokens": 0,
"gemini-ultraTokens": 0,
tokenUsage: {}, // Initialize new tokenUsage field
modelIds: [],
overQuotaFamilies: [],
billingEnabled: false, // Will be determined during key checking
};
this.keys.push(newKey);
}
@@ -102,11 +113,23 @@ export class GoogleAIKeyProvider implements KeyProvider<GoogleAIKey> {
public get(model: string) {
const neededFamily = getGoogleAIModelFamily(model);
const availableKeys = this.keys.filter(
let availableKeys = this.keys.filter(
(k) => !k.isDisabled && k.modelFamilies.includes(neededFamily)
);
if (availableKeys.length === 0) {
throw new PaymentRequiredError("No Google AI keys available");
// For preview models, only use billing-enabled keys
if (isPreviewModel(model)) {
availableKeys = availableKeys.filter((k) => k.billingEnabled === true);
if (availableKeys.length === 0) {
throw new PaymentRequiredError(
"No billing-enabled Google AI keys available for preview models"
);
}
} else {
// For standard models, use any available key
if (availableKeys.length === 0) {
throw new PaymentRequiredError("No Google AI keys available");
}
}
const keysByPriority = prioritizeKeys(availableKeys);
@@ -133,11 +156,22 @@ export class GoogleAIKeyProvider implements KeyProvider<GoogleAIKey> {
return this.keys.filter((k) => !k.isDisabled).length;
}
public incrementUsage(hash: string, model: string, tokens: number) {
const key = this.keys.find((k) => k.hash === hash);
public incrementUsage(keyHash: string, modelFamily: GoogleAIModelFamily, usage: { input: number; output: number }) {
const key = this.keys.find((k) => k.hash === keyHash);
if (!key) return;
key.promptCount++;
key[`${getGoogleAIModelFamily(model)}Tokens`] += tokens;
if (!key.tokenUsage) {
key.tokenUsage = {};
}
if (!key.tokenUsage[modelFamily]) {
key.tokenUsage[modelFamily] = { input: 0, output: 0 };
}
const currentFamilyUsage = key.tokenUsage[modelFamily]!;
currentFamilyUsage.input += usage.input;
currentFamilyUsage.output += usage.output;
}
getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
@@ -157,7 +191,52 @@ export class GoogleAIKeyProvider implements KeyProvider<GoogleAIKey> {
key.rateLimitedUntil = now + RATE_LIMIT_LOCKOUT;
}
public recheck() {}
/**
* Periodically rechecks keys that have been marked as over-quota or disabled
* to see if they can be restored to the rotation.
*/
public recheck() {
// For each key that's either over quota or disabled, reset its status
// so the checker can re-evaluate it
const keysToRecheck = this.keys.filter(k => k.isOverQuota || (k.isDisabled && !k.isRevoked));
if (keysToRecheck.length === 0) {
this.log.debug("No Google AI keys need rechecking");
return;
}
keysToRecheck.forEach(key => {
// Priority to keys marked as overQuota (and not revoked)
if (key.isOverQuota && !key.isRevoked) {
this.log.info(
{ key: key.hash },
"Rechecking over-quota Google AI key. Resetting isOverQuota, isDisabled, and overQuotaFamilies."
);
this.update(key.hash, {
isOverQuota: false,
isDisabled: false, // Was disabled due to being overQuota
lastChecked: 0, // Force a recheck soon
overQuotaFamilies: [] // Clear any specific family quotas
});
}
// Handle other disabled (but not revoked) keys that weren't caught by the isOverQuota condition
else if (key.isDisabled && !key.isRevoked) {
this.log.info(
{ key: key.hash },
"Rechecking disabled (but not revoked or previously over-quota) Google AI key."
);
this.update(key.hash, {
isDisabled: false, // Re-enable for checking
lastChecked: 0 // Force a recheck soon
});
}
});
// Schedule the actual key checking if we have a checker
if (this.checker) {
this.checker.scheduleNextCheck();
}
}
/**
* Applies a short artificial delay to the key upon dequeueing, in order to
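In effect, the preview gate above means get() for any model ID containing "-preview" only considers billing-enabled keys, while other models draw from the whole enabled pool. The filter in isolation, with the key shape reduced to the relevant fields:

interface GateKey { isDisabled: boolean; billingEnabled?: boolean }

function eligibleKeys(keys: GateKey[], model: string): GateKey[] {
  const enabled = keys.filter((k) => !k.isDisabled);
  // Mirrors isPreviewModel(): preview models require billing-enabled keys.
  return model.includes("-preview")
    ? enabled.filter((k) => k.billingEnabled === true)
    : enabled;
}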
+16 -2
@@ -6,6 +6,7 @@ export type APIFormat =
| "openai"
| "openai-text"
| "openai-image"
| "openai-responses" // OpenAI Responses API (e.g., for o1-pro, o3-pro)
| "anthropic-chat" // Anthropic's newer messages array format
| "anthropic-text" // Legacy flat string prompt format
| "google-ai"
@@ -35,6 +36,14 @@ export interface Key {
rateLimitedAt: number;
/** The time until which this key is rate limited. */
rateLimitedUntil: number;
/** Detailed token usage, separated by input and output, per model family. */
tokenUsage?: {
[family in ModelFamily]?: {
input: number;
output: number;
legacy_total?: number; // To store migrated single-number totals
};
};
}
/*
@@ -52,12 +61,12 @@ for service-agnostic functionality.
export interface KeyProvider<T extends Key = Key> {
readonly service: LLMService;
init(): void;
get(model: string): T;
get(model: string, streaming?: boolean): T;
list(): Omit<T, "key">[];
disable(key: T): void;
update(hash: string, update: Partial<T>): void;
available(): number;
incrementUsage(hash: string, model: string, tokens: number): void;
incrementUsage(hash: string, modelFamily: ModelFamily, usage: { input: number; output: number }): void;
getLockoutPeriod(model: ModelFamily): number;
markRateLimited(hash: string): void;
recheck(): void;
@@ -92,3 +101,8 @@ export { AzureOpenAIKey } from "./azure/provider";
export { GoogleAIKey } from "./google-ai/provider";
export { MistralAIKey } from "./mistral-ai/provider";
export { OpenAIKey } from "./openai/provider";
export { DeepseekKey } from "./deepseek/provider";
export { XaiKey } from "./xai/provider";
export { CohereKey } from "./cohere/provider";
export { QwenKey } from "./qwen/provider";
export { MoonshotKey } from "./moonshot/provider";
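The optional legacy_total field implies that the old flat `${family}Tokens` counters get folded into the new structure during migration. A hedged sketch of what that conversion could look like, assuming the old key shape seen in the removed lines of this diff:

type TokenUsage = { input: number; output: number; legacy_total?: number };

// Hypothetical migration: preserve each old single-number counter as
// legacy_total, since it never distinguished input from output tokens.
function migrateLegacyTokens(
  oldKey: Record<string, unknown>,
  families: string[]
): Record<string, TokenUsage> {
  const tokenUsage: Record<string, TokenUsage> = {};
  for (const family of families) {
    const legacy = oldKey[`${family}Tokens`];
    if (typeof legacy === "number" && legacy > 0) {
      tokenUsage[family] = { input: 0, output: 0, legacy_total: legacy };
    }
  }
  return tokenUsage;
}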
+101 -16
@@ -13,6 +13,11 @@ import { AwsBedrockKeyProvider } from "./aws/provider";
import { GcpKeyProvider, GcpKey } from "./gcp/provider";
import { AzureOpenAIKeyProvider } from "./azure/provider";
import { MistralAIKeyProvider } from "./mistral-ai/provider";
import { DeepseekKeyProvider } from "./deepseek/provider";
import { XaiKeyProvider } from "./xai/provider";
import { CohereKeyProvider } from "./cohere/provider";
import { QwenKeyProvider } from "./qwen/provider";
import { MoonshotKeyProvider } from "./moonshot/provider";
type AllowedPartial = OpenAIKeyUpdate | AnthropicKeyUpdate | Partial<GcpKey>;
@@ -30,6 +35,11 @@ export class KeyPool {
this.keyProviders.push(new AwsBedrockKeyProvider());
this.keyProviders.push(new GcpKeyProvider());
this.keyProviders.push(new AzureOpenAIKeyProvider());
this.keyProviders.push(new DeepseekKeyProvider());
this.keyProviders.push(new XaiKeyProvider());
this.keyProviders.push(new CohereKeyProvider());
this.keyProviders.push(new QwenKeyProvider());
this.keyProviders.push(new MoonshotKeyProvider());
}
public init() {
@@ -43,7 +53,7 @@ export class KeyPool {
this.scheduleRecheck();
}
public get(model: string, service?: LLMService, multimodal?: boolean): Key {
public get(model: string, service?: LLMService, multimodal?: boolean, streaming?: boolean): Key {
// hack for some claude requests needing keys with particular permissions
// even though they use the same models as the non-multimodal requests
if (multimodal) {
@@ -51,7 +61,7 @@ export class KeyPool {
}
const queryService = service || this.getServiceForModel(model);
return this.getKeyProvider(queryService).get(model);
return this.getKeyProvider(queryService).get(model, streaming);
}
public list(): Omit<Key, "key">[] {
@@ -69,7 +79,12 @@ export class KeyPool {
service.update(key.hash, { isRevoked: reason === "revoked" });
if (
service instanceof OpenAIKeyProvider ||
service instanceof AnthropicKeyProvider
service instanceof AnthropicKeyProvider ||
service instanceof DeepseekKeyProvider ||
service instanceof XaiKeyProvider ||
service instanceof CohereKeyProvider ||
service instanceof QwenKeyProvider ||
service instanceof MoonshotKeyProvider
) {
service.update(key.hash, { isOverQuota: reason === "quota" });
}
@@ -96,9 +111,30 @@ export class KeyPool {
}, 0);
}
public incrementUsage(key: Key, model: string, tokens: number): void {
public incrementUsage(key: Key, modelName: string, usage: { input: number; output: number }): void {
const provider = this.getKeyProvider(key.service);
provider.incrementUsage(key.hash, model, tokens);
// The KeyProvider interface now takes a ModelFamily rather than a raw
// model name, so the family must be resolved before this call. The caller
// in middleware/response/index.ts has both the model name and
// req.outboundApi available and resolves the family there, so the second
// argument received here is expected to already be a ModelFamily; the
// cast below encodes that assumption.
provider.incrementUsage(key.hash, modelName as ModelFamily, usage);
}
public getLockoutPeriod(family: ModelFamily): number {
@@ -127,9 +163,32 @@ export class KeyPool {
const provider = this.getKeyProvider(service);
provider.recheck();
}
/**
* Validates organization verification status for all OpenAI keys and returns detailed results.
* This tests each key that claims to have gpt-image-1 or o3 access by attempting to stream from the o3 model,
* which requires a verified organization. Keys from unverified organizations will have only
* gpt-image-1 access removed from their available model families, as o3 can still be used without streaming.
*/
public async validateGptImageAccess(): Promise<{
total: number;
validated: number;
removed: string[];
verified: string[];
errors: {key: string, error: string}[];
}> {
const provider = this.getKeyProvider("openai");
if (!(provider instanceof OpenAIKeyProvider)) {
throw new Error("OpenAI provider not initialized");
}
return provider.validateGptImageAccess();
}
private getServiceForModel(model: string): LLMService {
if (
if (model.startsWith("deepseek")) {
return "deepseek";
} else if (
model.startsWith("gpt") ||
model.startsWith("text-embedding-ada") ||
model.startsWith("dall-e")
@@ -149,6 +208,14 @@ export class KeyPool {
} else if (model.includes("mistral")) {
// https://docs.mistral.ai/platform/endpoints
return "mistral-ai";
} else if (model.includes("xai")) {
return "xai";
} else if (model.includes("command") || model.includes("cohere")) {
return "cohere";
} else if (model.includes("qwen")) {
return "qwen";
} else if (model.includes("moonshot")) {
return "moonshot";
} else if (model.startsWith("anthropic.claude")) {
// AWS offers models from a few providers
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids-arns.html
@@ -164,8 +231,10 @@ export class KeyPool {
}
/**
* Schedules a periodic recheck of OpenAI keys, which runs every 8 hours on
* a schedule offset by the server's hostname.
* Schedules periodic rechecks of keys:
* - OpenAI keys: every 8 hours
* - Google AI keys: every 1 hour (to handle quota resets more promptly)
* All schedules have an offset based on the server's hostname.
*/
private scheduleRecheck(): void {
const machineHash = crypto
@@ -173,19 +242,35 @@ export class KeyPool {
.update(os.hostname())
.digest("hex");
const offset = parseInt(machineHash, 16) % 7;
const hour = [0, 8, 16].map((h) => h + offset).join(",");
const crontab = `0 ${hour} * * *`;
// OpenAI keys recheck every 8 hours
const openaiHour = [0, 8, 16].map((h) => h + offset).join(",");
const openaiCrontab = `0 ${openaiHour} * * *`;
const job = schedule.scheduleJob(crontab, () => {
const next = job.nextInvocation();
logger.info({ next }, "Performing periodic recheck.");
const openaiJob = schedule.scheduleJob(openaiCrontab, () => {
const next = openaiJob.nextInvocation();
logger.info({ next, service: "openai" }, "Performing periodic OpenAI key recheck.");
this.recheck("openai");
});
logger.info(
{ rule: openaiCrontab, next: openaiJob.nextInvocation(), service: "openai" },
"Scheduled periodic OpenAI key recheck job"
);
this.recheckJobs.openai = openaiJob;
// Schedule hourly recheck for Google AI keys to handle quota resets more quickly
const googleMinute = offset;
const googleCrontab = `${googleMinute} * * * *`; // Run every hour
const googleJob = schedule.scheduleJob(googleCrontab, () => {
const next = googleJob.nextInvocation();
logger.info({ next, service: "google-ai" }, "Performing hourly Google AI key recheck for quota status.");
this.recheck("google-ai");
});
logger.info(
{ rule: crontab, next: job.nextInvocation() },
"Scheduled periodic key recheck job"
{ rule: googleCrontab, next: googleJob.nextInvocation(), service: "google-ai" },
"Scheduled hourly Google AI key recheck job"
);
this.recheckJobs.openai = job;
this.recheckJobs["google-ai"] = googleJob;
}
}
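To see what the hostname offset yields in practice: the hash is reduced mod 7 to a stable per-machine shift of 0-6, so a fleet of instances doesn't recheck keys in lockstep. A worked sketch:

import crypto from "crypto";
import os from "os";

const machineHash = crypto.createHash("sha256").update(os.hostname()).digest("hex");
const offset = parseInt(machineHash, 16) % 7; // stable value in 0..6 per host

// With offset === 3, for example:
//   OpenAI:    "0 3,11,19 * * *"  (minute 0 at hours 3, 11, 19)
//   Google AI: "3 * * * *"        (minute 3 of every hour)
const openaiCrontab = `0 ${[0, 8, 16].map((h) => h + offset).join(",")} * * *`;
const googleCrontab = `${offset} * * * *`;
console.log(openaiCrontab, googleCrontab);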
@@ -7,11 +7,8 @@ import { createGenericGetLockoutPeriod, Key, KeyProvider } from "..";
import { prioritizeKeys } from "../prioritize-keys";
import { MistralAIKeyChecker } from "./checker";
type MistralAIKeyUsage = {
[K in MistralAIModelFamily as `${K}Tokens`]: number;
};
export interface MistralAIKey extends Key, MistralAIKeyUsage {
// MistralAIKeyUsage is removed, tokenUsage from base Key interface will be used.
export interface MistralAIKey extends Key {
readonly service: "mistral-ai";
readonly modelFamilies: MistralAIModelFamily[];
}
@@ -67,10 +64,7 @@ export class MistralAIKeyProvider implements KeyProvider<MistralAIKey> {
.digest("hex")
.slice(0, 8)}`,
lastChecked: 0,
"mistral-tinyTokens": 0,
"mistral-smallTokens": 0,
"mistral-mediumTokens": 0,
"mistral-largeTokens": 0,
tokenUsage: {}, // Initialize new tokenUsage field
};
this.keys.push(newKey);
}
@@ -117,12 +111,22 @@ export class MistralAIKeyProvider implements KeyProvider<MistralAIKey> {
return this.keys.filter((k) => !k.isDisabled).length;
}
public incrementUsage(hash: string, model: string, tokens: number) {
const key = this.keys.find((k) => k.hash === hash);
public incrementUsage(keyHash: string, modelFamily: MistralAIModelFamily, usage: { input: number; output: number }) {
const key = this.keys.find((k) => k.hash === keyHash);
if (!key) return;
key.promptCount++;
const family = getMistralAIModelFamily(model);
key[`${family}Tokens`] += tokens;
if (!key.tokenUsage) {
key.tokenUsage = {};
}
if (!key.tokenUsage[modelFamily]) {
key.tokenUsage[modelFamily] = { input: 0, output: 0 };
}
const currentFamilyUsage = key.tokenUsage[modelFamily]!;
currentFamilyUsage.input += usage.input;
currentFamilyUsage.output += usage.output;
}
getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
@@ -0,0 +1,127 @@
import { MoonshotKey } from "./provider";
import { logger } from "../../../logger";
import { assertNever } from "../../utils";
const CHECK_TIMEOUT = 10000;
const API_URL = "https://api.moonshot.cn/v1/users/me/balance";
export class MoonshotKeyChecker {
private log = logger.child({ module: "key-checker", service: "moonshot" });
constructor(private readonly update: (hash: string, key: Partial<MoonshotKey>) => void) {
this.log.info("MoonshotKeyChecker initialized");
}
public async checkKey(key: MoonshotKey): Promise<void> {
this.log.info({ hash: key.hash }, "Starting key validation check");
try {
const result = await this.validateKey(key);
this.handleCheckResult(key, result);
} catch (error) {
if (error instanceof Error) {
this.log.warn(
{ error: error.message, stack: error.stack, hash: key.hash },
"Failed to check key status"
);
} else {
this.log.warn(
{ error, hash: key.hash },
"Failed to check key status with unknown error"
);
}
}
}
private async validateKey(key: MoonshotKey): Promise<"valid" | "invalid" | "quota"> {
const controller = new AbortController();
const timeout = setTimeout(() => {
controller.abort();
this.log.warn({ hash: key.hash }, "Key validation timed out after " + CHECK_TIMEOUT + "ms");
}, CHECK_TIMEOUT);
try {
// Check balance endpoint to verify key validity
const headers = {
"Content-Type": "application/json",
"Authorization": `Bearer ${key.key}`
};
const response = await fetch(API_URL, {
method: "GET",
headers,
signal: controller.signal,
});
if (response.status === 200) {
const data = await response.json();
// Check if response has the expected Moonshot API structure
if (data && data.status === true && data.code === 0 && data.data) {
const balance = data.data.available_balance;
// Check if balance is too low (consider it quota exceeded if balance is 0 or negative)
if (typeof balance === 'number' && balance <= 0) {
return "quota";
}
return "valid";
} else {
this.log.warn(
{ response: data, hash: key.hash },
"Unexpected response format from Moonshot API"
);
return "invalid";
}
} else if (response.status === 401) {
// Unauthorized - invalid key
return "invalid";
} else if (response.status === 429) {
// Rate limit - but key is valid
return "valid";
} else {
this.log.warn(
{ status: response.status, hash: key.hash },
"Unexpected status code while testing key validity"
);
return "invalid";
}
} catch (error) {
if (error instanceof Error && error.name === 'AbortError') {
this.log.warn({ hash: key.hash }, "Key validation aborted");
}
throw error;
} finally {
clearTimeout(timeout);
}
}
private handleCheckResult(
key: MoonshotKey,
result: "valid" | "invalid" | "quota"
): void {
switch (result) {
case "valid":
this.log.info({ hash: key.hash }, "Key is valid and enabled");
this.update(key.hash, {
isDisabled: false,
lastChecked: Date.now(),
});
break;
case "invalid":
this.log.warn({ hash: key.hash }, "Key is invalid, marking as revoked");
this.update(key.hash, {
isDisabled: true,
isRevoked: true,
lastChecked: Date.now(),
});
break;
case "quota":
this.log.warn({ hash: key.hash }, "Key has exceeded its quota, disabling");
this.update(key.hash, {
isDisabled: true,
isOverQuota: true,
lastChecked: Date.now(),
});
break;
default:
assertNever(result);
}
}
}
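The structural check above expects a Moonshot balance payload of roughly this shape; values are illustrative and only the fields the checker reads are shown:

// Illustrative payload; the checker inspects status, code, and available_balance.
const sampleBalanceResponse = {
  status: true,
  code: 0,
  data: { available_balance: 12.34 },
};

// Mirrors the checker's rule: zero or negative balance counts as over quota.
const quotaExceeded =
  typeof sampleBalanceResponse.data.available_balance === "number" &&
  sampleBalanceResponse.data.available_balance <= 0;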
@@ -0,0 +1,2 @@
export { MoonshotKey, MoonshotKeyProvider } from "./provider";
export { MoonshotKeyChecker } from "./checker";
@@ -0,0 +1,166 @@
import { Key, KeyProvider, createGenericGetLockoutPeriod } from "..";
import { MoonshotKeyChecker } from "./checker";
import { config } from "../../../config";
import { logger } from "../../../logger";
import { MoonshotModelFamily, ModelFamily } from "../../models";
export interface MoonshotKey extends Key {
readonly service: "moonshot";
readonly modelFamilies: MoonshotModelFamily[];
isOverQuota: boolean;
}
export class MoonshotKeyProvider implements KeyProvider<MoonshotKey> {
readonly service = "moonshot";
private keys: MoonshotKey[] = [];
private checker?: MoonshotKeyChecker;
private log = logger.child({ module: "key-provider", service: this.service });
constructor() {
const keyConfig = config.moonshotKey?.trim();
if (!keyConfig) {
return;
}
const keys = keyConfig.split(",").map((k) => k.trim());
for (const key of keys) {
if (!key) continue;
this.keys.push({
key,
service: this.service,
modelFamilies: ["moonshot"],
isDisabled: false,
isRevoked: false,
promptCount: 0,
lastUsed: 0,
lastChecked: 0,
hash: this.hashKey(key),
rateLimitedAt: 0,
rateLimitedUntil: 0,
tokenUsage: {},
isOverQuota: false,
});
}
}
private hashKey(key: string): string {
return require("crypto").createHash("sha256").update(key).digest("hex");
}
public init() {
if (this.keys.length === 0) return;
if (!config.checkKeys) {
this.log.warn(
"Key checking is disabled. Keys will not be verified."
);
return;
}
this.checker = new MoonshotKeyChecker(this.update.bind(this));
for (const key of this.keys) {
void this.checker.checkKey(key);
}
}
public get(model: string): MoonshotKey {
const availableKeys = this.keys.filter((k) => !k.isDisabled);
if (availableKeys.length === 0) {
throw new Error("No Moonshot keys available");
}
const key = availableKeys[Math.floor(Math.random() * availableKeys.length)];
key.lastUsed = Date.now();
this.throttle(key.hash);
return { ...key };
}
public list(): Omit<MoonshotKey, "key">[] {
return this.keys.map(({ key, ...rest }) => rest);
}
public disable(key: MoonshotKey): void {
const found = this.keys.find((k) => k.hash === key.hash);
if (found) {
found.isDisabled = true;
}
}
public update(hash: string, update: Partial<MoonshotKey>): void {
const key = this.keys.find((k) => k.hash === hash);
if (key) {
Object.assign(key, update);
}
}
public available(): number {
return this.keys.filter((k) => !k.isDisabled).length;
}
public incrementUsage(keyHash: string, modelFamily: MoonshotModelFamily, usage: { input: number; output: number }) {
const key = this.keys.find((k) => k.hash === keyHash);
if (!key) return;
key.promptCount++;
if (!key.tokenUsage) {
key.tokenUsage = {};
}
// Moonshot only has one model family "moonshot"
if (!key.tokenUsage[modelFamily]) {
key.tokenUsage[modelFamily] = { input: 0, output: 0 };
}
const currentFamilyUsage = key.tokenUsage[modelFamily]!;
currentFamilyUsage.input += usage.input;
currentFamilyUsage.output += usage.output;
}
/**
* Upon being rate limited, a key will be locked out for this many milliseconds
* while we wait for other concurrent requests to finish.
*/
private static readonly RATE_LIMIT_LOCKOUT = 2000;
/**
* Upon assigning a key, we will wait this many milliseconds before allowing it
* to be used again. This is to prevent the queue from flooding a key with too
* many requests while we wait to learn whether previous ones succeeded.
*/
private static readonly KEY_REUSE_DELAY = 500;
getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
public markRateLimited(keyHash: string) {
this.log.debug({ key: keyHash }, "Key rate limited");
const key = this.keys.find((k) => k.hash === keyHash)!;
const now = Date.now();
key.rateLimitedAt = now;
key.rateLimitedUntil = now + MoonshotKeyProvider.RATE_LIMIT_LOCKOUT;
}
public recheck(): void {
if (!this.checker || !config.checkKeys) return;
for (const key of this.keys) {
this.update(key.hash, {
isOverQuota: false,
isDisabled: false,
lastChecked: 0
});
void this.checker.checkKey(key);
}
}
/**
* Applies a short artificial delay to the key upon dequeueing, in order to
* prevent it from being immediately assigned to another request before the
* current one can be dispatched.
**/
private throttle(hash: string) {
const now = Date.now();
const key = this.keys.find((k) => k.hash === hash)!;
const currentRateLimit = key.rateLimitedUntil;
const nextRateLimit = now + MoonshotKeyProvider.KEY_REUSE_DELAY;
key.rateLimitedAt = now;
key.rateLimitedUntil = Math.max(currentRateLimit, nextRateLimit);
}
}
+162 -8
@@ -1,23 +1,24 @@
import { AxiosError } from "axios";
import { KeyCheckerBase } from "../key-checker-base";
import type { OpenAIKey, OpenAIKeyProvider } from "./provider";
import type { OpenAIKey, OpenAIKeyProvider, OpenAIKeyUpdate } from "./provider";
import { OpenAIModelFamily, getOpenAIModelFamily } from "../../models";
import { getAxiosInstance } from "../../network";
const axios = getAxiosInstance();
const MIN_CHECK_INTERVAL = 3 * 1000; // 3 seconds
const KEY_CHECK_PERIOD = 60 * 60 * 1000; // 1 hour
const KEY_CHECK_PERIOD = 5 * 60 * 60 * 1000; // 5 hours
const POST_CHAT_COMPLETIONS_URL = "https://api.openai.com/v1/chat/completions";
const POST_IMAGE_GENERATIONS_URL = "https://api.openai.com/v1/images/generations";
const GET_MODELS_URL = "https://api.openai.com/v1/models";
const GET_ORGANIZATIONS_URL = "https://api.openai.com/v1/organizations";
const GET_ORGANIZATIONS_URL = "https://api.openai.com/v1/me";
type GetModelsResponse = {
data: [{ id: string }];
};
type GetOrganizationsResponse = {
data: [{ id: string; is_default: boolean }];
orgs: {data: [{ id: string; is_default: boolean }]};
};
type OpenAIError = {
@@ -50,10 +51,40 @@ export class OpenAIKeyChecker extends KeyCheckerBase<OpenAIKey> {
this.testLiveness(key),
this.maybeCreateOrganizationClones(key),
]);
const updates = {
const updates: OpenAIKeyUpdate = {
modelFamilies: provisionedModels,
isTrial: livenessTest.rateLimit <= 250,
};
// Test organization verification status for all keys
// This is needed for GPT-5, o1, o3, and gpt-image-1 streaming restrictions
try {
const isVerifiedOrg = await this.testVerifiedOrg(key);
// Always set the organizationVerified field for all keys
updates.organizationVerified = isVerifiedOrg;
// Only remove gpt-image from unverified orgs if they have it
if (!isVerifiedOrg && provisionedModels.includes("gpt-image")) {
const updatedFamilies = provisionedModels.filter(family => family !== "gpt-image");
updates.modelFamilies = updatedFamilies;
this.log.warn({ key: key.hash }, "Key's organization is not verified. Removing gpt-image-1 from available models.");
}
if (isVerifiedOrg) {
this.log.info({ key: key.hash }, "Organization is verified: GPT-5/o1/o3 streaming and gpt-image-1 are available.");
} else {
this.log.warn({ key: key.hash }, "Organization is not verified: GPT-5/o1/o3 streaming is restricted and gpt-image-1 is unavailable.");
}
} catch (error) {
// If test fails, assume no access to be safe
updates.organizationVerified = false;
if (provisionedModels.includes("gpt-image")) {
const updatedFamilies = provisionedModels.filter(family => family !== "gpt-image");
updates.modelFamilies = updatedFamilies;
}
this.log.error({ key: key.hash, error }, "Error testing organization verification status. Assuming not verified for safety.");
}
this.updateKey(key.hash, updates);
} else {
// No updates needed as models and trial status generally don't change.
@@ -105,7 +136,7 @@ export class OpenAIKeyChecker extends KeyCheckerBase<OpenAIKey> {
GET_ORGANIZATIONS_URL,
opts
);
const organizations = data.data;
const organizations = data.orgs.data;
const defaultOrg = organizations.find(({ is_default }) => is_default);
this.updateKey(key.hash, { organizationId: defaultOrg?.id });
if (organizations.length <= 1) return;
@@ -288,7 +319,7 @@ export class OpenAIKeyChecker extends KeyCheckerBase<OpenAIKey> {
payload,
{
headers: OpenAIKeyChecker.getHeaders(key),
validateStatus: (status) => status === 400,
validateStatus: (status) => status === 404,
}
);
const rateLimitHeader = headers["x-ratelimit-limit-requests"];
@@ -298,7 +329,7 @@ export class OpenAIKeyChecker extends KeyCheckerBase<OpenAIKey> {
if (data.error.type !== "invalid_request_error") {
this.log.warn(
{ key: key.hash, error: data },
"Unexpected 400 error class while checking key; assuming key is valid, but this may indicate a change in the API."
"Unexpected 404 error class while checking key; assuming key is valid, but this may indicate a change in the API."
);
}
return { rateLimit };
@@ -311,6 +342,129 @@ export class OpenAIKeyChecker extends KeyCheckerBase<OpenAIKey> {
return data?.error?.type;
}
/**
* Tests whether the key's organization is verified by attempting to stream from the gpt-5-mini model.
* Only verified organizations can stream from GPT-5 models, so this is a reliable test for both
* GPT-5 streaming and gpt-image-1 access (which also requires verified organization status).
* Returns true if the organization is verified.
*/
public async testVerifiedOrg(key: OpenAIKey): Promise<boolean> {
this.log.info({ key: key.hash }, "Testing organization verification status via gpt-5-mini streaming");
try {
const payload = {
model: "gpt-5",
messages: [{ role: "user", content: "Hi" }],
max_completion_tokens: 1,
stream: true
};
// Make a minimal streaming request to check organization verification
const response = await axios.post(
POST_CHAT_COMPLETIONS_URL,
payload,
{
headers: OpenAIKeyChecker.getHeaders(key),
validateStatus: (status) => true, // Accept any status code to inspect errors
timeout: 30000, // 30 second timeout
signal: AbortSignal.timeout(30000)
}
);
// If we get a 200 response, the organization is verified
if (response.status === 200) {
this.log.info(
{ key: key.hash, status: response.status },
`Organization is verified. Streaming gpt-5-mini request succeeded with status code ${response.status}`
);
return true;
}
// Check for specific error responses that indicate unverified organization
const data = response.data as any;
const errorMessage = data?.error?.message || '';
// Explicitly check for organization verification errors
if (errorMessage.includes("organization must be verified")) {
this.log.warn(
{ key: key.hash, status: response.status, error: errorMessage },
"Organization is not verified: verification required for streaming gpt-5-mini"
);
return false;
}
// If we get a 400 error but it's not about verification, the organization might be verified
// but there's another issue with the request
if (response.status === 400 && !errorMessage.includes("organization must be verified")) {
// Check if the error is specifically about the 'stream' parameter
if (errorMessage.includes("stream") && errorMessage.includes("unsupported_value")) {
this.log.warn(
{ key: key.hash, status: response.status, error: errorMessage },
"Organization is not verified: cannot stream with gpt-5-mini"
);
return false;
}
// If it's some other validation error, the organization might be verified
this.log.info(
{ key: key.hash, status: response.status, error: errorMessage },
"Got 400 error but not related to organization verification. Assuming organization is verified."
);
return true;
}
// For other status codes, log the issue but assume unverified
this.log.warn(
{ key: key.hash, status: response.status, error: errorMessage },
"Unexpected response when testing organization verification, assuming not verified"
);
return false;
} catch (error) {
// Handle network errors or request failures
if (error instanceof AxiosError && error.response) {
const status = error.response.status;
const data = error.response.data as any;
const errorMessage = data?.error?.message || 'Unknown error';
// Check for specific error messages related to organization verification
if (errorMessage.includes("organization must be verified")) {
this.log.warn(
{ key: key.hash, status, error: errorMessage },
"Organization is not verified based on error message"
);
return false;
}
// If we get a 400 error but it's not about verification, the organization might be verified
if (status === 400 && !errorMessage.includes("organization must be verified")) {
// Check if the error is specifically about the 'stream' parameter
if (errorMessage.includes("stream") && errorMessage.includes("unsupported_value")) {
this.log.warn(
{ key: key.hash, status, error: errorMessage },
"Organization is not verified: cannot stream with gpt-5-mini"
);
return false;
}
// If it's some other validation error, the organization might be verified
this.log.info(
{ key: key.hash, status, error: errorMessage },
"Got 400 error but not related to organization verification. Assuming organization is verified."
);
return true;
}
}
// For all other errors, assume unverified for safety
this.log.error(
{ key: key.hash, error: error instanceof Error ? error.message : String(error) },
"Error testing organization verification status. Assuming not verified for safety."
);
return false;
}
}
static getHeaders(key: OpenAIKey) {
const useOrg = !key.key.includes("svcacct");
return {
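// --- Illustrative sketch (not part of this commit) ---------------------------
// testVerifiedOrg() boils down to a small decision table over the response
// status and error message. A pure restatement (this mirrors the checks above
// but is not part of the checker's API):
function classifyVerification(status: number, errorMessage: string): boolean {
  if (status === 200) return true; // stream opened: org is verified
  if (errorMessage.includes("organization must be verified")) return false;
  if (status === 400) {
    // A 400 complaining about the `stream` parameter means streaming is gated
    // (unverified); any other validation error suggests the org itself is fine.
    if (errorMessage.includes("stream") && errorMessage.includes("unsupported_value")) {
      return false;
    }
    return true;
  }
  return false; // anything else: assume unverified for safety
}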
+205 -24
View File
@@ -3,16 +3,13 @@ import http from "http";
import { Key, KeyProvider } from "../index";
import { config } from "../../../config";
import { logger } from "../../../logger";
import { getOpenAIModelFamily, OpenAIModelFamily } from "../../models";
import { getOpenAIModelFamily, OpenAIModelFamily, ModelFamily } from "../../models";
import { PaymentRequiredError } from "../../errors";
import { OpenAIKeyChecker } from "./checker";
import { prioritizeKeys } from "../prioritize-keys";
type OpenAIKeyUsage = {
[K in OpenAIModelFamily as `${K}Tokens`]: number;
};
export interface OpenAIKey extends Key, OpenAIKeyUsage {
export interface OpenAIKey extends Key {
readonly service: "openai";
modelFamilies: OpenAIModelFamily[];
/**
@@ -23,6 +20,8 @@ export interface OpenAIKey extends Key, OpenAIKeyUsage {
organizationId?: string;
/** Whether this is a free trial key. These are prioritized over paid keys if they can fulfill the request. */
isTrial: boolean;
/** Whether the organization associated with this key is verified. Verified organizations can use streaming for GPT-5 models and gpt-image-1. */
organizationVerified?: boolean;
/** Set when key check returns a non-transient 429. */
isOverQuota: boolean;
/**
@@ -90,6 +89,14 @@ export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> {
"gpt4" as const,
"gpt4-turbo" as const,
"gpt4o" as const,
"gpt45" as const,
"gpt41" as const,
"gpt41-mini" as const,
"gpt41-nano" as const,
"gpt5" as const,
"gpt5-mini" as const,
"gpt5-nano" as const,
"gpt5-chat-latest" as const,
],
isTrial: false,
isDisabled: false,
@@ -107,14 +114,7 @@ export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> {
rateLimitedUntil: 0,
rateLimitRequestsReset: 0,
rateLimitTokensReset: 0,
turboTokens: 0,
gpt4Tokens: 0,
"gpt4-32kTokens": 0,
"gpt4-turboTokens": 0,
gpt4oTokens: 0,
"o1Tokens": 0,
"o1-miniTokens": 0,
"dall-eTokens": 0,
tokenUsage: {}, // per-family { input, output } token ledger
modelIds: [],
};
this.keys.push(newKey);
@@ -139,22 +139,97 @@ export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> {
return this.keys.map((key) => Object.freeze({ ...key, key: undefined }));
}
public get(requestModel: string) {
public get(requestModel: string, streaming?: boolean) {
let model = requestModel;
const neededFamily = getOpenAIModelFamily(model);
const excludeTrials = model === "text-embedding-ada-002";
const isGptImageRequest = neededFamily === "gpt-image";
// GPT-5 models (gpt-5, gpt-5-mini, gpt-5-nano) require verified keys for streaming
const isGpt5Model = /^gpt-5(-mini|-nano)?(-\d{4}-\d{2}-\d{2})?$/.test(model);
const isO1Model = /^o1(-mini|-preview)?(-\d{4}-\d{2}-\d{2})?$/.test(model);
const isO3Model = /^o3(-mini)?(-\d{4}-\d{2}-\d{2})?$/.test(model);
const isO4MiniModel = /^o4-mini(-\d{4}-\d{2}-\d{2})?$/.test(model);
const requiresVerifiedStreaming = (isGpt5Model || isO1Model || isO3Model || isO4MiniModel) && streaming;
const availableKeys = this.keys.filter(
// Allow keys which
(key) =>
!key.isDisabled && // are not disabled
key.modelFamilies.includes(neededFamily) && // have access to the model family we need
(!excludeTrials || !key.isTrial) && // and are not trials if we don't want them
(!config.checkKeys || key.modelIds.includes(model)) // and have the specific snapshot we need
// First, filter keys based on basic criteria
let availableKeys = this.keys.filter(
(key) =>
!key.isDisabled && // not disabled
key.modelFamilies.includes(neededFamily) && // has access to the model family we need
(!excludeTrials || !key.isTrial) && // not a trial if we don't want trials
(!config.checkKeys || key.modelIds.includes(model)) // has the specific snapshot if needed
);
// For gpt-image requests, we need an additional verification step
// Only keys from verified organizations can use gpt-image-1
if (isGptImageRequest) {
this.log.debug(
{ model, keyCount: availableKeys.length },
"Filtering keys for gpt-image request to ensure verified organization status"
);
// Log the keys that claim to have gpt-image access for debugging
availableKeys.forEach(key => {
this.log.debug(
{ keyHash: key.hash, modelFamilies: key.modelFamilies, orgId: key.organizationId },
"Key with gpt-image access"
);
});
// Filter to only include keys from verified organizations
// Use the organizationVerified field which is set by the key checker
const verifiedKeys = availableKeys.filter(key => key.organizationVerified === true);
if (verifiedKeys.length > 0) {
this.log.info(
{ model, totalKeys: availableKeys.length, verifiedKeys: verifiedKeys.length },
"Using only verified organization keys for gpt-image request"
);
availableKeys = verifiedKeys;
} else {
this.log.warn(
{ model, totalKeys: availableKeys.length },
"No verified organization keys available for gpt-image request"
);
}
}
// For streaming requests with models that require verified organizations
// GPT-5, o1, o3, and o4-mini models require verified organizations for streaming
if (requiresVerifiedStreaming) {
this.log.debug(
{ model, keyCount: availableKeys.length, streaming },
"Filtering keys for streaming request to ensure verified organization status"
);
// Filter to only include keys from verified organizations
// Use the organizationVerified field which is set by the key checker
const verifiedKeys = availableKeys.filter(key => key.organizationVerified === true);
if (verifiedKeys.length > 0) {
this.log.info(
{ model, totalKeys: availableKeys.length, verifiedKeys: verifiedKeys.length, streaming },
"Using only verified organization keys for streaming request"
);
availableKeys = verifiedKeys;
} else {
this.log.warn(
{ model, totalKeys: availableKeys.length, streaming },
"No verified organization keys available for streaming request"
);
// Set availableKeys to empty array to trigger the error below
availableKeys = [];
}
}
if (availableKeys.length === 0) {
if (requiresVerifiedStreaming) {
throw new PaymentRequiredError(
"No verified OpenAI keys available for streaming GPT-5, o1, o3, or o4-mini models. Only verified organizations can stream these models. Please disable streaming or contact support to verify your organization."
);
}
throw new PaymentRequiredError(
`No OpenAI keys available for model ${model}`
);
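// --- Illustrative sketch (not part of this commit) ---------------------------
// Quick sanity check of the model-detection regexes used above, exercised
// against a few representative model IDs:
const isGpt5 = (m: string) => /^gpt-5(-mini|-nano)?(-\d{4}-\d{2}-\d{2})?$/.test(m);
const isO4Mini = (m: string) => /^o4-mini(-\d{4}-\d{2}-\d{2})?$/.test(m);
console.log(isGpt5("gpt-5")); // true
console.log(isGpt5("gpt-5-mini-2025-08-07")); // true (dated snapshot)
console.log(isGpt5("gpt-5-chat-latest")); // false: chat-latest is not gated here
console.log(isO4Mini("o4-mini")); // true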
@@ -200,7 +275,18 @@ export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> {
);
return clone;
});
// Add the clones to the key pool
this.keys.push(...clones);
// Log the total number of keys after cloning
this.log.info(
{ totalKeys: this.keys.length, newClones: clones.length },
"Added cloned keys to the key pool"
);
// Return the clones so they can be checked immediately if needed
return clones;
}
/** Disables a key, or does nothing if the key isn't in this pool. */
@@ -279,11 +365,22 @@ export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> {
key.rateLimitedUntil = now + key.rateLimitRequestsReset;
}
public incrementUsage(keyHash: string, model: string, tokens: number) {
public incrementUsage(keyHash: string, modelFamily: OpenAIModelFamily, usage: { input: number; output: number }) {
const key = this.keys.find((k) => k.hash === keyHash);
if (!key) return;
key.promptCount++;
key[`${getOpenAIModelFamily(model)}Tokens`] += tokens;
if (!key.tokenUsage) {
key.tokenUsage = {};
}
if (!key.tokenUsage[modelFamily]) {
key.tokenUsage[modelFamily] = { input: 0, output: 0 };
}
const currentFamilyUsage = key.tokenUsage[modelFamily]!;
currentFamilyUsage.input += usage.input;
currentFamilyUsage.output += usage.output;
}
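// --- Illustrative sketch (not part of this commit) ---------------------------
// The per-family ledger that incrementUsage() maintains, as a standalone
// function over a hypothetical ledger object (the provider mutates keys in place):
type UsageLedger = Partial<Record<string, { input: number; output: number }>>;
function addUsage(ledger: UsageLedger, family: string, usage: { input: number; output: number }) {
  const entry = (ledger[family] ??= { input: 0, output: 0 });
  entry.input += usage.input;
  entry.output += usage.output;
}
const ledger: UsageLedger = {};
addUsage(ledger, "gpt5", { input: 1200, output: 300 });
addUsage(ledger, "gpt5", { input: 800, output: 200 });
// ledger.gpt5 -> { input: 2000, output: 500 }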
public updateRateLimits(keyHash: string, headers: http.IncomingHttpHeaders) {
@@ -323,6 +420,90 @@ export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> {
});
this.checker?.scheduleNextCheck();
}
/**
* Explicitly tests all keys for organization verification status and returns detailed results.
* This checks whether the organization is verified, which is required for gpt-image-1
* access and for streaming the gated reasoning and GPT-5 models.
*/
public async validateGptImageAccess(): Promise<{
total: number;
validated: number;
removed: string[];
verified: string[];
errors: {key: string, error: string}[];
}> {
if (!this.checker) {
throw new Error("Key checker not initialized");
}
const results = {
total: this.keys.length,
validated: 0,
removed: [] as string[],
verified: [] as string[],
errors: [] as {key: string, error: string}[]
};
this.log.info({ keyCount: this.keys.length }, "Starting organization verification check for all OpenAI keys");
// Process keys sequentially to avoid hitting rate limits
for (const key of this.keys) {
try {
// Skip keys that are already disabled
if (key.isDisabled || key.isRevoked) {
this.log.debug({ key: key.hash }, "Skipping disabled/revoked key");
continue;
}
// Check if the key claims to have gpt-image-1 or o3 access
const hasGptImageFamily = key.modelFamilies.includes("gpt-image");
const hasO3Family = key.modelFamilies.includes("o3");
if (hasGptImageFamily || hasO3Family) {
// Probe the key's organization verification status via the checker's streaming test
const isVerifiedOrg = await this.checker.testVerifiedOrg(key);
results.validated++;
if (!isVerifiedOrg) {
// Only remove gpt-image from unverified orgs - they can still use o3, just not stream it
const updatedFamilies = key.modelFamilies.filter(family => family !== "gpt-image");
this.update(key.hash, { modelFamilies: updatedFamilies });
results.removed.push(key.hash);
this.log.warn({ key: key.hash }, "Key's organization is not verified. Removing gpt-image-1 from available models.");
} else {
results.verified.push(key.hash);
this.log.info({ key: key.hash }, "Verified organization status for key. Can use gpt-image-1 and o3 streaming.");
}
} else {
this.log.debug({ key: key.hash }, "Key does not claim gpt-image-1 or o3 access. Skipping verification.");
}
} catch (error) {
results.errors.push({ key: key.hash, error: error instanceof Error ? error.message : String(error) });
this.log.error({ key: key.hash, error }, "Error validating organization verification status");
// If a key errors during validation, only remove gpt-image access to be safe
if (key.modelFamilies.includes("gpt-image")) {
const updatedFamilies = key.modelFamilies.filter(family => family !== "gpt-image");
this.update(key.hash, { modelFamilies: updatedFamilies });
results.removed.push(key.hash);
}
}
// Delay between checks to avoid hitting rate limits
await new Promise(resolve => setTimeout(resolve, 500));
}
this.log.info({
total: results.total,
validated: results.validated,
verified: results.verified.length,
removed: results.removed.length,
errors: results.errors.length
}, "Completed organization verification check");
return results;
}
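// --- Illustrative sketch (not part of this commit) ---------------------------
// Hypothetical admin-side usage of validateGptImageAccess(); the result shape
// comes from the method above, the surrounding harness does not:
async function auditImageAccess(provider: OpenAIKeyProvider) {
  const r = await provider.validateGptImageAccess();
  console.log(
    `checked ${r.validated}/${r.total} keys: ${r.verified.length} verified, ` +
    `${r.removed.length} lost gpt-image, ${r.errors.length} errors`
  );
}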
/**
* Called when a key is selected for a request, briefly disabling it to
+144
View File
@@ -0,0 +1,144 @@
import { Key } from "..";
import { QwenModelFamily } from "../../models";
import { logger } from "../../../logger";
import { assertNever } from "../../utils";
// Define the QwenKey interface here to avoid circular dependency
export interface QwenKey extends Key {
readonly service: "qwen";
readonly modelFamilies: QwenModelFamily[];
isOverQuota: boolean;
// "qwenTokens" is removed, tokenUsage from base Key interface will be used.
}
const CHECK_TIMEOUT = 10000;
const API_URL = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1/chat/completions";
export class QwenKeyChecker {
private log = logger.child({ module: "key-checker", service: "qwen" });
constructor(private readonly update: (hash: string, key: Partial<QwenKey>) => void) {
this.log.info("QwenKeyChecker initialized");
}
public async checkKey(key: QwenKey): Promise<void> {
this.log.info({ hash: key.hash }, "Starting key validation check");
try {
const result = await this.validateKey(key);
this.handleCheckResult(key, result);
} catch (error) {
if (error instanceof Error) {
this.log.warn(
{ error: error.message, stack: error.stack, hash: key.hash },
"Failed to check key status"
);
} else {
this.log.warn(
{ error, hash: key.hash },
"Failed to check key status with unknown error"
);
}
}
}
private async validateKey(key: QwenKey): Promise<"valid" | "invalid" | "quota"> {
const controller = new AbortController();
const timeout = setTimeout(() => {
controller.abort();
this.log.warn({ hash: key.hash }, "Key validation timed out after " + CHECK_TIMEOUT + "ms");
}, CHECK_TIMEOUT);
try {
// Simple test request to check if the key is valid
const headers = {
"Content-Type": "application/json",
"Authorization": `Bearer ${key.key}`
};
const body = {
model: "qwen-turbo",
max_tokens: 5,
temperature: 0.2,
messages: [
{
role: "user",
content: "Hello"
}
]
};
const response = await fetch(API_URL, {
method: "POST",
headers,
body: JSON.stringify(body),
signal: controller.signal,
});
// Check response status
if (response.status === 200) {
return "valid";
} else if (response.status === 401) {
// Invalid API key
return "invalid";
} else if (response.status === 429) {
// Rate limit or quota exceeded
const responseBody = await response.json();
const errorMsg = responseBody?.error?.message || "";
// Check if it's a quota issue or just rate limiting
if (errorMsg.includes("quota") || errorMsg.includes("billing")) {
return "quota";
}
// Otherwise it's just rate limited, still valid
return "valid";
} else {
this.log.warn(
{ status: response.status, hash: key.hash },
"Unexpected status code while testing key validity"
);
return "invalid";
}
} catch (error) {
if (error instanceof Error && error.name === 'AbortError') {
this.log.warn({ hash: key.hash }, "Key validation aborted");
}
throw error;
} finally {
clearTimeout(timeout);
}
}
private handleCheckResult(
key: QwenKey,
result: "valid" | "invalid" | "quota"
): void {
switch (result) {
case "valid":
this.log.info({ hash: key.hash }, "Key is valid and enabled");
this.update(key.hash, {
isDisabled: false,
lastChecked: Date.now(),
});
break;
case "invalid":
this.log.warn({ hash: key.hash }, "Key is invalid, marking as revoked");
this.update(key.hash, {
isDisabled: true,
isRevoked: true,
lastChecked: Date.now(),
});
break;
case "quota":
this.log.warn({ hash: key.hash }, "Key has exceeded its quota, disabling");
this.update(key.hash, {
isDisabled: true,
isOverQuota: true,
lastChecked: Date.now(),
});
break;
default:
assertNever(result);
}
}
}
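// --- Illustrative sketch (not part of this commit) ---------------------------
// The subtle branch above: a 429 is only fatal when the body mentions quota or
// billing; otherwise the key is merely rate limited and remains usable.
function classify429(errorMessage: string): "quota" | "valid" {
  return errorMessage.includes("quota") || errorMessage.includes("billing")
    ? "quota"
    : "valid";
}
// classify429("Allocated quota exceeded") -> "quota"
// classify429("Requests throttled, retry later") -> "valid"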
+9
View File
@@ -0,0 +1,9 @@
import { QwenKeyProvider } from "./provider";
// Export only the provider and the checker, not the QwenKey interface directly
export { QwenKeyProvider } from "./provider";
export { QwenKeyChecker } from "./checker";
// Re-export the QwenKey interface from provider to maintain compatibility
export type { QwenKey } from "./provider";
export const qwenKeyProvider = new QwenKeyProvider();
+165
View File
@@ -0,0 +1,165 @@
import { KeyProvider, createGenericGetLockoutPeriod } from "..";
import { QwenKeyChecker, QwenKey } from "./checker";
import { config } from "../../../config";
import { logger } from "../../../logger";
import { QwenModelFamily } from "../../models";
// Re-export the QwenKey interface
export type { QwenKey } from "./checker";
export class QwenKeyProvider implements KeyProvider<QwenKey> {
readonly service = "qwen";
private keys: QwenKey[] = [];
private checker?: QwenKeyChecker;
private log = logger.child({ module: "key-provider", service: this.service });
constructor() {
// `qwenKey` is added to the config dynamically, so index into it to satisfy TypeScript.
const keyConfig = (config as any)["qwenKey"]?.trim();
if (!keyConfig) {
return;
}
const keys = keyConfig.split(",").map((k: string) => k.trim());
for (const key of keys) {
if (!key) continue;
this.keys.push({
key,
service: this.service,
modelFamilies: ["qwen"],
isDisabled: false,
isRevoked: false,
promptCount: 0,
lastUsed: 0,
lastChecked: 0,
hash: this.hashKey(key),
rateLimitedAt: 0,
rateLimitedUntil: 0,
tokenUsage: {}, // per-family { input, output } token ledger
isOverQuota: false,
});
}
}
private hashKey(key: string): string {
return require("crypto").createHash("sha256").update(key).digest("hex");
}
public init() {
if (this.keys.length === 0) return;
if (!config.checkKeys) {
this.log.warn(
"Key checking is disabled. Keys will not be verified."
);
return;
}
this.checker = new QwenKeyChecker(this.update.bind(this));
for (const key of this.keys) {
void this.checker.checkKey(key);
}
}
public get(model: string): QwenKey {
const availableKeys = this.keys.filter((k) => !k.isDisabled);
if (availableKeys.length === 0) {
throw new Error("No Qwen keys available");
}
const key = availableKeys[Math.floor(Math.random() * availableKeys.length)];
key.lastUsed = Date.now();
this.throttle(key.hash);
return { ...key };
}
public list(): Omit<QwenKey, "key">[] {
return this.keys.map(({ key, ...rest }) => rest);
}
public disable(key: QwenKey): void {
const found = this.keys.find((k) => k.hash === key.hash);
if (found) {
found.isDisabled = true;
}
}
public update(hash: string, update: Partial<QwenKey>): void {
const key = this.keys.find((k) => k.hash === hash);
if (key) {
Object.assign(key, update);
}
}
public available(): number {
return this.keys.filter((k) => !k.isDisabled).length;
}
public incrementUsage(keyHash: string, modelFamily: QwenModelFamily, usage: { input: number; output: number }) {
const key = this.keys.find((k) => k.hash === keyHash);
if (!key) return;
key.promptCount++;
if (!key.tokenUsage) {
key.tokenUsage = {};
}
// Qwen only has one model family "qwen"
if (!key.tokenUsage[modelFamily]) {
key.tokenUsage[modelFamily] = { input: 0, output: 0 };
}
const currentFamilyUsage = key.tokenUsage[modelFamily]!;
currentFamilyUsage.input += usage.input;
currentFamilyUsage.output += usage.output;
}
/**
* Upon being rate limited, a key will be locked out for this many milliseconds
* while we wait for other concurrent requests to finish.
*/
private static readonly RATE_LIMIT_LOCKOUT = 2000;
/**
* Upon assigning a key, we will wait this many milliseconds before allowing it
* to be used again. This is to prevent the queue from flooding a key with too
* many requests while we wait to learn whether previous ones succeeded.
*/
private static readonly KEY_REUSE_DELAY = 500;
getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
public markRateLimited(keyHash: string) {
this.log.debug({ key: keyHash }, "Key rate limited");
const key = this.keys.find((k) => k.hash === keyHash)!;
const now = Date.now();
key.rateLimitedAt = now;
key.rateLimitedUntil = now + QwenKeyProvider.RATE_LIMIT_LOCKOUT;
}
public recheck(): void {
if (!this.checker || !config.checkKeys) return;
for (const key of this.keys) {
this.update(key.hash, {
isOverQuota: false,
isDisabled: false,
lastChecked: 0
});
void this.checker.checkKey(key);
}
}
/**
* Applies a short artificial delay to the key upon dequeueing, in order to
* prevent it from being immediately assigned to another request before the
* current one can be dispatched.
**/
private throttle(hash: string) {
const now = Date.now();
const key = this.keys.find((k) => k.hash === hash)!;
const currentRateLimit = key.rateLimitedUntil;
const nextRateLimit = now + QwenKeyProvider.KEY_REUSE_DELAY;
key.rateLimitedAt = now;
key.rateLimitedUntil = Math.max(currentRateLimit, nextRateLimit);
}
}
+138
View File
@@ -0,0 +1,138 @@
import { XaiKey } from "./provider";
import { logger } from "../../../logger";
import { assertNever } from "../../utils";
const CHECK_TIMEOUT = 10000;
export class XaiKeyChecker {
private log = logger.child({ module: "key-checker", service: "xai" });
constructor(private readonly update: (hash: string, key: Partial<XaiKey>) => void) {
this.log.info("XaiKeyChecker initialized");
}
public async checkKey(key: XaiKey): Promise<void> {
this.log.info({ hash: key.hash }, "Starting key validation check");
try {
const result = await this.validateKey(key);
this.handleCheckResult(key, result);
} catch (error) {
if (error instanceof Error) {
this.log.warn(
{ error: error.message, stack: error.stack, hash: key.hash },
"Failed to check key status"
);
} else {
this.log.warn(
{ error, hash: key.hash },
"Failed to check key status with unknown error"
);
}
}
}
private async validateKey(key: XaiKey): Promise<"valid" | "invalid" | "quota"> {
const controller = new AbortController();
const timeout = setTimeout(() => {
controller.abort();
this.log.warn({ hash: key.hash }, "Key validation timed out after " + CHECK_TIMEOUT + "ms");
}, CHECK_TIMEOUT);
try {
// First check API key endpoint to verify key validity
const apiResponse = await fetch("https://api.x.ai/v1/api-key", {
method: "GET",
headers: {
Authorization: `Bearer ${key.key}`,
},
signal: controller.signal,
});
if (apiResponse.status !== 200) {
// Key is invalid or has some other issue
return "invalid";
}
const apiData = await apiResponse.json();
const isBlocked = apiData.team_blocked || apiData.api_key_blocked || apiData.api_key_disabled;
if (isBlocked) {
return "invalid";
}
// If the key passed the first check, test a minimal API call to verify quota
const testResponse = await fetch("https://api.x.ai/v1/chat/completions", {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${key.key}`,
},
body: JSON.stringify({
// Deliberately malformed (empty messages, out-of-range penalty): a live key
// answers 400 cheaply, while quota/billing problems still surface as 429/403.
messages: [],
model: "grok-3-mini-latest",
frequency_penalty: -3.0,
}),
signal: controller.signal,
});
// If we get 400 or 200, the key is valid (400 might be parameter error but key is valid)
if (testResponse.status === 400 || testResponse.status === 200) {
return "valid";
} else if (testResponse.status === 429) {
return "quota";
} else if (testResponse.status === 403) {
this.log.warn(
{ status: testResponse.status, hash: key.hash },
"Forbidden (403) response, key is invalid"
);
return "invalid";
} else {
this.log.warn(
{ status: testResponse.status, hash: key.hash },
"Unexpected status code while testing key usage"
);
return "invalid";
}
} catch (error) {
if (error instanceof Error && error.name === 'AbortError') {
this.log.warn({ hash: key.hash }, "Key validation aborted");
}
throw error;
} finally {
clearTimeout(timeout);
}
}
private handleCheckResult(
key: XaiKey,
result: "valid" | "invalid" | "quota"
): void {
switch (result) {
case "valid":
this.log.info({ hash: key.hash }, "Key is valid and enabled");
this.update(key.hash, {
isDisabled: false,
lastChecked: Date.now(),
});
break;
case "invalid":
this.log.warn({ hash: key.hash }, "Key is invalid, marking as revoked");
this.update(key.hash, {
isDisabled: true,
isRevoked: true,
lastChecked: Date.now(),
});
break;
case "quota":
this.log.warn({ hash: key.hash }, "Key has exceeded its quota, disabling");
this.update(key.hash, {
isDisabled: true,
isOverQuota: true,
lastChecked: Date.now(),
});
break;
default:
assertNever(result);
}
}
}
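// --- Illustrative sketch (not part of this commit) ---------------------------
// The xAI check is a two-step probe: GET /v1/api-key for validity and block
// flags, then a deliberately malformed chat request whose status reveals quota
// state. The decision logic, restated as a pure function (same flag names as above):
function classifyXaiKey(
  apiStatus: number,
  flags: { team_blocked?: boolean; api_key_blocked?: boolean; api_key_disabled?: boolean },
  testStatus: number
): "valid" | "invalid" | "quota" {
  if (apiStatus !== 200) return "invalid";
  if (flags.team_blocked || flags.api_key_blocked || flags.api_key_disabled) return "invalid";
  if (testStatus === 200 || testStatus === 400) return "valid"; // 400 = bad params, key is fine
  if (testStatus === 429) return "quota";
  return "invalid"; // 403 and anything unexpected
}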
+167
View File
@@ -0,0 +1,167 @@
import { Key, KeyProvider, createGenericGetLockoutPeriod } from "..";
import { XaiKeyChecker } from "./checker";
import { config } from "../../../config";
import { logger } from "../../../logger";
import { XaiModelFamily } from "../../models";
export interface XaiKey extends Key {
readonly service: "xai";
readonly modelFamilies: XaiModelFamily[];
isOverQuota: boolean;
}
export class XaiKeyProvider implements KeyProvider<XaiKey> {
readonly service = "xai";
private keys: XaiKey[] = [];
private checker?: XaiKeyChecker;
private log = logger.child({ module: "key-provider", service: this.service });
constructor() {
const keyConfig = config.xaiKey?.trim();
if (!keyConfig) {
return;
}
const keys = keyConfig.split(",").map((k) => k.trim());
for (const key of keys) {
if (!key) continue;
this.keys.push({
key,
service: this.service,
modelFamilies: ["xai"],
isDisabled: false,
isRevoked: false,
promptCount: 0,
lastUsed: 0,
lastChecked: 0,
hash: this.hashKey(key),
rateLimitedAt: 0,
rateLimitedUntil: 0,
tokenUsage: {}, // per-family { input, output } token ledger
isOverQuota: false,
});
}
}
private hashKey(key: string): string {
return require("crypto").createHash("sha256").update(key).digest("hex");
}
public init() {
if (this.keys.length === 0) return;
if (!config.checkKeys) {
this.log.warn(
"Key checking is disabled. Keys will not be verified."
);
return;
}
this.checker = new XaiKeyChecker(this.update.bind(this));
for (const key of this.keys) {
void this.checker.checkKey(key);
}
}
public get(model: string): XaiKey {
const availableKeys = this.keys.filter((k) => !k.isDisabled);
if (availableKeys.length === 0) {
throw new Error("No XAI keys available");
}
const key = availableKeys[Math.floor(Math.random() * availableKeys.length)];
key.lastUsed = Date.now();
this.throttle(key.hash);
return { ...key };
}
public list(): Omit<XaiKey, "key">[] {
return this.keys.map(({ key, ...rest }) => rest);
}
public disable(key: XaiKey): void {
const found = this.keys.find((k) => k.hash === key.hash);
if (found) {
found.isDisabled = true;
}
}
public update(hash: string, update: Partial<XaiKey>): void {
const key = this.keys.find((k) => k.hash === hash);
if (key) {
Object.assign(key, update);
}
}
public available(): number {
return this.keys.filter((k) => !k.isDisabled).length;
}
public incrementUsage(keyHash: string, modelFamily: XaiModelFamily, usage: { input: number; output: number }) {
const key = this.keys.find((k) => k.hash === keyHash);
if (!key) return;
key.promptCount++;
if (!key.tokenUsage) {
key.tokenUsage = {};
}
// Xai only has one model family "xai"
if (!key.tokenUsage[modelFamily]) {
key.tokenUsage[modelFamily] = { input: 0, output: 0 };
}
const currentFamilyUsage = key.tokenUsage[modelFamily]!;
currentFamilyUsage.input += usage.input;
currentFamilyUsage.output += usage.output;
}
/**
* Upon being rate limited, a key will be locked out for this many milliseconds
* while we wait for other concurrent requests to finish.
*/
private static readonly RATE_LIMIT_LOCKOUT = 2000;
/**
* Upon assigning a key, we will wait this many milliseconds before allowing it
* to be used again. This is to prevent the queue from flooding a key with too
* many requests while we wait to learn whether previous ones succeeded.
*/
private static readonly KEY_REUSE_DELAY = 500;
getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
public markRateLimited(keyHash: string) {
this.log.debug({ key: keyHash }, "Key rate limited");
const key = this.keys.find((k) => k.hash === keyHash)!;
const now = Date.now();
key.rateLimitedAt = now;
key.rateLimitedUntil = now + XaiKeyProvider.RATE_LIMIT_LOCKOUT;
}
public recheck(): void {
if (!this.checker || !config.checkKeys) return;
for (const key of this.keys) {
this.update(key.hash, {
isOverQuota: false,
isDisabled: false,
lastChecked: 0
});
void this.checker.checkKey(key);
}
}
/**
* Applies a short artificial delay to the key upon dequeueing, in order to
* prevent it from being immediately assigned to another request before the
* current one can be dispatched.
**/
private throttle(hash: string) {
const now = Date.now();
const key = this.keys.find((k) => k.hash === hash)!;
const currentRateLimit = key.rateLimitedUntil;
const nextRateLimit = now + XaiKeyProvider.KEY_REUSE_DELAY;
key.rateLimitedAt = now;
key.rateLimitedUntil = Math.max(currentRateLimit, nextRateLimit);
}
}
+181 -10
View File
@@ -14,7 +14,12 @@ export type LLMService =
| "mistral-ai"
| "aws"
| "gcp"
| "azure";
| "azure"
| "deepseek"
| "xai"
| "cohere"
| "qwen"
| "moonshot";
export type OpenAIModelFamily =
| "turbo"
@@ -22,9 +27,24 @@ export type OpenAIModelFamily =
| "gpt4-32k"
| "gpt4-turbo"
| "gpt4o"
| "gpt41"
| "gpt41-mini"
| "gpt41-nano"
| "gpt45"
| "gpt5"
| "gpt5-mini"
| "gpt5-nano"
| "gpt5-chat-latest"
| "o1"
| "o1-mini"
| "dall-e";
| "o1-pro"
| "o3-pro"
| "o3-mini"
| "o3"
| "o4-mini"
| "codex-mini"
| "dall-e"
| "gpt-image";
export type AnthropicModelFamily = "claude" | "claude-opus";
export type GoogleAIModelFamily =
| "gemini-flash"
@@ -39,6 +59,12 @@ export type AwsBedrockModelFamily = `aws-${
| MistralAIModelFamily}`;
export type GcpModelFamily = "gcp-claude" | "gcp-claude-opus";
export type AzureOpenAIModelFamily = `azure-${OpenAIModelFamily}`;
export type DeepseekModelFamily = "deepseek";
export type XaiModelFamily = "xai";
export type CohereModelFamily = "cohere";
export type QwenModelFamily = "qwen";
export type MoonshotModelFamily = "moonshot";
export type ModelFamily =
| OpenAIModelFamily
| AnthropicModelFamily
@@ -46,19 +72,44 @@ export type ModelFamily =
| MistralAIModelFamily
| AwsBedrockModelFamily
| GcpModelFamily
| AzureOpenAIModelFamily;
| AzureOpenAIModelFamily
| DeepseekModelFamily
| XaiModelFamily
| CohereModelFamily
| QwenModelFamily
| MoonshotModelFamily;
export const MODEL_FAMILIES = (<A extends readonly ModelFamily[]>(
arr: A & ([ModelFamily] extends [A[number]] ? unknown : never)
) => arr)([
"moonshot",
"qwen",
"cohere",
"xai",
"deepseek",
"turbo",
"gpt4",
"gpt4-32k",
"gpt4-turbo",
"gpt4o",
"gpt45",
"gpt41",
"gpt41-mini",
"gpt41-nano",
"gpt5",
"gpt5-mini",
"gpt5-nano",
"gpt5-chat-latest",
"o1",
"o1-mini",
"o1-pro",
"o3-pro",
"o3-mini",
"o3",
"o4-mini",
"codex-mini",
"dall-e",
"gpt-image",
"claude",
"claude-opus",
"gemini-flash",
@@ -81,9 +132,24 @@ export const MODEL_FAMILIES = (<A extends readonly ModelFamily[]>(
"azure-gpt4-32k",
"azure-gpt4-turbo",
"azure-gpt4o",
"azure-gpt45",
"azure-gpt41",
"azure-gpt41-mini",
"azure-gpt41-nano",
"azure-gpt5",
"azure-gpt5-mini",
"azure-gpt5-nano",
"azure-gpt5-chat-latest",
"azure-dall-e",
"azure-o1",
"azure-o1-mini",
"azure-o1-pro",
"azure-o3-pro",
"azure-o3-mini",
"azure-o3",
"azure-o4-mini",
"azure-codex-mini",
"azure-gpt-image",
] as const);
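// --- Illustrative sketch (not part of this commit) ---------------------------
// The identity-function trick above, in miniature: the conditional type makes
// the call fail to compile unless the array covers every member of the union.
type Fruit = "apple" | "banana";
const ALL_FRUITS = (<A extends readonly Fruit[]>(
  arr: A & ([Fruit] extends [A[number]] ? unknown : never)
) => arr)(["apple", "banana"] as const);
// (["apple"] as const) would be rejected: [Fruit] extends [A[number]] is false,
// the intersection collapses to `never`, and the argument no longer type-checks.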
export const LLM_SERVICES = (<A extends readonly LLMService[]>(
@@ -96,19 +162,44 @@ export const LLM_SERVICES = (<A extends readonly LLMService[]>(
"aws",
"gcp",
"azure",
"deepseek",
"xai",
"cohere",
"qwen",
"moonshot"
] as const);
export const MODEL_FAMILY_SERVICE: {
[f in ModelFamily]: LLMService;
} = {
moonshot: "moonshot",
qwen: "qwen",
cohere: "cohere",
xai: "xai",
deepseek: "deepseek",
turbo: "openai",
gpt4: "openai",
"gpt4-turbo": "openai",
"gpt4-32k": "openai",
gpt4o: "openai",
gpt45: "openai",
gpt41: "openai",
"gpt41-mini": "openai",
"gpt41-nano": "openai",
gpt5: "openai",
"gpt5-mini": "openai",
"gpt5-nano": "openai",
"gpt5-chat-latest": "openai",
"o1": "openai",
"o1-mini": "openai",
"o1-pro": "openai",
"o3-pro": "openai",
"o3-mini": "openai",
"o3": "openai",
"o4-mini": "openai",
"codex-mini": "openai",
"dall-e": "openai",
"gpt-image": "openai",
claude: "anthropic",
"claude-opus": "anthropic",
"aws-claude": "aws",
@@ -124,9 +215,24 @@ export const MODEL_FAMILY_SERVICE: {
"azure-gpt4-32k": "azure",
"azure-gpt4-turbo": "azure",
"azure-gpt4o": "azure",
"azure-gpt45": "azure",
"azure-gpt41": "azure",
"azure-gpt41-mini": "azure",
"azure-gpt41-nano": "azure",
"azure-gpt5": "azure",
"azure-gpt5-mini": "azure",
"azure-gpt5-nano": "azure",
"azure-gpt5-chat-latest": "azure",
"azure-dall-e": "azure",
"azure-o1": "azure",
"azure-o1-mini": "azure",
"azure-o1-pro": "azure",
"azure-o3-pro": "azure",
"azure-o3-mini": "azure",
"azure-o3": "azure",
"azure-o4-mini": "azure",
"azure-codex-mini": "azure",
"azure-gpt-image": "azure",
"gemini-flash": "google-ai",
"gemini-pro": "google-ai",
"gemini-ultra": "google-ai",
@@ -136,9 +242,18 @@ export const MODEL_FAMILY_SERVICE: {
"mistral-large": "mistral-ai",
};
export const IMAGE_GEN_MODELS: ModelFamily[] = ["dall-e", "azure-dall-e"];
export const IMAGE_GEN_MODELS: ModelFamily[] = ["dall-e", "azure-dall-e", "gpt-image", "azure-gpt-image"];
export const OPENAI_MODEL_FAMILY_MAP: { [regex: string]: OpenAIModelFamily } = {
"^gpt-image(-\\d+)?(-preview)?(-\\d{4}-\\d{2}-\\d{2})?$": "gpt-image",
"^gpt-5(-\\d{4}-\\d{2}-\\d{2})?$": "gpt5",
"^gpt-5-mini(-\\d{4}-\\d{2}-\\d{2})?$": "gpt5-mini",
"^gpt-5-nano(-\\d{4}-\\d{2}-\\d{2})?$": "gpt5-nano",
"^gpt-5-chat-latest(-\\d{4}-\\d{2}-\\d{2})?$": "gpt5-chat-latest",
"^gpt-4\\.5(-preview)?(-\\d{4}-\\d{2}-\\d{2})?$": "gpt45",
"^gpt-4\\.1(-\\d{4}-\\d{2}-\\d{2})?$": "gpt41",
"^gpt-4\\.1-mini(-\\d{4}-\\d{2}-\\d{2})?$": "gpt41-mini",
"^gpt-4\\.1-nano(-\\d{4}-\\d{2}-\\d{2})?$": "gpt41-nano",
"^gpt-4o(-\\d{4}-\\d{2}-\\d{2})?$": "gpt4o",
"^chatgpt-4o": "gpt4o",
"^gpt-4o-mini(-\\d{4}-\\d{2}-\\d{2})?$": "turbo", // closest match
@@ -154,7 +269,13 @@ export const OPENAI_MODEL_FAMILY_MAP: { [regex: string]: OpenAIModelFamily } = {
"^text-embedding-ada-002$": "turbo",
"^dall-e-\\d{1}$": "dall-e",
"^o1-mini(-\\d{4}-\\d{2}-\\d{2})?$": "o1-mini",
"^o1(-preview)?(-\\d{4}-\\d{2}-\\d{2})?$": "o1",
"^o1-pro(-\\d{4}-\\d{2}-\\d{2})?$": "o1-pro",
"^o3-pro(-\\d{4}-\\d{2}-\\d{2})?$": "o3-pro",
"^o1(-\\d{4}-\\d{2}-\\d{2})?$": "o1",
"^o3-mini(-\\d{4}-\\d{2}-\\d{2})?$": "o3-mini",
"^o3(-\\d{4}-\\d{2}-\\d{2})?$": "o3",
"^o4-mini(-\\d{4}-\\d{2}-\\d{2})?$": "o4-mini",
"^codex-mini(-latest|-\d{4}-\d{2}-\d{2})?$": "codex-mini",
};
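// --- Illustrative sketch (not part of this commit) ---------------------------
// A plausible lookup over OPENAI_MODEL_FAMILY_MAP: keys are regex sources tried
// in insertion order, first match wins. (getOpenAIModelFamily itself is not
// shown in this hunk, so this is an assumption about its behavior.)
function matchFamily(map: { [regex: string]: string }, model: string): string | undefined {
  for (const [pattern, family] of Object.entries(map)) {
    if (new RegExp(pattern).test(model)) return family;
  }
  return undefined;
}
// matchFamily(OPENAI_MODEL_FAMILY_MAP, "gpt-5-mini-2025-08-07") -> "gpt5-mini"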
export function getOpenAIModelFamily(
@@ -173,7 +294,8 @@ export function getClaudeModelFamily(model: string): AnthropicModelFamily {
}
export function getGoogleAIModelFamily(model: string): GoogleAIModelFamily {
return model.includes("ultra")
// Treat a model as Gemini Ultra only if it includes "ultra" and is NOT an Imagen model
return model.includes("ultra") && !model.includes("imagen")
? "gemini-ultra"
: model.includes("flash")
? "gemini-flash"
@@ -181,22 +303,58 @@ export function getGoogleAIModelFamily(model: string): GoogleAIModelFamily {
}
export function getMistralAIModelFamily(model: string): MistralAIModelFamily {
const prunedModel = model.replace(/-(latest|\d{4})$/, "");
const prunedModel = model.replace(/-(latest|\d{4}(-\d{2}){0,2})$/, "");
// Map pruned model IDs to their pricing tiers (premier and free share families).
switch (prunedModel) {
// Existing direct matches
case "mistral-tiny":
case "mistral-small":
case "mistral-medium":
case "mistral-large":
return prunedModel as MistralAIModelFamily;
// Premier models - Large tier ("mistral-large" itself is a direct match above)
case "pixtral-large":
return "mistral-large";
// Premier models - Medium tier ("mistral-medium-2505" prunes to the direct match above;
// "magistral-medium-latest" prunes to "magistral-medium" before reaching this switch)
case "magistral-medium":
return "mistral-medium";
// Premier models - Small tier ("pixtral-12b-2409" prunes to "pixtral-12b", handled below)
case "codestral":
case "ministral-8b":
case "mistral-embed":
case "magistral-small":
return "mistral-small";
// Premier models - Tiny tier
case "ministral-3b":
return "mistral-tiny";
// Free models - Tiny tier
case "open-mistral-7b":
return "mistral-tiny";
// Free models - Small tier ("mistral-small" and "codestral" are handled above)
case "pixtral":
case "pixtral-12b":
case "open-mistral-nemo":
case "open-mixtral-8x7b":
case "open-codestral-mamba":
case "mathstral":
return "mistral-small";
// Free models - Medium tier
case "open-mixtral-8x22b":
return "mistral-medium";
// Default to small if unknown
default:
return "mistral-small";
}
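// --- Illustrative sketch (not part of this commit) ---------------------------
// What the widened prune regex strips before the switch runs: "-latest" and
// dated suffixes of the forms "-2411", "-2024-05", and "-2024-05-31".
const prune = (model: string) => model.replace(/-(latest|\d{4}(-\d{2}){0,2})$/, "");
// prune("mistral-large-latest") -> "mistral-large"
// prune("mistral-medium-2505") -> "mistral-medium"
// prune("magistral-medium-latest") -> "magistral-medium"
// prune("pixtral-12b-2409") -> "pixtral-12b"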
@@ -263,6 +421,8 @@ export function getModelFamilyForRequest(req: Request): ModelFamily {
modelFamily = getGcpModelFamily(model);
} else if (req.service === "azure") {
modelFamily = getAzureOpenAIModelFamily(model);
} else if (req.service === "qwen") {
modelFamily = "qwen";
} else {
switch (req.outboundApi) {
case "anthropic-chat":
@@ -272,7 +432,15 @@ export function getModelFamilyForRequest(req: Request): ModelFamily {
case "openai":
case "openai-text":
case "openai-image":
modelFamily = getOpenAIModelFamily(model);
if (req.service === "deepseek") {
modelFamily = "deepseek";
} else if (req.service === "xai") {
modelFamily = "xai";
} else if (req.service === "moonshot") {
modelFamily = "moonshot";
} else {
modelFamily = getOpenAIModelFamily(model);
}
break;
case "google-ai":
modelFamily = getGoogleAIModelFamily(model);
@@ -281,6 +449,9 @@ export function getModelFamilyForRequest(req: Request): ModelFamily {
case "mistral-text":
modelFamily = getMistralAIModelFamily(model);
break;
case "openai-responses":
modelFamily = getOpenAIModelFamily(model);
break;
default:
assertNever(req.outboundApi);
}
@@ -291,4 +462,4 @@ export function getModelFamilyForRequest(req: Request): ModelFamily {
function assertNever(x: never): never {
throw new Error(`Called assertNever with argument ${x}.`);
}
}
+62
View File
@@ -0,0 +1,62 @@
import Database from 'better-sqlite3';
import { config } from '../config';
import { logger } from '../logger';
const log = logger.child({ module: 'sqlite-db' });
let db: Database.Database;
export function initSQLiteDB(): Database.Database {
if (db) {
return db;
}
const dbPath = config.sqliteUserStorePath;
if (!dbPath) {
log.error('SQLite user store DB path (SQLITE_USER_STORE_PATH) is not configured.');
throw new Error('SQLite user store DB path is not configured.');
}
log.info({ path: dbPath }, 'Initializing SQLite database for user store...');
db = new Database(dbPath);
// Enable WAL mode for better concurrency and performance.
db.pragma('journal_mode = WAL');
// Create users table
// Note: JSON fields (ip, tokenCounts, etc.) are stored as TEXT.
// Timestamps are stored as INTEGER (Unix epoch milliseconds).
db.exec(`
CREATE TABLE IF NOT EXISTS users (
token TEXT PRIMARY KEY,
ip TEXT, /* JSON string array */
nickname TEXT,
type TEXT NOT NULL CHECK(type IN ('normal', 'special', 'temporary')),
promptCount INTEGER NOT NULL DEFAULT 0,
tokenCounts TEXT, /* JSON string object */
tokenLimits TEXT, /* JSON string object */
tokenRefresh TEXT, /* JSON string object */
createdAt INTEGER NOT NULL,
lastUsedAt INTEGER,
disabledAt INTEGER,
disabledReason TEXT,
expiresAt INTEGER,
maxIps INTEGER,
adminNote TEXT,
meta TEXT /* JSON string object */
);
`);
log.info('SQLite database initialized and `users` table created/verified.');
return db;
}
export function getDB(): Database.Database {
if (!db) {
// This might happen if getDB is called before initSQLiteDB,
// though user-store should ensure init is called first.
log.warn('SQLite DB instance requested before initialization. Attempting to initialize now.');
return initSQLiteDB();
}
return db;
}
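// --- Illustrative sketch (not part of this commit) ---------------------------
// Caller-side usage of the store above: JSON fields are serialized to TEXT and
// timestamps stored as epoch milliseconds, matching the schema. The module
// path "./sqlite-db" is an assumption for illustration.
import { getDB } from "./sqlite-db";
const db = getDB();
db.prepare(
  `INSERT INTO users (token, ip, type, promptCount, tokenCounts, createdAt)
   VALUES (?, ?, ?, ?, ?, ?)`
).run(
  "example-token",
  JSON.stringify(["203.0.113.7"]), // ip: JSON string array
  "normal", // must satisfy the CHECK constraint
  0,
  JSON.stringify({ gpt5: { input: 0, output: 0 } }), // tokenCounts: JSON object
  Date.now() // createdAt: epoch ms
);
const row = db.prepare("SELECT * FROM users WHERE token = ?").get("example-token");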
+92 -67
View File
@@ -1,74 +1,99 @@
import { config } from "../config";
import { ModelFamily } from "./models";
// technically slightly underestimates, because completion tokens cost more
// than prompt tokens but we don't track those separately right now
export function getTokenCostUsd(model: ModelFamily, tokens: number) {
let cost = 0;
switch (model) {
case "gpt4o":
case "azure-gpt4o":
cost = 0.000005;
break;
case "azure-gpt4-turbo":
case "gpt4-turbo":
cost = 0.00001;
break;
case "azure-o1":
case "o1":
// Currently we do not track output tokens separately, and O1 uses
// considerably more output tokens that other models for its hidden
// reasoning. The official O1 pricing is $15/1M input tokens and $60/1M
// output tokens so we will return a higher estimate here.
cost = 0.00002;
break
case "azure-o1-mini":
case "o1-mini":
cost = 0.000005; // $3/1M input tokens, $12/1M output tokens
break
case "azure-gpt4-32k":
case "gpt4-32k":
cost = 0.00006;
break;
case "azure-gpt4":
case "gpt4":
cost = 0.00003;
break;
case "azure-turbo":
case "turbo":
cost = 0.000001;
break;
case "azure-dall-e":
cost = 0.00001;
break;
case "aws-claude":
case "gcp-claude":
case "claude":
cost = 0.000008;
break;
case "aws-claude-opus":
case "gcp-claude-opus":
case "claude-opus":
cost = 0.000015;
break;
case "aws-mistral-tiny":
case "mistral-tiny":
cost = 0.00000025;
break;
case "aws-mistral-small":
case "mistral-small":
cost = 0.0000003;
break;
case "aws-mistral-medium":
case "mistral-medium":
cost = 0.00000275;
break;
case "aws-mistral-large":
case "mistral-large":
cost = 0.000003;
break;
// Prices are per 1 million tokens.
const MODEL_PRICING: Record<ModelFamily, { input: number; output: number } | undefined> = {
"deepseek": { input: 0.55, output: 2.19 }, // DeepSeek Reasoner (standard price, input cache miss)
"xai": { input: 5.6, output: 16.8 }, // Grok: Derived from avg $14/1M (assuming 1:3 in/out ratio) - needs official pricing
"gpt41": { input: 2.00, output: 8.00 },
"azure-gpt41": { input: 2.00, output: 8.00 },
"gpt41-mini": { input: 0.40, output: 1.60 },
"azure-gpt41-mini": { input: 0.40, output: 1.60 },
"gpt41-nano": { input: 0.10, output: 0.40 },
"azure-gpt41-nano": { input: 0.10, output: 0.40 },
"gpt5": { input: 1.25, output: 10.00 },
"azure-gpt5": { input: 1.25, output: 10.00 },
"gpt5-mini": { input: 0.25, output: 2.00 },
"azure-gpt5-mini": { input: 0.25, output: 2.00 },
"gpt5-nano": { input: 0.05, output: 0.40 },
"azure-gpt5-nano": { input: 0.05, output: 0.40 },
"gpt5-chat-latest": { input: 1.25, output: 10.00 },
"azure-gpt5-chat-latest": { input: 1.25, output: 10.00 },
"gpt45": { input: 75.00, output: 150.00 }, // Example, needs verification if this model family is still current with this pricing
"azure-gpt45": { input: 75.00, output: 150.00 }, // Example, needs verification
"gpt4o": { input: 2.50, output: 10.00 },
"azure-gpt4o": { input: 2.50, output: 10.00 },
"gpt4-turbo": { input: 10.00, output: 30.00 },
"azure-gpt4-turbo": { input: 10.00, output: 30.00 },
"o1-pro": { input: 150.00, output: 600.00 },
"azure-o1-pro": { input: 150.00, output: 600.00 },
"o3-pro": { input: 20.00, output: 80.00 },
"azure-o3-pro": { input: 20.00, output: 80.00 },
"o1": { input: 15.00, output: 60.00 },
"azure-o1": { input: 15.00, output: 60.00 },
"o1-mini": { input: 1.10, output: 4.40 },
"azure-o1-mini": { input: 1.10, output: 4.40 },
"o3-mini": { input: 1.10, output: 4.40 },
"azure-o3-mini": { input: 1.10, output: 4.40 },
"o3": { input: 2.00, output: 8.00 },
"azure-o3": { input: 10.00, output: 40.00 },
"o4-mini": { input: 1.10, output: 4.40 },
"azure-o4-mini": { input: 1.10, output: 4.40 },
"codex-mini": { input: 1.50, output: 6.00 },
"azure-codex-mini": { input: 1.50, output: 6.00 },
"gpt4-32k": { input: 60.00, output: 120.00 },
"azure-gpt4-32k": { input: 60.00, output: 120.00 },
"gpt4": { input: 30.00, output: 60.00 },
"azure-gpt4": { input: 30.00, output: 60.00 },
"turbo": { input: 0.15, output: 0.60 }, // Maps to GPT-4o mini
"azure-turbo": { input: 0.15, output: 0.60 },
"dall-e": { input: 0, output: 0 }, // Pricing is per image, not token based in this context.
"azure-dall-e": { input: 0, output: 0 }, // Pricing is per image.
"gpt-image": { input: 0, output: 0 }, // Complex pricing (text, image input, image output tokens), handle separately.
"azure-gpt-image": { input: 0, output: 0 }, // Complex pricing.
"claude": { input: 3.00, output: 15.00 }, // Anthropic Claude Sonnet 4
"aws-claude": { input: 3.00, output: 15.00 },
"gcp-claude": { input: 3.00, output: 15.00 },
"claude-opus": { input: 15.00, output: 75.00 }, // Anthropic Claude Opus 4
"aws-claude-opus": { input: 15.00, output: 75.00 },
"gcp-claude-opus": { input: 15.00, output: 75.00 },
"mistral-tiny": { input: 0.04, output: 0.04 }, // Using old price if no new API price found
"aws-mistral-tiny": { input: 0.04, output: 0.04 },
"mistral-small": { input: 0.10, output: 0.30 }, // Mistral Small 3.1
"aws-mistral-small": { input: 0.10, output: 0.30 },
"mistral-medium": { input: 0.40, output: 2.00 }, // Mistral Medium 3
"aws-mistral-medium": { input: 0.40, output: 2.00 },
"mistral-large": { input: 2.00, output: 6.00 },
"aws-mistral-large": { input: 2.00, output: 6.00 },
"gemini-flash": { input: 0.15, output: 0.60 }, // Updated to Gemini 2.5 Flash Preview (text input, non-thinking output)
"gemini-pro": { input: 1.25, output: 10.00 }, // Updated to Gemini 2.5 Pro Preview (<=200k tokens)
"gemini-ultra": { input: 25.00, output: 75.00 }, // Estimated based on Gemini Pro (5-10x) and character to token conversion. Official per-token pricing needed.
// The Record<ModelFamily, ...> type ensures every family from models.ts gets an entry.
"cohere": { input: 0.15, output: 0.60 }, // Updated to Command R
"qwen": { input: 1.40, output: 2.80 }, // Qwen-plus, as an example
"moonshot": { input: 0.6, output: 2.5 }, // Moonshot kimi k2
};
export function getTokenCostDetailsUsd(model: ModelFamily, inputTokens: number, outputTokens?: number): { inputCost: number, outputCost: number, totalCost: number } {
const pricing = MODEL_PRICING[model];
if (!pricing) {
console.warn(`Pricing not found for model family: ${model}. Returning 0 cost for all components.`);
return { inputCost: 0, outputCost: 0, totalCost: 0 };
}
return cost * Math.max(0, tokens);
const costPerMillionInputTokens = pricing.input;
const costPerMillionOutputTokens = pricing.output;
const inputCost = (costPerMillionInputTokens / 1_000_000) * Math.max(0, inputTokens);
const outputCost = (costPerMillionOutputTokens / 1_000_000) * Math.max(0, outputTokens ?? 0);
return { inputCost, outputCost, totalCost: inputCost + outputCost };
}
export function getTokenCostUsd(model: ModelFamily, inputTokens: number, outputTokens?: number): number {
return getTokenCostDetailsUsd(model, inputTokens, outputTokens).totalCost;
}
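// --- Worked example (not part of this commit) --------------------------------
// Per-million arithmetic for the gpt5 entry above ($1.25/M input, $10.00/M output):
//   getTokenCostDetailsUsd("gpt5", 1_000, 500)
//   inputCost  = (1.25 / 1_000_000) * 1_000 = $0.00125
//   outputCost = (10.00 / 1_000_000) * 500  = $0.00500
//   totalCost  = $0.00625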
export function prettyTokens(tokens: number): string {
+3
View File
@@ -67,6 +67,9 @@ async function getTokenCountForMessages({
case "image":
numTokens += await getImageTokenCount(part.source.data);
break;
case "tool_use":
case "tool_result":
break;
default:
throw new Error(`Unsupported Anthropic content type.`);
}
+13 -6
View File
@@ -45,7 +45,7 @@ export async function getTokenCount(
const value = message[key as keyof OpenAIChatMessage];
if (!value) continue;
if (key === 'function_call') continue;
if (Array.isArray(value)) {
for (const item of value) {
if (item.type === "text") {
@@ -57,7 +57,7 @@ export async function getTokenCount(
}
}
} else {
textContent = value;
textContent = value as string;
}
if (textContent.length > 800000 || numTokens > 200000) {
@@ -179,9 +179,9 @@ export const DALLE_TOKENS_PER_DOLLAR = 100000;
* which we convert to tokens at a rate of 100000 tokens per dollar.
*/
export function getOpenAIImageCost(params: {
model: "dall-e-2" | "dall-e-3";
quality: "standard" | "hd";
resolution: "512x512" | "256x256" | "1024x1024" | "1024x1792" | "1792x1024";
model: "dall-e-2" | "dall-e-3" | "gpt-image-1";
quality: "standard" | "hd" | "high" | "medium" | "low" | "auto";
resolution: "512x512" | "256x256" | "1024x1024" | "1024x1792" | "1792x1024" | "1536x1024" | "1024x1536" | "auto";
n: number | null;
}) {
const { model, quality, resolution, n } = params;
@@ -208,6 +208,10 @@ export function getOpenAIImageCost(params: {
default:
throw new Error("Invalid resolution");
}
case "gpt-image-1":
// gpt-image-1 pricing is approximately $0.04 per image
// This is a simplified pricing model, adjust as needed based on official pricing
return 0.04;
default:
throw new Error("Invalid image generation model");
}
@@ -233,7 +237,10 @@ export function estimateGoogleAITokenCount(
let numTokens = 0;
for (const message of prompt) {
numTokens += tokensPerMessage;
numTokens += encoder.encode(message.parts[0].text).length;
const textPart = message.parts.find(p => 'text' in p) as { text: string } | undefined;
if (textPart) {
numTokens += encoder.encode(textPart.text).length;
}
}
numTokens += 3;
+2 -1
View File
@@ -31,7 +31,7 @@ export async function init() {
type OpenAIChatTokenCountRequest = {
prompt: OpenAIChatMessage[];
completion?: never;
service: "openai";
service: "openai" | "openai-responses";
};
type AnthropicChatTokenCountRequest = {
@@ -108,6 +108,7 @@ export async function countTokens({
};
case "openai":
case "openai-text":
case "openai-responses":
return {
...(await getOpenAITokenCount(prompt ?? completion, req.body.model)),
tokenization_duration_ms: getElapsedMs(time),
+29 -6
View File
@@ -2,11 +2,31 @@ import { ZodType, z } from "zod";
import { MODEL_FAMILIES, ModelFamily } from "../models";
import { makeOptionalPropsNullable } from "../utils";
// This just dynamically creates a Zod object type with a key for each model
// family and an optional number value.
// Schema for token counts - keeps track of input/output usage
export const tokenCountsSchema: ZodType<UserTokenCounts> = z.object(
MODEL_FAMILIES.reduce(
(acc, family) => ({ ...acc, [family]: z.number().optional().default(0) }),
(acc, family) => ({
...acc,
[family]: z
.object({
input: z.number().optional().default(0),
output: z.number().optional().default(0),
legacy_total: z.number().optional(), // pre-migration total, preserved if present
})
.optional()
.default({ input: 0, output: 0 }), // Default will not have legacy_total
}),
{} as Record<ModelFamily, ZodType<{ input: number; output: number; legacy_total?: number }>>
)
);
// Schema for token limits - simple numbers representing total quota
export const tokenLimitsSchema: ZodType<UserTokenLimits> = z.object(
MODEL_FAMILIES.reduce(
(acc, family) => ({
...acc,
[family]: z.number().optional().default(0),
}),
{} as Record<ModelFamily, ZodType<number>>
)
);
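// --- Illustrative sketch (not part of this commit) ---------------------------
// What parsing yields in practice: families missing from the input default to
// zeroed counters, and legacy_total survives only when explicitly present.
const sample = tokenCountsSchema.parse({ gpt5: { input: 100, legacy_total: 9000 } });
// sample.gpt5  -> { input: 100, output: 0, legacy_total: 9000 }
// sample.turbo -> { input: 0, output: 0 } (defaulted)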
@@ -33,12 +53,12 @@ export const UserSchema = z
* Never used; retained for backwards compatibility.
*/
tokenCount: z.any().optional(),
/** Number of tokens the user has consumed, by model family. */
/** Number of input and output tokens the user has consumed, by model family. */
tokenCounts: tokenCountsSchema,
/** Maximum number of tokens the user can consume, by model family. */
tokenLimits: tokenCountsSchema,
tokenLimits: tokenLimitsSchema,
/** User-specific token refresh amount, by model family. */
tokenRefresh: tokenCountsSchema,
tokenRefresh: tokenLimitsSchema,
/** Time at which the user was created. */
createdAt: z.number(),
/** Time at which the user last connected. */
@@ -67,6 +87,9 @@ export const UserPartialSchema = makeOptionalPropsNullable(UserSchema)
.extend({ token: z.string() });
export type UserTokenCounts = {
[K in ModelFamily]: { input: number; output: number; legacy_total?: number } | undefined;
};
export type UserTokenLimits = {
[K in ModelFamily]: number | undefined;
};
export type User = z.infer<typeof UserSchema>;

Some files were not shown because too many files have changed in this diff.