447 Commits

Author SHA1 Message Date
reanon 9c0a4fd3a7 401 is universal 2025-08-08 13:14:54 +02:00
reanon bc85a71c2a No more error user messages 2025-08-08 12:58:41 +02:00
reanon 1604246cf1 more stupid mistakes (auto reenqueue) 2025-08-08 12:45:32 +02:00
reanon 82028d14b7 stupid mistake 2025-08-08 11:47:48 +02:00
reanon f23315d233 deepseek quota - retry internally instead of throwing error 2025-08-08 11:42:59 +02:00
reanon 1bf6d6ac99 gemini wtf 2025-08-08 02:18:20 +02:00
reanon 09ce6a70d2 sam.. 2025-08-07 23:51:49 +02:00
reanon 0f8581d340 forgot o4 mini 2025-08-07 23:25:47 +02:00
reanon e8c5d06cd7 proper streaming filter 2025-08-07 23:22:34 +02:00
reanon 20c9920199 gpt5 streaming = use only verified orgs 2025-08-07 22:51:38 +02:00
reanon 253a2af13f juust in case 2025-08-07 22:23:11 +02:00
reanon 2af4a02b15 forgot some models in order listing 2025-08-07 21:54:04 +02:00
reanon c8dab8786a gpt5 2025-08-07 21:03:03 +02:00
reanon 9cc86c2d68 opus 4.1 2025-08-05 19:51:02 +02:00
reanon e974da8a58 gemini penalties 2025-07-27 07:36:48 +02:00
reanon f114469057 google is idiotic 2025-07-23 23:19:54 +02:00
reanon 6e02db4bd7 another firebase fix? 2025-07-20 04:03:11 +02:00
reanon 1f9af4374d Revert "grok update"
This reverts commit 79a7dee586
2025-07-14 15:00:34 -08:00
reanon 79a7dee586 grok update 2025-07-14 14:59:11 -08:00
reanon e1bd960bb7 moon blinded me 2025-07-15 00:08:04 +02:00
reanon 867fda430b Moon shot back 2025-07-14 21:21:41 +02:00
reanon bbd2b88503 Lets shoot the moon 2025-07-14 21:10:36 +02:00
reanon 08400db220 grok4 2025-07-10 08:18:20 +02:00
reanon 5249e1c904 gemini -exp whitelist 2025-07-09 20:14:01 +02:00
reanon c18df6a546 firebase fix? 2025-07-05 17:23:30 +02:00
reanon ceedb52478 less stupid -ultra detection 2025-06-24 22:24:20 -08:00
reanon fa13d06f45 imagegen ultra ISNT gemini-ultra 2025-06-25 08:12:15 +02:00
reanon 0c0dc09020 fix disabled users being able to get to info page 2025-06-22 10:17:24 -08:00
reanon 317ef03ab4 2.0 flash has better limits, lets continue using that 2025-06-21 07:46:34 -08:00
reanon 7def7c17e4 gemini pro base instead of preview 2025-06-21 07:44:12 -08:00
reanon e201c2cf5e Merge branch 'main' into 'main'
disable thinking budget validation on gemini

See merge request reanon/nonono!6
2025-06-18 12:25:24 -08:00
Nopm edbbf056a0 remove zod validation 2025-06-18 17:20:11 -03:00
Nopm 4c214305af disable thinking budget validation on gemini 2025-06-18 17:11:39 -03:00
reanon cb8f2669ac update dep. 2025-06-17 21:42:43 +02:00
reanon ed737e43a5 Merge branch 'main' into 'main'
more quota fixes

See merge request reanon/nonono!5
2025-06-16 10:42:43 -08:00
Nopm 2bc1a7dbea moar fixes 2025-06-16 15:24:24 -03:00
Nopm 5ad22145a0 Merge branch 'main' of https://gitgud.io/reanon/nonono 2025-06-16 15:05:28 -03:00
Nopm b7ad5f1dae more quota fixes 2025-06-16 15:05:21 -03:00
reanon 2405be71c1 Merge branch 'main' into 'main'
fix quota handling with new user schema

See merge request reanon/nonono!4
2025-06-16 09:42:39 -08:00
Nopm aec3927c94 fix quota handling with new user schema 2025-06-16 11:51:39 -03:00
reanon ec82599e24 re-enqueue amazon bedrock 503 failed requests 2025-06-13 04:33:18 -08:00
Nopm 21294abd8e re-enqueue amazon bedrock 503 failed requests 2025-06-13 09:27:47 -03:00
reanon ca4a1f3252 magistral 2025-06-11 11:17:13 +02:00
reanon e0270f99ee Edit stats.ts 2025-06-11 00:49:49 -08:00
reanon 38e2980419 o3-pro 2025-06-11 10:47:48 +02:00
reanon 0102c7a6a5 o3 pricing 2025-06-10 11:53:51 -08:00
reanon b89439287e refactor: simplify Google AI model fetching by using synthetic response from existing keys 2025-06-08 13:41:29 +02:00
reanon 508bb3e08b claude error messages 2025-06-07 08:41:32 +02:00
reanon a17d087928 fix user json imports 2025-06-06 21:56:42 -08:00
Nopm 3f32a9b14d fix user json imports 2025-06-07 02:32:49 -03:00
reanon 3e11b0bf49 Merge branch 'main' into 'main'
Big update

See merge request reanon/nonono!1
2025-06-03 18:25:40 -08:00
Nopm 64d26c5c6c doc update 2025-06-03 23:19:09 -03:00
Nopm 41bc4998fc model pricing update 2025-06-03 23:09:39 -03:00
Nopm 4e3fb9d152 stop sqlite from dumping queries to console 2025-06-03 22:42:51 -03:00
Nopm 8c98fca56d fix pointless alt text when logo is empty 2025-06-03 21:49:37 -03:00
Nopm 2389b30e68 doc update 2025-06-03 21:47:39 -03:00
Nopm c066a7d46b password based service info auth (better than the first one we had) 2025-06-03 21:44:43 -03:00
Nopm 7b3cf409e4 google is dumb 2025-06-03 21:05:12 -03:00
Nopm 74cbafbb3b pro-exp BEGONE 2025-06-03 20:57:51 -03:00
Nopm 0411b4c3a6 I should have made all these commits separately but oops 2025-06-03 20:14:07 -03:00
reanon 5988cd7e45 dont false revoke ratelimited gemini 2025-05-29 19:15:41 +02:00
reanon f80873ef8a fuck if I know (aws bs) 2025-05-25 21:31:41 +02:00
reanon 45c0b99f20 Fuckoff jew 2025-05-24 21:04:39 -08:00
reanon 692da2b457 I hate aws v2 2025-05-24 20:07:59 +02:00
reanon 32bc797216 Edit claude-models.ts 2025-05-22 23:12:03 -08:00
reanon 1a6ce7ea04 I hate aws v2 2025-05-23 08:48:36 +02:00
reanon fdba7cd7e4 I hate aws 2025-05-23 08:44:37 +02:00
reanon 64d2f78526 fix my fuckup 2025-05-23 05:39:19 +02:00
reanon 566d42da07 hopefully last edit 2025-05-23 05:30:38 +02:00
reanon 74bb88daa3 forgot 2025-05-23 05:18:32 +02:00
reanon 2ea5fdf902 fix 2025-05-23 05:06:05 +02:00
reanon d5ec6fe1f9 aws opus shown 2025-05-23 05:01:23 +02:00
reanon ce9c8ec8b6 aws claude show variant sonnet 2025-05-23 00:16:53 +02:00
reanon 29323fd7bf claude4 2025-05-23 00:00:15 +02:00
reanon af162e567a feat: add Google Search tool support and improve token counting safety 2025-05-20 22:50:55 +02:00
reanon 87c6dd90cb dunno 2025-05-20 12:24:51 +02:00
reanon 1d8b13ba70 scuffed codex-mini 2025-05-19 16:05:55 +02:00
reanon 8344fd2e2a Workaround for qwen3 think/nonthink auto ST compatibility 2025-05-19 10:54:17 +02:00
reanon 8c30088383 Check for gemini pro/invalid key fix 2025-05-19 08:14:48 +02:00
reanon dde0183d7d quick dirty qwen3 test 2025-05-17 14:03:29 -08:00
reanon d64edbb3b7 Gemini overquota handling 2025-05-15 04:39:05 +02:00
reanon 5f0b5cc4e5 fuck it, dont care about websearch anyways 2025-05-09 06:30:57 -08:00
reanon 99269b7cd6 Edit anthropic.ts 2025-05-09 06:27:23 -08:00
reanon 6870a36a6e actual last try 2025-05-09 16:18:42 +02:00
reanon 45535de6ae last try for websearch results 2025-05-09 16:08:28 +02:00
reanon 8ea6fe463b again 2025-05-09 16:02:07 +02:00
reanon 4fd5d08ed8 ui test 2025-05-09 15:55:49 +02:00
reanon 4496afe7a1 claude websearch, might redo/revert 2025-05-09 15:30:49 +02:00
reanon cc0ece32d0 Edit mistral-ai.ts 2025-05-07 09:54:40 -08:00
reanon be8accbc37 2505 2025-05-07 09:52:42 -08:00
reanon 1d6f3dbf10 strip models/ 2025-05-06 17:21:17 +02:00
reanon f2b55ebabb small cohere fix 2025-05-05 13:59:53 +02:00
reanon 6374bfdee1 no forced redirect 2025-05-05 07:54:35 +02:00
reanon eb66f6b149 turbo 2025-05-02 04:09:01 +02:00
reanon 551a13498b mega basic qwen 2025-05-02 04:06:44 +02:00
reanon 780b885aeb basic cohere 2025-05-02 01:41:20 +02:00
reanon d9645025c9 xai update 2025-04-30 21:26:03 +02:00
reanon c1cb395020 mistral update 2025-04-30 20:03:40 +02:00
reanon 80d09f470b Edit service-info.ts 2025-04-26 05:55:54 -08:00
reanon 44338652fd Update file add-key.ts 2025-04-26 04:36:12 -08:00
reanon 8ef272f8b3 Update 2 files
- /src/shared/key-management/openai/checker.ts
- /src/shared/key-management/openai/provider.ts
2025-04-26 04:28:37 -08:00
reanon 9c804c0560 Update file checker.ts 2025-04-26 04:15:47 -08:00
reanon 2dc7fda2dd I am a tard 2025-04-26 04:15:21 -08:00
reanon 68b199e712 Update file checker.ts 2025-04-26 04:12:29 -08:00
reanon 1b110d3269 Update file checker.ts 2025-04-26 04:11:24 -08:00
reanon abfde6f684 verification test again 2025-04-26 04:07:38 -08:00
reanon d2d6ff3d52 cute dog 2025-04-26 01:43:27 -08:00
reanon a5eda7685b LAST TRY 2025-04-26 01:37:01 -08:00
reanon cbca37dd77 last test for image verif 2025-04-26 01:26:50 -08:00
reanon fc55518cd1 dunno anymore 2025-04-26 11:21:04 +02:00
reanon 925a81de43 auto 2025-04-26 01:11:34 -08:00
reanon 989bfc0ca3 verif2 2025-04-26 01:08:42 -08:00
reanon a1c04234ab openai verification 2025-04-26 00:57:21 -08:00
reanon dc0e7498e8 Edit openai-image.ts 2025-04-26 00:42:09 -08:00
reanon 6628498d5e again 2025-04-26 00:37:12 -08:00
reanon 31f9b4d536 tt 2025-04-26 10:29:40 +02:00
reanon afe6ad8ac9 gpt-image fix maybe 2025-04-26 10:29:01 +02:00
reanon a16d66a45b prelim gpt-image (cant test, no access) 2025-04-25 10:38:23 +02:00
reanon 465b13e5fb Edit config.ts 2025-04-24 15:01:47 -08:00
reanon 6c8b19651d Edit config.ts 2025-04-24 14:21:44 -08:00
reanon d3292d8a76 Edit .env.example 2025-04-24 14:15:56 -08:00
reanon dab5c1bbf0 no limiting on models available, limit it in env 2025-04-24 14:13:31 -08:00
reanon 2ffce3eff8 .. 2025-04-23 20:14:36 +00:00
reanon 8197192223 yea fuck it 2025-04-23 19:49:21 +00:00
reanon 7e6857fcf5 last attempt, otherwise fuck thinking for now 2025-04-23 19:39:17 +00:00
reanon 719cbc3cfa tf is this 2025-04-23 19:36:36 +00:00
reanon 3beea5dcfc wtf 2025-04-23 19:22:46 +00:00
reanon 9213b7088b Cohee, when I catch you Cohee 2025-04-23 19:10:13 +00:00
reanon 86ed19af99 longer wait for openai check 2025-04-22 06:24:31 +02:00
reanon bb75cc668c deepseek update (automodels/checking) 2025-04-21 14:44:06 +02:00
reanon a6d095dcda xai edits 2025-04-21 10:38:09 +02:00
reanon 588aaae5d9 codebase 2025-04-21 08:24:46 +02:00
reanon 6eec7ff7e6 oops 2025-04-21 02:02:49 +00:00
reanon 272b812db3 Using weighted averages for pricing 2025-04-21 02:00:44 +00:00
reanon 0bcc0c1037 Edit stats.ts 2025-04-21 01:24:01 +00:00
reanon af58d25fb5 sloppytoppy 2025-04-21 01:17:37 +00:00
reanon 15dc2514ee Api scale is bad sometimes 2025-04-20 01:33:18 +00:00
reanon d650038f7e thinking budget according to ST 2025-04-19 02:24:33 +00:00
reanon 6efe09b62e claude checker to latest sonnet, checking interval from 6 to 24 hours 2025-04-18 22:41:02 +00:00
reanon 14a1203be7 New thinking budget for 2.5 flash 2025-04-17 21:13:36 +00:00
reanon 1e8f55f96d Revert "New 2.5 flash thinking budget parameter"
This reverts commit 2f8538519b
2025-04-17 21:12:39 +00:00
reanon 2f8538519b New 2.5 flash thinking budget parameter 2025-04-17 21:11:58 +00:00
reanon 1b7ce423a6 Edit google-ai.ts 2025-04-17 21:03:43 +00:00
reanon 799a73655c Edit google-ai.ts 2025-04-17 20:55:01 +00:00
reanon 96645ba529 Edit google-ai.ts 2025-04-17 20:15:22 +00:00
reanon de631d3d91 Edit google-ai.ts 2025-04-17 20:13:06 +00:00
reanon bf2c0dd3d9 Edit google-ai.ts 2025-04-17 20:08:01 +00:00
reanon 2415be7c51 Gemini 2.5 Flash Preview 04-17 2025-04-17 20:01:03 +00:00
reanon 4c9a3678ae Edit config.ts - o1-pro disabled by default 2025-04-17 12:09:39 +00:00
reanon 19df23f342 .. 2025-04-17 11:51:57 +00:00
reanon 85fafb8edb fix? 2025-04-17 11:47:25 +00:00
reanon 5eb4858c69 o1-pro test 2025-04-17 11:33:58 +00:00
reanon 8081d9516d Update 2 files
- /src/config.ts
- /.env.example
2025-04-17 08:13:31 +00:00
reanon 5473ef903e support -preview in the regex 2025-04-17 03:03:08 +00:00
reanon 568288c180 tookens 2025-04-17 02:40:26 +00:00
reanon 65f4e14d3b o4-mini and o3 I hope 2025-04-17 02:37:37 +00:00
reanon 6479cefe07 Update file block-zoomer-origins.ts 2025-04-15 00:37:50 +00:00
reanon 94e2c907b5 Update file block-zoomer-origins.ts 2025-04-15 00:36:16 +00:00
reanon af53fc9913 pricing 2025-04-15 00:26:08 +00:00
reanon e6cc393296 Update file block-zoomer-origins.ts 2025-04-14 23:14:07 +00:00
reanon a9811c2886 ... dumbo 2025-04-14 21:44:37 +00:00
reanon 64e07a0429 4.1 maybe 2025-04-14 21:07:38 +00:00
reanon 83676caa8b Edit xai.ts 2025-04-12 07:19:18 +00:00
reanon a76f8a3c87 Edit checker.ts 2025-04-11 10:09:13 +00:00
reanon ecae252df4 Edit checker.ts 2025-04-10 00:04:15 +00:00
reanon d951989a57 Edit xai.ts 2025-04-10 00:03:20 +00:00
reanon 9deafb445b Edit checker.ts 2025-04-08 00:27:07 +00:00
reanon ee1d8ab1a2 Revert "remove hardcoded model list"
This reverts commit c2bfcdc744
2025-04-07 23:47:47 +00:00
reanon c2bfcdc744 remove hardcoded model list 2025-04-07 23:37:43 +00:00
reanon 24b6a090d8 ... dude 2025-04-06 22:35:38 +00:00
reanon 758ccbf23b Edit README.md 2025-04-06 19:33:37 +00:00
reanon 4ad3c217a4 Update file google-ai.ts 2025-04-06 14:11:17 +00:00
reanon ab1fb89ab9 Update file key-pool.ts 2025-04-04 03:25:50 +00:00
reanon ac79935205 Update file checker.ts 2025-04-04 03:03:35 +00:00
reanon 2b7c901951 Update 3 files
- /src/shared/key-management/xai/checker.ts
- /src/shared/key-management/key-pool.ts
- /src/service-info.ts
2025-04-04 02:48:25 +00:00
reanon ad13928383 grok not xai 2025-04-04 01:32:44 +00:00
SternAnon a3869c2d67 Added gemini-2.5 2025-03-26 02:02:24 +00:00
SternAnon 6ebc2f5126 Revert "Added gemma-3-27b-it"
This reverts commit d551f86020
2025-03-14 20:22:55 +00:00
SternAnon d551f86020 Added gemma-3-27b-it 2025-03-14 19:56:58 +00:00
SternAnon 7cfaf5777e Update file checker.ts 2025-03-06 16:53:32 +00:00
SternAnon 4f6ef38222 fix xai 2 2025-03-06 16:36:36 +00:00
SternAnon d21b232a8e fixes xai 2025-03-06 16:33:12 +00:00
SternAnon 72c9516679 Update 13 files
- /src/config.ts
- /src/info-page.ts
- /src/proxy/xai.ts
- /src/proxy/middleware/request/mutators/add-key.ts
- /src/proxy/middleware/request/preprocessors/validate-context-size.ts
- /src/proxy/middleware/response/index.ts
- /src/proxy/routes.ts
- /src/service-info.ts
- /src/shared/key-management/xai/checker.ts
- /src/shared/key-management/xai/provider.ts
- /src/shared/key-management/index.ts
- /src/shared/key-management/key-pool.ts
- /src/shared/models.ts
2025-03-06 16:25:48 +00:00
user fcaad65ccb Slop AI code to unify Anthropic model list and give Anthropic-style IDs for AWS /v1/models requests, needed for e.g. big-AGI 2025-02-27 20:25:30 +00:00
user b3d4650275 Initial GPT 4.5 bringup, separate model family due to extreme price 2025-02-27 20:25:30 +00:00
user 70c7f2aae9 aws sign fix for 3.7 2025-02-25 20:59:49 +00:00
user aecc934fad untested 3.7 sonnet, treating it like another 3.5 sonnet model 2025-02-24 18:38:56 +00:00
user a8d36f832e Check tool_result images for vision 2025-02-12 14:09:22 +00:00
user c1db122016 Simplify model reassignment in GCP 2025-02-12 13:27:15 +00:00
user e9bd6127a4 merge 2025-02-12 13:27:15 +00:00
user e230e9acec Remove 3.5 Sonnet v1 from GCP checking 2025-02-12 13:27:15 +00:00
penurin 239f95e8a1 Merge branch 'patch-1' into 'main'
Fix anthropic content schema

See merge request penurin/oai-reverse-proxy!1
2025-02-12 13:18:21 +00:00
W92k6zuinOCClyWS 17475447a0 Fix anthropic content schema (penurin/oai-reverse-proxy!1) 2025-02-12 13:18:21 +00:00
user d2b37b8455 Fix Gemini key checking: old code didn't properly check for the error message. Swapped the check to use 2.0 Flash because it catches more 429 keys 2025-02-05 17:11:27 +00:00
user cec66cdc44 Newer Gemini 2.0 models in the list 2025-02-05 17:11:27 +00:00
user a5c9e95929 Add all o models to the OpenAI model list 2025-01-31 20:22:06 +00:00
user c5d4fe44e6 Fix for the timeout workaround for o1 2025-01-31 20:22:06 +00:00
user 8ed883eaff o3 mini 2025-01-31 20:22:06 +00:00
user 6de338c6ac Properly separate deepseek keys from the generic ones in service info 2025-01-25 11:06:38 +00:00
user 45576db441 [Deepseek] Properly handle over-quota keys 2025-01-25 11:59:41 +00:00
user bcc83f30d9 Properly count DS reasoning tokens and properly save them per-user 2025-01-25 11:06:38 +00:00
user e5a26215e1 Add native Gemini model list endpoint 2025-01-24 08:34:12 +00:00
user cd6cc76a46 Attempt to improve the o1 timeout hack 2025-01-22 15:23:17 +00:00
faggot 613bb789fb fix error checking o1 deployments 2025-01-22 09:59:11 +00:00
user f1c698388e [Gemini] Support the new thinking config for 2.0 Flash Thinking 2025-01-22 09:59:11 +00:00
user 75605a2bfb Add preliminary deepseek-reasoner support 2025-01-20 07:28:30 +00:00
user 58e67d40e2 Check logging for AWS keys (untested) 2025-01-20 11:28:53 +00:00
user 796b4eee47 Make the OpenAI checker properly clone orgs again, and fix the error with the liveness check 2025-01-06 06:55:41 +00:00
user 0f482e67d2 Fix OpenAI -> Google AI conversion 2025-01-05 14:02:17 +00:00
user 496ec09905 Add v1alpha support (needed for 2.0 flash thinking with the new 'thought' parameter), already used by ST 2025-01-04 19:11:02 +00:00
user f522dba6a3 Fix errors with o1-preview and o1-mini 2025-01-03 04:41:29 +00:00
user 25ba8447d9 And add display names as well 2025-01-03 04:41:29 +00:00
user 91b8c01a9d Do the same for the AWS endpoint 2025-01-03 04:41:29 +00:00
user 82b88764ba Fix Anthropic model list to be actually compatible with Anthropic API (required by some frontends), remove old models 2025-01-03 04:50:55 +00:00
user 6ea9235ff8 Actually camelCase is canon for Gemini, oops 2024-12-31 08:23:45 +00:00
user 372ad85283 Support camelCase Gemini params and validate vision 2024-12-31 10:16:04 +00:00
user c2f5d2fbf3 Add /v1/models to deepseek 2024-12-31 08:23:45 +00:00
user c264413495 Leave a comment about concurrency for "special" user tokens 2024-12-31 08:23:45 +00:00
user 8d27082ad0 Fix formatting changes with upstream 2024-12-31 08:23:45 +00:00
user e2b602fd52 Adjust chunked transfer to send 4KB (CF's buffer size) of data every 49 seconds 2024-12-31 08:23:45 +00:00
user b00fb88cab Don't overwrite the reasoning effort by default 2024-12-31 07:45:11 +00:00
user 1cc281f6fe Add automatic prefill for Deepseek - works the same way as with Claude 2024-12-31 07:45:11 +00:00
user 8f4d00ed26 Init commit, some things:
- 'Transfer-Encoding: chunked' for o1 requests to prevent CF's 100 second limit
- Better tool/function call support
- Deepseek support
- Handling system as an array for AWS Claude
- Image support for Gemini
- Better o1 support (reasoning effort, developer role, context size)
2024-12-31 00:00:00 +00:00
nai-degen 36e2430a8f adjusts gemini keychecker to trigger real generation for better rate limit detection 2024-12-07 01:31:10 -06:00
nai-degen 28447d0811 resolves server-side error when a Gemini prompt is blocked due to safety 2024-12-07 00:54:13 -06:00
nai-degen 6d54cbc785 maybe handles gemini daily key block idk 2024-12-05 15:06:29 -06:00
nai-degen 9d7a4f4b51 maybe fixes gemini's fucked error messages idk 2024-12-05 14:54:35 -06:00
nai-degen 3496a2a9bd fixes incorrect 3.5 sonnet v2 model id in gcp model reassignment 2024-12-03 19:53:56 -06:00
nai-degen 5072638ec2 attempt at fixing persistent 'invalid csrf token' error on some browsers 2024-12-03 19:49:47 -06:00
Nopm 8a325a1e0b Add 2 million Google AI context (khanon/oai-reverse-proxy!82) 2024-12-04 01:27:21 +00:00
khanon 5eeb2875b4 adds haiku 3.5 2024-11-04 18:20:19 +00:00
khanon c67dad1617 fixes AWS claude v2 2024-10-25 18:10:26 +00:00
khanon fe61745e24 fixes issue with AWS model assignment when requesting legacy claude (claude-2.1, etc) 2024-10-25 17:49:24 +00:00
nai-degen 251ea6d412 fixes typo in AWS ThrottlingException 2024-10-23 15:36:21 -05:00
nai-degen 55f7337ea4 adjusts AWS keychecker to treat rate limited models as available models 2024-10-23 15:35:32 -05:00
nai-degen f3b876887e fixes issue with AWS model name reassignment 2024-10-23 13:59:10 -05:00
nai-degen 49c578f4dc adds Sonnet 3.5v2 AWS model ID and adjusts AWS model assignment to raise error on no match 2024-10-23 13:39:34 -05:00
khanon 4190d5fef6 fixes missing comma....... 2024-10-22 16:06:39 +00:00
khanon 1644e82f25 adds Sonnet 20241022 snapshot IDs 2024-10-22 15:52:52 +00:00
nai-degen 0bbdc0b841 fixes google ai language filter and updates readme 2024-10-18 23:15:02 -05:00
nai-degen c4a633a5d6 fixes gcp oauth2 token refresh not updating cloned key instance 2024-10-18 22:41:24 -05:00
nai-degen 0c6ec3254f finally DOES something about broken GCP streaming, boebeitfully 2024-10-12 20:10:59 -05:00
nai-degen 13aa55cd3d handles gemini ai test message from sillytavern 2024-10-12 09:01:08 -05:00
nai-degen ba4532b38d more fixes for annoying gemini API design that allows arrays or single objects for contents parts 2024-10-09 17:11:53 -05:00
nai-degen b57627e69b adds stripHeaders to global mutators in createQueuedProxyMiddleware 2024-10-09 16:59:12 -05:00
nai-degen 536803853a uses removeHeader instead of setHeader to empty string 2024-10-09 16:44:53 -05:00
nai-degen ad0a3c0936 removes cors/sec-fetch headers to fix venus chub with anthropic api 2024-10-09 16:33:54 -05:00
nai-degen 161f5aba3e handles sillytavern using both camel and snake-cased parameters for gemini api 2024-10-06 11:03:48 -05:00
nai-degen 514d1b7e31 fixes azure 2024-10-01 16:15:04 -05:00
nai-degen 22d7f966c6 fixes for gemini api streaming 2024-09-29 12:44:18 -05:00
nai-degen cfb6353c65 updates google ai safety settings schema 2024-09-25 21:19:17 -05:00
nai-degen a7fed3136e fixes google ai gemini 2024-09-25 15:58:52 -05:00
nai-degen 29638cf26e minor cleanup to pow challenge 2024-09-22 11:28:33 -05:00
nai-degen ee26e7be65 various improvements and fixes to PoW challenge UI and token refresh 2024-09-22 11:11:30 -05:00
nai-degen ff0d3dfdcd prevents overwriting anthropic-version header if it's already provided 2024-09-19 00:55:17 -05:00
nai-degen 81a3ae1746 maybe fixes missing anthropic version header in some cases 2024-09-19 00:50:17 -05:00
nai-degen 4dfd57fcb4 updates render dockerfile to correctly copy patches dir into build context 2024-09-16 23:39:43 -05:00
khanon d21e274358 Add configurable network interface or SOCKS/HTTP proxy for outgoing requests (khanon/oai-reverse-proxy!80) 2024-09-16 15:17:57 +00:00
nai-degen 6e97e036b2 fixes refreshed PoW tokens not actually being reactivated 2024-09-15 18:01:23 -05:00
nai-degen 7a4a16dd2f fixes chatgpt-latest missing from models endpoint 2024-09-15 06:02:35 -05:00
nai-degen f1cfa644c5 maybe fixes openai sk-svcacct keys 2024-09-13 00:55:29 -05:00
nai-degen 6a908b09cb adds preliminary openai o1 support and some improvements to openai keychecker 2024-09-12 23:03:33 -05:00
nai-degen 86772ab32a adds 503 as a 'successful' AWS keychecker response to deal with temporary outages 2024-09-11 02:42:59 -05:00
honeytree bd87ca60f7 Implement priority queue by tokens (khanon/oai-reverse-proxy!79) 2024-09-09 16:48:46 +00:00
nai-degen ac1897fd17 returns more clear proxy_note hint on AWS 503 error 2024-09-09 09:56:18 -05:00
nai-degen 2a6f85e2e2 Revert "handles AWS HTTP 503 ServiceUnavailableException similarly to 429s"
This reverts commit ffcaa23511.
2024-09-09 09:43:59 -05:00
nai-degen ffcaa23511 handles AWS HTTP 503 ServiceUnavailableException similarly to 429s 2024-09-09 08:07:53 -05:00
nai-degen 1d5b8efa23 reduces key lockout period to more quickly drain queue after AWS rate limit resolves 2024-09-08 17:17:22 -05:00
nai-degen 905273abf2 fixes aws mistral token cost estimation 2024-09-08 17:15:59 -05:00
nai-degen ac92a19946 improves reliability of inference profile detection for AWS keychecker 2024-09-07 17:36:29 -05:00
khanon 96fe974ad0 Use AWS Inference Profiles for higher rate limits (khanon/oai-reverse-proxy!78) 2024-09-01 22:55:07 +00:00
nai-degen 578615fbd2 fixes typo in new Claude system prompt schema 2024-08-30 10:23:57 -05:00
nai-degen 5dc4050e52 disable periodic GCP key rechecks to workaround keychecker bug 2024-08-29 15:25:37 -05:00
nai-degen cf615ee62c applies prettier to GCP checker 2024-08-29 15:15:56 -05:00
nai-degen ee61f9be2b removes unnecessary log from last commit 2024-08-27 23:58:32 -05:00
nai-degen 0c448cb59d fixes azure dalle using wrong rate limit and out-of-spec Retry-After header 2024-08-27 23:53:28 -05:00
nai-degen 51a9ccceb2 supports alternate claude system prompt format 2024-08-27 23:27:20 -05:00
nai-degen ce490efd7d minor adjustments to HMAC signing 2024-08-22 19:54:02 -05:00
nai-degen 5000e59a61 fix for google makersuite prompt validation/transformation 2024-08-22 14:19:48 -05:00
nai-degen d54acad6ad adds support for sonnet 8192 output tokens on anthropic api 2024-08-15 11:55:13 -05:00
nai-degen 5e1fffe07d adds chatgpt-4o-latest 2024-08-15 11:54:42 -05:00
nai-degen f7fd5f00f2 fixes esponse_format schema for mistral la plateforme 2024-08-14 14:41:47 -05:00
nai-degen 6d323f6ea1 do not transform mistral chat prompts to text when using la plateforme 2024-08-14 12:26:27 -05:00
nai-degen 2959ed3f7f fixes aws keychecker not detecting claude 2.1 2024-08-14 10:49:02 -05:00
nai-degen b58e7cb830 always applies Mistral prompt fixes on messages input 2024-08-14 10:48:55 -05:00
khanon f531272b00 Refactor AWS service code and add AWS Mistral support (khanon/oai-reverse-proxy!75) 2024-08-14 04:40:41 +00:00
nai-degen 6c45c92ea0 updates dependencies 2024-08-12 19:10:15 -05:00
nai-degen b7cd326d2a handles 'invalid subscription' 403 errors from Mistral API 2024-08-07 14:14:53 -05:00
nai-degen 6c9f302fb9 minor gultra fix 2024-08-06 18:46:49 -05:00
nai-degen 9ab1e7d0ce adds new gpt4o id 2024-08-06 13:08:25 -05:00
nai-degen 81f8dc2613 updates README.md 2024-08-05 11:33:16 -05:00
khanon 0c936e97fe Merge GCP Vertex AI implementation from cg-dot/oai-reverse-proxy (khanon/oai-reverse-proxy!72) 2024-08-05 14:27:51 +00:00
nai-degen 29ed07492e fixes info page display for gemini flash/ultra 2024-08-03 22:18:05 -05:00
nai-degen 2f7315379c adds gemini/makersuite keychecker, native endpoint, and streaming fixes 2024-08-03 21:53:32 -05:00
nai-degen e91532f4f7 handle dead makersuite keys triggering 400 error instead of 401/403 2024-08-03 19:09:50 -05:00
nai-degen ca58770458 fixes issue with PROXY_KEY when used together with proof-of-work captcha 2024-07-29 19:41:57 -05:00
nai-degen 9a3cca6b80 adds new mistral models and updates older model lists/context limits 2024-07-28 13:15:03 -05:00
nai-degen 584bb3fbc7 addresses minor issue with quota refresh UI 2024-07-28 11:54:38 -05:00
nai-degen 2aa19e5b09 adds user-specific overrides for daily quota refresh 2024-07-27 14:25:53 -05:00
nai-degen f242777596 fixes token index used as msg idx in anthropic chat-to-openai SSE transformer 2024-07-07 13:33:33 -05:00
nai-degen edc0d094e2 tries to disable quarantined aws keys 2024-06-30 05:08:27 -05:00
nai-degen 994b30dcce adjusts gemini pro model assignment 2024-06-26 13:37:23 -05:00
nai-degen e3d1ab51d1 improves handling of AWS regions with Sonnet 3.5 enabled but Sonnet 3.0 disabled 2024-06-20 12:20:38 -05:00
nai-degen ff38eda066 improves model detection for AWS Sydney region 2024-06-20 12:19:44 -05:00
nai-degen 84b917f726 fixes AWS Sonnet 3.5 key assignment bug 2024-06-20 12:00:11 -05:00
nai-degen 5871025245 fixes AWS keychecker failure caused by Sonnet 3.5 gradual rollout 2024-06-20 11:24:47 -05:00
nai-degen b4fb97ca5c fixes model id typo 2024-06-20 10:42:48 -05:00
nai-degen eb700d3da6 adds untested claude 3.5 model ids and model assignment 2024-06-20 10:34:48 -05:00
nai-degen d706d4c59d adds USER_CONCURRENCY_LIMIT environment variable 2024-06-14 22:52:16 -05:00
nai-degen 0ea43f61c2 fixes incorrect variable name in .env.example docs 2024-06-09 11:36:20 -05:00
nai-degen ca4321b4cb adjusts openai schema validation to allow null stop sequence 2024-06-07 14:29:18 -05:00
nai-degen 7660ed8b94 allows enabling vision prompts on a per-service basis 2024-06-07 12:09:43 -05:00
nai-degen 55f1bbed3b adds ipv6 mask to default ADMIN_WHITELIST 2024-06-02 20:49:18 -05:00
nai-degen 57fd17ede0 makes it easier for clients to detect proxy errors programatically 2024-05-27 15:30:28 -05:00
nai-degen 9d00b8a9de adjusts max IP error message wording 2024-05-27 08:24:56 -05:00
nai-degen 155e185c6e fixes shutdown handler fuckup 2024-05-26 15:36:54 -05:00
nai-degen a59b6555e7 redacts de3u api-key from diagnostic logs 2024-05-26 15:13:21 -05:00
scrappyanon 2d82e55d72 Sqlite backend with user event logging (khanon/oai-reverse-proxy!69) 2024-05-26 17:31:12 +00:00
nai-degen 6352df5d5a fixes mixed ipv4-ipv6 handling in cidr module 2024-05-24 02:55:11 -05:00
nai-degen 7d517a4c5f fixes Refresh Token UI incorrectly discarding expired (but refreshable) temp tokens 2024-05-22 22:18:23 -05:00
nai-degen 0418951928 tries to provide better guidance on CSRF errors 2024-05-21 13:10:54 -05:00
nai-degen 3012aa651e adds slightly less-ugly global stylesheet; improves mobile compat 2024-05-21 12:56:25 -05:00
nai-degen 1b68ad7c6f docs update 2024-05-21 12:46:51 -05:00
nai-degen 68b48428de adjusts gatekeeper module to send auth errors as fake chat completions 2024-05-21 12:44:43 -05:00
nai-degen b76db652e0 adds configurable PoW timeout and iteration count 2024-05-21 12:38:41 -05:00
nai-degen 63ab1a7685 reverts debug change that broke info page 2024-05-20 07:47:46 -05:00
nai-degen a3462e21bc adds config setting for PoW verification timeout 2024-05-19 15:17:25 -05:00
nai-degen 8d2ed23522 fixes inverted refreshtoken logic 2024-05-19 12:35:15 -05:00
khanon 205ffa69ce Temporary usertokens via proof-of-work challenge (khanon/oai-reverse-proxy!68) 2024-05-19 16:31:56 +00:00
nai-degen 930bac0072 bumps ejs package version 2024-05-17 21:46:27 -05:00
nai-degen 3ad826851c adds proper GPT4o model family for separate cost/quota tracking 2024-05-14 13:51:19 -05:00
nai-degen 6dabc82bcf adds preliminary gpt4o 2024-05-13 12:43:39 -05:00
nai-degen d3e7ef3c14 prevents leaking headers to upstream API when serving via Tailscale 2024-05-01 11:26:15 -05:00
nai-degen b1062dc9b3 minor adjustments to jsonl log backend to reduce filesize 2024-04-26 15:06:12 -05:00
nai-degen 32b623d6bc partial googleai fixes; adds jsonl file backend for promptlogger stolen from fiz 2024-04-23 03:43:38 -05:00
nai-degen 0a27345c29 upgrades firebase-admin from 11.10.1 to 12.1.0 2024-04-22 12:36:41 -05:00
nai-degen c15f07c0d8 adds OpenAI-to-AWS Claude3 compat endpoint 2024-04-17 21:23:30 -05:00
nai-degen db28e90c51 adds proper Opus model check to aws claude keychecker 2024-04-17 21:09:00 -05:00
nai-degen c0cd2c7549 adds aws opus maybe, idk cannot test 2024-04-16 11:33:44 -05:00
nai-degen 9445110727 adds gpt-4-turbo stable 2024-04-09 16:31:42 -05:00
nai-degen 34a673a80a adds option to disable multimodal prompts 2024-03-23 14:30:14 -05:00
nai-degen 8cb960e174 fixes incorrect model assignment when requesting Haiku from AWS 2024-03-21 23:21:27 -05:00
nai-degen 32fea30c91 handles Anthropic keys which cannot support multimodal requests 2024-03-20 00:03:10 -05:00
nai-degen 3f9fd25004 exempt 'special' token type from context size limits 2024-03-19 11:14:51 -05:00
nai-degen e068edcf48 adds Anthropic key tier detection and trial key display 2024-03-18 15:20:34 -05:00
nai-degen 2098948b7a reduces Anthropic keychecker frequency 2024-03-18 15:19:41 -05:00
nai-degen 7705ee58a0 minor cleanup of error-generator.ts 2024-03-18 15:18:18 -05:00
nai-degen 7c64d9209e minor refactoring of response middleware handlers 2024-03-17 22:20:39 -05:00
nai-degen 59107af3d6 minor fixes for google sheets backend for anthropic-chat 2024-03-17 12:08:11 -05:00
nai-degen 435280fa04 fixes missing system prompt on AWS anthropic-chat schema 2024-03-16 16:00:59 -05:00
nai-degen d9117bf08e fixes AWS debug log 2024-03-14 21:34:07 -05:00
nai-degen 57d9791270 fixes uncounted tokens when Response stream is prematurely closed 2024-03-14 21:32:20 -05:00
nai-degen 367ac3d075 adds ?debug=true query param to have proxy respond with transformed prompt 2024-03-14 08:16:38 -05:00
nai-degen 276a1a1d44 small fix for recurring AWS logging check 2024-03-13 20:53:21 -05:00
nai-degen 6cf029112e adds Anthropic's SOTA Haiku model; misc code cleanup 2024-03-13 20:48:05 -05:00
nai-degen 4b86802eb2 adds separate model detection for gpt-4-32k-0314 2024-03-10 19:16:11 -05:00
nai-degen 7f431de98e sets cache-control on static user images 2024-03-10 15:50:40 -05:00
nai-degen e0bf10626e removes .reverse() from image history to avoid thumbnails shifting as users browse 2024-03-10 15:12:20 -05:00
nai-degen eb55f30414 adds input prompt to imagehistory 2024-03-10 15:08:44 -05:00
nai-degen e1fb53b461 pretty-prints dall-e image metadata JSON download 2024-03-10 15:04:44 -05:00
nai-degen 7610369c6d adds dall-e full history page and metadata downloader 2024-03-10 14:53:11 -05:00
nai-degen 37f17ded60 removes OpenAI max_tokens default as that isn't aligned with the real API 2024-03-10 12:32:15 -05:00
nai-degen 96b6ea9568 adds azure-image endpoint to service info; hides unavailable endpoints 2024-03-09 13:25:50 -06:00
nai-degen cec39328a2 adds azure dall-e support 2024-03-09 13:03:50 -06:00
nai-degen cab346787c fixes regression in anthropic text > anthropic chat api translation 2024-03-08 21:16:25 -06:00
nai-degen fab404b232 refactors api transformers and adds oai->anthropic chat api translation 2024-03-08 20:59:19 -06:00
nai-degen 8d84f289b2 fixes issue with mistral-large model family not being detected 2024-03-08 17:07:25 -06:00
nai-degen 9ce10b4f6a shows more helpful errors when users' prefills are invalid during AWS streaming 2024-03-07 13:28:23 -06:00
nai-degen 96756d32f3 fixes handling of DALL-E content_policy_violation errors 2024-03-07 12:56:35 -06:00
nai-degen 1fb3eac154 maybe shows clearer AWS ValidationExceptions when users have bad prefills 2024-03-06 05:12:47 -06:00
nai-degen 8f46bd4397 handles 'this organization is disabled' error from anthropic 2024-03-06 00:42:10 -06:00
nai-degen ddf34685df adds Claude 3 Vision support 2024-03-05 18:34:10 -06:00
nai-degen ea3aae5da6 allows selecting compat model via endpoint name and makes errors less confusing 2024-03-05 05:13:22 -06:00
nai-degen 055d650c5d fixes legacy compat endpoint 2024-03-05 01:38:39 -06:00
nai-degen 2643dfea61 improves aws sonnet key detection and no keys available error messaging 2024-03-05 01:04:08 -06:00
nai-degen 434445797a fixes bad handleCompatibilityRequest middleware fallthrough 2024-03-04 23:53:13 -06:00
nai-degen 03c5c473e1 improves error handling for sillytavern 2024-03-04 22:59:32 -06:00
nai-degen 068e7a834f fixes AWS legacy models for non-streaming requests 2024-03-04 21:22:43 -06:00
nai-degen 736803ad92 enables opus by default 2024-03-04 21:11:32 -06:00
nai-degen 6b22d17c50 fixes claude-opus token usage being attributed to regular claude 2024-03-04 17:03:02 -06:00
nai-degen 51ffca480a adds AWS Claude Chat Completions and Claude 3 Sonnet support 2024-03-04 16:25:06 -06:00
nai-degen 802d847cc6 enables Claude opus by default 2024-03-04 16:21:40 -06:00
nai-degen 90ddcac55b makes claude3 compat model customizable via environment variable 2024-03-04 14:21:55 -06:00
nai-degen 36923686f6 shows claude-opus key count on service info page 2024-03-04 14:12:38 -06:00
nai-degen 1edc93dc72 adds claude-opus model family 2024-03-04 14:08:59 -06:00
nai-degen f6c124c1d3 fixes issue with preamble-required claude keys and anthropic chat 2024-03-04 14:00:25 -06:00
nai-degen 90a053d0e0 detects and removes over-quota claude keys from keypool 2024-03-04 13:42:29 -06:00
khanon db318ec237 Implement Anthropic Chat Completions endpoint and Claude 3 (khanon/oai-reverse-proxy!64) 2024-03-04 19:06:46 +00:00
nai-degen b90abbda88 spoofs response for SillyTavern test messages 2024-02-28 15:57:18 -06:00
nai-degen 93cee1db9b removes claude v1 from AWS keychecker as it has been retired 2024-02-27 15:52:09 -06:00
nai-degen bd15728743 uses explicitly set keyprovider rather than inferring via requested model 2024-02-27 10:56:50 -06:00
nai-degen 627559b729 updates mistral modelids 2024-02-26 23:55:03 -06:00
nai-degen 428e103323 allows customizing the /proxy endpoint prefix 2024-02-26 18:20:34 -06:00
nai-degen fd742fc0cb Merge remote-tracking branch 'origin/main' 2024-02-26 18:12:23 -06:00
nai-degen 5e19e2756a adds mistral-large model family, untested 2024-02-26 18:12:08 -06:00
devvnull d3f7c675e3 add pricing for Azure GPT counterparts and update Claude pricing (khanon/oai-reverse-proxy!65) 2024-02-20 03:53:26 +00:00
nai-degen 59bda40bbc handles google streaming json response format variation 2024-02-19 00:12:09 -06:00
nai-degen 68d829bceb adds Claude over-quota detection 2024-02-17 15:56:22 -06:00
nai-degen 9c03290a3d detects anthropic copyright prefill pozzing 2024-02-16 10:22:45 -06:00
nai-degen 3498584a1f removes forceModel on Google AI endpoint 2024-02-15 11:41:34 -06:00
nai-degen 21d61da62b increases max image payload size for gpt4v 2024-02-12 21:59:48 -06:00
nai-degen 35dc0f4826 fixes 'Premature close' caused by fucked up AWS unmarshaller errors 2024-02-10 14:47:14 -06:00
nai-degen a2ae9f32db handles OpenAI organization check failures due to missing API scopes 2024-02-09 10:10:22 -06:00
devvnull 0ce4582f3b Improve "\n\nHuman" prefix requirement detection for Anthropic (khanon/oai-reverse-proxy!63) 2024-02-08 16:28:11 +00:00
nai-degen bbee056114 fixes Force Key Recheck admin function for azure/aws 2024-02-07 19:54:40 -06:00
nai-degen ecc804887b uses EventStreamMarshaller from AWS SDK to hopefully handle split messages 2024-02-05 19:56:41 -06:00
nai-degen a8fd3c7240 fixes AWS Claude throttlingException handling 2024-02-04 20:48:20 -06:00
nai-degen 40240601f5 refactors SSEStreamAdapter to fix leaking decoder streams 2024-02-04 18:38:06 -06:00
nai-degen 98cea2da02 replaces eventstream lib to (hopefully) fix interrupted AWS streams 2024-02-04 17:18:28 -06:00
nai-degen c88f47d0ed fixes middleware order breaking /proxy endpoint 2024-02-04 16:21:44 -06:00
nai-degen 43106d9c7f tracks Risu userid rather than IP address on usertokens 2024-02-04 14:14:36 -06:00
nai-degen fe429a7610 adds SERVICE_INFO_PASSWORD to gate infopage behind a password 2024-02-04 14:04:46 -06:00
nai-degen 235510e588 fixes incorrect AWS Claude 2.1 max context limit 2024-02-01 20:40:15 -06:00
nai-degen 7eb6eb90ad moves api schema validators from transform-outbound-payload into shared 2024-01-29 19:38:22 -06:00
nai-degen 924db33f7e attempts to auto-convert Mistral prompts for its more strict rules 2024-01-28 17:42:23 -06:00
nai-degen 3f2f30e605 updates gpt4-v tokenizer for previous Risu change 2024-01-27 13:35:46 -06:00
nai-degen c9791acd85 makes gpt4-v input validation less strict to accomodate Risu 2024-01-27 13:24:11 -06:00
nai-degen e871b8ecf1 removes logprobs default value since it breaks gpt-4-vision 2024-01-27 12:19:24 -06:00
nai-degen 37ca98ad30 adds dark mode (infopage only currently) 2024-01-25 16:24:11 -06:00
nai-degen e6dc4475e6 fixes max context size for nu-gpt4-turbo 2024-01-25 14:07:42 -06:00
nai-degen 5e646b1c86 adds gpt-4-0125-preview and gpt-4-turbo-preview alias 2024-01-25 13:27:03 -06:00
nai-degen 6f626e623e fixes OAI trial keys bricking the dall-e queue 2024-01-25 01:47:51 -06:00
nai-degen 02a54bf4e3 fixes azure openai logprobs (actually tested this time) 2024-01-25 01:17:18 -06:00
nai-degen 79b2e5b6fd adds very basic support for OpenAI function calling 2024-01-24 16:42:26 -06:00
nai-degen 935a633325 fixes typo in Azure logprob adjustment 2024-01-24 16:03:47 -06:00
nai-degen 4a4b60ebcd handles Azure deviation from OpenAI spec on logprobs param 2024-01-24 16:01:19 -06:00
nai-degen ad465be363 fixes logprobs schema validation for turbo instruct endpoint 2024-01-24 14:31:10 -06:00
nai-degen c7a351baa8 adds support for requesting logprobs from OpenAI Chat Completions API 2024-01-24 11:46:09 -06:00
nai-degen ba8b052b17 adds bindAddress to omitted config keys 2024-01-18 04:14:15 -06:00
nai-degen e813cd9d22 default claude 2.1 instead of 1.3 in openai compat endpoint since 1.3 is not accessible on all keys 2024-01-18 04:14:15 -06:00
nai-degen 4c2a2c1e6c improves handle-streamed-response comments/docs [skip-ci] 2024-01-18 04:14:15 -06:00
nai-degen f1d927fa62 updates README with building/forking info [skip-ci] 2024-01-15 11:46:09 -06:00
nai-degen ad6e5224e3 allows binding to loopback interface via app config instead of only docker 2024-01-15 11:32:26 -06:00
nai-degen 85d89bdb9f fixes CI image tagging on main branch 2024-01-15 01:37:50 -06:00
khanon f5e7195cc9 Add Gitlab CI and self-hosting instructions (khanon/oai-reverse-proxy!61) 2024-01-15 06:51:12 +00:00
nai-degen 81f1e2bc37 fixes broken GET models endpoint for openai/mistral 2024-01-14 05:33:24 -06:00
nai-degen c2a686f229 Revert "reduces max request body size for now"
This reverts commit 4ffa7fb12b.
2024-01-13 18:12:16 -06:00
twinkletoes 96a0f94041 Fix Mistral safe_prompt schema property (khanon/oai-reverse-proxy!60) 2024-01-14 00:11:39 +00:00
nai-degen d56043616e adds keychecker workaround for OpenAI API bug falsely returning gpt4-32k 2024-01-12 10:33:48 -06:00
nai-degen e3e06b065d fixes sourcemap dependency in package.json 2024-01-09 00:32:34 -06:00
nai-degen 1bbb515200 updates static service info 2024-01-08 23:32:25 -06:00
nai-degen a57cc4e8d4 updates dotenv 2024-01-08 23:25:02 -06:00
nai-degen 2239bead2c updates README.md 2024-01-08 19:36:35 -06:00
nai-degen 1a585ddd32 adds TRUSTED_PROXIES to .env.example 2024-01-08 16:41:30 -06:00
nai-degen be731691a1 allows configurable trust proxy setting for Render deployments 2024-01-08 16:39:28 -06:00
nai-degen c2e442e030 long overdue removal of tired in-joke 2024-01-08 11:01:44 -06:00
nai-degen d3ac3b362b trusts only one proxy hop (AWS WAF in huggingface's case) 2024-01-07 19:18:01 -06:00
210 changed files with 23679 additions and 5072 deletions
+122 -39
@@ -5,12 +5,38 @@
# All values have reasonable defaults, so you only need to change the ones you
# want to override.
# Use production mode unless you are developing locally.
NODE_ENV=production
# Detail level of diagnostic logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info
# ------------------------------------------------------------------------------
# General settings:
# The title displayed on the info page.
# SERVER_TITLE=Coom Tunnel
# URL for the image displayed on the login page.
# If not set, no image will be displayed.
# LOGIN_IMAGE_URL=https://example.com/your-logo.png
# Whether to enable the token-based or password-based login for the main info page.
# Defaults to true. Set to false to disable login and make the info page public.
# ENABLE_INFO_PAGE_LOGIN=true
# Authentication mode for the service info page. (token | password)
# If 'token', any valid user token is used (requires GATEKEEPER='user_token' mode).
# If 'password', SERVICE_INFO_PASSWORD is used.
# Defaults to 'token' if ENABLE_INFO_PAGE_LOGIN is true.
# SERVICE_INFO_AUTH_MODE=token
# Password for the service info page if SERVICE_INFO_AUTH_MODE is 'password'.
# SERVICE_INFO_PASSWORD=your-service-info-password
# The route name used to proxy requests to APIs, relative to the Web site root.
# PROXY_ENDPOINT_ROUTE=/proxy
# Text model requests allowed per minute per user.
# TEXT_MODEL_RATE_LIMIT=4
# Image model requests allowed per minute per user.
@@ -18,53 +44,90 @@
# Max number of context tokens a user can request at once.
# Increase this if your proxy allows GPT 32k or 128k context
# MAX_CONTEXT_TOKENS_OPENAI=16384
# MAX_CONTEXT_TOKENS_OPENAI=32768
# MAX_CONTEXT_TOKENS_ANTHROPIC=32768
# Max number of output tokens a user can request at once.
# MAX_OUTPUT_TOKENS_OPENAI=400
# MAX_OUTPUT_TOKENS_ANTHROPIC=400
# MAX_OUTPUT_TOKENS_OPENAI=1024
# MAX_OUTPUT_TOKENS_ANTHROPIC=1024
# Whether to show the estimated cost of consumed tokens on the info page.
# SHOW_TOKEN_COSTS=false
# Whether to automatically check API keys for validity.
# Note: CHECK_KEYS is disabled by default in local development mode, but enabled
# by default in production mode.
# Disabled by default in local development mode, but enabled in production.
# CHECK_KEYS=true
# Which model types users are allowed to access.
# The following model families are recognized:
# turbo | gpt4 | gpt4-32k | gpt4-turbo | dall-e | claude | gemini-pro | mistral-tiny | mistral-small | mistral-medium | aws-claude | azure-turbo | azure-gpt4 | azure-gpt4-32k | azure-gpt4-turbo
# By default, all models are allowed except for 'dall-e'. To allow DALL-E image
# generation, uncomment the line below and add 'dall-e' to the list.
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,claude,gemini-pro,mistral-tiny,mistral-small,mistral-medium,aws-claude,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo
# turbo | gpt4 | gpt4-32k | gpt4-turbo | gpt4o | o1 | dall-e | claude
# | claude-opus | gemini-flash | gemini-pro | gemini-ultra | mistral-tiny |
# | mistral-small | mistral-medium | mistral-large | aws-claude |
# | aws-claude-opus | gcp-claude | gcp-claude-opus | azure-turbo | azure-gpt4
# | azure-gpt4-32k | azure-gpt4-turbo | azure-gpt4o | azure-o1 | azure-dall-e
# | azure-gpt45 | azure-o1-mini | azure-o3-mini | deepseek | xai | o3 | o4-mini | gpt41 | gpt41-mini | gpt41-nano
# By default, all models are allowed
# To disallow any, uncomment the line below and edit it
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt45,gpt4-turbo,gpt4o,claude,claude-opus,gemini-flash,gemini-pro,gemini-ultra,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,gcp-claude,gcp-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o,azure-gpt45,azure-o1-mini,azure-o3-mini,deepseek
# Which services can be used to process prompts containing images via multimodal
# models. The following services are recognized:
# openai | anthropic | aws | gcp | azure | google-ai | mistral-ai | xai
# Do not enable this feature unless all users are trusted, as you will be liable
# for any user-submitted images containing illegal content.
# By default, no image services are allowed and image prompts are rejected.
# ALLOWED_VISION_SERVICES=
# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
# PROMPT_LOGGING=false
# Specifies the number of proxies or load balancers in front of the server.
# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
# For any other deployments, please see config.ts as the correct configuration
# depends on your setup. Misconfiguring this value can result in problems
# accurately tracking IP addresses and enforcing rate limits.
# TRUSTED_PROXIES=1
# Whether cookies should be set without the Secure flag, for hosts that don't
# support SSL. True by default in development, false in production.
# USE_INSECURE_COOKIES=false
# Reorganizes requests in the queue according to their token count, placing
# larger prompts further back. The penalty is determined by (promptTokens *
# TOKENS_PUNISHMENT_FACTOR). A value of 1.0 adds one second per 1000 tokens.
# When there is no queue or it is very short, the effect is negligible (this
# setting only reorders the queue, it does not artificially delay requests).
# TOKENS_PUNISHMENT_FACTOR=0.0
# Captcha verification settings. Refer to docs/pow-captcha.md for guidance.
# CAPTCHA_MODE=none
# POW_TOKEN_HOURS=24
# POW_TOKEN_MAX_IPS=2
# POW_DIFFICULTY_LEVEL=low
# POW_CHALLENGE_TIMEOUT=30
# -------------------------------------------------------------------------------
# Blocking settings:
# Allows blocking requests depending on content, referers, or IP addresses.
# This is a convenience feature; if you need more robust functionality it is
# highly recommended to put this application behind nginx or Cloudflare, as they
# will have better performance.
# IP addresses or CIDR blocks from which requests will be blocked.
# IP_BLACKLIST=10.0.0.1/24
# URLs from which requests will be blocked.
# BLOCKED_ORIGINS=reddit.com,9gag.com
# Message to show when requests are blocked.
# BLOCK_MESSAGE="You must be over the age of majority in your country to use this service."
# Destination to redirect blocked requests to.
# BLOCK_REDIRECT="https://roblox.com/"
# Comma-separated list of phrases that will be rejected. Only whole words are matched.
# Surround phrases with quotes if they contain commas.
# Avoid short or common phrases as this tests the entire prompt.
# Comma-separated list of phrases that will be rejected. Surround phrases with
# quotes if they contain commas. You can use regular expression tokens.
# Avoid overly broad phrases, as they will trigger on any match in the entire prompt.
# REJECT_PHRASES="phrase one,phrase two,"phrase three, which has a comma",phrase four"
# Message to show when requests are rejected.
# REJECT_MESSAGE="This content violates /aicg/'s acceptable use policy."
# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
# PROMPT_LOGGING=false
# The port to listen on.
# PORT=7860
# Whether cookies should be set without the Secure flag, for hosts that don't support SSL.
# USE_INSECURE_COOKIES=false
# Detail level of logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info
# REJECT_MESSAGE="You can't say that here."
# ------------------------------------------------------------------------------
# Optional settings for user management, access control, and quota enforcement:
@@ -73,8 +136,11 @@
# Which access control method to use. (none | proxy_key | user_token)
# GATEKEEPER=none
# Which persistence method to use. (memory | firebase_rtdb)
# Which persistence method to use. (memory | firebase_rtdb | sqlite)
# GATEKEEPER_STORE=memory
# If using sqlite store, path to the SQLite database file for user data.
# Defaults to data/user-store.sqlite in the project directory.
# SQLITE_USER_STORE_PATH=data/user-store.sqlite3
# Maximum number of unique IPs a user can connect from. (0 for unlimited)
# MAX_IPS_PER_USER=0
@@ -85,43 +151,60 @@
# ALLOW_NICKNAME_CHANGES=true
# Default token quotas for each model family. (0 for unlimited)
# DALL-E "tokens" are counted at a rate of 100000 tokens per US$1.00 generated,
# which is similar to the cost of GPT-4 Turbo.
# Specify as TOKEN_QUOTA_MODEL_FAMILY=value (replacing dashes with underscores).
# eg. TOKEN_QUOTA_TURBO=0, TOKEN_QUOTA_GPT4=1000000, TOKEN_QUOTA_GPT4_32K=100000
# "Tokens" for image-generation models are counted at a rate of 100000 tokens
# per US$1.00 generated, which is similar to the cost of GPT-4 Turbo.
# DALL-E 3 costs around US$0.10 per image (10000 tokens).
# See `docs/dall-e-configuration.md` for more information.
# TOKEN_QUOTA_TURBO=0
# TOKEN_QUOTA_GPT4=0
# TOKEN_QUOTA_GPT4_32K=0
# TOKEN_QUOTA_GPT4_TURBO=0
# TOKEN_QUOTA_DALL_E=0
# TOKEN_QUOTA_CLAUDE=0
# TOKEN_QUOTA_GEMINI_PRO=0
# TOKEN_QUOTA_AWS_CLAUDE=0
# How often to refresh token quotas. (hourly | daily)
# Leave unset to never automatically refresh quotas.
# QUOTA_REFRESH_PERIOD=daily
# -------------------------------------------------------------------------------
# HTTP agent settings:
# If you need to change how the proxy makes requests to other servers, such
# as when checking keys or forwarding users' requests to external services,
# you can configure an alternative HTTP agent. Otherwise the default OS settings
# will be used.
# The name of the network interface to use. The first external IPv4 address
# belonging to this interface will be used for outgoing requests.
# HTTP_AGENT_INTERFACE=enp0s3
# The URL of a proxy server to use. Supports SOCKS4, SOCKS5, HTTP, and HTTPS.
# Note that if your proxy server issues a self-signed certificate, you may need
# NODE_EXTRA_CA_CERTS set to the path to your certificate. You will need to set
# that variable in your environment, not in this file.
# HTTP_AGENT_PROXY_URL=http://test:test@127.0.0.1:8000
# ------------------------------------------------------------------------------
# Secrets and keys:
# Do not put any passwords or API keys directly in this file.
# For Huggingface, set them via the Secrets section in your Space's config UI.
# For Huggingface, set them via the Secrets section in your Space's config UI. Do not set them in .env.
# For Render, create a "secret file" called .env using the Environment tab.
# You can add multiple API keys by separating them with a comma.
# For AWS credentials, separate the access key ID, secret key, and region with a colon.
# For GCP credentials, separate the project ID, client email, region, and private key with a colon.
OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
ANTHROPIC_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
GOOGLE_AI_KEY=AIzaxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# See `docs/aws-configuration.md` for more information, there may be additional steps required to set up AWS.
AWS_CREDENTIALS=myaccesskeyid:mysecretkey:us-east-1,anotheraccesskeyid:anothersecretkey:us-west-2
# See `docs/azure-configuration.md` for more information, there may be additional steps required to set up Azure.
AZURE_CREDENTIALS=azure-resource-name:deployment-id:api-key,another-azure-resource-name:another-deployment-id:another-api-key
GCP_CREDENTIALS=project-id:client-email:region:private-key
# With proxy_key gatekeeper, the password users must provide to access the API.
# PROXY_KEY=your-secret-key
# With user_token gatekeeper, the admin password used to manage users.
# ADMIN_KEY=your-very-secret-key
# Restrict access to the admin interface to specific IP addresses, specified
# as a comma-separated list of CIDR ranges.
# ADMIN_WHITELIST=0.0.0.0/0
# With firebase_rtdb gatekeeper storage, the Firebase project credentials.
# FIREBASE_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
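The TOKENS_PUNISHMENT_FACTOR comment above describes a pure reordering: with a factor of 1.0, every 1000 prompt tokens add one second to a request's effective queue age, and nothing is artificially delayed. A minimal sketch of that arithmetic, with illustrative names rather than the proxy's actual internals:

```typescript
// Illustrative sketch of TOKENS_PUNISHMENT_FACTOR queue ordering; names are
// hypothetical, not the proxy's real queue implementation.
interface QueuedRequest {
  enqueuedAt: number; // ms since epoch
  promptTokens: number;
}

const TOKENS_PUNISHMENT_FACTOR = 1.0;

// With a factor of 1.0, 1000 prompt tokens = 1000 ms of extra effective age,
// i.e. one second per 1000 tokens, matching the comment above.
function effectiveEnqueueTime(req: QueuedRequest): number {
  const penaltyMs = req.promptTokens * TOKENS_PUNISHMENT_FACTOR;
  return req.enqueuedAt + penaltyMs;
}

// Reorders the queue only; a lone request is dispatched immediately either way.
function sortQueue(queue: QueuedRequest[]): QueuedRequest[] {
  return [...queue].sort(
    (a, b) => effectiveEnqueueTime(a) - effectiveEnqueueTime(b)
  );
}
```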
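The credential variables above also document their wire format: multiple keys separated by commas, and for AWS the access key ID, secret key, and region separated by colons. A hedged sketch of parsing that format — `parseAwsCredentials` is a hypothetical helper, not the proxy's actual key-management code (which lives under src/shared/key-management/):

```typescript
// Parses "accessKeyId:secretKey:region,anotherId:anotherSecret:us-west-2"
// per the AWS_CREDENTIALS format documented above. Illustrative only.
interface AwsCredential {
  accessKeyId: string;
  secretAccessKey: string;
  region: string;
}

function parseAwsCredentials(raw: string): AwsCredential[] {
  return raw
    .split(",")
    .map((entry) => entry.trim())
    .filter(Boolean)
    .map((entry) => {
      const [accessKeyId, secretAccessKey, region] = entry.split(":");
      if (!accessKeyId || !secretAccessKey || !region) {
        throw new Error(
          "AWS_CREDENTIALS entries must be accessKeyId:secretKey:region"
        );
      }
      return { accessKeyId, secretAccessKey, region };
    });
}

// Usage: parseAwsCredentials(process.env.AWS_CREDENTIALS ?? "")
```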
+4 -1
@@ -1,8 +1,11 @@
.env
.aider*
.env*
!.env.vault
.venv
.vscode
.idea
build
greeting.md
node_modules
.windsurfrules
http-client.private.env.json
+3 -4
@@ -1,11 +1,10 @@
{
"plugins": ["prettier-plugin-ejs"],
"overrides": [
{
"files": [
"*.ejs"
],
"files": "*.ejs",
"options": {
"printWidth": 160,
"printWidth": 120,
"bracketSameLine": true
}
}
+33
@@ -0,0 +1,33 @@
You are a Senior Full Stack Developer and an Expert in ReactJS, NextJS, JavaScript, TypeScript, HTML, CSS and modern UI/UX frameworks (e.g., TailwindCSS, Shadcn, Radix). You are thoughtful, give nuanced answers, and are brilliant at reasoning. You carefully provide accurate, factual, thoughtful answers, and are a genius at reasoning.
- Follow the user's requirements carefully & to the letter.
- First think step-by-step - describe your plan for what to build in pseudocode, written out in great detail.
- Confirm, then write code!
- Always write correct, best-practice, DRY-principle (Don't Repeat Yourself), bug-free, fully functional and working code; it should also be aligned to the rules listed below under Code Implementation Guidelines.
- Focus on easy and readable code over being performant.
- Fully implement all requested functionality.
- Leave NO todos, placeholders or missing pieces.
- Ensure code is complete! Verify it is thoroughly finalised.
- Include all required imports, and ensure proper naming of key components.
- Be concise. Minimize any other prose.
- If you think there might not be a correct answer, you say so.
- If you do not know the answer, say so, instead of guessing.
### Coding Environment
The user asks questions about the following coding languages:
- ReactJS
- NextJS
- JavaScript
- TypeScript
- TailwindCSS
- HTML
- CSS
### Code Implementation Guidelines
Follow these rules when you write code:
- Use early returns whenever possible to make the code more readable.
- Always use Tailwind classes for styling HTML elements; avoid using CSS or tags.
- Use “class:” instead of the ternary operator in class tags whenever possible.
- Use descriptive variable and function/const names. Also, event functions should be named with a “handle” prefix, like “handleClick” for onClick and “handleKeyDown” for onKeyDown.
- Implement accessibility features on elements. For example, a tag should have a tabindex=“0”, aria-label, on:click, and on:keydown, and similar attributes.
- Use consts instead of functions, for example, “const toggle = () =>”. Also, define a type if possible.
+321
@@ -0,0 +1,321 @@
# Project Codebase Guide
This document serves as a guide and index for the project codebase, designed to help developers and AI agents quickly understand its structure, components, and how to contribute.
## Table of Contents
1. [Project Overview](#project-overview)
2. [Directory Structure](#directory-structure)
3. [Core Components](#core-components)
* [Configuration (`src/config.ts`)](#configuration)
* [Server Entry Point (`src/server.ts`)](#server-entry-point)
* [Proxy Layer (`src/proxy/`)](#proxy-layer)
* [User Management (`src/user/`)](#user-management)
* [Admin Interface (`src/admin/`)](#admin-interface)
* [Shared Utilities (`src/shared/`)](#shared-utilities)
4. [Proxy Functionality](#proxy-functionality)
* [Routing (`src/proxy/routes.ts`)](#proxy-routing)
* [Supported Models & Providers](#supported-models--providers)
* [Middleware (`src/proxy/middleware/`)](#proxy-middleware)
* [Adding New Models](#adding-new-models)
* [Adding New APIs/Providers](#adding-new-apisproviders)
5. [Model Management](#model-management)
* [Model Family Definitions](#model-family-definitions)
* [Adding OpenAI Models](#adding-openai-models)
* [Model Mapping & Routing](#model-mapping--routing)
* [Service Information](#service-information)
* [Step-by-Step Guide for Adding a New Model](#step-by-step-guide-for-adding-a-new-model)
* [Model Patterns and Versioning](#model-patterns-and-versioning)
* [Response Format Handling](#response-format-handling)
6. [Key Management](#key-management)
* [Key Pool System](#key-pool-system)
* [Provider-Specific Key Management](#provider-specific-key-management)
* [Key Rotation and Health Checks](#key-rotation-and-health-checks)
7. [Data Management](#data-management)
* [Database (`src/shared/database/`)](#database)
* [File Storage (`src/shared/file-storage/`)](#file-storage)
8. [Authentication & Authorization](#authentication--authorization)
9. [Logging & Monitoring](#logging--monitoring)
10. [Deployment](#deployment)
11. [Contributing](#contributing)
## Project Overview
This project provides a proxy layer for various Large Language Models (LLMs) and potentially other AI APIs. It aims to offer a unified interface, manage API keys securely, handle rate limiting and usage tracking, and potentially add features like response caching or prompt modification.
## Directory Structure
```
.
├── .env.example # Example environment variables
├── .gitattributes # Git attributes
├── .gitignore # Git ignore rules
├── .husky/ # Git hooks
├── .prettierrc # Code formatting rules
├── CODEBASE_GUIDE.md # This file
├── README.md # Project README
├── data/ # Data files (e.g., SQLite DB)
├── docker/ # Docker configuration
├── docs/ # Documentation files
├── http-client.env.json # HTTP client environment
├── package-lock.json # NPM lock file
├── package.json # Project dependencies and scripts
├── patches/ # Patches for dependencies
├── public/ # Static assets served by the web server
├── render.yaml # Render deployment configuration
├── scripts/ # Utility scripts
├── src/ # Source code
│ ├── admin/ # Admin interface logic
│ ├── config.ts # Application configuration
│ ├── info-page.ts # Logic for the info page
│ ├── logger.ts # Logging setup
│ ├── proxy/ # Core proxy logic for different providers
│ ├── server.ts # Express server setup and main entry point
│ ├── service-info.ts # Service information logic
│ ├── shared/ # Shared utilities, types, and modules
│ └── user/ # User management logic
├── tsconfig.json # TypeScript configuration
```
## Core Components
### Configuration (`src/config.ts`)
* Loads environment variables and defines application settings.
* Contains configuration for database connections, API keys (placeholders/retrieval methods), logging levels, rate limits, etc.
* Uses `dotenv` and potentially a schema validation library (like Zod) to ensure required variables are present.
### Server Entry Point (`src/server.ts`)
* Initializes the Express application.
* Sets up core middleware (e.g., body parsing, CORS, logging).
* Mounts routers for different parts of the application (admin, user, proxy).
* Starts the HTTP server.
### Proxy Layer (`src/proxy/`)
* The heart of the application, handling requests to downstream AI APIs.
* Contains individual modules for each supported provider (e.g., `openai.ts`, `anthropic.ts`).
* Handles request transformation, authentication against the target API, and response handling.
* Uses middleware for common proxy tasks.
### User Management (`src/user/`)
* Handles user registration, login, session management, and potentially API key generation/management for end-users.
* Likely interacts with the database (`src/shared/database/`).
### Admin Interface (`src/admin/`)
* Provides an interface for administrators to manage users, monitor usage, configure settings, etc.
* May have its own set of routes and views.
### Shared Utilities (`src/shared/`)
* Contains reusable code across different modules.
* `api-schemas/`: Zod schemas for API request/response validation.
* `database/`: Database connection, schemas (e.g., Prisma), and query logic.
* `errors.ts`: Custom error classes.
* `key-management/`: Logic for managing API keys (if applicable).
* `models.ts`: Core data models/types used throughout the application.
* `prompt-logging/`: Logic for logging prompts and responses.
* `tokenization/`: Utilities for counting tokens.
* `utils.ts`: General utility functions.
## Proxy Functionality
### Proxy Routing (`src/proxy/routes.ts`)
* Defines the API endpoints for the proxy service (e.g., `/v1/chat/completions`).
* Maps incoming requests to the appropriate provider-specific handler based on the request path, headers, or body content (e.g., model requested).
* Applies relevant middleware (authentication, rate limiting, queuing, etc.).
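A rough sketch of that wiring, assuming Express (the handler and middleware names below are placeholders, not the file's actual exports):

```ts
import { Router, type RequestHandler } from "express";

// Placeholders standing in for the real modules in src/proxy/middleware/
// and src/proxy/.
declare const gatekeeper: RequestHandler;
declare const rateLimit: RequestHandler;
declare const openaiHandler: RequestHandler;
declare const anthropicHandler: RequestHandler;

const proxyRouter = Router();

// Shared middleware runs before any provider-specific handler.
proxyRouter.use(gatekeeper);
proxyRouter.use(rateLimit);

// Provider handlers are mounted under their own prefixes.
proxyRouter.use("/openai", openaiHandler);
proxyRouter.use("/anthropic", anthropicHandler);

export { proxyRouter };
```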
### Supported Models & Providers
* **OpenAI:** Handled in `src/proxy/openai.ts`. Supports models like GPT-4, GPT-3.5-turbo, as well as o-series models (o1, o1-mini, o1-pro, o3, o3-mini, o3-pro, o4-mini). Handles chat completions and potentially image generation (`src/proxy/openai-image.ts`).
* **Anthropic:** Handled in `src/proxy/anthropic.ts`. Supports Claude models. May use AWS Bedrock (`src/proxy/aws-claude.ts`) or Anthropic's direct API.
* **Google AI / Vertex AI:** Handled in `src/proxy/google-ai.ts` and `src/proxy/gcp.ts`. Supports Gemini models (gemini-flash, gemini-pro, gemini-ultra).
* **Mistral AI:** Handled in `src/proxy/mistral-ai.ts`. Supports Mistral models via their API or potentially AWS (`src/proxy/aws-mistral.ts`).
* **Azure OpenAI:** Handled in `src/proxy/azure.ts`. Provides an alternative endpoint for OpenAI models via Azure.
* **Deepseek:** Handled in `src/proxy/deepseek.ts`.
* **xAI:** Handled in `src/proxy/xai.ts`.
* **AWS (General):** `src/proxy/aws.ts` might contain shared AWS logic (e.g., authentication).
### Middleware (`src/proxy/middleware/`)
* **`gatekeeper.ts`:** Likely handles initial request validation, authentication, and authorization checks before hitting provider logic. Checks origin (`check-origin.ts`), potentially custom tokens (`check-risu-token.ts`).
* **`rate-limit.ts`:** Implements rate limiting logic, potentially per-user or per-key.
* **`queue.ts`:** Manages request queuing, possibly to handle concurrency limits or prioritize requests.
### Adding New Models
1. **Identify the Provider:** Determine if the new model belongs to an existing provider (e.g., a new OpenAI model) or a new one.
2. **Update Provider Logic (if existing):**
* Modify the relevant provider file (e.g., `src/proxy/openai.ts`).
* Update model lists or logic that selects/validates models.
* Adjust any request/response transformations if the new model has a different API schema.
* Update model information in shared files like `src/shared/models.ts` if necessary.
3. **Update Routing (if necessary):** Modify `src/proxy/routes.ts` if the new model requires a different endpoint or routing logic.
4. **Configuration:** Add any new API keys or configuration parameters to `.env.example` and `src/config.ts`.
5. **Testing:** Add unit or integration tests for the new model.
### Adding New APIs/Providers
1. **Create Provider Module:** Create a new file in `src/proxy/` (e.g., `src/proxy/new-provider.ts`).
2. **Implement Handler:**
* Write the core logic to handle requests for this provider. This typically involves:
* Receiving the standardized request from the router.
* Transforming the request into the format expected by the new provider's API.
* Authenticating with the new provider's API (fetching keys from config).
* Making the API call (consider using a robust HTTP client like `axios` or `node-fetch`).
* Handling streaming responses if applicable (using helpers from `src/shared/streaming.ts`).
* Transforming the provider's response back into a standardized format.
* Handling errors gracefully.
3. **Add Routing:**
* Import the new handler in `src/proxy/routes.ts`.
* Add new routes or modify existing routing logic to direct requests to the new handler based on model name, path, or other criteria.
* Apply necessary middleware (gatekeeper, rate limiter, queue).
4. **Create Key Management:**
* Create a new directory in `src/shared/key-management/` for the provider.
* Implement provider-specific key management (key checkers, token counters).
5. **Configuration:**
* Add configuration variables (API keys, base URLs) to `.env.example` and `src/config.ts`.
* Update `src/config.ts` to load and validate the new variables.
6. **Model Information:** Add details about the new provider and its models to `src/shared/models.ts` or similar shared locations.
7. **Tokenization (if applicable):** If token counting is needed, add or update tokenization logic in `src/shared/tokenization/`.
8. **Testing:** Implement thorough tests for the new provider integration.
9. **Documentation:** Update this guide and any other relevant documentation.
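To make the shape of a provider module concrete, here is a hypothetical skeleton (the endpoint URL, environment variable, and payload fields are placeholders, not a real provider's API):

```ts
import type { Request, Response } from "express";
import axios from "axios";

// Hypothetical skeleton for src/proxy/new-provider.ts.
export async function newProviderHandler(req: Request, res: Response) {
  try {
    // 1. Transform the standardized request into the provider's format.
    const providerPayload = {
      model: req.body.model,
      messages: req.body.messages,
    };

    // 2. Authenticate and make the API call (placeholder URL and env var).
    const response = await axios.post(
      "https://api.new-provider.example/v1/chat",
      providerPayload,
      { headers: { Authorization: `Bearer ${process.env.NEW_PROVIDER_KEY}` } }
    );

    // 3. Transform the provider response back into the standard format.
    res.json({ choices: response.data.choices });
  } catch (err) {
    // 4. Handle errors gracefully rather than leaking provider details.
    res.status(502).json({ error: "Upstream provider request failed" });
  }
}
```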
## Model Management
### Model Family Definitions
* The project uses a family-based approach to group similar models together. These are defined in `src/shared/models.ts`.
* Each model is part of a model family (e.g., "gpt4", "claude", "gemini-pro") which helps with routing, key management, and feature support.
* The `MODEL_FAMILIES` array contains all supported model families, and the `MODEL_FAMILY_SERVICE` mapping connects each family to its provider service.
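Illustratively, the definitions have roughly this shape (the families and services shown are a small, simplified subset):

```ts
type OpenAIModelFamily = "turbo" | "gpt4" | "o1";
type ModelFamily = OpenAIModelFamily | "claude" | "gemini-pro";

// Simplified subset of the real definitions in src/shared/models.ts.
const MODEL_FAMILIES: ModelFamily[] = [
  "turbo",
  "gpt4",
  "o1",
  "claude",
  "gemini-pro",
];

// Connects each family to the provider service that serves it.
const MODEL_FAMILY_SERVICE: Record<ModelFamily, string> = {
  turbo: "openai",
  gpt4: "openai",
  o1: "openai",
  claude: "anthropic",
  "gemini-pro": "google-ai",
};
```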
### Adding OpenAI Models
When adding new OpenAI models to the codebase, there are several files that must be updated:
1. **Update Model Types (`src/shared/models.ts`):**
- Add the new model to the `OpenAIModelFamily` type
- Add the model to the `MODEL_FAMILIES` array
- Add the Azure variants for the model if applicable
- Add the model to `MODEL_FAMILY_SERVICE` mapping
- Update `OPENAI_MODEL_FAMILY_MAP` with regex patterns to match the model names
2. **Update Context Size Limits (`src/proxy/middleware/request/preprocessors/validate-context-size.ts`):**
- Add regex matching for the new model
- Set the appropriate context token limit for the model
3. **Update Token Cost Tracking (`src/shared/stats.ts`):**
- Add pricing information for the new model in the `getTokenCostUsd` function
- Include both input and output prices in the comments for clarity
4. **Update Feature Support Checks (`src/proxy/openai.ts`):**
- If the model supports special features like the reasoning API parameter (`isO1Model` function), update the appropriate function
- For model feature detection, prefer using regex patterns over explicit lists when possible, as this handles date-stamped versions better
5. **Update Display Names (`src/info-page.ts`):**
- Add friendly display names for the new models in the `MODEL_FAMILY_FRIENDLY_NAME` object
6. **Update Key Management Provider Files:**
- For OpenAI keys in `src/shared/key-management/openai/provider.ts`, add token counters for the new models
- For Azure OpenAI keys in `src/shared/key-management/azure/provider.ts`, add token counters for the Azure versions
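As a sketch of what steps 1 and 3 look like for a hypothetical model (the model name, pattern, and prices below are invented, and the real `getTokenCostUsd` signature may differ):

```ts
// Step 1: map model IDs to the new family in OPENAI_MODEL_FAMILY_MAP,
// allowing optional date-stamped variants (e.g. gpt-5-mini-2025-01-01).
const OPENAI_MODEL_FAMILY_MAP: Record<string, string> = {
  "^gpt-5-mini(-\\d{4}-\\d{2}-\\d{2})?$": "gpt5-mini",
};

// Step 3: add pricing in getTokenCostUsd (signature simplified here).
// gpt-5-mini: $0.25/1M input, $2.00/1M output (placeholder prices)
function getTokenCostUsd(family: string, tokens: number, isOutput: boolean): number {
  if (family === "gpt5-mini") {
    return (tokens / 1_000_000) * (isOutput ? 2.0 : 0.25);
  }
  return 0;
}
```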
### Model Patterns and Versioning
The codebase handles several patterns for model naming and versioning:
1. **Date-stamped Models:** Many models include date stamps (e.g., `gpt-4-0125-preview`). The regex patterns in `OPENAI_MODEL_FAMILY_MAP` account for these with patterns like `^gpt-4o(-\\d{4}-\\d{2}-\\d{2})?$`.
2. **O-Series Models:** OpenAI's o-series models (o1, o1-mini, o1-pro, o3, o3-mini, o3-pro, o4-mini) follow a different naming convention. The codebase handles these with dedicated model families and regex patterns.
3. **Preview/Non-Preview Variants:** Some models have preview variants (e.g., `gpt-4.5-preview`). The regex patterns in `OPENAI_MODEL_FAMILY_MAP` account for these with patterns like `^gpt-4\\.5(-preview)?(-\\d{4}-\\d{2}-\\d{2})?$`.
When adding new models, try to follow the existing patterns for consistency.
### Response Format Handling
The codebase includes special handling for different API response formats:
1. **Chat vs. Text Completions:** There's transformation logic in `openai.ts` to convert between chat completions and text completions formats (`transformTurboInstructResponse`).
2. **Newer API Formats:** For newer APIs like the Responses API, there's transformation logic (`transformResponsesApiResponse`) to convert responses to a format compatible with existing clients.
When adding support for new models or APIs, consider whether transformation is needed to maintain compatibility with existing clients.
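A minimal sketch of what such a transform might look like, assuming a simplified Responses API payload (the upstream field names here are assumptions):

```ts
// Convert a (simplified) Responses API result into a chat-completion-shaped
// object that existing clients understand.
function transformResponsesApiResponse(upstream: { output_text: string }) {
  return {
    object: "chat.completion",
    choices: [
      {
        index: 0,
        message: { role: "assistant", content: upstream.output_text },
        finish_reason: "stop",
      },
    ],
  };
}
```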
## Key Management
### Key Pool System
The project uses a sophisticated key pool system (`src/shared/key-management/key-pool.ts`) to manage API keys for different providers. Key features include:
* **Key Selection:** The system selects the appropriate key based on model family, region preferences, and other criteria.
* **Rotation:** Keys are rotated to distribute usage and avoid hitting rate limits.
* **Health Checks:** Keys are checked periodically to ensure they're still valid and within rate limits.
### Provider-Specific Key Management
Each provider has its own key management module in `src/shared/key-management/`:
* **Key Checkers:** Each provider implements key checkers to validate keys and check their status.
* **Token Counters:** Providers implement token counting logic specific to their pricing model.
* **Models Support:** Keys are associated with specific model families they support.
When adding a new model or provider, you'll need to update or create the appropriate key management files.
### Key Rotation and Health Checks
The key pool system includes logic for:
* **Rotation Strategy:** Keys are selected based on a prioritization strategy (`prioritize-keys.ts`).
* **Disabling Unhealthy Keys:** Keys that fail health checks are temporarily disabled.
* **Rate Limit Awareness:** The system tracks usage to avoid hitting provider rate limits.
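A sketch of such a strategy, under assumed key fields (the real `prioritize-keys.ts` may use different criteria):

```ts
interface ProviderKey {
  isDisabled: boolean;      // failed a health check
  rateLimitedUntil: number; // epoch ms; 0 if not rate limited
  lastUsedAt: number;       // epoch ms
}

// Keep only healthy, non-rate-limited keys, then prefer the least
// recently used one so load is spread across the pool.
function prioritizeKeys(keys: ProviderKey[], now = Date.now()): ProviderKey[] {
  return keys
    .filter((key) => !key.isDisabled && key.rateLimitedUntil <= now)
    .sort((a, b) => a.lastUsedAt - b.lastUsedAt);
}
```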
## Data Management
### Database (`src/shared/database/`)
* Likely uses Prisma or a similar ORM.
* Defines database schemas (e.g., for users, API keys, usage logs).
* Provides functions for interacting with the database.
* Configuration is managed in `src/config.ts`.
### File Storage (`src/shared/file-storage/`)
* May be used for storing logs, cached data, or user-uploaded files.
* Could integrate with local storage or cloud providers (e.g., S3, GCS).
## Authentication & Authorization
* **User Auth:** Handled in `src/user/` potentially using sessions (`src/shared/with-session.ts`) or JWTs.
* **Proxy Auth:** The `gatekeeper.ts` middleware likely verifies incoming requests to the proxy endpoints. This could involve checking:
* Custom API keys stored in the database (`src/shared/database/`).
* Specific tokens (`check-risu-token.ts`).
* HMAC signatures (`src/shared/hmac-signing.ts`).
* Origin checks (`check-origin.ts`).
* **Downstream Auth:** Each provider module (`src/proxy/*.ts`) handles authentication with the actual AI service API using keys from the configuration.
## Logging & Monitoring
* **Logging:** Configured in `src/logger.ts`, likely using a library like `pino` or `winston`. Logs requests, errors, and important events.
* **Prompt Logging:** Specific logic for logging prompts and responses might exist in `src/shared/prompt-logging/`.
* **Stats/Monitoring:** `src/shared/stats.ts` might handle collecting and exposing application metrics.
## Deployment
* **Docker:** The project likely includes Docker configuration for containerized deployment.
* **Render:** The `render.yaml` file suggests the project is or can be deployed on Render.
* **Environment Variables:** The `.env.example` file provides a template for required environment variables in production.
## Contributing
When contributing to this project:
1. **Follow Coding Standards:** Use the established patterns and standards in the codebase. The `.prettierrc` file defines code formatting rules.
2. **Update Documentation:** Keep this guide updated when adding new components or changing existing ones.
3. **Add Tests:** Ensure your changes are tested appropriately.
4. **Update Configuration:** If your changes require new environment variables, update `.env.example`.
*This guide provides a high-level overview. For detailed information, refer to the specific source code files.*
+62 -32
@@ -1,42 +1,72 @@
# OAI Reverse Proxy
Reverse proxy server for the OpenAI and Anthropic APIs. Forwards text generation requests while rejecting administrative/billing requests. Includes optional rate limiting and prompt filtering to prevent abuse.
# OAI Reverse Proxy - just a shitty fork
Reverse proxy server for various LLM APIs.
### Table of Contents
- [What is this?](#what-is-this)
- [Why?](#why)
- [Usage Instructions](#setup-instructions)
- [Deploy to Huggingface (Recommended)](#deploy-to-huggingface-recommended)
- [Deploy to Repl.it (WIP)](#deploy-to-replit-wip)
- [Local Development](#local-development)
<!-- TOC -->
* [OAI Reverse Proxy](#oai-reverse-proxy)
* [Table of Contents](#table-of-contents)
* [What is this?](#what-is-this)
* [Features](#features)
* [Usage Instructions](#usage-instructions)
* [Personal Use (single-user)](#personal-use-single-user)
* [Updating](#updating)
* [Local Development](#local-development)
* [Self-hosting](#self-hosting)
* [Building](#building)
* [Forking](#forking)
<!-- TOC -->
## What is this?
If you would like to provide a friend access to an API via keys you own, you can use this to keep your keys safe while still allowing them to generate text with the API. You can also use this if you'd like to build a client-side application which uses the OpenAI or Anthropic APIs, but don't want to build your own backend. You should never embed your real API keys in a client-side application. Instead, you can have your frontend connect to this reverse proxy and forward requests to the downstream service.
This project allows you to run a reverse proxy server for various LLM APIs.
This keeps your keys safe and allows you to use the rate limiting and prompt filtering features of the proxy to prevent abuse.
## Why?
OpenAI keys have full account permissions. They can revoke themselves, generate new keys, modify spend quotas, etc. **You absolutely should not share them, post them publicly, nor embed them in client-side applications as they can be easily stolen.**
This proxy only forwards text generation requests to the downstream service and rejects requests which would otherwise modify your account.
---
## Features
- [x] Support for multiple APIs
- [x] [OpenAI](https://openai.com/)
- [x] [Anthropic](https://www.anthropic.com/)
- [x] [AWS Bedrock](https://aws.amazon.com/bedrock/) (Claude4 is fucked, don't care)
- [x] [Vertex AI (GCP)](https://cloud.google.com/vertex-ai/)
- [x] [Google MakerSuite/Gemini API](https://ai.google.dev/)
- [x] [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
- [x] Translation from OpenAI-formatted prompts to any other API, including streaming responses
- [x] Multiple API keys with rotation and rate limit handling
- [x] Basic user management
- [x] Simple role-based permissions
- [x] Per-model token quotas
- [x] Temporary user accounts
- [x] Event audit logging
- [x] Optional full logging of prompts and completions
- [x] Abuse detection and prevention
- [x] IP address and user token model invocation rate limits
- [x] IP blacklists
- [x] Proof-of-work challenge for access by anonymous users
## Usage Instructions
If you'd like to run your own instance of this proxy, you'll need to deploy it somewhere and configure it with your API keys. A few easy options are provided below, though you can also deploy it to any other service you'd like.
If you'd like to run your own instance of this server, you'll need to deploy it somewhere and configure it with your API keys. A few easy options are provided below, though you can also deploy it to any other service you'd like if you know what you're doing and the service supports Node.js.
### Deploy to Huggingface (Recommended)
[See here for instructions on how to deploy to a Huggingface Space.](./docs/deploy-huggingface.md)
### Deploy to Render
[See here for instructions on how to deploy to Render.com.](./docs/deploy-render.md)
## Local Development
To run the proxy locally for development or testing, install Node.js >= 18.0.0 and follow the steps below.
1. Clone the repo
2. Install dependencies with `npm install`
### Personal Use (single-user)
If you just want to run the proxy server to use yourself without hosting it for others:
1. Install [Node.js](https://nodejs.org/en/download/) >= 18.0.0
2. Clone this repository
3. Create a `.env` file in the root of the project and add your API keys. See the [.env.example](./.env.example) file for an example.
4. Start the server in development mode with `npm run start:dev`.
4. Install dependencies with `npm install`
5. Run `npm run build`
6. Run `npm start`
You can also use `npm run start:dev:tsc` to enable project-wide type checking at the cost of slower startup times. `npm run type-check` can be used to run type checking without starting the server.
#### Updating
You must re-run `npm install` and `npm run build` whenever you pull new changes from the repository.
#### Local Development
Use `npm run start:dev` to run the proxy in development mode with watch mode enabled. Use `npm run type-check` to run the type checker across the project.
### Self-hosting
[See here for instructions on how to self-host the application on your own VPS or local machine and expose it to the internet for others to use.](./docs/self-hosting.md)
**Ensure you set the `TRUSTED_PROXIES` environment variable according to your deployment.** Refer to [.env.example](./.env.example) and [config.ts](./src/config.ts) for more information.
## Building
To build the project, run `npm run build`. This will compile the TypeScript code to JavaScript and output it to the `build` directory. You should run this whenever you pull new changes from the repository.
Note that if you are trying to build the server on a very memory-constrained (<= 1GB) VPS, you may need to run the build with `NODE_OPTIONS=--max_old_space_size=2048 npm run build` to avoid running out of memory during the build process, assuming you have swap enabled. The application itself should run fine on a 512MB VPS for most reasonable traffic levels.
## Forking
If you are forking the repository on GitGud, you may wish to disable GitLab CI/CD or you will be spammed with emails about failed builds due to not having any CI runners. You can do this by going to *Settings > General > Visibility, project features, permissions* and then disabling the "CI/CD" feature.
+21
@@ -0,0 +1,21 @@
stages:
- build
build_image:
stage: build
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
script:
- |
if [ "$CI_COMMIT_REF_NAME" = "main" ]; then
TAG="latest"
else
TAG=$CI_COMMIT_REF_NAME
fi
- echo "Building image with tag $TAG"
- BASE64_AUTH=$(echo -n "$DOCKER_HUB_USERNAME:$DOCKER_HUB_ACCESS_TOKEN" | base64)
- echo "{\"auths\":{\"https://index.docker.io/v1/\":{\"auth\":\"$BASE64_AUTH\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/docker/ci/Dockerfile --destination docker.io/khanonci/oai-reverse-proxy:$TAG --build-arg CI_COMMIT_REF_NAME=$CI_COMMIT_REF_NAME --build-arg CI_COMMIT_SHA=$CI_COMMIT_SHA --build-arg CI_PROJECT_PATH=$CI_PROJECT_PATH
only:
- main
+22
@@ -0,0 +1,22 @@
FROM node:18-bullseye-slim
WORKDIR /app
COPY . .
RUN npm ci
RUN npm run build
RUN npm prune --production
EXPOSE 7860
ENV PORT=7860
ENV NODE_ENV=production
ARG CI_COMMIT_REF_NAME
ARG CI_COMMIT_SHA
ARG CI_PROJECT_PATH
ENV GITGUD_BRANCH=$CI_COMMIT_REF_NAME
ENV GITGUD_COMMIT=$CI_COMMIT_SHA
ENV GITGUD_PROJECT=$CI_PROJECT_PATH
CMD [ "npm", "start" ]
+17
@@ -0,0 +1,17 @@
# Before running this, create a .env and greeting.md file.
# Refer to .env.example for the required environment variables.
# User-generated content is stored in the data directory.
# When self-hosting, it's recommended to run this behind a reverse proxy like
# nginx or Caddy to handle SSL/TLS and rate limiting. Refer to
# docs/self-hosting.md for more information and an example nginx config.
version: '3.8'
services:
oai-reverse-proxy:
image: khanonci/oai-reverse-proxy:latest
ports:
- "127.0.0.1:7860:7860"
env_file:
- ./.env
volumes:
- ./greeting.md:/app/greeting.md
- ./data:/app/data
+1 -2
@@ -17,9 +17,8 @@ ARG GREETING_URL
RUN if [ -n "$GREETING_URL" ]; then \
curl -sL "$GREETING_URL" > greeting.md; \
fi
COPY package*.json greeting.md* ./
RUN npm install
COPY . .
RUN npm install
RUN npm run build
RUN --mount=type=secret,id=_env,dst=/etc/secrets/.env cat /etc/secrets/.env >> .env
EXPOSE 10000
+1 -1
@@ -45,7 +45,7 @@ You can also request Claude Instant, but support for this isn't fully implemente
### Supported model IDs
Users can send these model IDs to the proxy to invoke the corresponding models.
- **Claude**
- `anthropic.claude-v1` (~18k context, claude 1.3)
- `anthropic.claude-v1` (~18k context, claude 1.3 -- EOL 2024-02-28)
- `anthropic.claude-v2` (~100k context, claude 2.0)
- `anthropic.claude-v2:1` (~200k context, claude 2.1)
- **Claude Instant**
+2
@@ -1,5 +1,7 @@
# Deploy to Huggingface Space
**⚠️ This method is no longer recommended. Please use the [self-hosting instructions](./self-hosting.md) instead.**
This repository can be deployed to a [Huggingface Space](https://huggingface.co/spaces). This is a free service that allows you to run a simple server in the cloud. You can use it to safely share your OpenAI API key with a friend.
### 1. Get an API key
+5
@@ -1,4 +1,7 @@
# Deploy to Render.com
**⚠️ This method is no longer supported or recommended and may not work. Please use the [self-hosting instructions](./self-hosting.md) instead.**
Render.com offers a free tier that includes 750 hours of compute time per month. This is enough to run a single proxy instance 24/7. Instances shut down after 15 minutes without traffic but start up again automatically when a request is received. You can use something like https://app.checklyhq.com/ to ping your proxy every 15 minutes to keep it alive.
### 1. Create account
@@ -28,6 +31,8 @@ The service will be created according to the instructions in the `render.yaml` f
- For example, `OPENAI_KEY=sk-abc123`.
- Click **Save Changes**.
**IMPORTANT:** Set `TRUSTED_PROXIES=3`, otherwise users' IP addresses will not be recorded correctly (the server will see the IP address of Render's load balancer instead of the user's real IP address).
The service will automatically rebuild and deploy with the new environment variables. This will take a few minutes. The link to your deployed proxy will appear at the top of the page.
If you want to change the URL, go to the **Settings** tab of your Web Service and click the **Edit** button next to **Name**. You can also set a custom domain, though I haven't tried this yet.
+35
@@ -0,0 +1,35 @@
# Configuring the proxy for Vertex AI (GCP)
The proxy supports GCP models via the `/proxy/gcp/claude` endpoint. There are a few extra steps necessary to use GCP compared to the other supported APIs.
- [Setting keys](#setting-keys)
- [Setup Vertex AI](#setup-vertex-ai)
- [Supported model IDs](#supported-model-ids)
## Setting keys
Use the `GCP_CREDENTIALS` environment variable to set the GCP API keys.
Like other APIs, you can provide multiple keys separated by commas. Each GCP key, however, is a set of credentials including the project id, client email, region and private key. These are separated by a colon (`:`).
For example:
```
GCP_CREDENTIALS=my-first-project:xxx@yyy.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----,my-first-project2:xxx2@yyy.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----
```
## Setup Vertex AI
1. Go to [https://cloud.google.com/vertex-ai](https://cloud.google.com/vertex-ai) and sign up for a GCP account. ($150 free credits without credit card or $300 free credits with credit card, credits expire in 90 days)
2. Go to [https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com](https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com) to enable Vertex AI API.
3. Go to [https://console.cloud.google.com/vertex-ai](https://console.cloud.google.com/vertex-ai) and navigate to Model Garden to apply for access to the Claude models.
4. Create a [Service Account](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts/create?walkthrough_id=iam--create-service-account#step_index=1), and make sure to grant the role of "Vertex AI User" or "Vertex AI Administrator".
5. On the service account page you just created, create a new key and select "JSON". The JSON file will be downloaded automatically.
6. The required credential is in the JSON file you just downloaded.
## Supported model IDs
Users can send these model IDs to the proxy to invoke the corresponding models.
- **Claude**
- `claude-3-haiku@20240307`
- `claude-3-sonnet@20240229`
- `claude-3-opus@20240229`
- `claude-3-5-sonnet@20240620`
+135
@@ -0,0 +1,135 @@
# Proof-of-work Verification
You can require users to complete a proof-of-work before they can access the
proxy. This can increase the cost of denial of service attacks and slow down
automated abuse.
When configured, users access the challenge UI and request a token. The server
sends a challenge to the client, which asks the user's browser to find a
solution to the challenge that meets a certain constraint (the difficulty
level). Once the user has found a solution, they can submit it to the server
and get a user token valid for a period you specify.
The proof-of-work challenge uses the argon2id hash function.
## Configuration
To enable proof-of-work verification, set the following environment variables:
```
GATEKEEPER=user_token
CAPTCHA_MODE=proof_of_work
# Validity of the token in hours
POW_TOKEN_HOURS=24
# Max number of IPs that can use a user_token issued via proof-of-work
POW_TOKEN_MAX_IPS=2
# The difficulty level of the proof-of-work challenge. You can use one of the
# predefined levels specified below, or you can specify a custom number of
# expected hash iterations.
POW_DIFFICULTY_LEVEL=low
# The time limit for solving the challenge, in minutes
POW_CHALLENGE_TIMEOUT=30
```
## Difficulty Levels
The difficulty level controls how long, on average, it will take for a user to
solve the proof-of-work challenge. Due to randomness, the actual time can vary
significantly; lucky users may solve the challenge in a fraction of the average
time, while unlucky users may take much longer.
The difficulty level doesn't affect the speed of the hash function itself, only
the number of hashes that will need to be computed. Therefore, the time required
to complete the challenge scales linearly with the difficulty level's iteration
count.
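For a rough sense of scale: at the ~33 H/s measured for a fast desktop CPU in the Tested hash rates section below, the 'low' level's average of 200 iterations takes around six seconds, while 'extreme' (4000 iterations) takes around two minutes.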
You can adjust the difficulty level while the proxy is running from the admin
interface.
Be aware that there is a time limit for solving the challenge, set to 30
minutes by default. Above 'high' difficulty, you will probably need to increase
the limit, or users with slow devices will struggle to find a solution in
time.
### Low
- Average of 200 iterations required
- Default setting.
### Medium
- Average of 900 iterations required
### High
- Average of 1900 iterations required
### Extreme
- Average of 4000 iterations required
- Not recommended unless you are expecting very high levels of abuse
- May require increasing `POW_CHALLENGE_TIMEOUT`
### Custom
Setting `POW_DIFFICULTY_LEVEL` to an integer will use that number of iterations
as the difficulty level.
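For example, a custom level of 1500 iterations (an arbitrary value between 'medium' and 'high'):
```
POW_DIFFICULTY_LEVEL=1500
```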
## Other challenge settings
- `POW_CHALLENGE_TIMEOUT`: The time limit for solving the challenge, in minutes.
Default is 30.
- `POW_TOKEN_HOURS`: The period of time for which a user token issued via proof-
of-work can be used. Default is 24 hours. Starts when the challenge is solved.
- `POW_TOKEN_MAX_IPS`: The maximum number of unique IPs that can use a single
user token issued via proof-of-work. Default is 2.
- `POW_TOKEN_PURGE_HOURS`: The period of time after which an expired user token
issued via proof-of-work will be removed from the database. Until it is
purged, users can refresh expired tokens by completing a half-difficulty
challenge. Default is 48 hours.
- `POW_MAX_TOKENS_PER_IP`: The maximum number of active user tokens that can
be associated with a single IP address. After this limit is reached, the
oldest token will be forcibly expired when a new token is issued. Set to 0
to disable this feature. Default is 0.
## Custom argon2id parameters
You can set custom argon2id parameters for the proof-of-work challenge.
Generally, you should not need to change these unless you have a specific
reason to do so.
The listed values are the defaults.
```
ARGON2_TIME_COST=8
ARGON2_MEMORY_KB=65536
ARGON2_PARALLELISM=1
ARGON2_HASH_LENGTH=32
```
Increasing parallelism will not do much except increase memory consumption for
both the client and server, because browser proof-of-work implementations are
single-threaded. It's better to increase the time cost if you want to increase
the difficulty.
Increasing memory too much may cause memory exhaustion on some mobile devices,
particularly on iOS due to the way Safari handles WebAssembly memory allocation.
## Tested hash rates
These were measured with the default argon2id parameters listed above. These
tests were not at all scientific so take them with a grain of salt.
Safari does not like large WASM memory usage, so concurrency is limited to 4 to
avoid overallocating memory on mobile WebKit browsers. Thermal throttling can
also significantly reduce hash rates on mobile devices.
- Intel Core i9-13900K (Chrome): 33-35 H/s
- Intel Core i9-13900K (Firefox): 29-32 H/s
- Intel Core i9-13900K (Chrome, in VM limited to 4 cores): 12.2 - 13.0 H/s
- iPad Pro (M2) (Safari, 6 workers): 8.0 - 10 H/s
- Thermal throttles early. 8 cores is normal concurrency, but unstable.
- iPhone 15 Pro Max (Safari): 4.0 - 4.6 H/s
- Samsung Galaxy S10e (Chrome): 3.6 - 3.8 H/s
- This is a 2019 phone almost matching an iPhone five years newer because of
bad Safari performance.
+150
@@ -0,0 +1,150 @@
# Quick self-hosting guide
Temporary guide for self-hosting. This will be improved in the future to provide more robust instructions and options. Provided commands are for Ubuntu.
This uses prebuilt Docker images for convenience. If you want to make adjustments to the code you can instead clone the repo and follow the Local Development guide in the [README](../README.md).
## Table of Contents
- [Requirements](#requirements)
- [Running the application](#running-the-application)
- [Setting up a reverse proxy](#setting-up-a-reverse-proxy)
- [trycloudflare](#trycloudflare)
- [nginx](#nginx)
- [Example basic nginx configuration (no SSL)](#example-basic-nginx-configuration-no-ssl)
- [Example with Cloudflare SSL](#example-with-cloudflare-ssl)
- [Updating/Restarting the application](#updatingrestarting-the-application)
## Requirements
- Docker
- Docker Compose
- A VPS with at least 512MB of RAM (1GB recommended)
- A domain name
If you don't have a VPS and domain name you can use TryCloudflare to set up a temporary URL that you can share with others. See [trycloudflare](#trycloudflare) for more information.
## Running the application
- Install Docker and Docker Compose
- Create a new directory for the application
- This will contain your .env file, greeting file, and any user-generated files
- Execute the following commands:
- ```
touch .env
touch greeting.md
echo "OPENAI_KEY=your-openai-key" >> .env
curl https://gitgud.io/khanon/oai-reverse-proxy/-/raw/main/docker/docker-compose-selfhost.yml -o docker-compose.yml
```
- You can set further environment variables and keys in the `.env` file. See [.env.example](../.env.example) for a list of available options.
- You can set a custom greeting in `greeting.md`. This will be displayed on the homepage.
- Run `docker compose up -d`
You can check logs with `docker compose logs -n 100 -f`.
The provided docker-compose file listens on port 7860 but binds to localhost only. You should use a reverse proxy to expose the application to the internet as described in the next section.
## Setting up a reverse proxy
Rather than exposing the application directly to the internet, it is recommended to set up a reverse proxy. This will allow you to use HTTPS and add additional security measures.
### trycloudflare
This will give you a temporary (72 hours) URL that you can use to let others connect to your instance securely, without having to set up a reverse proxy. If you are running the server on your home network, this is probably the best option.
- Install `cloudflared` following the instructions at [try.cloudflare.com](https://try.cloudflare.com/).
- Run `cloudflared tunnel --url http://localhost:7860`
- You will be given a temporary URL that you can share with others.
If you have a VPS, you should use a proper reverse proxy like nginx instead for a more permanent solution which will allow you to use your own domain name, handle SSL, and add additional security/anti-abuse measures.
### nginx
First, install nginx.
- `sudo apt update && sudo apt install nginx`
#### Example basic nginx configuration (no SSL)
- `sudo nano /etc/nginx/sites-available/oai.conf`
- ```
server {
listen 80;
server_name example.com;
location / {
proxy_pass http://localhost:7860;
}
}
```
- Replace `example.com` with your domain name.
- Ctrl+X to exit, Y to save, Enter to confirm.
- `sudo ln -s /etc/nginx/sites-available/oai.conf /etc/nginx/sites-enabled`
- `sudo nginx -t`
- This will check the configuration file for errors.
- `sudo systemctl restart nginx`
- This will restart nginx and apply the new configuration.
#### Example with Cloudflare SSL
This allows you to use a self-signed certificate on the server, and have Cloudflare handle client SSL. You need to have a Cloudflare account and have your domain set up with Cloudflare already, pointing to your server's IP address.
- Set Cloudflare to use Full SSL mode. Since we are using a self-signed certificate, don't use Full (strict) mode.
- Create a self-signed certificate:
- `sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/ssl/private/nginx-selfsigned.key -out /etc/ssl/certs/nginx-selfsigned.crt`
- `sudo nano /etc/nginx/sites-available/oai.conf`
- ```
server {
listen 443 ssl;
server_name yourdomain.com www.yourdomain.com;
ssl_certificate /etc/ssl/certs/nginx-selfsigned.crt;
ssl_certificate_key /etc/ssl/private/nginx-selfsigned.key;
# Only allow inbound traffic from Cloudflare
allow 173.245.48.0/20;
allow 103.21.244.0/22;
allow 103.22.200.0/22;
allow 103.31.4.0/22;
allow 141.101.64.0/18;
allow 108.162.192.0/18;
allow 190.93.240.0/20;
allow 188.114.96.0/20;
allow 197.234.240.0/22;
allow 198.41.128.0/17;
allow 162.158.0.0/15;
allow 104.16.0.0/13;
allow 104.24.0.0/14;
allow 172.64.0.0/13;
allow 131.0.72.0/22;
deny all;
location / {
proxy_pass http://localhost:7860;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256';
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
}
```
- Replace `yourdomain.com` with your domain name.
- Ctrl+X to exit, Y to save, Enter to confirm.
- `sudo ln -s /etc/nginx/sites-available/oai.conf /etc/nginx/sites-enabled`
## Updating/Restarting the application
After making a `.env` change, you need to restart the application for it to take effect.
- `docker compose down`
- `docker compose up -d`
To update the application to the latest version:
- `docker compose pull`
- `docker compose down`
- `docker compose up -d`
- `docker image prune -f`
+22
@@ -12,6 +12,8 @@ Several of these features require you to set secrets in your environment. If usi
- [Memory](#memory)
- [Firebase Realtime Database](#firebase-realtime-database)
- [Firebase setup instructions](#firebase-setup-instructions)
- [SQLite Database](#sqlite-database)
- [Whitelisting admin IP addresses](#whitelisting-admin-ip-addresses)
## No user management (`GATEKEEPER=none`)
@@ -61,3 +63,23 @@ To use Firebase Realtime Database to persist user data, set the following enviro
8. Set `GATEKEEPER_STORE` to `firebase_rtdb` in your environment if you haven't already.
The proxy server will attempt to connect to your Firebase Realtime Database at startup and will throw an error if it cannot connect. If you see this error, check that your `FIREBASE_RTDB_URL` and `FIREBASE_KEY` secrets are set correctly.
### SQLite Database
To use a local SQLite database file to persist user data, set the following environment variables:
- `GATEKEEPER_STORE`: Set this to `sqlite`.
- `SQLITE_USER_STORE_PATH` (Optional): Specifies the path to the SQLite database file.
- If not set, it defaults to `data/user-store.sqlite` within the project directory.
- Ensure that the directory where the SQLite file will be created (e.g., the `data/` directory) is writable by the application process.
Using SQLite provides a simple way to persist user data locally without relying on external services. User data will be saved to the specified file and will be available across server restarts.
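For example, a minimal configuration (the path shown is just the default made explicit):
```
GATEKEEPER_STORE=sqlite
SQLITE_USER_STORE_PATH=data/user-store.sqlite
```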
## Whitelisting admin IP addresses
You can add your own IP ranges to the `ADMIN_WHITELIST` environment variable for additional security.
You can provide a comma-separated list containing individual IPv4 or IPv6 addresses, or CIDR ranges.
To whitelist an entire IP range, use CIDR notation. For example, `192.168.0.1/24` would whitelist all addresses from `192.168.0.0` to `192.168.0.255`.
To disable the whitelist, set `ADMIN_WHITELIST=0.0.0.0/0,::0`, which will allow access from any IPv4 or IPv6 address. This is the default behavior.
+2734 -1238
File diff suppressed because it is too large.
+36 -20
@@ -4,10 +4,12 @@
"description": "Reverse proxy for the OpenAI API",
"scripts": {
"build": "tsc && copyfiles -u 1 src/**/*.ejs build",
"database:migrate": "ts-node scripts/migrate.ts",
"postinstall": "patch-package",
"prepare": "husky install",
"start": "node build/server.js",
"start": "node --trace-deprecation --trace-warnings build/server.js",
"start:dev": "nodemon --watch src --exec ts-node --transpile-only src/server.ts",
"start:replit": "tsc && node build/server.js",
"start:debug": "ts-node --inspect --transpile-only src/server.ts",
"start:watch": "nodemon --require source-map-support/register build/server.js",
"type-check": "tsc --noEmit"
},
@@ -18,40 +20,53 @@
"license": "MIT",
"dependencies": {
"@anthropic-ai/tokenizer": "^0.0.4",
"@aws-crypto/sha256-js": "^5.1.0",
"@smithy/protocol-http": "^3.0.6",
"@smithy/signature-v4": "^2.0.10",
"@smithy/types": "^2.3.4",
"axios": "^1.3.5",
"@aws-crypto/sha256-js": "^5.2.0",
"@huggingface/jinja": "^0.3.0",
"@node-rs/argon2": "^1.8.3",
"@smithy/eventstream-codec": "^2.1.3",
"@smithy/eventstream-serde-node": "^2.1.3",
"@smithy/protocol-http": "^3.2.1",
"@smithy/signature-v4": "^2.1.3",
"@smithy/util-utf8": "^2.1.1",
"axios": "^1.7.4",
"better-sqlite3": "^10.0.0",
"check-disk-space": "^3.4.0",
"cookie-parser": "^1.4.6",
"copyfiles": "^2.4.1",
"cors": "^2.8.5",
"csrf-csrf": "^2.3.0",
"dotenv": "^16.0.3",
"ejs": "^3.1.9",
"express": "^4.18.2",
"dotenv": "^16.3.1",
"ejs": "^3.1.10",
"express": "^4.19.3",
"express-session": "^1.17.3",
"firebase-admin": "^11.10.1",
"firebase-admin": "^12.5.0",
"glob": "^10.3.12",
"googleapis": "^122.0.0",
"http-proxy-middleware": "^3.0.0-beta.1",
"lifion-aws-event-stream": "^1.0.7",
"http-proxy": "1.18.1",
"http-proxy-middleware": "^3.0.2",
"ipaddr.js": "^2.1.0",
"memorystore": "^1.6.7",
"multer": "^1.4.5-lts.1",
"node-schedule": "^2.1.1",
"patch-package": "^8.0.0",
"pino": "^8.11.0",
"pino-http": "^8.3.3",
"sanitize-html": "^2.11.0",
"sharp": "^0.32.6",
"proxy-agent": "^6.4.0",
"sanitize-html": "^2.13.0",
"sharp": "^0.34.2",
"showdown": "^2.1.0",
"source-map-support": "^0.5.21",
"stream-json": "^1.8.0",
"tiktoken": "^1.0.10",
"tinyws": "^0.1.0",
"uuid": "^9.0.0",
"zlib": "^1.0.5",
"zod": "^3.22.3",
"zod-error": "^1.5.0"
},
"devDependencies": {
"@smithy/types": "^3.3.0",
"@types/better-sqlite3": "^7.6.10",
"@types/cookie-parser": "^1.4.3",
"@types/cors": "^2.8.13",
"@types/express": "^4.17.17",
@@ -63,18 +78,19 @@
"@types/stream-json": "^1.7.7",
"@types/uuid": "^9.0.1",
"concurrently": "^8.0.1",
"esbuild": "^0.17.16",
"esbuild": "^0.25.5",
"esbuild-register": "^3.4.2",
"husky": "^8.0.3",
"nodemon": "^3.0.1",
"pino-pretty": "^10.2.3",
"prettier": "^3.0.3",
"source-map-support": "^0.5.21",
"prettier-plugin-ejs": "^1.0.3",
"ts-node": "^10.9.1",
"typescript": "^5.1.3"
"typescript": "^5.4.2"
},
"overrides": {
"google-gax": "^3.6.1",
"postcss": "^8.4.31"
"node-fetch@2.x": {
"whatwg-url": "14.x"
}
}
}
+23
@@ -0,0 +1,23 @@
# Patches
Contains monkey patches for certain packages, applied using `patch-package`.
## `http-proxy+1.18.1.patch`
Modifies the `http-proxy` package to work around an incompatibility with
body-parser and SOCKS5 proxies due to some esoteric stream handling behavior
when `socks-proxy-agent` is used instead of a generic http.Agent.
Modification involves adjusting the `buffer` property on ProxyServer's `options`
object to be a function that returns a stream instead of a stream itself. This
allows us to give it a function which produces a new Readable from the already-
parsed request body.
With the old implementation we would need to create an entirely new ProxyServer
instance for each request, which is not ideal under heavy load.
`http-proxy` hasn't been updated in six years, so it's unlikely that this patch
will be broken by future updates, but it's still pinned to 1.18.1 for now.
### See also
https://github.com/chimurai/http-proxy-middleware/issues/40
https://github.com/chimurai/http-proxy-middleware/issues/299
https://github.com/http-party/node-http-proxy/pull/1027
+13
@@ -0,0 +1,13 @@
diff --git a/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js b/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
index 7ae7355..c825c27 100644
--- a/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
+++ b/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
@@ -167,7 +167,7 @@ module.exports = {
}
}
- (options.buffer || req).pipe(proxyReq);
+ (options.buffer(req) || req).pipe(proxyReq);
proxyReq.on('response', function(proxyRes) {
if(server) { server.emit('proxyRes', proxyRes, req, res); }
+349
@@ -0,0 +1,349 @@
/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */
/* Document
========================================================================== */
/**
* 1. Correct the line height in all browsers.
* 2. Prevent adjustments of font size after orientation changes in iOS.
*/
html {
line-height: 1.15; /* 1 */
-webkit-text-size-adjust: 100%; /* 2 */
}
/* Sections
========================================================================== */
/**
* Remove the margin in all browsers.
*/
body {
margin: 0;
}
/**
* Render the `main` element consistently in IE.
*/
main {
display: block;
}
/**
* Correct the font size and margin on `h1` elements within `section` and
* `article` contexts in Chrome, Firefox, and Safari.
*/
h1 {
font-size: 2em;
margin: 0.67em 0;
}
/* Grouping content
========================================================================== */
/**
* 1. Add the correct box sizing in Firefox.
* 2. Show the overflow in Edge and IE.
*/
hr {
box-sizing: content-box; /* 1 */
height: 0; /* 1 */
overflow: visible; /* 2 */
}
/**
* 1. Correct the inheritance and scaling of font size in all browsers.
* 2. Correct the odd `em` font sizing in all browsers.
*/
pre {
font-family: monospace, monospace; /* 1 */
font-size: 1em; /* 2 */
}
/* Text-level semantics
========================================================================== */
/**
* Remove the gray background on active links in IE 10.
*/
a {
background-color: transparent;
}
/**
* 1. Remove the bottom border in Chrome 57-
* 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.
*/
abbr[title] {
border-bottom: none; /* 1 */
text-decoration: underline; /* 2 */
text-decoration: underline dotted; /* 2 */
}
/**
* Add the correct font weight in Chrome, Edge, and Safari.
*/
b,
strong {
font-weight: bolder;
}
/**
* 1. Correct the inheritance and scaling of font size in all browsers.
* 2. Correct the odd `em` font sizing in all browsers.
*/
code,
kbd,
samp {
font-family: monospace, monospace; /* 1 */
font-size: 1em; /* 2 */
}
/**
* Add the correct font size in all browsers.
*/
small {
font-size: 80%;
}
/**
* Prevent `sub` and `sup` elements from affecting the line height in
* all browsers.
*/
sub,
sup {
font-size: 75%;
line-height: 0;
position: relative;
vertical-align: baseline;
}
sub {
bottom: -0.25em;
}
sup {
top: -0.5em;
}
/* Embedded content
========================================================================== */
/**
* Remove the border on images inside links in IE 10.
*/
img {
border-style: none;
}
/* Forms
========================================================================== */
/**
* 1. Change the font styles in all browsers.
* 2. Remove the margin in Firefox and Safari.
*/
button,
input,
optgroup,
select,
textarea {
font-family: inherit; /* 1 */
font-size: 100%; /* 1 */
line-height: 1.15; /* 1 */
margin: 0; /* 2 */
}
/**
* Show the overflow in IE.
* 1. Show the overflow in Edge.
*/
button,
input { /* 1 */
overflow: visible;
}
/**
* Remove the inheritance of text transform in Edge, Firefox, and IE.
* 1. Remove the inheritance of text transform in Firefox.
*/
button,
select { /* 1 */
text-transform: none;
}
/**
* Correct the inability to style clickable types in iOS and Safari.
*/
button,
[type="button"],
[type="reset"],
[type="submit"] {
-webkit-appearance: button;
}
/**
* Remove the inner border and padding in Firefox.
*/
button::-moz-focus-inner,
[type="button"]::-moz-focus-inner,
[type="reset"]::-moz-focus-inner,
[type="submit"]::-moz-focus-inner {
border-style: none;
padding: 0;
}
/**
* Restore the focus styles unset by the previous rule.
*/
button:-moz-focusring,
[type="button"]:-moz-focusring,
[type="reset"]:-moz-focusring,
[type="submit"]:-moz-focusring {
outline: 1px dotted ButtonText;
}
/**
* Correct the padding in Firefox.
*/
fieldset {
padding: 0.35em 0.75em 0.625em;
}
/**
* 1. Correct the text wrapping in Edge and IE.
* 2. Correct the color inheritance from `fieldset` elements in IE.
* 3. Remove the padding so developers are not caught out when they zero out
* `fieldset` elements in all browsers.
*/
legend {
box-sizing: border-box; /* 1 */
color: inherit; /* 2 */
display: table; /* 1 */
max-width: 100%; /* 1 */
padding: 0; /* 3 */
white-space: normal; /* 1 */
}
/**
* Add the correct vertical alignment in Chrome, Firefox, and Opera.
*/
progress {
vertical-align: baseline;
}
/**
* Remove the default vertical scrollbar in IE 10+.
*/
textarea {
overflow: auto;
}
/**
* 1. Add the correct box sizing in IE 10.
* 2. Remove the padding in IE 10.
*/
[type="checkbox"],
[type="radio"] {
box-sizing: border-box; /* 1 */
padding: 0; /* 2 */
}
/**
* Correct the cursor style of increment and decrement buttons in Chrome.
*/
[type="number"]::-webkit-inner-spin-button,
[type="number"]::-webkit-outer-spin-button {
height: auto;
}
/**
* 1. Correct the odd appearance in Chrome and Safari.
* 2. Correct the outline style in Safari.
*/
[type="search"] {
-webkit-appearance: textfield; /* 1 */
outline-offset: -2px; /* 2 */
}
/**
* Remove the inner padding in Chrome and Safari on macOS.
*/
[type="search"]::-webkit-search-decoration {
-webkit-appearance: none;
}
/**
* 1. Correct the inability to style clickable types in iOS and Safari.
* 2. Change font properties to `inherit` in Safari.
*/
::-webkit-file-upload-button {
-webkit-appearance: button; /* 1 */
font: inherit; /* 2 */
}
/* Interactive
========================================================================== */
/*
* Add the correct display in Edge, IE 10+, and Firefox.
*/
details {
display: block;
}
/*
* Add the correct display in all browsers.
*/
summary {
display: list-item;
}
/* Misc
========================================================================== */
/**
* Add the correct display in IE 10+.
*/
template {
display: none;
}
/**
* Add the correct display in IE 10.
*/
[hidden] {
display: none;
}
+231
@@ -0,0 +1,231 @@
/* modified https://github.com/oxalorg/sakura */
html {
font-size: 62.5%;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
"Helvetica Neue", Arial, "Noto Sans", sans-serif;
}
body {
font-size: 1.8rem;
line-height: 1.618;
max-width: 38em;
margin: auto;
color: #c9c9c9;
background-color: #222222;
padding: 13px;
}
@media (max-width: 684px) {
body {
font-size: 1.53rem;
}
}
@media (max-width: 382px) {
body {
font-size: 1.35rem;
}
}
h1,
h2,
h3,
h4,
h5,
h6 {
line-height: 1.1;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
"Helvetica Neue", Arial, "Noto Sans", sans-serif;
font-weight: 700;
margin-top: 3rem;
margin-bottom: 1.5rem;
overflow-wrap: break-word;
word-wrap: break-word;
-ms-word-break: break-all;
word-break: break-word;
}
h1 {
font-size: 2.35em;
}
h2 {
font-size: 2em;
}
h3 {
font-size: 1.75em;
}
h4 {
font-size: 1.5em;
}
h5 {
font-size: 1.25em;
}
h6 {
font-size: 1em;
}
p {
margin-top: 0px;
margin-bottom: 2.5rem;
}
small,
sub,
sup {
font-size: 75%;
}
hr {
border-color: #ffffff;
}
a {
text-decoration: none;
color: #ffffff;
}
a:visited {
color: #e6e6e6;
}
a:hover {
color: #c9c9c9;
text-decoration: underline;
}
ul {
padding-left: 1.4em;
margin-top: 0px;
margin-bottom: 2.5rem;
}
li {
margin-bottom: 0.4em;
}
blockquote {
margin-left: 0px;
margin-right: 0px;
padding-left: 1em;
padding-top: 0.8em;
padding-bottom: 0.8em;
padding-right: 0.8em;
border-left: 5px solid #ffffff;
margin-bottom: 2.5rem;
background-color: #4a4a4a;
}
blockquote p {
margin-bottom: 0;
}
img,
video {
height: auto;
max-width: 100%;
margin-top: 0px;
margin-bottom: 2.5rem;
}
pre {
background-color: #4a4a4a;
display: block;
padding: 1em;
overflow-x: auto;
margin-top: 0px;
margin-bottom: 2.5rem;
font-size: 0.9em;
}
code,
kbd,
samp {
font-size: 0.9em;
padding: 0 0.5em;
background-color: #4a4a4a;
white-space: pre-wrap;
}
pre > code {
padding: 0;
background-color: transparent;
white-space: pre;
font-size: 1em;
}
table {
text-align: justify;
width: 100%;
border-collapse: collapse;
margin-bottom: 2rem;
}
td,
th {
padding: 0.5em;
border-bottom: 1px solid #4a4a4a;
}
input,
textarea {
border: 1px solid #c9c9c9;
}
input:focus,
textarea:focus {
border: 1px solid #ffffff;
}
textarea {
width: 100%;
}
.button,
button,
input[type="submit"],
input[type="reset"],
input[type="button"],
input[type="file"]::file-selector-button {
display: inline-block;
padding: 5px 10px;
text-align: center;
text-decoration: none;
white-space: nowrap;
background-color: #ffffff;
color: #222222;
border-radius: 1px;
border: 1px solid #ffffff;
cursor: pointer;
box-sizing: border-box;
}
.button[disabled],
button[disabled],
input[type="submit"][disabled],
input[type="reset"][disabled],
input[type="button"][disabled],
input[type="file"][disabled] {
cursor: default;
opacity: 0.5;
}
.button:hover,
button:hover,
input[type="submit"]:hover,
input[type="reset"]:hover,
input[type="button"]:hover,
input[type="file"]::file-selector-button:hover {
background-color: #c9c9c9;
color: #222222;
outline: 0;
}
.button:focus-visible,
button:focus-visible,
input[type="submit"]:focus-visible,
input[type="reset"]:focus-visible,
input[type="button"]:focus-visible,
input[type="file"]::file-selector-button:focus-visible {
outline-style: solid;
outline-width: 2px;
}
textarea,
select,
input {
color: #c9c9c9;
padding: 6px 10px;
margin-bottom: 10px;
background-color: #4a4a4a;
border: 1px solid #4a4a4a;
border-radius: 4px;
box-shadow: none;
box-sizing: border-box;
}
textarea:focus,
select:focus,
input:focus {
border: 1px solid #ffffff;
outline: 0;
}
input[type="checkbox"]:focus {
outline: 1px dotted #ffffff;
}
label,
legend,
fieldset {
display: block;
margin-bottom: 0.5rem;
font-weight: 600;
}
+237
@@ -0,0 +1,237 @@
/* modified https://github.com/oxalorg/sakura */
:root {
--accent-color: #4a4a4a;
--accent-color-hover: #5a5a5a;
--link-color: #58739c;
--link-visted-color: #6f5e6f;
}
html {
font-size: 62.5%;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
"Helvetica Neue", Arial, "Noto Sans", sans-serif;
}
body {
font-size: 1.8rem;
line-height: 1.618;
max-width: 38em;
margin: auto;
color: #4a4a4a;
background-color: #f9f9f9;
padding: 13px;
}
@media (max-width: 684px) {
body {
font-size: 1.53rem;
}
}
@media (max-width: 382px) {
body {
font-size: 1.35rem;
}
}
h1,
h2,
h3,
h4,
h5,
h6 {
line-height: 1.1;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
"Helvetica Neue", Arial, "Noto Sans", sans-serif;
font-weight: 700;
margin-top: 3rem;
margin-bottom: 1.5rem;
overflow-wrap: break-word;
word-wrap: break-word;
-ms-word-break: break-all;
word-break: break-word;
}
h1 {
font-size: 2.35em;
}
h2 {
font-size: 2em;
}
h3 {
font-size: 1.75em;
}
h4 {
font-size: 1.5em;
}
h5 {
font-size: 1.25em;
}
h6 {
font-size: 1em;
}
p {
margin-top: 0;
margin-bottom: 2.5rem;
}
small,
sub,
sup {
font-size: 75%;
}
hr {
border-color: var(--accent-color);
}
a {
text-decoration: none;
color: var(--link-color);
}
a:visited {
color: var(--link-visted-color);
}
a:hover {
color: var(--accent-color-hover);
text-decoration: underline;
}
ul {
padding-left: 1.4em;
margin-top: 0;
margin-bottom: 2.5rem;
}
li {
margin-bottom: 0.4em;
}
blockquote {
margin-left: 0;
margin-right: 0;
padding-left: 1em;
padding-top: 0.8em;
padding-bottom: 0.8em;
padding-right: 0.8em;
border-left: 5px solid var(--accent-color);
margin-bottom: 2.5rem;
background-color: #f1f1f1;
}
blockquote p {
margin-bottom: 0;
}
img,
video {
height: auto;
max-width: 100%;
margin-top: 0;
margin-bottom: 2.5rem;
}
pre {
background-color: #f1f1f1;
display: block;
padding: 1em;
overflow-x: auto;
margin-top: 0;
margin-bottom: 2.5rem;
font-size: 0.9em;
}
code,
kbd,
samp {
font-size: 0.9em;
padding: 0 0.5em;
background-color: #f1f1f1;
white-space: pre-wrap;
}
pre > code {
padding: 0;
background-color: transparent;
white-space: pre;
font-size: 1em;
}
table {
text-align: justify;
width: 100%;
border-collapse: collapse;
margin-bottom: 2rem;
}
td,
th {
padding: 0.5em;
border-bottom: 1px solid #f1f1f1;
}
input,
textarea {
border: 1px solid #4a4a4a;
}
input:focus,
textarea:focus {
border: 1px solid var(--accent-color);
}
textarea {
width: 100%;
}
.button,
button,
input[type="submit"],
input[type="reset"],
input[type="button"],
input[type="file"]::file-selector-button {
display: inline-block;
padding: 5px 10px;
text-align: center;
text-decoration: none;
white-space: nowrap;
background-color: var(--accent-color);
color: #f9f9f9;
border-radius: 2px;
border: 1px solid var(--accent-color);
cursor: pointer;
box-sizing: border-box;
}
.button[disabled],
button[disabled],
input[type="submit"][disabled],
input[type="reset"][disabled],
input[type="button"][disabled],
input[type="file"][disabled] {
cursor: default;
opacity: 0.5;
}
.button:hover,
button:hover,
input[type="submit"]:hover,
input[type="reset"]:hover,
input[type="button"]:hover,
input[type="file"]::file-selector-button:hover {
background-color: var(--accent-color-hover);
color: #f9f9f9;
outline: 0;
}
.button:focus-visible,
button:focus-visible,
input[type="submit"]:focus-visible,
input[type="reset"]:focus-visible,
input[type="button"]:focus-visible,
input[type="file"]::file-selector-button:focus-visible {
outline-style: solid;
outline-width: 2px;
}
textarea,
select,
input {
color: #4a4a4a;
padding: 6px 10px;
margin-bottom: 10px;
background-color: #f1f1f1;
border: 1px solid #f1f1f1;
border-radius: 4px;
box-shadow: none;
box-sizing: border-box;
}
textarea:focus,
select:focus,
input:focus {
border: 1px solid var(--accent-color);
outline: 0;
}
input[type="checkbox"]:focus {
outline: 1px dotted var(--accent-color);
}
label,
legend,
fieldset {
display: block;
margin-bottom: 0.5rem;
font-weight: 600;
}
+120
@@ -0,0 +1,120 @@
importScripts(
"https://cdn.jsdelivr.net/npm/hash-wasm@4.11.0/dist/argon2.umd.min.js"
);
let active = false;
let nonce = 0;
let signature = "";
let lastNotify = 0;
let hashesSinceLastNotify = 0;
let params = {
salt: null,
hashLength: 0,
iterations: 0,
memorySize: 0,
parallelism: 0,
targetValue: BigInt(0),
safariFix: false,
};
self.onmessage = async (event) => {
const { data } = event;
switch (data.type) {
case "stop":
active = false;
self.postMessage({ type: "paused", hashes: hashesSinceLastNotify });
return;
case "start":
active = true;
signature = data.signature;
nonce = data.nonce;
const c = data.challenge;
const salt = new Uint8Array(c.s.length / 2);
for (let i = 0; i < c.s.length; i += 2) {
salt[i / 2] = parseInt(c.s.slice(i, i + 2), 16);
}
params = {
salt: salt,
hashLength: c.hl,
iterations: c.t,
memorySize: c.m,
parallelism: c.p,
targetValue: BigInt(c.d.slice(0, -1)), // difficulty arrives as a serialized BigInt string; drop the trailing marker
safariFix: data.isMobileWebkit,
};
console.log("Started", params);
self.postMessage({ type: "started" });
setTimeout(solve, 0);
break;
}
};
const doHash = async (password) => {
const { salt, hashLength, iterations, memorySize, parallelism } = params;
return await self.hashwasm.argon2id({
password,
salt,
hashLength,
iterations,
memorySize,
parallelism,
});
};
const checkHash = (hash) => {
const { targetValue } = params;
const hashValue = BigInt(`0x${hash}`);
return hashValue <= targetValue;
};
const solve = async () => {
if (!active) {
console.log("Stopped solver", nonce);
return;
}
// Safari WASM doesn't like multiple calls in one worker
const batchSize = 1;
const batch = [];
for (let i = 0; i < batchSize; i++) {
batch.push(nonce++);
}
try {
const results = await Promise.all(
batch.map(async (nonce) => {
const hash = await doHash(String(nonce));
return { hash, nonce };
})
);
hashesSinceLastNotify += batchSize;
const solution = results.find(({ hash }) => checkHash(hash));
if (solution) {
console.log("Solution found", solution, params.salt);
self.postMessage({ type: "solved", nonce: solution.nonce });
active = false;
} else {
if (Date.now() - lastNotify >= 500) {
console.log("Last nonce", nonce, "Hashes", hashesSinceLastNotify);
self.postMessage({ type: "progress", hashes: hashesSinceLastNotify });
lastNotify = Date.now();
hashesSinceLastNotify = 0;
}
setTimeout(solve, 10);
}
} catch (error) {
console.error("Error", error);
const stack = error.stack;
const debug = {
stack,
lastNonce: nonce,
targetValue: params.targetValue,
};
self.postMessage({ type: "error", error: error.message, debug });
active = false;
}
};
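For reference, a minimal sketch of the page-side code that drives this worker. The worker path, signature, and challenge values below are placeholders, not part of the repository:

const worker = new Worker("/pow-worker.js"); // hypothetical path
worker.onmessage = ({ data }) => {
  switch (data.type) {
    case "started":
      console.log("solver running");
      break;
    case "progress":
      console.log(`${data.hashes} hashes since last update`);
      break;
    case "solved":
      // submit data.nonce plus the original signature to the server for verification
      console.log("solution nonce", data.nonce);
      break;
    case "error":
      console.error(data.error, data.debug);
      break;
  }
};
worker.postMessage({
  type: "start",
  signature: "server-issued-signature", // placeholder
  nonce: 0,
  isMobileWebkit: false,
  // hex-encoded salt (s), hash length (hl), iterations (t), memory (m),
  // parallelism (p), and decimal target value with a trailing type marker (d)
  challenge: { s: "00ff00ff00ff00ff", hl: 32, t: 3, m: 65536, p: 1, d: "12345678901234567890n" },
});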
+39
@@ -0,0 +1,39 @@
import Database from "better-sqlite3";
import { DATABASE_VERSION, migrateDatabase } from "../src/shared/database";
import { logger } from "../src/logger";
import { config } from "../src/config";
const log = logger.child({ module: "scripts/migrate" });
async function runMigration() {
let targetVersion = Number(process.argv[2]) || undefined;
if (!targetVersion) {
log.info("Enter target version or leave empty to use the latest version.");
process.stdin.resume();
process.stdin.setEncoding("utf8");
const input = await new Promise<string>((resolve) => {
process.stdin.on("data", (text) => {
resolve((String(text) || "").trim());
});
});
process.stdin.pause();
targetVersion = Number(input);
if (!targetVersion) {
targetVersion = DATABASE_VERSION;
}
}
const db = new Database(config.sqliteDataPath, {
verbose: (msg, ...args) => log.debug({ args }, String(msg)),
});
const currentVersion = db.pragma("user_version", { simple: true });
log.info({ currentVersion, targetVersion }, "Running migrations.");
migrateDatabase(targetVersion, db);
}
runMigration().catch((error) => {
log.error(error, "Migration failed.");
process.exit(1);
});
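A usage note, assuming the script is invoked via ts-node (the file path is hypothetical):

npx ts-node scripts/migrate.ts 3    # migrate to schema version 3
npx ts-node scripts/migrate.ts      # no argument: prompts for a version; empty input uses DATABASE_VERSION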
+33
@@ -230,6 +230,39 @@ Content-Type: application/json
]
}
###
# @name Proxy / GCP Claude -- Native Completion
POST {{proxy-host}}/proxy/gcp/claude/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-01-01
Content-Type: application/json
{
"model": "claude-v2",
"max_tokens_to_sample": 10,
"temperature": 0,
"stream": true,
"prompt": "What is genshin impact\n\n:Assistant:"
}
###
# @name Proxy / GCP Claude -- OpenAI-to-Anthropic API Translation
POST {{proxy-host}}/proxy/gcp/claude/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo",
"max_tokens": 50,
"stream": true,
"messages": [
{
"role": "user",
"content": "What is genshin impact?"
}
]
}
###
# @name Proxy / Azure OpenAI -- Native Chat Completions
POST {{proxy-host}}/proxy/azure/openai/chat/completions
+102
@@ -0,0 +1,102 @@
import Database from "better-sqlite3";
import { v4 as uuidv4 } from "uuid";
import { config } from "../src/config";
function generateRandomIP() {
return (
Math.floor(Math.random() * 255) +
"." +
Math.floor(Math.random() * 255) +
"." +
Math.floor(Math.random() * 255) +
"." +
Math.floor(Math.random() * 255)
);
}
function generateRandomDate() {
const end = new Date();
const start = new Date(end);
start.setDate(end.getDate() - 90);
const randomDate = new Date(
start.getTime() + Math.random() * (end.getTime() - start.getTime())
);
return randomDate.toISOString();
}
function generateMockSHA256() {
const characters = 'abcdef0123456789';
let hash = '';
for (let i = 0; i < 64; i++) {
const randomIndex = Math.floor(Math.random() * characters.length);
hash += characters[randomIndex];
}
return hash;
}
function getRandomModelFamily() {
const modelFamilies = [
"turbo",
"gpt4",
"gpt4-32k",
"gpt4-turbo",
"claude",
"claude-opus",
"gemini-pro",
"mistral-tiny",
"mistral-small",
"mistral-medium",
"mistral-large",
"aws-claude",
"aws-claude-opus",
"gcp-claude",
"gcp-claude-opus",
"azure-turbo",
"azure-gpt4",
"azure-gpt4-32k",
"azure-gpt4-turbo",
"dall-e",
"azure-dall-e",
];
return modelFamilies[Math.floor(Math.random() * modelFamilies.length)];
}
(async () => {
const db = new Database(config.sqliteDataPath);
const numRows = 100;
const insertStatement = db.prepare(`
INSERT INTO events (type, ip, date, model, family, hashes, userToken, inputTokens, outputTokens)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
`);
const users = Array.from({ length: 10 }, () => uuidv4());
function getRandomUser() {
return users[Math.floor(Math.random() * users.length)];
}
const transaction = db.transaction(() => {
for (let i = 0; i < numRows; i++) {
insertStatement.run(
"chat_completion",
generateRandomIP(),
generateRandomDate(),
getRandomModelFamily() + "-" + Math.floor(Math.random() * 100),
getRandomModelFamily(),
Array.from(
{ length: Math.floor(Math.random() * 10) },
generateMockSHA256
).join(","),
getRandomUser(),
Math.floor(Math.random() * 500),
Math.floor(Math.random() * 6000)
);
}
});
transaction();
console.log(`Inserted ${numRows} rows into the events table.`);
db.close();
})();
+118
@@ -0,0 +1,118 @@
// uses the aws sdk to sign a request, then uses axios to send it to the bedrock REST API manually
import axios from "axios";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID!;
const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY!;
// Copied from amazon bedrock docs
// List models
// ListFoundationModels
// Service: Amazon Bedrock
// List of Bedrock foundation models that you can use. For more information, see Foundation models in the
// Bedrock User Guide.
// Request Syntax
// GET /foundation-models?
// byCustomizationType=byCustomizationType&byInferenceType=byInferenceType&byOutputModality=byOutputModality&byProvider=byProvider
// HTTP/1.1
// URI Request Parameters
// The request uses the following URI parameters.
// byCustomizationType (p. 38)
// List by customization type.
// Valid Values: FINE_TUNING
// byInferenceType (p. 38)
// List by inference type.
// Valid Values: ON_DEMAND | PROVISIONED
// byOutputModality (p. 38)
// List by output modality type.
// Valid Values: TEXT | IMAGE | EMBEDDING
// byProvider (p. 38)
// A Bedrock model provider.
// Pattern: ^[a-z0-9-]{1,63}$
// Request Body
// The request does not have a request body
// Run inference on a text model
// Send an invoke request to run inference on a Titan Text G1 - Express model. We set the accept
// parameter to accept any content type in the response.
// POST https://bedrock.us-east-1.amazonaws.com/model/amazon.titan-text-express-v1/invoke
// -H accept: */*
// -H content-type: application/json
// Payload
// {"inputText": "Hello world"}
// Example response
// Response for the above request.
// -H content-type: application/json
// Payload
// <the model response>
const AMZ_REGION = "us-east-1";
const AMZ_HOST = "bedrock.us-east-1.amazonaws.com"; // matches the endpoint in the docs excerpt above
async function listModels() {
const httpRequest = new HttpRequest({
method: "GET",
protocol: "https:",
hostname: AMZ_HOST,
path: "/foundation-models",
headers: { ["Host"]: AMZ_HOST },
});
const signedRequest = await signRequest(httpRequest);
const response = await axios.get(
`https://${signedRequest.hostname}${signedRequest.path}`,
{ headers: signedRequest.headers }
);
console.log(response.data);
}
async function invokeModel() {
const model = "anthropic.claude-v1";
const httpRequest = new HttpRequest({
method: "POST",
protocol: "https:",
hostname: AMZ_HOST,
path: `/model/${model}/invoke`,
headers: {
["Host"]: AMZ_HOST,
["accept"]: "*/*",
["content-type"]: "application/json",
},
body: JSON.stringify({
temperature: 0.5,
prompt: "\n\nHuman:Hello world\n\nAssistant:",
max_tokens_to_sample: 10,
}),
});
console.log("httpRequest", httpRequest);
const signedRequest = await signRequest(httpRequest);
const response = await axios.post(
`https://${signedRequest.hostname}${signedRequest.path}`,
signedRequest.body,
{ headers: signedRequest.headers }
);
console.log(response.status);
console.log(response.headers);
console.log(response.data);
console.log("full url", response.request.res.responseUrl);
}
async function signRequest(request: HttpRequest) {
const signer = new SignatureV4({
sha256: Sha256,
credentials: {
accessKeyId: AWS_ACCESS_KEY_ID,
secretAccessKey: AWS_SECRET_ACCESS_KEY,
},
region: AMZ_REGION,
service: "bedrock",
});
return await signer.sign(request, { signingDate: new Date() });
}
// listModels();
// invokeModel();
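// Example run (env vars required above; uncomment one of the calls first; the
// file path is hypothetical):
//   AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=... npx ts-node scripts/bedrock-request.ts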
+53
@@ -0,0 +1,53 @@
const axios = require("axios");
function randomInteger(max) {
return Math.floor(Math.random() * max + 1);
}
async function testQueue() {
const requests = Array(10).fill(undefined).map(async function() {
const maxTokens = randomInteger(2000);
const headers = {
"Authorization": "Bearer test",
"Content-Type": "application/json",
"X-Forwarded-For": `${randomInteger(255)}.${randomInteger(255)}.${randomInteger(255)}.${randomInteger(255)}`,
};
const payload = {
model: "gpt-4o-mini-2024-07-18",
max_tokens: 20 + maxTokens,
stream: false,
messages: [{role: "user", content: "You are being benchmarked regarding your reliability at outputting exact, machine-comprehensible data. Output the sentence \"The quick brown fox jumps over the lazy dog.\" Do not precede it with quotemarks or any form of preamble, and do not output anything after the sentence."}],
temperature: 0,
};
try {
const response = await axios.post(
"http://localhost:7860/proxy/openai/v1/chat/completions",
payload,
{ headers }
);
if (response.status !== 200) {
console.error(`Request ${maxTokens} finished with status code ${response.status} and response`, response.data);
return;
}
const content = response.data.choices[0].message.content;
console.log(
`Request ${maxTokens} `,
content === "The quick brown fox jumps over the lazy dog." ? "OK" : `mangled: ${content}`
);
} catch (error) {
const msg = error.response?.data;
console.error(`Error in req ${maxTokens}:`, error.message, msg || "");
}
});
await Promise.all(requests);
console.log("All requests finished");
}
testQueue();
+49
@@ -0,0 +1,49 @@
import { Router } from "express";
import { z } from "zod";
import { encodeCursor, decodeCursor } from "../../shared/utils";
import { eventsRepo } from "../../shared/database/repos/event";
const router = Router();
/**
* Returns events for the given user token.
* GET /admin/events/:token
* @query first - The number of events to return.
* @query after - The cursor to start returning events from (exclusive).
*/
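// Illustrative call, assuming header-based admin auth (host, key, and cursor
// values are placeholders):
//   curl -H "Authorization: Bearer $ADMIN_KEY" \
//     "http://localhost:7860/admin/events/<user-token>?first=50&after=<cursor>"
// The `endCursor` in the response can be passed back as `after` for the next page.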
router.get("/:token", (req, res) => {
const schema = z.object({
token: z.string(),
first: z.coerce.number().int().positive().max(200).default(25),
after: z
.string()
.optional()
.transform((v) => {
try {
return decodeCursor(v);
} catch {
return null;
}
})
.nullable(),
sort: z.string().optional(),
});
const args = schema.safeParse({ ...req.params, ...req.query });
if (!args.success) {
return res.status(400).json({ error: args.error });
}
const data = eventsRepo
.getUserEvents(args.data.token, {
limit: args.data.first,
cursor: args.data.after,
})
.map((e) => ({ node: e, cursor: encodeCursor(e.date) }));
res.json({
data,
endCursor: data[data.length - 1]?.cursor,
});
});
export { router as eventsApiRouter };
+57 -4
@@ -1,17 +1,32 @@
import express, { Router } from "express";
import { authorize } from "./auth";
import { createWhitelistMiddleware } from "../shared/cidr";
import { HttpError } from "../shared/errors";
import { injectCsrfToken, checkCsrfToken } from "../shared/inject-csrf";
import { injectLocals } from "../shared/inject-locals";
import { withSession } from "../shared/with-session";
import { injectCsrfToken, checkCsrfToken } from "../shared/inject-csrf";
import { config } from "../config";
import { renderPage } from "../info-page";
import { buildInfo } from "../service-info";
import { authorize } from "./auth";
import { loginRouter } from "./login";
import { usersApiRouter as apiRouter } from "./api/users";
import { eventsApiRouter } from "./api/events";
import { usersApiRouter } from "./api/users";
import { usersWebRouter as webRouter } from "./web/manage";
import { logger } from "../logger";
import { keyPool } from "../shared/key-management";
const adminRouter = Router();
const whitelist = createWhitelistMiddleware(
"ADMIN_WHITELIST",
config.adminWhitelist
);
if (!whitelist.ranges.length && config.adminKey?.length) {
logger.error("ADMIN_WHITELIST is empty. No admin requests will be allowed. Set 0.0.0.0/0 to allow all.");
}
adminRouter.use(whitelist);
adminRouter.use(
express.json({ limit: "20mb" }),
express.urlencoded({ extended: true, limit: "20mb" })
@@ -19,7 +34,45 @@ adminRouter.use(
adminRouter.use(withSession);
adminRouter.use(injectCsrfToken);
adminRouter.use("/users", authorize({ via: "header" }), apiRouter);
adminRouter.use("/users", authorize({ via: "header" }), usersApiRouter);
adminRouter.use("/events", authorize({ via: "header" }), eventsApiRouter);
// Special endpoint to validate organization verification status for all OpenAI keys
// This checks both gpt-image-1 and o3 streaming access which require verified organizations
adminRouter.post("/validate-gpt-image-keys", authorize({ via: "header" }), async (req, res) => {
try {
logger.info("Manual validation of organization verification status initiated");
// Use the specialized validation function that tests each key's organization verification
// status using o3 streaming and waits for the results
const results = await keyPool.validateGptImageAccess();
logger.info({
total: results.total,
verified: results.verified.length,
removed: results.removed.length,
errors: results.errors.length
}, "Manual organization verification check completed");
return res.json({
success: true,
message: "Organization verification check completed",
results: {
total: results.total,
verified: results.verified.length,
removed: results.removed.length,
errors: results.errors.length,
// Only include hashes, not full keys
verified_keys: results.verified,
removed_keys: results.removed,
error_details: results.errors
}
});
} catch (error) {
logger.error({ error }, "Error validating organization verification status for OpenAI keys");
return res.status(500).json({ error: "Failed to validate keys", details: error.message });
}
});
adminRouter.use(checkCsrfToken);
adminRouter.use(injectLocals);
+288 -14
@@ -1,4 +1,5 @@
import { Router } from "express";
import ipaddr from "ipaddr.js";
import multer from "multer";
import { z } from "zod";
import { config } from "../../config";
@@ -6,7 +7,7 @@ import { HttpError } from "../../shared/errors";
import * as userStore from "../../shared/users/user-store";
import { parseSort, sortBy, paginate } from "../../shared/utils";
import { keyPool } from "../../shared/key-management";
import { MODEL_FAMILIES } from "../../shared/models";
import { LLMService, MODEL_FAMILIES } from "../../shared/models";
import { getTokenCostUsd, prettyTokens } from "../../shared/stats";
import {
User,
@@ -14,6 +15,9 @@ import {
UserSchema,
UserTokenCounts,
} from "../../shared/users/schema";
import { getLastNImages } from "../../shared/file-storage/image-history";
import { blacklists, parseCidrs, whitelists } from "../../shared/cidr";
import { invalidatePowChallenges } from "../../user/web/pow-captcha";
const router = Router();
@@ -39,6 +43,74 @@ router.get("/create-user", (req, res) => {
});
});
router.get("/anti-abuse", (_req, res) => {
const wl = [...whitelists.entries()];
const bl = [...blacklists.entries()];
res.render("admin_anti-abuse", {
captchaMode: config.captchaMode,
difficulty: config.powDifficultyLevel,
whitelists: wl.map((w) => ({
name: w[0],
mode: "whitelist",
ranges: w[1].ranges,
})),
blacklists: bl.map((b) => ({
name: b[0],
mode: "blacklist",
ranges: b[1].ranges,
})),
});
});
router.post("/cidr", (req, res) => {
const body = req.body;
const valid = z
.object({
action: z.enum(["add", "remove"]),
mode: z.enum(["whitelist", "blacklist"]),
name: z.string().min(1),
mask: z.string().min(1),
})
.safeParse(body);
if (!valid.success) {
throw new HttpError(
400,
valid.error.issues.flatMap((issue) => issue.message).join(", ")
);
}
const { mode, name, mask } = valid.data;
const list = (mode === "whitelist" ? whitelists : blacklists).get(name);
if (!list) {
throw new HttpError(404, "List not found");
}
if (valid.data.action === "remove") {
const newRanges = new Set(list.ranges);
newRanges.delete(mask);
list.updateRanges([...newRanges]);
req.session.flash = {
type: "success",
message: `${mode} ${name} updated`,
};
return res.redirect("/admin/manage/anti-abuse");
} else if (valid.data.action === "add") {
const result = parseCidrs(mask);
if (result.length === 0) {
throw new HttpError(400, "Invalid CIDR mask");
}
const newRanges = new Set([...list.ranges, mask]);
list.updateRanges([...newRanges]);
req.session.flash = {
type: "success",
message: `${mode} ${name} updated`,
};
return res.redirect("/admin/manage/anti-abuse");
}
});
router.post("/create-user", (req, res) => {
const body = req.body;
@@ -60,10 +132,11 @@ router.post("/create-user", (req, res) => {
)
.transform((data: any) => {
const expiresAt = Date.now() + data.temporaryUserDuration * 60 * 1000;
const tokenLimits = MODEL_FAMILIES.reduce((limits, model) => {
limits[model] = data[`temporaryUserQuota_${model}`];
const tokenLimits = MODEL_FAMILIES.reduce((limits, modelFamily) => {
const quotaValue = data[`temporaryUserQuota_${modelFamily}`];
limits[modelFamily] = typeof quotaValue === 'number' ? quotaValue : 0;
return limits;
}, {} as UserTokenCounts);
}, {} as any);
return { ...data, expiresAt, tokenLimits };
});
@@ -117,7 +190,70 @@ router.post("/import-users", upload.single("users"), (req, res) => {
if (!req.file) throw new HttpError(400, "No file uploaded");
const data = JSON.parse(req.file.buffer.toString());
const result = z.array(UserPartialSchema).safeParse(data.users);
// Transform old token count format to new format
const transformedUsers = data.users.map((user: any) => {
if (user.tokenCounts) {
const transformedTokenCounts: any = {};
for (const [family, value] of Object.entries(user.tokenCounts)) {
if (typeof value === 'number') {
// Old format: just a number (legacy_total)
transformedTokenCounts[family] = {
input: 0,
output: 0,
legacy_total: value
};
} else if (typeof value === 'object' && value !== null) {
// New format or partially new format
const transformedCounts: { input: number; output: number; legacy_total?: number } = {
input: (value as any).input || 0,
output: (value as any).output || 0
};
if ((value as any).legacy_total !== undefined) {
transformedCounts.legacy_total = (value as any).legacy_total;
}
transformedTokenCounts[family] = transformedCounts;
}
}
user.tokenCounts = transformedTokenCounts;
}
// Handle tokenLimits - should be flat numbers
if (user.tokenLimits) {
const transformedTokenLimits: any = {};
for (const [family, value] of Object.entries(user.tokenLimits)) {
if (typeof value === 'number') {
// Already in correct format
transformedTokenLimits[family] = value;
} else if (typeof value === 'object' && value !== null) {
// Old format with input/output/legacy_total - sum them up
const val = value as any;
transformedTokenLimits[family] = (val.input ?? 0) + (val.output ?? 0) + (val.legacy_total ?? 0);
}
}
user.tokenLimits = transformedTokenLimits;
}
// Handle tokenRefresh - should be flat numbers
if (user.tokenRefresh) {
const transformedTokenRefresh: any = {};
for (const [family, value] of Object.entries(user.tokenRefresh)) {
if (typeof value === 'number') {
// Already in correct format
transformedTokenRefresh[family] = value;
} else if (typeof value === 'object' && value !== null) {
// Old format with input/output/legacy_total - sum them up
const val = value as any;
transformedTokenRefresh[family] = (val.input ?? 0) + (val.output ?? 0) + (val.legacy_total ?? 0);
}
}
user.tokenRefresh = transformedTokenRefresh;
}
return user;
});
const result = z.array(UserPartialSchema).safeParse(transformedUsers);
if (!result.success) throw new HttpError(400, result.error.toString());
const upserts = result.data.map((user) => userStore.upsertUser(user));
@@ -196,13 +332,21 @@ router.post("/maintenance", (req, res) => {
let flash = { type: "", message: "" };
switch (action) {
case "recheck": {
keyPool.recheck("openai");
keyPool.recheck("anthropic");
const size = keyPool
const checkable: LLMService[] = [
"openai",
"anthropic",
"aws",
"gcp",
"azure",
"google-ai"
];
checkable.forEach((s) => keyPool.recheck(s));
const keyCount = keyPool
.list()
.filter((k) => k.service !== "google-ai").length;
.filter((k) => checkable.includes(k.service)).length;
flash.type = "success";
flash.message = `Scheduled recheck of ${size} keys for OpenAI and Anthropic.`;
flash.message = `Scheduled recheck of ${keyCount} keys.`;
break;
}
case "resetQuotas": {
@@ -220,14 +364,139 @@ router.post("/maintenance", (req, res) => {
flash.message = `All users' token usage records reset.`;
break;
}
case "downloadImageMetadata": {
const data = JSON.stringify(
{
exportedAt: new Date().toISOString(),
generations: getLastNImages(),
},
null,
2
);
res.setHeader(
"Content-Disposition",
`attachment; filename=image-metadata-${new Date().toISOString()}.json`
);
res.setHeader("Content-Type", "application/json");
return res.send(data);
}
case "expireTempTokens": {
const users = userStore.getUsers();
const temps = users.filter((u) => u.type === "temporary");
temps.forEach((user) => {
user.expiresAt = Date.now();
user.disabledReason = "Admin forced expiration.";
userStore.upsertUser(user);
});
invalidatePowChallenges();
flash.type = "success";
flash.message = `${temps.length} temporary users marked for expiration.`;
break;
}
case "cleanTempTokens": {
const users = userStore.getUsers();
const disabledTempUsers = users.filter(
(u) => u.type === "temporary" && u.expiresAt && u.expiresAt < Date.now()
);
disabledTempUsers.forEach((user) => {
user.disabledAt = 1; //will be cleaned up by the next cron job
userStore.upsertUser(user);
});
flash.type = "success";
flash.message = `${disabledTempUsers.length} disabled temporary users marked for cleanup.`;
break;
}
case "setDifficulty": {
const selected = req.body["pow-difficulty"];
const valid = ["low", "medium", "high", "extreme"];
const isNumber = Number.isInteger(Number(selected));
if (!selected || (!valid.includes(selected) && !isNumber)) {
throw new HttpError(400, "Invalid difficulty " + selected);
}
config.powDifficultyLevel = isNumber ? Number(selected) : selected;
invalidatePowChallenges();
break;
}
case "generateTempIpReport": {
const tempUsers = userStore
.getUsers()
.filter((u) => u.type === "temporary");
const ipv4RangeMap = new Map<string, Set<string>>();
const ipv6RangeMap = new Map<string, Set<string>>();
tempUsers.forEach((u) => {
u.ip.forEach((ip) => {
try {
const parsed = ipaddr.parse(ip);
if (parsed.kind() === "ipv4") {
const subnet =
parsed.toNormalizedString().split(".").slice(0, 3).join(".") +
".0/24";
const userSet = ipv4RangeMap.get(subnet) || new Set();
userSet.add(u.token);
ipv4RangeMap.set(subnet, userSet);
} else if (parsed.kind() === "ipv6") {
const subnet =
parsed.toNormalizedString().split(":").slice(0, 3).join(":") +
"::/48";
const userSet = ipv6RangeMap.get(subnet) || new Set();
userSet.add(u.token);
ipv6RangeMap.set(subnet, userSet);
}
} catch (e) {
req.log.warn(
{ ip, error: e.message },
"Invalid IP address; skipping"
);
}
});
});
const ipv4Ranges = Array.from(ipv4RangeMap.entries())
.map(([subnet, userSet]) => ({
subnet,
distinctTokens: userSet.size,
}))
.sort((a, b) => b.distinctTokens - a.distinctTokens);
const ipv6Ranges = Array.from(ipv6RangeMap.entries())
.map(([subnet, userSet]) => ({
subnet,
distinctTokens: userSet.size,
}))
.sort((a, b) => {
if (a.distinctTokens === b.distinctTokens) {
return a.subnet.localeCompare(b.subnet);
}
return b.distinctTokens - a.distinctTokens;
});
const data = JSON.stringify(
{
exportedAt: new Date().toISOString(),
ipv4Ranges,
ipv6Ranges,
},
null,
2
);
res.setHeader(
"Content-Disposition",
`attachment; filename=temp-ip-report-${new Date().toISOString()}.json`
);
res.setHeader("Content-Type", "application/json");
return res.send(data);
}
default: {
throw new HttpError(400, "Invalid action");
}
}
req.session.flash = flash;
const referer = req.get("referer");
return res.redirect(`/admin/manage`);
return res.redirect(referer || "/admin/manage");
});
router.get("/download-stats", (_req, res) => {
@@ -342,9 +611,14 @@ router.post("/generate-stats", (req, res) => {
function getSumsForUser(user: User) {
const sums = MODEL_FAMILIES.reduce(
(s, model) => {
const tokens = user.tokenCounts[model] ?? 0;
s.sumTokens += tokens;
s.sumCost += getTokenCostUsd(model, tokens);
const counts = user.tokenCounts[model] ?? { input: 0, output: 0 };
// Ensure inputTokens and outputTokens are numbers, defaulting to 0 if NaN or undefined
const inputTokens = Number(counts.input) || 0;
const outputTokens = Number(counts.output) || 0;
// We could also consider legacy_total here if input and output are 0
// For now, sumTokens and sumCost will be based on current input/output.
s.sumTokens += inputTokens + outputTokens;
s.sumCost += getTokenCostUsd(model, inputTokens, outputTokens);
return s;
},
{ sumTokens: 0, sumCost: 0, prettyUsage: "" }
+160
@@ -0,0 +1,160 @@
<%- include("partials/shared_header", { title: "Proof of Work Verification Settings - OAI Reverse Proxy Admin" }) %>
<style>
details {
margin-top: 1em;
}
details summary {
font-weight: bold;
cursor: pointer;
}
details p {
margin-left: 1em;
}
#token-manage {
display: flex;
width: 100%;
}
#token-manage button {
flex-grow: 1;
margin: 0 0.5em;
}
</style>
<h1>Abuse Mitigation Settings</h1>
<div>
<h2>Proof-of-Work Verification</h2>
<p>
The Proof-of-Work difficulty level determines how much work a client must perform to earn a temporary user token.
Higher difficulty levels make tokens more expensive for attackers to generate, which helps mitigate abuse, but they
also make it harder for legitimate users, particularly on slower devices, to obtain tokens. Refer to the
documentation for guidance.
</p>
<%if (captchaMode === "none") { %>
<p>
<strong>PoW verification is not enabled. Set <code>CAPTCHA_MODE=proof_of_work</code> to enable.</strong>
</p>
<% } else { %>
<h3>Difficulty Level</h3>
<div>
<label for="difficulty">Difficulty Level:</label>
<select name="difficulty" id="difficulty" onchange="difficultyChanged(event)">
<option value="low">Low</option>
<option value="medium">Medium</option>
<option value="high">High</option>
<option value="extreme">Extreme</option>
<option value="custom">Custom</option>
</select>
<div id="custom-difficulty-container" style="display: none">
<label for="customDifficulty">Hashes required (average):</label>
<input type="number" id="customDifficulty" value="0" min="1" max="1000000000" />
</div>
<button onclick='doAction("setDifficulty")'>Update Difficulty</button>
</div>
<div><span id="currentDifficulty">Current Difficulty: <%= difficulty %></span></div>
<% } %>
<form id="maintenanceForm" action="/admin/manage/maintenance" method="post">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<input id="hiddenAction" type="hidden" name="action" value="" />
<input id="hiddenDifficulty" type="hidden" name="pow-difficulty" value="" />
</form>
<h3>Manage Temporary User Tokens</h3>
<div id="token-manage">
<p><button onclick='doAction("expireTempTokens")'>🕒 Expire All Temp Tokens</button></p>
<p><button onclick='doAction("cleanTempTokens")'>🧹 Delete Expired Temp Tokens</button></p>
<p><button onclick='doAction("generateTempIpReport")'>📊 Generate Temp Token IP Report</button></p>
</div>
</div>
<div>
<h2>IP Whitelists and Blacklists</h2>
<p>
You can specify IP ranges to whitelist or blacklist from accessing the proxy. Entries can be specified as single
addresses or
<a href="https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation">CIDR notation</a>. IPv6 is
supported but not recommended for use with the current version of the proxy.
</p>
<p>
<strong>Note:</strong> Changes here are not persisted across server restarts. If you want to make changes permanent,
you can copy the values to your deployment configuration.
</p>
<% for (let i = 0; i < whitelists.length; i++) { %>
<%- include("partials/admin-cidr-widget", { list: whitelists[i] }) %>
<% } %>
<% for (let i = 0; i < blacklists.length; i++) { %>
<%- include("partials/admin-cidr-widget", { list: blacklists[i] }) %>
<% } %>
<form action="/admin/manage/cidr" method="post" id="cidrForm">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<input type="hidden" name="action" value="add" />
<input type="hidden" name="name" value="" />
<input type="hidden" name="mode" value="" />
<input type="hidden" name="mask" value="" />
</form>
<details>
<summary>Copy environment variables</summary>
<p>
If you have made changes with the UI, you can copy the values below to your deployment configuration to persist
them across server restarts.
</p>
<pre>
<% for (let i = 0; i < whitelists.length; i++) { %><%= whitelists[i].name %>=<%= whitelists[i].ranges.join(",") %><% } %>
<% for (let i = 0; i < blacklists.length; i++) { %><%= blacklists[i].name %>=<%= blacklists[i].ranges.join(",") %><% } %>
</pre>
</details>
</div>
<script>
function difficultyChanged(event) {
const value = event.target.value;
if (value === "custom") {
document.getElementById("custom-difficulty-container").style.display = "block";
} else {
document.getElementById("custom-difficulty-container").style.display = "none";
}
}
function doAction(action) {
document.getElementById("hiddenAction").value = action;
if (action === "setDifficulty") {
const selected = document.getElementById("difficulty").value;
const hiddenDifficulty = document.getElementById("hiddenDifficulty");
if (selected === "custom") {
hiddenDifficulty.value = document.getElementById("customDifficulty").value;
} else {
hiddenDifficulty.value = selected;
}
}
document.getElementById("maintenanceForm").submit();
}
function onAddCidr(event) {
const list = event.target.dataset;
const newMask = prompt("Enter the IP or CIDR range to add to the list:");
if (!newMask) {
return;
}
const form = document.getElementById("cidrForm");
form["action"].value = "add";
form["name"].value = list.name;
form["mode"].value = list.mode;
form["mask"].value = newMask;
form.submit();
}
function onRemoveCidr(event) {
const list = event.target.dataset;
const removeMask = event.target.dataset.mask;
if (!removeMask) {
return;
}
const form = document.getElementById("cidrForm");
form["action"].value = "remove";
form["name"].value = list.name;
form["mode"].value = list.mode;
form["mask"].value = removeMask;
form.submit();
}
</script>
<%- include("partials/admin-footer") %>
+2 -3
@@ -51,9 +51,8 @@
<legend>Temporary User Options</legend>
<div class="temporary-user-fieldset">
<p class="full-width">
Temporary users will be disabled after the specified duration, and their records will be deleted 72 hours after that.
These options apply only to new
temporary users; existing ones use whatever options were in effect when they were created.
Temporary users will be disabled after the specified duration, and their records will be permanently deleted after some time.
These options apply only to new temporary users; existing ones use whatever options were in effect when they were created.
</p>
<label for="temporaryUserDuration" class="full-width">Access duration (in minutes)</label>
<input type="number" name="temporaryUserDuration" id="temporaryUserDuration" value="60" class="full-width" />
+27 -36
@@ -5,18 +5,6 @@
flex-direction: column;
}
#statsForm div {
display: flex;
flex-direction: row;
margin-bottom: 0.5em;
}
#statsForm div label {
width: 6em;
text-align: right;
margin-right: 1em;
}
#statsForm ul {
margin: 0;
padding-left: 2em;
@@ -33,17 +21,17 @@
}
</style>
<h1>Download Stats</h1>
<p>
Download usage statistics to a Markdown document. You can paste this into a service like Rentry.org to share it.
</p>
<p>Download usage statistics to a Markdown document. You can paste this into a service like Rentry.org to share it.</p>
<div>
<h3>Options</h3>
<form id="statsForm" action="/admin/manage/generate-stats" method="post"
style="display: flex; flex-direction: column;">
<form
id="statsForm"
action="/admin/manage/generate-stats"
method="post"
style="display: flex; flex-direction: column">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<div>
<label for="anon">Anonymize</label>
<input id="anon" type="checkbox" name="anon" value="true" />
<label for="anon"><input id="anon" type="checkbox" name="anon" value="true" /> <span>Anonymize</span></label>
</div>
<div>
<label for="sort">Sort</label>
@@ -64,11 +52,12 @@
</select>
</div>
<div>
<label for="format">Custom Format <ul>
<li><code>{{header}}</code></li>
<li><code>{{stats}}</code></li>
<li><code>{{time}}</code></li>
</ul></label>
<label for="format">Custom Format</label>
<ul>
<li><code>{{header}}</code></li>
<li><code>{{stats}}</code></li>
<li><code>{{time}}</code></li>
</ul>
<textarea id="format" name="format" rows="10" cols="50" placeholder="{{stats}}">
# Stats
{{header}}
@@ -115,33 +104,35 @@
loadDefaults();
async function fetchAndCopy() {
const form = document.getElementById('statsForm');
const form = document.getElementById("statsForm");
const formData = new FormData(form);
const response = await fetch(form.action, {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
credentials: 'same-origin',
method: "POST",
headers: { "Content-Type": "application/x-www-form-urlencoded" },
credentials: "same-origin",
body: new URLSearchParams(formData),
});
if (response.ok) {
const content = await response.text();
copyToClipboard(content);
} else {
throw new Error('Failed to fetch generated stats. Try reloading the page.');
throw new Error("Failed to fetch generated stats. Try reloading the page.");
}
}
function copyToClipboard(text) {
navigator.clipboard.writeText(text).then(() => {
alert('Copied to clipboard');
}).catch(err => {
alert('Failed to copy to clipboard. Try downloading the file instead.');
});
navigator.clipboard
.writeText(text)
.then(() => {
alert("Copied to clipboard");
})
.catch((err) => {
alert("Failed to copy to clipboard. Try downloading the file instead.");
});
}
document.getElementById('copyButton').addEventListener('click', fetchAndCopy);
document.getElementById("copyButton").addEventListener("click", fetchAndCopy);
</script>
<%- include("partials/admin-footer") %>
+11 -5
@@ -18,13 +18,19 @@
</li>
<li>
<code>tokenCounts</code> (optional): the number of tokens the user has
consumed. This should be an object with keys <code>turbo</code>,
<code>gpt4</code>, and <code>claude</code>.
consumed. This should be an object with model family keys (e.g. <code>turbo</code>,
<code>gpt4</code>, <code>claude</code>), each containing an object with
<code>input</code> and <code>output</code> token counts.
</li>
<li>
<code>tokenLimits</code> (optional): the number of tokens the user can
consume. This should be an object with keys <code>turbo</code>,
<code>gpt4</code>, and <code>claude</code>.
<code>tokenLimits</code> (optional): the maximum number of tokens the user can
consume. This should be an object with model family keys (e.g. <code>turbo</code>,
<code>gpt4</code>, <code>claude</code>), each containing a single number
representing the total token quota.
</li>
<li>
<code>tokenRefresh</code> (optional): the amount of tokens to refresh when quotas
are reset. Same format as <code>tokenLimits</code>.
</li>
<li>
<code>createdAt</code> (optional): the timestamp when the user was created
+10 -2
@@ -25,13 +25,14 @@
<li><a href="/admin/manage/import-users">Import Users</a></li>
<li><a href="/admin/manage/export-users">Export Users</a></li>
<li><a href="/admin/manage/download-stats">Download Rentry Stats</a>
<li><a href="/admin/manage/anti-abuse">Abuse Mitigation Settings</a></li>
<li><a href="/admin/service-info">Service Info</a></li>
</ul>
<h3>Maintenance</h3>
<form id="maintenanceForm" action="/admin/manage/maintenance" method="post">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<input id="hiddenAction" type="hidden" name="action" value="" />
<div display="flex" flex-direction="column">
<div>
<fieldset>
<legend>Key Recheck</legend>
<button id="recheck-keys" type="button" onclick="submitForm('recheck')">Force Key Recheck</button>
@@ -42,7 +43,7 @@
<legend>Bulk Quota Management</legend>
<p>
<button id="refresh-quotas" type="button" onclick="submitForm('resetQuotas')">Refresh All Quotas</button>
Resets all users' quotas to the values set in the <code>TOKEN_QUOTA_*</code> environment variables.
Immediately refreshes all users' quotas by the configured amounts.
</p>
<p>
<button id="clear-token-counts" type="button" onclick="submitForm('resetCounts')">Clear All Token Counts</button>
@@ -50,6 +51,13 @@
</p>
</fieldset>
<% } %>
<% if (imageGenerationEnabled) { %>
<fieldset>
<legend>Image Generation</legend>
<button id="download-image-metadata" type="button" onclick="submitForm('downloadImageMetadata')">Download Image Metadata</button>
<label for="download-image-metadata">Downloads a metadata file containing URL, prompt, and truncated user token for all cached images.</label>
</fieldset>
<% } %>
</div>
</form>
+2 -3
View File
@@ -4,9 +4,8 @@
<% if (users.length === 0) { %>
<p>No users found.</p>
<% } else { %>
<input type="checkbox" id="toggle-nicknames" onchange="toggleNicknames()" />
<label for="toggle-nicknames">Show Nicknames</label>
<table>
<label for="toggle-nicknames"><input type="checkbox" id="toggle-nicknames" onchange="toggleNicknames()" /> Show Nicknames</label>
<table class="striped full-width">
<thead>
<tr>
<th>User</th>
+33 -14
@@ -55,8 +55,9 @@
<td><%- user.disabledReason %></td>
<% if (user.disabledAt) { %>
<td class="actions">
<a title="Edit" id="edit-disabledReason" href="#" data-field="disabledReason"
data-token="<%= user.token %>">✏️</a>
<a title="Edit" id="edit-disabledReason" href="#" data-field="disabledReason" data-token="<%= user.token %>"
>✏️</a
>
</td>
<% } %>
</tr>
@@ -72,7 +73,8 @@
<td colspan="2"><%- include("partials/shared_user_ip_list", { user, shouldRedact: false }) %></td>
</tr>
<tr>
<th scope="row">Admin Note <span title="Unlike nickname, this is not visible to or editable by the user">🔒</span>
<th scope="row">
Admin Note <span title="Unlike nickname, this is not visible to or editable by the user">🔒</span>
</th>
<td><%- user.adminNote ?? "none" %></td>
<td class="actions">
@@ -85,14 +87,24 @@
<td colspan="2"><%- user.expiresAt %></td>
</tr>
<% } %>
<% if (user.meta) { %>
<tr>
<th scope="row">Meta</th>
<td colspan="2"><%- JSON.stringify(user.meta) %></td>
</tr>
<% } %>
</tbody>
</table>
<form style="display:none" id="current-values">
<form style="display: none" id="current-values">
<input type="hidden" name="token" value="<%- user.token %>" />
<% ["nickname", "type", "disabledAt", "disabledReason", "maxIps", "adminNote"].forEach(function (key) { %>
<input type="hidden" name="<%- key %>" value="<%- user[key] %>" />
<% }); %>
<!-- tokenRefresh_ keys are dynamically generated -->
<% Object.entries(quota).forEach(([family]) => { %>
<input type="hidden" name="tokenRefresh_<%- family %>" value="<%- user.tokenRefresh[family] || quota[family] %>" />
<% }); %>
</form>
<h3>Quota Information</h3>
@@ -102,7 +114,8 @@
<input type="hidden" name="_csrf" value="<%- csrfToken %>" />
<button type="submit" class="btn btn-primary">Refresh Quotas for User</button>
</form>
<% } %> <%- include("partials/shared_quota-info", { quota, user }) %>
<% } %>
<%- include("partials/shared_quota-info", { quota, user, showRefreshEdit: true }) %>
<p><a href="/admin/manage/list-users">Back to User List</a></p>
@@ -113,18 +126,25 @@
const token = a.dataset.token;
const field = a.dataset.field;
const existingValue = document.querySelector(`#current-values input[name=${field}]`).value;
let value = prompt(`Enter new value for '${field}'':`, existingValue);
let value = prompt(`Enter new value for '${field}':`, existingValue);
if (value !== null) {
if (value === "") {
value = null;
}
const payload = { _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") };
if (field.startsWith("tokenRefresh_")) {
const family = field.slice("tokenRefresh_".length);
payload.tokenRefresh = { [family]: Number(value) };
} else {
payload[field] = value;
}
fetch(`/admin/manage/edit-user/${token}`, {
method: "POST",
credentials: "same-origin",
body: JSON.stringify({
[field]: value,
_csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content"),
}),
body: JSON.stringify(payload),
headers: { "Content-Type": "application/json", Accept: "application/json" },
})
.then((res) => Promise.all([res.ok, res.json()]))
@@ -132,9 +152,7 @@
const url = new URL(window.location.href);
const params = new URLSearchParams();
if (!ok) {
params.set("flash", `error: ${json.error.message}`);
} else {
params.set("flash", `success: User's ${field} updated.`);
alert(`Failed to edit user: ${json.message}`);
}
url.search = params.toString();
window.location.assign(url);
@@ -144,4 +162,5 @@
});
</script>
<%- include("partials/admin-ban-xhr-script") %> <%- include("partials/admin-footer") %>
<%- include("partials/admin-ban-xhr-script") %>
<%- include("partials/admin-footer") %>
+13
@@ -0,0 +1,13 @@
<h3>
<%= list.name %>
(<%= list.mode %>)
</h3>
<ul>
<% list.ranges.forEach(function(mask) { %>
<li>
<%= mask %>
<button class="remove" data-mode="<%= list.mode %>" data-name="<%= list.name %>" data-mask="<%= mask %>" onclick="onRemoveCidr(event)">Remove</button>
</li>
<% }); %>
</ul>
<button class="add" data-mode="<%= list.mode %>" data-name="<%= list.name %>" onclick="onAddCidr(event)">Add</button>
+452 -64
@@ -1,8 +1,9 @@
import crypto from "crypto";
import dotenv from "dotenv";
import type firebase from "firebase-admin";
import path from "path";
import pino from "pino";
import type { ModelFamily } from "./shared/models";
import type { LLMService, ModelFamily } from "./shared/models";
import { MODEL_FAMILIES } from "./shared/models";
dotenv.config();
@@ -16,6 +17,8 @@ export const USER_ASSETS_DIR = path.join(DATA_DIR, "user-files");
type Config = {
/** The port the proxy server will listen on. */
port: number;
/** The network interface the proxy server will listen on. */
bindAddress: string;
/** Comma-delimited list of OpenAI API keys. */
openaiKey?: string;
/** Comma-delimited list of Anthropic API keys. */
@@ -26,10 +29,40 @@ type Config = {
* same but the APIs are different. Vertex is the GCP product for enterprise.
**/
googleAIKey?: string;
/**
* Comma-delimited list of Google AI experimental model names that are
* allowed to bypass the experimental model block. By default, all models
* containing "exp" are blocked, but specific models listed here will be
* permitted.
*
* @example "gemini-2.0-flash-exp,gemini-exp-1206"
*/
allowedExpModels?: string;
/**
* Comma-delimited list of Mistral AI API keys.
*/
mistralAIKey?: string;
/**
* Comma-delimited list of Deepseek API keys.
*/
deepseekKey?: string;
/**
* Comma-delimited list of Xai (Grok) API keys.
*/
xaiKey?: string;
/**
* Comma-delimited list of Cohere API keys.
*/
cohereKey?: string;
/**
* Comma-delimited list of Qwen API keys.
*/
qwenKey?: string;
/**
* Comma-delimited list of Moonshot API keys.
*/
moonshotKey?: string;
/**
* Comma-delimited list of AWS credentials. Each credential item should be a
* colon-delimited list of access key, secret key, and AWS region.
@@ -42,6 +75,13 @@ type Config = {
* @example `AWS_CREDENTIALS=access_key_1:secret_key_1:us-east-1,access_key_2:secret_key_2:us-west-2`
*/
awsCredentials?: string;
/**
* Comma-delimited list of GCP credentials. Each credential item should be a
* colon-delimited list of access key, secret key, and GCP region.
*
* @example `GCP_CREDENTIALS=project1:1@1.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----,project2:2@2.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----`
*/
gcpCredentials?: string;
/**
* Comma-delimited list of Azure OpenAI credentials. Each credential item
* should be a colon-delimited list of Azure resource name, deployment ID, and
@@ -79,10 +119,14 @@ type Config = {
* - `memory`: Users are stored in memory and are lost on restart (default)
* - `firebase_rtdb`: Users are stored in a Firebase Realtime Database;
* requires `firebaseKey` and `firebaseRtdbUrl` to be set.
* - `sqlite`: Users are stored in an SQLite database; requires
* `sqliteUserStorePath` to be set.
*/
gatekeeperStore: "memory" | "firebase_rtdb";
gatekeeperStore: "memory" | "firebase_rtdb" | "sqlite";
/** URL of the Firebase Realtime Database if using the Firebase RTDB store. */
firebaseRtdbUrl?: string;
/** Path to the SQLite database file for storing user data. */
sqliteUserStorePath?: string;
/**
* Base64-encoded Firebase service account key if using the Firebase RTDB
* store. Note that you should encode the *entire* JSON key file, not just the
@@ -100,9 +144,70 @@ type Config = {
* `maxIpsPerUser` limit, or whether only connections from new IPs are rejected.
*/
maxIpsAutoBan: boolean;
/** Per-IP limit for requests per minute to text and chat models. */
/**
* Which captcha verification mode to use. Requires `user_token` gatekeeper.
* Allows users to automatically obtain a token by solving a captcha.
* - `none`: No captcha verification; tokens are issued manually.
* - `proof_of_work`: Users must solve an Argon2 proof of work to obtain a
* temporary user token valid for a limited period.
*/
captchaMode: "none" | "proof_of_work";
/**
* Duration (in hours) for which a PoW-issued temporary user token is valid.
*/
powTokenHours: number;
/**
* The maximum number of IPs from which a single temporary user token can be
* used. Upon reaching the limit, the `maxIpsAutoBan` behavior is triggered.
*/
powTokenMaxIps: number;
/**
* Difficulty level for the proof-of-work challenge.
* - `low`: 200 iterations
* - `medium`: 900 iterations
* - `high`: 1900 iterations
* - `extreme`: 4000 iterations
* - `number`: A custom number of iterations to use.
*
* Difficulty level only affects the number of iterations used in the PoW,
* not the complexity of the hash itself. Therefore, the average time-to-solve
* will scale linearly with the number of iterations.
*
* Refer to docs/proof-of-work.md for guidance and hashrate benchmarks.
*/
powDifficultyLevel: "low" | "medium" | "high" | "extreme" | number;
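// Back-of-envelope example (assumed hashrate, not a benchmark): at `high`
// (1900 expected iterations), a device hashing ~20 H/s solves in about
// 1900 / 20 = 95 seconds on average; at `low` (200), about 10 seconds.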
/**
* Duration (in minutes) before a PoW challenge expires. Users' browsers must
* solve the challenge within this time frame or it will be rejected. Should
* be kept somewhat low to prevent abusive clients from working on many
* challenges in parallel, but you may need to increase this value for higher
* difficulty levels, or older devices will not be able to solve the challenge
* in time.
*
* Defaults to 30 minutes.
*/
powChallengeTimeout: number;
/**
* Duration (in hours) before expired temporary user tokens are purged from
* the user database. Users can refresh expired tokens by solving a faster PoW
* challenge as long as the original token has not been purged. Once purged,
* the user must solve a full PoW challenge to obtain a new token.
*
* Defaults to 48 hours. At 0, tokens are purged immediately upon expiry.
*/
powTokenPurgeHours: number;
/**
* Maximum number of active temporary user tokens that can be associated with
* a single IP address. Note that this may impact users sending requests from
* hosted AI chat clients such as Agnaistic or RisuAI, as they may share IPs.
*
* When the limit is reached, the oldest token with the same IP will be
* expired. At 0, no limit is enforced. Defaults to 0.
*/
// powMaxTokensPerIp: number;
/** Per-user limit for requests per minute to text and chat models. */
textModelRateLimit: number;
/** Per-IP limit for requests per minute to image generation models. */
/** Per-user limit for requests per minute to image generation models. */
imageModelRateLimit: number;
/**
* For OpenAI, the maximum number of context tokens (prompt + max output) a
@@ -139,10 +244,38 @@ type Config = {
* key and can't attach the policy, you can set this to true.
*/
allowAwsLogging?: boolean;
/**
* Path to the SQLite database file for storing data such as event logs. By
* default, the database will be stored at `data/database.sqlite`.
*
* Ensure the target is writable by the server process, and be careful not to
* select a path that is served publicly. The default path is safe.
*/
sqliteDataPath?: string;
/**
* Whether to log events, such as generated completions, to the database.
* Events are associated with IP+user token pairs. If user_token mode is
* disabled, no events will be logged.
*
* Currently there is no pruning mechanism for the events table, so it will
* grow indefinitely. You may want to periodically prune the table manually.
*/
eventLogging?: boolean;
/**
* When hashing prompt histories, how many messages to trim from the end.
* If zero, only the full prompt hash will be stored.
* If greater than zero, for each number N, a hash of the prompt with the
* last N messages removed will be stored.
*
* Experimental function, config may change in future versions.
*/
eventLoggingTrim?: number;
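// Illustrative example: with eventLoggingTrim = 2 and a five-message prompt
// [m1..m5], hashes would be stored for [m1..m5], [m1..m4], and [m1..m3].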
/** Whether prompts and responses should be logged to persistent storage. */
promptLogging?: boolean;
/** Which prompt logging backend to use. */
promptLoggingBackend?: "google_sheets";
promptLoggingBackend?: "google_sheets" | "file";
/** Prefix for prompt logging files when using the file backend. */
promptLoggingFilePrefix?: string;
/** Base64-encoded Google Sheets API key. */
googleSheetsKey?: string;
/** Google Sheets spreadsheet ID. */
@@ -198,57 +331,190 @@ type Config = {
* configured ADMIN_KEY and go to /admin/service-info.
**/
staticServiceInfo?: boolean;
/**
* Trusted proxy hops. If you are deploying the server behind a reverse proxy
* (Nginx, Cloudflare Tunnel, AWS WAF, etc.) the IP address of incoming
* requests will be the IP address of the proxy, not the actual user.
*
* Depending on your hosting configuration, there may be multiple proxies/load
* balancers between your server and the user. Each one will append the
* incoming IP address to the `X-Forwarded-For` header. The user's real IP
* address will be the first one in the list, assuming the header has not been
* tampered with. Setting this value correctly ensures that the server doesn't
* trust values in `X-Forwarded-For` not added by trusted proxies.
*
* In order for the server to determine the user's real IP address, you need
* to tell it how many proxies are between the user and the server so it can
* select the correct IP address from the `X-Forwarded-For` header.
*
* *WARNING:* If you set it incorrectly, the proxy will either record the
* wrong IP address, or it will be possible for users to spoof their IP
* addresses and bypass rate limiting. Check the request logs to see what
* incoming X-Forwarded-For values look like.
*
* Examples:
* - X-Forwarded-For: "34.1.1.1, 172.1.1.1, 10.1.1.1" => trustedProxies: 3
* - X-Forwarded-For: "34.1.1.1" => trustedProxies: 1
* - no X-Forwarded-For header => trustedProxies: 0 (the actual IP of the incoming request will be used)
*
* As of 2024/01/08:
* For HuggingFace or Cloudflare Tunnel, use 1.
* For Render, use 3.
* For deployments not behind a load balancer, use 0.
*
* You should double check against your actual request logs to be sure.
*
* Defaults to 1, as most deployments are on HuggingFace or Cloudflare Tunnel.
*/
trustedProxies?: number;
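// Sketch of the selection rule described above (illustrative only, not the
// actual implementation):
//   const hops = String(req.headers["x-forwarded-for"] ?? "")
//     .split(",").map((h) => h.trim());
//   // the last `trustedProxies` entries were appended by our own proxies,
//   // so the client is that many entries from the right:
//   const clientIp = hops[hops.length - trustedProxies] ?? req.socket.remoteAddress;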
/**
* Whether to allow OpenAI tool usage. The proxy doesn't implement any
* support for tools/function calling but can pass requests and responses as
* is. Note that the proxy also cannot accurately track quota usage for
* requests involving tools, so you must opt in to this feature at your own
* risk.
*/
allowOpenAIToolUsage?: boolean;
/**
* Which services will accept prompts containing images, for use with
* multimodal models. Users with `special` role are exempt from this
* restriction.
*
* Do not enable this feature for untrusted users, as malicious users could
* send images which violate your provider's terms of service or local laws.
*
* Defaults to no services, meaning image prompts are disabled. Use a comma-
* separated list. Available services are:
* openai,anthropic,google-ai,mistral-ai,aws,gcp,azure,xai
*/
allowedVisionServices: LLMService[];
/**
* Allows overriding the default proxy endpoint route. Defaults to /proxy.
* A leading slash is required.
*/
proxyEndpointRoute: string;
/**
* If set, only requests from these IP addresses will be permitted to use the
* admin API and UI. Provide a comma-separated list of IP addresses or CIDR
* ranges. If not set, the admin API and UI will be open to all requests.
*/
adminWhitelist: string[];
/**
* If set, requests from these IP addresses will be blocked from using the
* application. Provide a comma-separated list of IP addresses or CIDR ranges.
* If not set, no IP addresses will be blocked.
*
* Takes precedence over the adminWhitelist.
*/
ipBlacklist: string[];
/**
* If set, pushes requests further back into the queue according to their
* token costs by factor*tokens*milliseconds (or more intuitively
* factor*thousands_of_tokens*seconds).
* Accepts floats.
*/
tokensPunishmentFactor: number;
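/*
* A worked example of the formula above: with tokensPunishmentFactor = 0.1, a
* request costing 4,000 tokens is pushed back by 0.1 * 4000 = 400 ms
* (equivalently 0.1 * 4 thousand-tokens * 1 s = 0.4 s).
*/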
/**
* Configuration for HTTP requests made by the proxy to other servers, such
* as when checking keys or forwarding users' requests to external services.
* If not set, all requests will be made using the default agent.
*
* If set, the proxy may make requests to other servers using the specified
* settings. This is useful if you wish to route users' requests through
* another proxy or VPN, or if you have multiple network interfaces and want
* to use a specific one for outgoing requests.
*/
httpAgent?: {
/**
* The name of the network interface to use. The first external IPv4 address
* belonging to this interface will be used for outgoing requests.
*/
interface?: string;
/**
* The URL of a proxy server to use. Supports SOCKS4, SOCKS5, HTTP, and
* HTTPS. If not set, requests will be made using the default agent.
* - SOCKS4: `socks4://some-socks-proxy.com:9050`
* - SOCKS5: `socks5://username:password@some-socks-proxy.com:9050`
* - HTTP: `http://proxy-server-over-tcp.com:3128`
* - HTTPS: `https://proxy-server-over-tls.com:3129`
*
* **Note:** If your proxy server uses a self-signed certificate, you may need
* to set `NODE_EXTRA_CA_CERTS` to the path of that certificate, otherwise this
* application will reject TLS connections.
*/
proxyUrl?: string;
};
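/*
* A hypothetical sketch of how proxyUrl could be turned into an agent. The
* `socks-proxy-agent` package is an assumption about one common approach, not
* necessarily what this application uses:
*
* import https from "https";
* import { SocksProxyAgent } from "socks-proxy-agent";
* const agent = new SocksProxyAgent("socks5://user:pass@some-socks-proxy.com:9050");
* https.get("https://api.example.com/", { agent }, (res) => res.resume());
*/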
/** URL for the image on the login page. Defaults to empty string (no image). */
loginImageUrl?: string;
/** Whether to enable the token-based login page for the service info page. Defaults to true. */
enableInfoPageLogin?: boolean;
/** Authentication mode for the service info page. (token | password) */
serviceInfoAuthMode: "token" | "password";
/** Password for the service info page if serviceInfoAuthMode is 'password'. */
serviceInfoPassword?: string;
};
// To change configs, create a file called .env in the root directory.
// See .env.example for an example.
export const config: Config = {
port: getEnvWithDefault("PORT", 7860),
bindAddress: getEnvWithDefault("BIND_ADDRESS", "0.0.0.0"),
openaiKey: getEnvWithDefault("OPENAI_KEY", ""),
anthropicKey: getEnvWithDefault("ANTHROPIC_KEY", ""),
qwenKey: getEnvWithDefault("QWEN_KEY", ""),
googleAIKey: getEnvWithDefault("GOOGLE_AI_KEY", ""),
allowedExpModels: getEnvWithDefault("ALLOWED_EXP_MODELS", ""),
mistralAIKey: getEnvWithDefault("MISTRAL_AI_KEY", ""),
deepseekKey: getEnvWithDefault("DEEPSEEK_KEY", ""),
xaiKey: getEnvWithDefault("XAI_KEY", ""),
cohereKey: getEnvWithDefault("COHERE_KEY", ""),
moonshotKey: getEnvWithDefault("MOONSHOT_KEY", ""),
awsCredentials: getEnvWithDefault("AWS_CREDENTIALS", ""),
gcpCredentials: getEnvWithDefault("GCP_CREDENTIALS", ""),
azureCredentials: getEnvWithDefault("AZURE_CREDENTIALS", ""),
proxyKey: getEnvWithDefault("PROXY_KEY", ""),
adminKey: getEnvWithDefault("ADMIN_KEY", ""),
sqliteDataPath: getEnvWithDefault(
"SQLITE_DATA_PATH",
path.join(DATA_DIR, "database.sqlite")
),
eventLogging: getEnvWithDefault("EVENT_LOGGING", false),
eventLoggingTrim: getEnvWithDefault("EVENT_LOGGING_TRIM", 5),
gatekeeper: getEnvWithDefault("GATEKEEPER", "none"),
gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory"),
gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory") as Config["gatekeeperStore"],
sqliteUserStorePath: getEnvWithDefault(
"SQLITE_USER_STORE_PATH",
path.join(DATA_DIR, "user-store.sqlite")
),
maxIpsPerUser: getEnvWithDefault("MAX_IPS_PER_USER", 0),
maxIpsAutoBan: getEnvWithDefault("MAX_IPS_AUTO_BAN", true),
maxIpsAutoBan: getEnvWithDefault("MAX_IPS_AUTO_BAN", false),
captchaMode: getEnvWithDefault("CAPTCHA_MODE", "none"),
powTokenHours: getEnvWithDefault("POW_TOKEN_HOURS", 24),
powTokenMaxIps: getEnvWithDefault("POW_TOKEN_MAX_IPS", 2),
powDifficultyLevel: getEnvWithDefault("POW_DIFFICULTY_LEVEL", "low"),
powChallengeTimeout: getEnvWithDefault("POW_CHALLENGE_TIMEOUT", 30),
powTokenPurgeHours: getEnvWithDefault("POW_TOKEN_PURGE_HOURS", 48),
firebaseRtdbUrl: getEnvWithDefault("FIREBASE_RTDB_URL", undefined),
firebaseKey: getEnvWithDefault("FIREBASE_KEY", undefined),
textModelRateLimit: getEnvWithDefault("TEXT_MODEL_RATE_LIMIT", 4),
imageModelRateLimit: getEnvWithDefault("IMAGE_MODEL_RATE_LIMIT", 4),
maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 16384),
maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 32768),
maxContextTokensAnthropic: getEnvWithDefault(
"MAX_CONTEXT_TOKENS_ANTHROPIC",
0
32768
),
maxOutputTokensOpenAI: getEnvWithDefault(
["MAX_OUTPUT_TOKENS_OPENAI", "MAX_OUTPUT_TOKENS"],
400
1024
),
maxOutputTokensAnthropic: getEnvWithDefault(
["MAX_OUTPUT_TOKENS_ANTHROPIC", "MAX_OUTPUT_TOKENS"],
400
1024
),
allowedModelFamilies: getEnvWithDefault(
"ALLOWED_MODEL_FAMILIES",
getDefaultModelFamilies()
),
allowedModelFamilies: getEnvWithDefault("ALLOWED_MODEL_FAMILIES", [
"turbo",
"gpt4",
"gpt4-32k",
"gpt4-turbo",
"claude",
"gemini-pro",
"mistral-tiny",
"mistral-small",
"mistral-medium",
"aws-claude",
"azure-turbo",
"azure-gpt4",
"azure-gpt4-turbo",
"azure-gpt4-32k",
]),
rejectPhrases: parseCsv(getEnvWithDefault("REJECT_PHRASES", "")),
rejectMessage: getEnvWithDefault(
"REJECT_MESSAGE",
@@ -260,6 +526,10 @@ export const config: Config = {
allowAwsLogging: getEnvWithDefault("ALLOW_AWS_LOGGING", false),
promptLogging: getEnvWithDefault("PROMPT_LOGGING", false),
promptLoggingBackend: getEnvWithDefault("PROMPT_LOGGING_BACKEND", undefined),
promptLoggingFilePrefix: getEnvWithDefault(
"PROMPT_LOGGING_FILE_PREFIX",
"prompt-logs"
),
googleSheetsKey: getEnvWithDefault("GOOGLE_SHEETS_KEY", undefined),
googleSheetsSpreadsheetId: getEnvWithDefault(
"GOOGLE_SHEETS_SPREADSHEET_ID",
@@ -286,19 +556,64 @@ export const config: Config = {
showRecentImages: getEnvWithDefault("SHOW_RECENT_IMAGES", true),
useInsecureCookies: getEnvWithDefault("USE_INSECURE_COOKIES", isDev),
staticServiceInfo: getEnvWithDefault("STATIC_SERVICE_INFO", false),
trustedProxies: getEnvWithDefault("TRUSTED_PROXIES", 1),
allowOpenAIToolUsage: getEnvWithDefault("ALLOW_OPENAI_TOOL_USAGE", false),
allowedVisionServices: parseCsv(
getEnvWithDefault("ALLOWED_VISION_SERVICES", "")
) as LLMService[],
proxyEndpointRoute: getEnvWithDefault("PROXY_ENDPOINT_ROUTE", "/proxy"),
adminWhitelist: parseCsv(
getEnvWithDefault("ADMIN_WHITELIST", "0.0.0.0/0,::/0")
),
ipBlacklist: parseCsv(getEnvWithDefault("IP_BLACKLIST", "")),
tokensPunishmentFactor: getEnvWithDefault("TOKENS_PUNISHMENT_FACTOR", 0.0),
httpAgent: {
interface: getEnvWithDefault("HTTP_AGENT_INTERFACE", undefined),
proxyUrl: getEnvWithDefault("HTTP_AGENT_PROXY_URL", undefined),
},
loginImageUrl: getEnvWithDefault("LOGIN_IMAGE_URL", ""),
enableInfoPageLogin: getEnvWithDefault("ENABLE_INFO_PAGE_LOGIN", true),
serviceInfoAuthMode: getEnvWithDefault("SERVICE_INFO_AUTH_MODE", "token") as Config["serviceInfoAuthMode"],
serviceInfoPassword: getEnvWithDefault("SERVICE_INFO_PASSWORD", undefined),
} as const;
function generateCookieSecret() {
function generateSigningKey() {
if (process.env.COOKIE_SECRET !== undefined) {
// legacy, replaced by SIGNING_KEY
return process.env.COOKIE_SECRET;
} else if (process.env.SIGNING_KEY !== undefined) {
return process.env.SIGNING_KEY;
}
const seed = "" + config.adminKey + config.openaiKey + config.anthropicKey;
const crypto = require("crypto");
const secrets = [
config.adminKey,
config.openaiKey,
config.anthropicKey,
config.googleAIKey,
config.mistralAIKey,
config.deepseekKey,
config.xaiKey,
config.awsCredentials,
config.gcpCredentials,
config.azureCredentials,
];
if (secrets.filter((s) => s).length === 0) {
startupLogger.warn(
"No SIGNING_KEY or secrets are set. All sessions, cookies, and proofs of work will be invalidated on restart."
);
return crypto.randomBytes(32).toString("hex");
}
startupLogger.info("No SIGNING_KEY set; one will be generated from secrets.");
startupLogger.info(
"It's recommended to set SIGNING_KEY explicitly to ensure users' sessions and cookies always persist across restarts."
);
const seed = secrets.map((s) => s || "n/a").join("");
return crypto.createHash("sha256").update(seed).digest("hex");
}
export const COOKIE_SECRET = generateCookieSecret();
const signingKey = generateSigningKey();
export const SECRET_SIGNING_KEY = signingKey;
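/*
* A minimal sketch of how a derived key like SECRET_SIGNING_KEY is typically
* consumed; the payload string is illustrative, createHmac is Node's standard
* crypto API:
*
* import crypto from "crypto";
* const signature = crypto
* .createHmac("sha256", SECRET_SIGNING_KEY)
* .update("session-cookie-payload")
* .digest("hex");
*/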
export async function assertConfigIsValid() {
if (process.env.MODEL_RATE_LIMIT !== undefined) {
@@ -314,6 +629,23 @@ export async function assertConfigIsValid() {
);
}
if (process.env.ALLOW_IMAGE_PROMPTS === "true") {
const hasAllowedServices = config.allowedVisionServices.length > 0;
if (!hasAllowedServices) {
config.allowedVisionServices = ["openai", "anthropic"];
startupLogger.warn(
{ allowedVisionServices: config.allowedVisionServices },
"ALLOW_IMAGE_PROMPTS is deprecated. Use ALLOWED_VISION_SERVICES instead."
);
}
}
if (config.promptLogging && !config.promptLoggingBackend) {
throw new Error(
"Prompt logging is enabled but no backend is configured. Set PROMPT_LOGGING_BACKEND to 'google_sheets' or 'file'."
);
}
if (!["none", "proxy_key", "user_token"].includes(config.gatekeeper)) {
throw new Error(
`Invalid gatekeeper mode: ${config.gatekeeper}. Must be one of: none, proxy_key, user_token.`
@@ -326,15 +658,32 @@ export async function assertConfigIsValid() {
);
}
if (config.gatekeeper === "proxy_key" && !config.proxyKey) {
if (
config.captchaMode === "proof_of_work" &&
config.gatekeeper !== "user_token"
) {
throw new Error(
"`proxy_key` gatekeeper mode requires a `PROXY_KEY` to be set."
"Captcha mode 'proof_of_work' requires gatekeeper mode 'user_token'."
);
}
if (config.gatekeeper !== "proxy_key" && config.proxyKey) {
if (config.captchaMode === "proof_of_work") {
const val = config.powDifficultyLevel;
const isDifficulty =
typeof val === "string" &&
["low", "medium", "high", "extreme"].includes(val);
const isIterations =
typeof val === "number" && Number.isInteger(val) && val > 0;
if (!isDifficulty && !isIterations) {
throw new Error(
"Invalid POW_DIFFICULTY_LEVEL. Must be one of: low, medium, high, extreme, or a positive integer."
);
}
}
if (config.gatekeeper === "proxy_key" && !config.proxyKey) {
throw new Error(
"`PROXY_KEY` is set, but gatekeeper mode is not `proxy_key`. Make sure to set `GATEKEEPER=proxy_key`."
"`proxy_key` gatekeeper mode requires a `PROXY_KEY` to be set."
);
}
@@ -347,6 +696,41 @@ export async function assertConfigIsValid() {
);
}
if (config.gatekeeperStore === "sqlite" && !config.sqliteUserStorePath) {
throw new Error(
"SQLite user store requires `SQLITE_USER_STORE_PATH` to be set."
);
}
if (Object.values(config.httpAgent || {}).filter(Boolean).length === 0) {
delete config.httpAgent;
} else if (config.httpAgent) {
if (config.httpAgent.interface && config.httpAgent.proxyUrl) {
throw new Error(
"Cannot set both `HTTP_AGENT_INTERFACE` and `HTTP_AGENT_PROXY_URL`."
);
}
}
if (config.enableInfoPageLogin) {
if (!["token", "password"].includes(config.serviceInfoAuthMode)) {
throw new Error(
`Invalid SERVICE_INFO_AUTH_MODE: ${config.serviceInfoAuthMode}. Must be 'token' or 'password'.`
);
}
if (config.serviceInfoAuthMode === "password" && !config.serviceInfoPassword) {
throw new Error(
"SERVICE_INFO_AUTH_MODE is 'password' but SERVICE_INFO_PASSWORD is not set."
);
}
// If service info login is token-based, gatekeeper must be 'user_token' mode for getUser() to be effective.
if (config.serviceInfoAuthMode === "token" && config.gatekeeper !== "user_token") {
throw new Error(
"SERVICE_INFO_AUTH_MODE is 'token' for info page login, but GATEKEEPER is not 'user_token'. User token authentication will not work."
);
}
}
// Ensure forks which add new secret-like config keys don't unwittingly expose
// them to users.
for (const key of getKeys(config)) {
@@ -360,15 +744,16 @@ export async function assertConfigIsValid() {
`Config key "${key}" may be sensitive but is exposed. Add it to SENSITIVE_KEYS or OMITTED_KEYS.`
);
}
await maybeInitializeFirebase();
}
/**
* Config keys that are masked on the info page, but not hidden as their
* presence may be relevant to the user due to privacy implications.
*/
export const SENSITIVE_KEYS: (keyof Config)[] = ["googleSheetsSpreadsheetId"];
export const SENSITIVE_KEYS: (keyof Config)[] = [
"googleSheetsSpreadsheetId",
"httpAgent",
];
/**
* Config keys that are not displayed on the info page at all, generally because
@@ -376,20 +761,33 @@ export const SENSITIVE_KEYS: (keyof Config)[] = ["googleSheetsSpreadsheetId"];
*/
export const OMITTED_KEYS = [
"port",
"bindAddress",
"logLevel",
"openaiKey",
"anthropicKey",
"googleAIKey",
"deepseekKey",
"xaiKey",
"cohereKey",
"qwenKey",
"moonshotKey",
"mistralAIKey",
"awsCredentials",
"gcpCredentials",
"azureCredentials",
"proxyKey",
"adminKey",
"rejectPhrases",
"rejectMessage",
"showTokenCosts",
"promptLoggingFilePrefix",
"googleSheetsKey",
"firebaseKey",
"firebaseRtdbUrl",
"sqliteDataPath",
"sqliteUserStorePath",
"eventLogging",
"eventLoggingTrim",
"gatekeeperStore",
"maxIpsPerUser",
"blockedOrigins",
@@ -401,6 +799,14 @@ export const OMITTED_KEYS = [
"staticServiceInfo",
"checkKeys",
"allowedModelFamilies",
"trustedProxies",
"proxyEndpointRoute",
"adminWhitelist",
"ipBlacklist",
"powTokenPurgeHours",
"loginImageUrl",
"enableInfoPageLogin",
"serviceInfoPassword",
] satisfies (keyof Config)[];
type OmitKeys = (typeof OMITTED_KEYS)[number];
@@ -461,7 +867,9 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
"ANTHROPIC_KEY",
"GOOGLE_AI_KEY",
"AWS_CREDENTIALS",
"GCP_CREDENTIALS",
"AZURE_CREDENTIALS",
"QWEN_KEY",
].includes(String(env))
) {
return value as unknown as T;
@@ -478,32 +886,6 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
}
}
let firebaseApp: firebase.app.App | undefined;
async function maybeInitializeFirebase() {
if (!config.gatekeeperStore.startsWith("firebase")) {
return;
}
const firebase = await import("firebase-admin");
const firebaseKey = Buffer.from(config.firebaseKey!, "base64").toString();
const app = firebase.initializeApp({
credential: firebase.credential.cert(JSON.parse(firebaseKey)),
databaseURL: config.firebaseRtdbUrl,
});
await app.database().ref("connection-test").set(Date.now());
firebaseApp = app;
}
export function getFirebaseApp(): firebase.app.App {
if (!firebaseApp) {
throw new Error("Firebase app not initialized.");
}
return firebaseApp;
}
function parseCsv(val: string): string[] {
if (!val) return [];
@@ -511,3 +893,9 @@ function parseCsv(val: string): string[] {
const matches = val.match(regex) || [];
return matches.map((item) => item.replace(/^"|"$/g, "").trim());
}
function getDefaultModelFamilies(): ModelFamily[] {
return MODEL_FAMILIES.filter(
(f) => !f.includes("o1-pro") && !f.includes("o3-pro")
) as ModelFamily[];
}
@@ -1,40 +1,177 @@
/** This whole module kinda sucks */
/* ──────────────────────────────────────────────────────────────
Login-gated info page
drop-in replacement for src/info-page.ts
──────────────────────────────────────────────────────────── */
import fs from "fs";
import { Request, Response } from "express";
import express, { Router, Request, Response } from "express";
import showdown from "showdown";
import { config } from "./config";
import { buildInfo, ServiceInfo } from "./service-info";
import { getLastNImages } from "./shared/file-storage/image-history";
import { keyPool } from "./shared/key-management";
import { MODEL_FAMILY_SERVICE, ModelFamily } from "./shared/models";
import { withSession } from "./shared/with-session";
import { injectCsrfToken, checkCsrfToken } from "./shared/inject-csrf";
import { getUser } from "./shared/users/user-store";
/* ──────────────── TYPES: extend express-session ──────────── */
declare module "express-session" {
interface Session {
infoPageAuthed?: boolean;
}
}
/* ──────────────── misc constants ─────────────────────────── */
const INFO_PAGE_TTL = 2_000; // ms
const LOGIN_ROUTE = "/";
const INFO_PAGE_TTL = 2000;
const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
"turbo": "GPT-3.5 Turbo",
"gpt4": "GPT-4",
qwen: "Qwen",
cohere: "Cohere",
deepseek: "Deepseek",
xai: "Grok",
moonshot: "Moonshot",
turbo: "GPT-4o Mini / 3.5 Turbo",
gpt4: "GPT-4",
"gpt4-32k": "GPT-4 32k",
"gpt4-turbo": "GPT-4 Turbo",
gpt4o: "GPT-4o",
gpt41: "GPT-4.1",
"gpt41-mini": "GPT-4.1 Mini",
"gpt41-nano": "GPT-4.1 Nano",
gpt5: "GPT-5",
"gpt5-mini": "GPT-5 Mini",
"gpt5-nano": "GPT-5 Nano",
"gpt5-chat-latest": "GPT-5 Chat Latest",
gpt45: "GPT-4.5",
o1: "OpenAI o1",
"o1-mini": "OpenAI o1 mini",
"o1-pro": "OpenAI o1 pro",
"o3-pro": "OpenAI o3 pro",
"o3-mini": "OpenAI o3 mini",
"o3": "OpenAI o3",
"o4-mini": "OpenAI o4 mini",
"codex-mini": "OpenAI Codex Mini",
"dall-e": "DALL-E",
"claude": "Claude",
"gpt-image": "GPT Image",
claude: "Claude (Sonnet)",
"claude-opus": "Claude (Opus)",
"gemini-flash": "Gemini Flash",
"gemini-pro": "Gemini Pro",
"gemini-ultra": "Gemini Ultra",
"mistral-tiny": "Mistral 7B",
"mistral-small": "Mixtral 8x7B",
"mistral-medium": "Mistral Medium (prototype)",
"aws-claude": "AWS Claude",
"mistral-small": "Mistral Nemo",
"mistral-medium": "Mistral Medium",
"mistral-large": "Mistral Large",
"aws-claude": "AWS Claude (Sonnet)",
"aws-claude-opus": "AWS Claude (Opus)",
"aws-mistral-tiny": "AWS Mistral 7B",
"aws-mistral-small": "AWS Mistral Nemo",
"aws-mistral-medium": "AWS Mistral Medium",
"aws-mistral-large": "AWS Mistral Large",
"gcp-claude": "GCP Claude (Sonnet)",
"gcp-claude-opus": "GCP Claude (Opus)",
"azure-turbo": "Azure GPT-3.5 Turbo",
"azure-gpt4": "Azure GPT-4",
"azure-gpt4-32k": "Azure GPT-4 32k",
"azure-gpt4-turbo": "Azure GPT-4 Turbo",
"azure-gpt4o": "Azure GPT-4o",
"azure-gpt45": "Azure GPT-4.5",
"azure-gpt41": "Azure GPT-4.1",
"azure-gpt41-mini": "Azure GPT-4.1 Mini",
"azure-gpt41-nano": "Azure GPT-4.1 Nano",
"azure-gpt5": "Azure GPT-5",
"azure-gpt5-mini": "Azure GPT-5 Mini",
"azure-gpt5-nano": "Azure GPT-5 Nano",
"azure-gpt5-chat-latest": "Azure GPT-5 Chat Latest",
"azure-o1": "Azure o1",
"azure-o1-mini": "Azure o1 mini",
"azure-o1-pro": "Azure o1 pro",
"azure-o3-pro": "Azure o3 pro",
"azure-o3-mini": "Azure o3 mini",
"azure-o3": "Azure o3",
"azure-o4-mini": "Azure o4 mini",
"azure-codex-mini": "Azure Codex Mini",
"azure-dall-e": "Azure DALL-E",
"azure-gpt-image": "Azure GPT Image",
};
const converter = new showdown.Converter();
/* optional markdown greeting */
const customGreeting = fs.existsSync("greeting.md")
? `\n## Server Greeting\n${fs.readFileSync("greeting.md", "utf8")}`
? `<div id="servergreeting">${fs.readFileSync("greeting.md", "utf8")}</div>`
: "";
/* ──────────────── Login page ──────────────────────── */
function renderLoginPage(csrf: string, error?: string) {
const errBlock = error
? `<div class="error-message">${escapeHtml(error)}</div>`
: "";
const pageTitle = getServerTitle();
return `<!DOCTYPE html>
<html>
<head>
<title>${pageTitle} Login</title>
<style>
body{font-family:Arial, sans-serif;display:flex;justify-content:center;
align-items:center;height:100vh;margin:0;padding:20px;background:#f5f5f5;}
.login-container{background:#fff;border-radius:8px;box-shadow:0 4px 8px rgba(0,0,0,.1);
padding:30px;width:100%;max-width:400px;text-align:center;}
.logo-image{max-width:200px;margin-bottom:20px;}
.form-group{margin-bottom:20px;}
input[type=text], input[type=password]{width:100%;padding:10px;border:1px solid #ddd;border-radius:4px;
box-sizing:border-box;font-size:16px;}
button{background:#4caf50;color:#fff;border:none;padding:12px 20px;border-radius:4px;
cursor:pointer;font-size:16px;width:100%;}
button:hover{background:#45a049;}
.error-message{color:#f44336;margin-bottom:15px;}
@media (prefers-color-scheme: dark) {
body { background: #2c2c2c; color: #e0e0e0; }
.login-container { background: #383838; box-shadow: 0 4px 12px rgba(0,0,0,0.4); border: 1px solid #4a4a4a; }
input[type=text], input[type=password] { background: #4a4a4a; color: #e0e0e0; border: 1px solid #5a5a5a; }
input[type=text]::placeholder, input[type=password]::placeholder { color: #999; }
button { background: #007bff; } /* Using a blue for dark mode button */
button:hover { background: #0056b3; }
.error-message { color: #ff8a80; } /* Lighter red for errors in dark mode */
}
</style>
</head>
<body>
<div class="login-container">
${config.loginImageUrl ? `<img src="${config.loginImageUrl}" alt="Logo" class="logo-image">` : ''}
${errBlock}
<form method="POST" action="${LOGIN_ROUTE}">
<div class="form-group">
${config.serviceInfoAuthMode === "password"
? `<input type="password" id="password" name="password" required placeholder="Service Password">`
: `<input type="text" id="token" name="token" required placeholder="Your token">`}
<input type="hidden" name="_csrf" value="${csrf}">
</div>
<button type="submit">Access Dashboard</button>
</form>
</div>
</body>
</html>`;
}
/* ──────────────── login-required middleware ──────────────── */
function requireLogin(
req: Request,
res: Response,
next: express.NextFunction
) {
if (req.session?.infoPageAuthed) return next();
return res.send(renderLoginPage(res.locals.csrfToken));
}
/* ──────────────── INFO PAGE CACHING ──────────────────────── */
let infoPageHtml: string | undefined;
let infoPageLastUpdated = 0;
export const handleInfoPage = (req: Request, res: Response) => {
export function handleInfoPage(req: Request, res: Response) {
if (infoPageLastUpdated + INFO_PAGE_TTL > Date.now()) {
return res.send(infoPageHtml);
}
@@ -44,42 +181,51 @@ export const handleInfoPage = (req: Request, res: Response) => {
? getExternalUrlForHuggingfaceSpaceId(process.env.SPACE_ID)
: req.protocol + "://" + req.get("host");
const info = buildInfo(baseUrl + "/proxy");
const info = buildInfo(baseUrl + config.proxyEndpointRoute);
infoPageHtml = renderPage(info);
infoPageLastUpdated = Date.now();
res.send(infoPageHtml);
};
}
/* ──────────────── RENDER FULL INFO PAGE ──────────────────── */
export function renderPage(info: ServiceInfo) {
const title = getServerTitle();
const headerHtml = buildInfoPageHeader(info);
return `<!DOCTYPE html>
return `<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="robots" content="noindex" />
<title>${title}</title>
</head>
<body style="font-family: sans-serif; background-color: #f0f0f0; padding: 1em;">
${headerHtml}
<hr />
<h2>Service Info</h2>
<pre>${JSON.stringify(info, null, 2)}</pre>
${getSelfServiceLinks()}
</body>
<head>
<meta charset="utf-8" />
<meta name="robots" content="noindex" />
<title>${title}</title>
<link rel="stylesheet" href="/res/css/reset.css" />
<link rel="stylesheet" href="/res/css/sakura.css" />
<link rel="stylesheet" href="/res/css/sakura-dark.css"
media="screen and (prefers-color-scheme: dark)" />
<style>
body{font-family:sans-serif;padding:1em;max-width:900px;margin:0;}
.self-service-links{display:flex;justify-content:center;margin-bottom:1em;
padding:0.5em;font-size:0.8em;}
.self-service-links a{margin:0 0.5em;}
</style>
</head>
<body>
${headerHtml}
<hr/>
${getSelfServiceLinks()}
<h2>Service Info</h2>
<pre>${JSON.stringify(info, null, 2)}</pre>
</body>
</html>`;
}
/**
* If the server operator provides a `greeting.md` file, it will be included in
* the rendered info page.
**/
/* ──────────────── header & helper functions ──────────────── */
/* (all copied verbatim from original file) */
function buildInfoPageHeader(info: ServiceInfo) {
const title = getServerTitle();
// TODO: use some templating engine instead of this mess
let infoBody = `# ${title}`;
if (config.promptLogging) {
infoBody += `\n## Prompt Logging Enabled
This proxy keeps full logs of all prompts and AI responses. Prompt logs are anonymous and do not contain IP addresses or timestamps.
@@ -98,20 +244,20 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon
for (const modelFamily of config.allowedModelFamilies) {
const service = MODEL_FAMILY_SERVICE[modelFamily];
const hasKeys = keyPool.list().some((k) => {
return k.service === service && k.modelFamilies.includes(modelFamily);
});
const hasKeys = keyPool.list().some(
(k) => k.service === service && k.modelFamilies.includes(modelFamily)
);
const wait = info[modelFamily]?.estimatedQueueTime;
if (hasKeys && wait) {
waits.push(`**${MODEL_FAMILY_FRIENDLY_NAME[modelFamily] || modelFamily}**: ${wait}`);
waits.push(
`**${MODEL_FAMILY_FRIENDLY_NAME[modelFamily] || modelFamily}**: ${wait}`
);
}
}
infoBody += "\n\n" + waits.join(" / ");
infoBody += customGreeting;
infoBody += buildRecentImageSection();
return converter.makeHtml(infoBody);
@@ -119,53 +265,60 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon
function getSelfServiceLinks() {
if (config.gatekeeper !== "user_token") return "";
return `<footer style="font-size: 0.8em;"><hr /><a target="_blank" href="/user/lookup">Check your user token info</a></footer>`;
const links = [["Check your user token", "/user/lookup"]];
if (config.captchaMode !== "none") {
links.unshift(["Request a user token", "/user/captcha"]);
}
return `<div class="self-service-links">${links
.map(([t, l]) => `<a href="${l}">${t}</a>`)
.join(" | ")}</div>`;
}
function getServerTitle() {
// Use manually set title if available
if (process.env.SERVER_TITLE) {
return process.env.SERVER_TITLE;
}
// Huggingface
if (process.env.SPACE_ID) {
if (process.env.SERVER_TITLE) return process.env.SERVER_TITLE;
if (process.env.SPACE_ID)
return `${process.env.SPACE_AUTHOR_NAME} / ${process.env.SPACE_TITLE}`;
}
// Render
if (process.env.RENDER) {
if (process.env.RENDER)
return `Render / ${process.env.RENDER_SERVICE_NAME}`;
}
return "OAI Reverse Proxy";
return "Tunnel";
}
function buildRecentImageSection() {
const imageModels: ModelFamily[] = [
"azure-dall-e",
"dall-e",
"gpt-image",
"azure-gpt-image",
];
// Condition 1: Is the feature enabled via config?
// Condition 2: Is at least one relevant image model family allowed in config?
if (
!config.allowedModelFamilies.includes("dall-e") ||
!config.showRecentImages
!config.showRecentImages ||
imageModels.every((f) => !config.allowedModelFamilies.includes(f))
) {
return ""; // Exit if feature is disabled or no relevant models are allowed
}
// Condition 3: Are there any actual images to display?
const recentImages = getLastNImages(12).reverse();
if (recentImages.length === 0) {
// If the feature is enabled and models are allowed, but no images exist,
// do not render the section, including its title.
return "";
}
let html = `<h2>Recent DALL-E Generations</h2>`;
const recentImages = getLastNImages(12).reverse();
if (recentImages.length === 0) {
html += `<p>No images yet.</p>`;
return html;
}
html += `<div style="display: flex; flex-wrap: wrap;" id="recent-images">`;
// If all conditions pass (feature enabled, models allowed, images exist), build and return the HTML
let html = `<h2>Recent Image Generations</h2>`;
html += `<div style="display:flex;flex-wrap:wrap;" id="recent-images">`;
for (const { url, prompt } of recentImages) {
const thumbUrl = url.replace(/\.png$/, "_t.jpg");
const escapedPrompt = escapeHtml(prompt);
html += `<div style="margin: 0.5em;" class="recent-image">
<a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}" alt="${escapedPrompt}" style="max-width: 150px; max-height: 150px;" /></a>
</div>`;
html += `<div style="margin:0.5em" class="recent-image">
<a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}"
alt="${escapedPrompt}" style="max-width:150px;max-height:150px;"/></a></div>`;
}
html += `</div>`;
html += `</div><p style="clear:both;text-align:center;">
<a href="/user/image-history">View all recent images</a></p>`;
return html;
}
@@ -175,14 +328,76 @@ function escapeHtml(unsafe: string) {
.replace(/</g, "&lt;")
.replace(/>/g, "&gt;")
.replace(/"/g, "&quot;")
.replace(/'/g, "&#39;");
.replace(/'/g, "&#39;")
.replace(/\[/g, "&#91;")
.replace(/]/g, "&#93;");
}
function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
try {
const [username, spacename] = spaceId.split("/");
return `https://${username}-${spacename.replace(/_/g, "-")}.hf.space`;
} catch (e) {
const [u, s] = spaceId.split("/");
return `https://${u}-${s.replace(/_/g, "-")}.hf.space`;
} catch {
return "";
}
}
/* ──────────────── ROUTER ─────────────────────────────────── */
const infoPageRouter = Router();
infoPageRouter.use(
express.json({ limit: "1mb" }),
express.urlencoded({ extended: true, limit: "1mb" }),
withSession,
injectCsrfToken,
checkCsrfToken
);
/* login attempt */
infoPageRouter.post(LOGIN_ROUTE, (req, res) => {
if (config.serviceInfoAuthMode === "password") {
const password = (req.body.password || "").trim();
// Simple string comparison; for production, consider a timing-safe comparison library
if (config.serviceInfoPassword && password === config.serviceInfoPassword) {
req.session!.infoPageAuthed = true;
return res.redirect("/");
} else {
return res
.status(401)
.send(renderLoginPage(res.locals.csrfToken, "Invalid password. Please try again."));
}
} else {
// Token-based authentication (using any valid user token)
const token = (req.body.token || "").trim();
const user = getUser(token); // returns undefined if invalid
if (user && !user.disabledAt) {
// Only allow access if user exists AND is not disabled
req.session!.infoPageAuthed = true;
return res.redirect("/");
} else if (user && user.disabledAt) {
// User exists but is disabled
const reason = user.disabledReason || "Your account has been disabled";
return res
.status(401)
.send(renderLoginPage(res.locals.csrfToken, `Access denied: ${reason}`));
} else {
// User doesn't exist
return res
.status(401)
.send(renderLoginPage(res.locals.csrfToken, "Invalid token. Please try again."));
}
}
});
/* GET / either login form or info page */
if (config.enableInfoPageLogin) {
infoPageRouter.get(LOGIN_ROUTE, requireLogin, handleInfoPage);
} else {
infoPageRouter.get(LOGIN_ROUTE, handleInfoPage);
}
/* ─── Removed the public /status route: simply not added ─── */
export { infoPageRouter };
@@ -0,0 +1,9 @@
import { NextFunction, Request, Response } from "express";
export function addV1(req: Request, res: Response, next: NextFunction) {
// Clients don't consistently use the /v1 prefix so we'll add it for them.
if (!req.path.startsWith("/v1/") && !req.path.match(/^\/(v1alpha|v1beta)\//)) {
req.url = `/v1${req.url}`;
}
next();
}
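/*
* Usage sketch (router names are illustrative): mount addV1 ahead of the
* service routers so a request to /chat/completions is rewritten to
* /v1/chat/completions before routing:
*
* const proxyRouter = Router();
* proxyRouter.use(addV1);
* proxyRouter.use("/anthropic", anthropicRouter);
*/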
@@ -1,21 +1,16 @@
import { Request, RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addKey,
addAnthropicPreamble,
createPreprocessorMiddleware,
finalizeBody,
createOnProxyReqHandler,
} from "./middleware/request";
import {
ProxyResHandlerWithBody,
createOnProxyResHandler,
} from "./middleware/response";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { claudeModels } from "../shared/claude-models";
import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";
let modelsCache: any = null;
let modelsCacheTime = 0;
@@ -25,37 +20,32 @@ const getModelsResponse = () => {
return modelsCache;
}
if (!config.anthropicKey) return { object: "list", data: [] };
if (!config.anthropicKey) return { object: "list", data: [], has_more: false, first_id: null, last_id: null };
const claudeVariants = [
"claude-v1",
"claude-v1-100k",
"claude-instant-v1",
"claude-instant-v1-100k",
"claude-v1.3",
"claude-v1.3-100k",
"claude-v1.2",
"claude-v1.0",
"claude-instant-v1.1",
"claude-instant-v1.1-100k",
"claude-instant-v1.0",
"claude-2",
"claude-2.0",
"claude-2.1",
];
const models = claudeVariants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
const date = new Date();
const models = claudeModels.map(model => ({
// Common
id: model.anthropicId,
owned_by: "anthropic",
permission: [],
root: "claude",
parent: null,
}));
// Anthropic
type: "model",
display_name: model.displayName,
created_at: date.toISOString(),
// OpenAI
object: "model",
created: date.getTime(),
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
modelsCache = {
// Common
object: "list",
data: models,
// Anthropic
has_more: false,
first_id: models[0]?.id,
last_id: models[models.length - 1]?.id,
};
modelsCacheTime = date.getTime();
return modelsCache;
};
@@ -64,8 +54,7 @@ const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
/** Only used for non-streaming requests. */
const anthropicResponseHandler: ProxyResHandlerWithBody = async (
const anthropicBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
@@ -75,30 +64,50 @@ const anthropicResponseHandler: ProxyResHandlerWithBody = async (
throw new Error("Expected body to be an object");
}
if (config.promptLogging) {
const host = req.get("host");
body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
let newBody = body;
switch (`${req.inboundApi}<-${req.outboundApi}`) {
case "openai<-anthropic-text":
req.log.info("Transforming Anthropic Text back to OpenAI format");
newBody = transformAnthropicTextResponseToOpenAI(body, req);
break;
case "openai<-anthropic-chat":
req.log.info("Transforming Anthropic Chat back to OpenAI format");
newBody = transformAnthropicChatResponseToOpenAI(body);
break;
case "anthropic-text<-anthropic-chat":
req.log.info("Transforming Anthropic Chat back to Anthropic chat format");
newBody = transformAnthropicChatResponseToAnthropicText(body);
break;
}
if (req.inboundApi === "openai") {
req.log.info("Transforming Anthropic response to OpenAI format");
body = transformAnthropicResponse(body, req);
}
if (req.tokenizerInfo) {
body.proxy_tokenizer = req.tokenizerInfo;
}
res.status(200).json(body);
res.status(200).json({ ...newBody, proxy: body.proxy });
};
/**
* Transforms a model response from the Anthropic API to match those from the
* OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
* is only used for non-streaming requests as streaming requests are handled
* on-the-fly.
*/
function transformAnthropicResponse(
function flattenChatResponse(
content: { type: string; text: string }[]
): string {
return content
.map((part: { type: string; text: string }) =>
part.type === "text" ? part.text : ""
)
.join("\n");
}
export function transformAnthropicChatResponseToAnthropicText(
anthropicBody: Record<string, any>
): Record<string, any> {
return {
type: "completion",
id: "ant-" + anthropicBody.id,
completion: flattenChatResponse(anthropicBody.content),
stop_reason: anthropicBody.stop_reason,
stop: anthropicBody.stop_sequence,
model: anthropicBody.model,
usage: anthropicBody.usage,
};
}
function transformAnthropicTextResponseToOpenAI(
anthropicBody: Record<string, any>,
req: Request
): Record<string, any> {
@@ -126,63 +135,250 @@ function transformAnthropicResponse(
};
}
const anthropicProxy = createQueueMiddleware({
proxyMiddleware: createProxyMiddleware({
target: "https://api.anthropic.com",
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({
pipeline: [addKey, addAnthropicPreamble, finalizeBody],
}),
proxyRes: createOnProxyResHandler([anthropicResponseHandler]),
error: handleProxyError,
},
pathRewrite: {
// Send OpenAI-compat requests to the real Anthropic endpoint.
"^/v1/chat/completions": "/v1/complete",
},
}),
export function transformAnthropicChatResponseToOpenAI(
anthropicBody: Record<string, any>
): Record<string, any> {
return {
id: "ant-" + anthropicBody.id,
object: "chat.completion",
created: Date.now(),
model: anthropicBody.model,
usage: anthropicBody.usage,
choices: [
{
message: {
role: "assistant",
content: flattenChatResponse(anthropicBody.content),
},
finish_reason: anthropicBody.stop_reason,
index: 0,
},
],
};
}
/**
* If a client using the OpenAI compatibility endpoint requests an actual OpenAI
* model, reassigns it to Sonnet.
*/
function maybeReassignModel(req: Request) {
const model = req.body.model;
if (model.includes("claude")) return; // use whatever model the user requested
req.body.model = "claude-3-5-sonnet-latest";
}
/**
* If client requests more than 4096 output tokens the request must have a
* particular version header.
* https://docs.anthropic.com/en/release-notes/api#july-15th-2024
*
* Also adds the required beta header for 1-hour cache duration if requested.
* Also validates Claude 4.1 Opus parameters (temperature/top_p).
*/
function setAnthropicBetaHeader(req: Request) {
// Validate Claude 4.1 Opus parameters before processing
validateClaude41OpusParameters(req);
const { max_tokens_to_sample } = req.body;
// Initialize beta headers array
const betaHeaders: string[] = [];
// Add max tokens beta header if needed
if (max_tokens_to_sample > 4096) {
betaHeaders.push("max-tokens-3-5-sonnet-2024-07-15");
}
// Add extended cache TTL beta header if 1h cache is requested
if (req.body.cache_control?.ttl === "1h") {
betaHeaders.push("extended-cache-ttl-2025-04-11");
}
// Set the combined beta headers if any were added
if (betaHeaders.length > 0) {
req.headers["anthropic-beta"] = betaHeaders.join(",");
}
}
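/*
* Example: a request with max_tokens_to_sample = 8192 and
* cache_control.ttl === "1h" leaves this function with both flags combined:
* anthropic-beta: max-tokens-3-5-sonnet-2024-07-15,extended-cache-ttl-2025-04-11
*/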
/**
* Adds web search tool for Claude-3.5 and Claude-3.7 models when enable_web_search is true
*
* Supports all optional parameters documented in the Claude API:
* - max_uses: Limit the number of searches per request
* - allowed_domains: Only include results from these domains
* - blocked_domains: Never include results from these domains
* - user_location: Localize search results
*/
function addWebSearchTool(req: Request) {
// Check if this is a Claude model that supports web search and if web search is enabled
const isClaude35 = req.body.model?.includes("claude-3-5") || req.body.model?.includes("claude-3.5");
const isClaude37 = req.body.model?.includes("claude-3-7") || req.body.model?.includes("claude-3.7");
const isClaude4 = req.body.model?.includes("claude-sonnet-4") || req.body.model?.includes("claude-opus-4");
const useWebSearch = (isClaude35 || isClaude37 || isClaude4) && Boolean(req.body.enable_web_search);
if (useWebSearch) {
// Create the base web search tool
const webSearchTool: any = {
'type': 'web_search_20250305',
'name': 'web_search',
};
// Add optional parameters if provided by the client
// max_uses: Limit the number of searches per request
if (typeof req.body.web_search_max_uses === 'number') {
webSearchTool.max_uses = req.body.web_search_max_uses;
delete req.body.web_search_max_uses;
}
// allowed_domains: Only include results from these domains
if (Array.isArray(req.body.web_search_allowed_domains)) {
webSearchTool.allowed_domains = req.body.web_search_allowed_domains;
delete req.body.web_search_allowed_domains;
}
// blocked_domains: Never include results from these domains
if (Array.isArray(req.body.web_search_blocked_domains)) {
webSearchTool.blocked_domains = req.body.web_search_blocked_domains;
delete req.body.web_search_blocked_domains;
}
// user_location: Localize search results
if (req.body.web_search_user_location) {
webSearchTool.user_location = req.body.web_search_user_location;
delete req.body.web_search_user_location;
}
// Add the web search tool to the tools array
req.body.tools = [...(req.body.tools || []), webSearchTool];
}
// Delete custom parameters as they're not standard Claude API parameters
delete req.body.enable_web_search;
delete req.body.reasoning_effort;
}
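/*
* Example: a request body containing
* { "model": "claude-3-7-sonnet-latest", "enable_web_search": true, "web_search_max_uses": 3 }
* leaves this function with
* tools: [{ "type": "web_search_20250305", "name": "web_search", "max_uses": 3 }]
* and the custom enable_web_search / web_search_max_uses fields removed.
*/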
function selectUpstreamPath(manager: ProxyReqManager) {
const req = manager.request;
const pathname = req.url.split("?")[0];
req.log.debug({ pathname }, "Anthropic path filter");
const isText = req.outboundApi === "anthropic-text";
const isChat = req.outboundApi === "anthropic-chat";
if (isChat && pathname === "/v1/complete") {
manager.setPath("/v1/messages");
}
if (isText && pathname === "/v1/chat/completions") {
manager.setPath("/v1/complete");
}
if (isChat && pathname === "/v1/chat/completions") {
manager.setPath("/v1/messages");
}
if (isChat && ["sonnet", "opus"].includes(req.params.type)) {
manager.setPath("/v1/messages");
}
}
const anthropicProxy = createQueuedProxyMiddleware({
target: "https://api.anthropic.com",
mutations: [selectUpstreamPath, addKey, finalizeBody],
blockingResponseHandler: anthropicBlockingResponseHandler,
});
const nativeAnthropicChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "anthropic" },
{ afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);
const nativeTextPreprocessor = createPreprocessorMiddleware(
{
inApi: "anthropic-text",
outApi: "anthropic-text",
service: "anthropic",
},
{ afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);
const textToChatPreprocessor = createPreprocessorMiddleware(
{
inApi: "anthropic-text",
outApi: "anthropic-chat",
service: "anthropic",
},
{ afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);
/**
* Routes text completion prompts to anthropic-chat if they need translation
* (claude-3 based models do not support the old text completion endpoint).
*/
const preprocessAnthropicTextRequest: RequestHandler = (req, res, next) => {
const model = req.body.model;
const isClaude4Model = model?.includes("claude-sonnet-4") || model?.includes("claude-opus-4");
if (model?.startsWith("claude-3") || isClaude4Model) {
textToChatPreprocessor(req, res, next);
} else {
nativeTextPreprocessor(req, res, next);
}
};
const oaiToTextPreprocessor = createPreprocessorMiddleware(
{
inApi: "openai",
outApi: "anthropic-text",
service: "anthropic",
},
{ afterTransform: [setAnthropicBetaHeader] }
);
const oaiToChatPreprocessor = createPreprocessorMiddleware(
{
inApi: "openai",
outApi: "anthropic-chat",
service: "anthropic",
},
{ afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);
/**
* Routes an OpenAI prompt to either the legacy Claude text completion endpoint
* or the new Claude chat completion endpoint, based on the requested model.
*/
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
maybeReassignModel(req);
const model = req.body.model;
const isClaude4 = model?.includes("claude-sonnet-4") || model?.includes("claude-opus-4");
if (model?.includes("claude-3") || isClaude4) {
oaiToChatPreprocessor(req, res, next);
} else {
oaiToTextPreprocessor(req, res, next);
}
};
const anthropicRouter = Router();
anthropicRouter.get("/v1/models", handleModelRequest);
// Native Anthropic chat completion endpoint.
anthropicRouter.post(
"/v1/complete",
"/v1/messages",
ipLimiter,
createPreprocessorMiddleware({
inApi: "anthropic",
outApi: "anthropic",
service: "anthropic",
}),
nativeAnthropicChatPreprocessor,
anthropicProxy
);
// OpenAI-to-Anthropic compatibility endpoint.
// Anthropic text completion endpoint. Translates to Anthropic chat completion
// if the requested model is a Claude 3 model.
anthropicRouter.post(
"/v1/complete",
ipLimiter,
preprocessAnthropicTextRequest,
anthropicProxy
);
// OpenAI-to-Anthropic compatibility endpoint. Accepts an OpenAI chat completion
// request and transforms/routes it to the appropriate Anthropic format and
// endpoint based on the requested model.
anthropicRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic", service: "anthropic" },
{ afterTransform: [maybeReassignModel] }
),
preprocessOpenAICompatRequest,
anthropicProxy
);
function maybeReassignModel(req: Request) {
const model = req.body.model;
if (!model.startsWith("gpt-")) return;
const bigModel = process.env.CLAUDE_BIG_MODEL || "claude-v1-100k";
const contextSize = req.promptTokens! + req.outputTokens!;
if (contextSize > 8500) {
req.log.debug(
{ model: bigModel, contextSize },
"Using Claude 100k model for OpenAI-to-Anthropic request"
);
req.body.model = bigModel;
}
}
export const anthropic = anthropicRouter;
@@ -0,0 +1,341 @@
import { Request, RequestHandler, Router } from "express";
import { v4 } from "uuid";
import {
transformAnthropicChatResponseToAnthropicText,
transformAnthropicChatResponseToOpenAI,
} from "./anthropic";
import { ipLimiter } from "./rate-limit";
import {
createPreprocessorMiddleware,
finalizeSignedRequest,
signAwsRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";
const awsBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
switch (`${req.inboundApi}<-${req.outboundApi}`) {
case "openai<-anthropic-text":
req.log.info("Transforming Anthropic Text back to OpenAI format");
newBody = transformAwsTextResponseToOpenAI(body, req);
break;
case "openai<-anthropic-chat":
req.log.info("Transforming AWS Anthropic Chat back to OpenAI format");
newBody = transformAnthropicChatResponseToOpenAI(body);
break;
case "anthropic-text<-anthropic-chat":
req.log.info("Transforming AWS Anthropic Chat back to Text format");
newBody = transformAnthropicChatResponseToAnthropicText(body);
break;
}
// AWS does not always confirm the model in the response, so we have to add it
if (!newBody.model && req.body.model) {
newBody.model = req.body.model;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
function transformAwsTextResponseToOpenAI(
awsBody: Record<string, any>,
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
return {
id: "aws-" + v4(),
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage: {
prompt_tokens: req.promptTokens,
completion_tokens: req.outputTokens,
total_tokens: totalTokens,
},
choices: [
{
message: {
role: "assistant",
content: awsBody.completion?.trim(),
},
finish_reason: awsBody.stop_reason,
index: 0,
},
],
};
}
const awsClaudeProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
mutations: [signAwsRequest, finalizeSignedRequest],
blockingResponseHandler: awsBlockingResponseHandler,
});
const nativeTextPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
const textToChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes text completion prompts to aws anthropic-chat if they need translation
* (claude-3 based models do not support the old text completion endpoint).
*/
const preprocessAwsTextRequest: RequestHandler = (req, res, next) => {
if (req.body.model?.includes("claude-3")) {
textToChatPreprocessor(req, res, next);
} else {
nativeTextPreprocessor(req, res, next);
}
};
const oaiToAwsTextPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
const oaiToAwsChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes an OpenAI prompt to either the legacy Claude text completion endpoint
* or the new Claude chat completion endpoint, based on the requested model.
*/
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
if (req.body.model?.includes("claude-3")) {
oaiToAwsChatPreprocessor(req, res, next);
} else {
oaiToAwsTextPreprocessor(req, res, next);
}
};
const awsClaudeRouter = Router();
// Native(ish) Anthropic text completion endpoint.
awsClaudeRouter.post(
"/v1/complete",
ipLimiter,
preprocessAwsTextRequest,
awsClaudeProxy
);
// Native Anthropic chat completion endpoint.
awsClaudeRouter.post(
"/v1/messages",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
),
awsClaudeProxy
);
// OpenAI-to-AWS Anthropic compatibility endpoint.
awsClaudeRouter.post(
"/v1/chat/completions",
ipLimiter,
preprocessOpenAICompatRequest,
awsClaudeProxy
);
/**
* Tries to deal with:
* - frontends sending AWS model names even when they want to use the OpenAI-
* compatible endpoint
* - frontends sending Anthropic model names that AWS doesn't recognize
* - frontends sending OpenAI model names because they expect the proxy to
* translate them
*
* If client sends AWS model ID it will be used verbatim. Otherwise, various
* strategies are used to try to map a non-AWS model name to AWS model ID.
*/
function maybeReassignModel(req: Request) {
// Validate Claude 4.1 Opus parameters before processing
validateClaude41OpusParameters(req);
const model = req.body.model;
// If it looks like an AWS model, use it as-is
if (model.includes("anthropic.claude")) {
return;
}
// Anthropic model names can look like:
// - claude-v1
// - claude-2.1
// - claude-3-5-sonnet-20240620 (old format: number-model)
// - claude-3-opus-latest (old format: number-model)
// - claude-sonnet-4-20250514 (new format: model-number)
// - claude-opus-4-latest (new format: model-number)
// - anthropic.claude-3-sonnet-20240229-v1:0 (AWS format with old naming)
// - anthropic.claude-sonnet-4-20250514-v1:0 (AWS format with new naming)
const pattern =
/^(?:anthropic\.)?claude-(?:(?:(instant-)?(v)?(\d+)([.-](\d))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(latest|\d*))|(?:(sonnet-|opus-|haiku-)(\d+)([.-](\d))?(-\d+k)?-(latest|\d+)))(?:-v\d+(?::\d+)?)?$/i;
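// Examples of how matched names are remapped by the cases below:
// "claude-3-5-sonnet-latest" -> "anthropic.claude-3-5-sonnet-20241022-v2:0"
// "claude-opus-4-1" -> "anthropic.claude-opus-4-1-20250805-v1:0"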
const match = model.match(pattern);
if (!match) {
throw new Error(`Provided model name (${model}) doesn't resemble a Claude model ID.`);
}
// Check which format matched (old or new)
// New format: claude-sonnet-4-20250514 or anthropic.claude-sonnet-4-20250514-v1:0
// Old format: claude-3-sonnet-20240229 or anthropic.claude-3-sonnet-20240229-v1:0
const isNewFormat = !!match[9];
let major, minor, name, rev;
if (isNewFormat) {
// New format: claude-sonnet-4-20250514
// match[9] = sonnet-/opus-/haiku-
// match[10] = 4 (major version)
// match[12] = minor version (if any, from [.-](\d) pattern)
// match[14] = revision (latest or date)
const modelType = match[9]?.match(/([a-z]+)/)?.[1] || "";
name = modelType;
major = match[10];
minor = match[12];
rev = match[14];
// Special case: if revision is a single digit and no minor version,
// treat revision as minor version (e.g., claude-opus-4-1 -> version 4.1)
if (!minor && rev && /^\d$/.test(rev)) {
minor = rev;
rev = undefined;
}
// Handle instant case for completeness
const instant = match[1];
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
}
} else {
// Old format: claude-3-sonnet-20240229
// match[1] = instant- (if any)
// match[3] = 3 (major version)
// match[5] = minor version (if any)
// match[7] = -sonnet-/-opus-/-haiku- (if any)
// match[8] = revision (latest or date)
const instant = match[1];
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
}
major = match[3];
minor = match[5];
name = match[7]?.match(/([a-z]+)/)?.[1] || "";
rev = match[8];
}
const ver = minor ? `${major}.${minor}` : major;
switch (ver) {
case "1":
case "1.0":
req.body.model = "anthropic.claude-v1";
return;
case "2":
case "2.0":
req.body.model = "anthropic.claude-v2";
return;
case "2.1":
req.body.model = "anthropic.claude-v2:1";
return;
case "3":
case "3.0":
// there is only one snapshot for all Claude 3 models so there is no need
// to check the revision
switch (name) {
case "sonnet":
req.body.model = "anthropic.claude-3-sonnet-20240229-v1:0";
return;
case "haiku":
req.body.model = "anthropic.claude-3-haiku-20240307-v1:0";
return;
case "opus":
req.body.model = "anthropic.claude-3-opus-20240229-v1:0";
return;
}
break;
case "3.5":
switch (name) {
case "sonnet":
switch (rev) {
case "20241022":
case "latest":
req.body.model = "anthropic.claude-3-5-sonnet-20241022-v2:0";
return;
case "20240620":
req.body.model = "anthropic.claude-3-5-sonnet-20240620-v1:0";
return;
}
break;
case "haiku":
switch (rev) {
case "20241022":
case "latest":
req.body.model = "anthropic.claude-3-5-haiku-20241022-v1:0";
return;
}
break;
case "opus":
// Add after model id is announced never
break;
}
break;
case "3.7":
switch (name) {
case "sonnet":
req.body.model = "anthropic.claude-3-7-sonnet-20250219-v1:0";
return;
}
break;
case "4":
case "4.0":
// Mapping "claude-4-..." variants to their actual AWS Bedrock IDs
// as defined in src/shared/claude-models.ts.
switch (name) {
case "sonnet":
req.body.model = "anthropic.claude-sonnet-4-20250514-v1:0";
return;
case "opus":
req.body.model = "anthropic.claude-opus-4-20250514-v1:0";
return;
// No case for "haiku" here, as "claude-4-haiku" is not defined
// in claude-models.ts. It will fall through and throw an error.
}
break;
case "4.1":
// Mapping "claude-4.1-..." variants to their actual AWS Bedrock IDs
// as defined in src/shared/claude-models.ts.
switch (name) {
case "opus":
req.body.model = "anthropic.claude-opus-4-1-20250805-v1:0";
return;
// No sonnet or haiku variants for 4.1 yet
}
break;
}
throw new Error(`Provided model name (${model}) could not be mapped to a known AWS Claude model ID.`);
}
export const awsClaude = awsClaudeRouter;
@@ -0,0 +1,95 @@
import { Request, Router } from "express";
import {
detectMistralInputApi,
transformMistralTextToMistralChat,
} from "./mistral-ai";
import { ipLimiter } from "./rate-limit";
import { ProxyResHandlerWithBody } from "./middleware/response";
import {
createPreprocessorMiddleware,
finalizeSignedRequest,
signAwsRequest,
} from "./middleware/request";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
const awsMistralBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
if (req.inboundApi === "mistral-ai" && req.outboundApi === "mistral-text") {
newBody = transformMistralTextToMistralChat(body);
}
// AWS does not always confirm the model in the response, so we have to add it
if (!newBody.model && req.body.model) {
newBody.model = req.body.model;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const awsMistralProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
mutations: [signAwsRequest, finalizeSignedRequest],
blockingResponseHandler: awsMistralBlockingResponseHandler,
});
function maybeReassignModel(req: Request) {
const model = req.body.model;
// If it looks like an AWS model, use it as-is
if (model.startsWith("mistral.")) {
return;
}
// Mistral 8x7B Instruct (checked before 7B, since "8x7b" also contains "7b")
else if (model.includes("8x7b")) {
req.body.model = "mistral.mixtral-8x7b-instruct-v0:1";
}
// Mistral 7B Instruct
else if (model.includes("7b")) {
req.body.model = "mistral.mistral-7b-instruct-v0:2";
}
// Mistral Large (Feb 2024)
else if (model.includes("large-2402")) {
req.body.model = "mistral.mistral-large-2402-v1:0";
}
// Mistral Large 2 (July 2024)
else if (model.includes("large")) {
req.body.model = "mistral.mistral-large-2407-v1:0";
}
// Mistral Small (Feb 2024)
else if (model.includes("small")) {
req.body.model = "mistral.mistral-small-2402-v1:0";
} else {
throw new Error(
`Can't map '${model}' to a supported AWS model ID; make sure you are requesting a Mistral model supported by Amazon Bedrock`
);
}
}
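/*
* Examples of the mapping above:
* "mistral-large-2402" -> "mistral.mistral-large-2402-v1:0"
* "mixtral-8x7b-instruct" -> "mistral.mixtral-8x7b-instruct-v0:1"
*/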
const nativeMistralChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "mistral-ai", outApi: "mistral-ai", service: "aws" },
{
beforeTransform: [detectMistralInputApi],
afterTransform: [maybeReassignModel],
}
);
const awsMistralRouter = Router();
awsMistralRouter.post(
"/v1/chat/completions",
ipLimiter,
nativeMistralChatPreprocessor,
awsMistralProxy
);
export const awsMistral = awsMistralRouter;
@@ -1,218 +1,98 @@
import { Request, RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
/* Shared code between AWS Claude and AWS Mistral endpoints. */
import { Request, Response, Router } from "express";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
createPreprocessorMiddleware,
signAwsRequest,
finalizeSignedRequest,
createOnProxyReqHandler,
} from "./middleware/request";
import {
ProxyResHandlerWithBody,
createOnProxyResHandler,
} from "./middleware/response";
const LATEST_AWS_V2_MINOR_VERSION = "1";
let modelsCache: any = null;
let modelsCacheTime = 0;
const getModelsResponse = () => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
if (!config.awsCredentials) return { object: "list", data: [] };
const variants = [
"anthropic.claude-v1",
"anthropic.claude-v2",
"anthropic.claude-v2:1",
];
const models = variants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "anthropic",
permission: [],
root: "claude",
parent: null,
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
/** Only used for non-streaming requests. */
const awsResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
if (config.promptLogging) {
const host = req.get("host");
body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
}
if (req.inboundApi === "openai") {
req.log.info("Transforming AWS Claude response to OpenAI format");
body = transformAwsResponse(body, req);
}
if (req.tokenizerInfo) {
body.proxy_tokenizer = req.tokenizerInfo;
}
// AWS does not confirm the model in the response, so we have to add it
body.model = req.body.model;
res.status(200).json(body);
};
/**
* Transforms a model response from the Anthropic API to match those from the
* OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
* is only used for non-streaming requests as streaming requests are handled
* on-the-fly.
*/
function transformAwsResponse(
awsBody: Record<string, any>,
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
return {
id: "aws-" + v4(),
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage: {
prompt_tokens: req.promptTokens,
completion_tokens: req.outputTokens,
total_tokens: totalTokens,
},
choices: [
{
message: {
role: "assistant",
content: awsBody.completion?.trim(),
},
finish_reason: awsBody.stop_reason,
index: 0,
},
],
};
}
const awsProxy = createQueueMiddleware({
beforeProxy: signAwsRequest,
proxyMiddleware: createProxyMiddleware({
target: "bad-target-will-be-rewritten",
router: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([awsResponseHandler]),
error: handleProxyError,
},
}),
});
import { addV1 } from "./add-v1";
import { awsClaude } from "./aws-claude";
import { awsMistral } from "./aws-mistral";
import { AwsBedrockKey, keyPool } from "../shared/key-management";
import { claudeModels, findByAwsId } from "../shared/claude-models";
const awsRouter = Router();
awsRouter.get("/v1/models", handleModelRequest);
// Native(ish) Anthropic chat completion endpoint.
awsRouter.post(
"/v1/complete",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "anthropic", outApi: "anthropic", service: "aws" },
{ afterTransform: [maybeReassignModel] }
),
awsProxy
);
// OpenAI-to-AWS Anthropic compatibility endpoint.
awsRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic", service: "aws" },
{ afterTransform: [maybeReassignModel] }
),
awsProxy
);
awsRouter.get(["/:vendor?/v1/models", "/:vendor?/models"], handleModelsRequest);
awsRouter.use("/claude", addV1, awsClaude);
awsRouter.use("/mistral", addV1, awsMistral);
/**
* Tries to deal with:
* - frontends sending AWS model names even when they want to use the OpenAI-
* compatible endpoint
* - frontends sending Anthropic model names that AWS doesn't recognize
* - frontends sending OpenAI model names because they expect the proxy to
* translate them
*/
function maybeReassignModel(req: Request) {
const model = req.body.model;
const MODELS_CACHE_TTL = 10000;
let modelsCache: Record<string, any> = {};
let modelsCacheTime: Record<string, number> = {};
function handleModelsRequest(req: Request, res: Response) {
if (!config.awsCredentials) return res.json({ object: "list", data: [] });
// If client already specified an AWS Claude model ID, use it
if (model.includes("anthropic.claude")) {
return;
const vendor = req.params.vendor?.length
? req.params.vendor === "claude"
? "anthropic"
: req.params.vendor
: "all";
const cacheTime = modelsCacheTime[vendor] || 0;
if (new Date().getTime() - cacheTime < MODELS_CACHE_TTL) {
return res.json(modelsCache[vendor]);
}
const pattern = /^(claude-)?(instant-)?(v)?(\d+)(\.(\d+))?(-\d+k)?$/i;
const match = model.match(pattern);
// If there's no match, return the latest v2 model
if (!match) {
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
const availableAwsModelIds = new Set<string>();
for (const key of keyPool.list()) {
if (key.isDisabled || key.service !== "aws") continue;
(key as AwsBedrockKey).modelIds.forEach((id) => availableAwsModelIds.add(id));
}
const [, , instant, , major, , minor] = match;
const mistralMappings = new Map([
["mistral.mistral-7b-instruct-v0:2", "Mistral 7B Instruct"],
["mistral.mixtral-8x7b-instruct-v0:1", "Mixtral 8x7B Instruct"],
["mistral.mistral-large-2402-v1:0", "Mistral Large 2402"],
["mistral.mistral-large-2407-v1:0", "Mistral Large 2407"],
["mistral.mistral-small-2402-v1:0", "Mistral Small 2402"],
]);
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
}
const date = new Date();
const claudeModelsList = claudeModels
.filter(model => availableAwsModelIds.has(model.awsId))
.map(model => ({
id: model.anthropicId,
owned_by: "anthropic",
type: "model",
display_name: model.displayName,
created_at: date.toISOString(),
object: "model",
created: date.getTime(),
permission: [],
root: "anthropic",
parent: null,
}));
const mistralModelsList = Array.from(mistralMappings.keys())
.filter(id => availableAwsModelIds.has(id))
.map(id => {
return {
id,
owned_by: "mistral",
type: "model",
display_name: mistralMappings.get(id) || id.split('.')[1],
created_at: date.toISOString(),
object: "model",
created: date.getTime(),
permission: [],
root: "mistral",
parent: null,
};
});
// There's only one v1 model
if (major === "1") {
req.body.model = "anthropic.claude-v1";
return;
}
const allModels = [...claudeModelsList, ...mistralModelsList];
const filteredModels = vendor === "all"
? allModels
: allModels.filter(m => m.root === vendor);
// Try to map Anthropic API v2 models to AWS v2 models
if (major === "2") {
if (minor === "0") {
req.body.model = "anthropic.claude-v2";
return;
}
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
}
modelsCache[vendor] = {
object: "list",
data: filteredModels,
has_more: false,
first_id: filteredModels[0]?.id,
last_id: filteredModels[filteredModels.length - 1]?.id,
};
modelsCacheTime[vendor] = date.getTime();
// Fallback to latest v2 model
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
return res.json(modelsCache[vendor]);
}
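// Illustrative routing, assuming this router is mounted at /aws (the mount
// point is not shown in this diff):
//   GET /aws/v1/models         -> all available Claude and Mistral models
//   GET /aws/claude/v1/models  -> vendor "claude" is normalized to "anthropic"
//   GET /aws/mistral/v1/models -> Mistral models only
// Each vendor's list is cached independently for MODELS_CACHE_TTL (10s).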
export const aws = awsRouter;
src/proxy/azure.ts +34 -85
@@ -1,73 +1,30 @@
import { RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { keyPool } from "../shared/key-management";
import {
ModelFamily,
AzureOpenAIModelFamily,
getAzureOpenAIModelFamily,
} from "../shared/models";
import { logger } from "../logger";
import { KNOWN_OPENAI_MODELS } from "./openai";
import { createQueueMiddleware } from "./queue";
import { generateModelList } from "./openai";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addAzureKey,
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeSignedRequest,
} from "./middleware/request";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
let modelsCache: any = null;
let modelsCacheTime = 0;
function getModelsResponse() {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
let available = new Set<AzureOpenAIModelFamily>();
for (const key of keyPool.list()) {
if (key.isDisabled || key.service !== "azure") continue;
key.modelFamilies.forEach((family) =>
available.add(family as AzureOpenAIModelFamily)
);
}
const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
available = new Set([...available].filter((x) => allowed.has(x)));
const models = KNOWN_OPENAI_MODELS.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "azure",
permission: [
{
id: "modelperm-" + id,
object: "model_permission",
created: new Date().getTime(),
organization: "*",
group: null,
is_blocking: false,
},
],
root: id,
parent: null,
})).filter((model) => available.has(getAzureOpenAIModelFamily(model.id)));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
}
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return res.status(200).json(modelsCache);
}
if (!config.azureCredentials) return res.status(200).json({ object: "list", data: [] });
const result = generateModelList("azure");
modelsCache = { object: "list", data: result };
modelsCacheTime = new Date().getTime();
res.status(200).json(modelsCache);
};
const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
@@ -80,38 +37,20 @@ const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
throw new Error("Expected body to be an object");
}
if (config.promptLogging) {
const host = req.get("host");
body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
}
if (req.tokenizerInfo) {
body.proxy_tokenizer = req.tokenizerInfo;
}
res.status(200).json(body);
res.status(200).json({ ...body, proxy: body.proxy });
};
const azureOpenAIProxy = createQueueMiddleware({
beforeProxy: addAzureKey,
proxyMiddleware: createProxyMiddleware({
target: "will be set by router",
router: (req) => {
if (!req.signedRequest) throw new Error("signedRequest not set");
const { hostname, path } = req.signedRequest;
return `https://${hostname}${path}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([azureOpenaiResponseHandler]),
error: handleProxyError,
},
}),
const azureOpenAIProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
const { hostname, protocol } = signedRequest;
return `${protocol}//${hostname}`;
},
mutations: [addAzureKey, finalizeSignedRequest],
blockingResponseHandler: azureOpenaiResponseHandler,
});
const azureOpenAIRouter = Router();
azureOpenAIRouter.get("/v1/models", handleModelRequest);
azureOpenAIRouter.post(
@@ -124,5 +63,15 @@ azureOpenAIRouter.post(
}),
azureOpenAIProxy
);
azureOpenAIRouter.post(
"/v1/images/generations",
ipLimiter,
createPreprocessorMiddleware({
inApi: "openai-image",
outApi: "openai-image",
service: "azure",
}),
azureOpenAIProxy
);
export const azure = azureOpenAIRouter;
src/proxy/cohere.ts +222
@@ -0,0 +1,222 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { CohereKey, keyPool } from "../shared/key-management";
import { isCohereModel, normalizeMessages } from "../shared/api-schemas/cohere";
import { logger } from "../logger";
const log = logger.child({ module: "proxy", service: "cohere" });
let modelsCache: any = null;
let modelsCacheTime = 0;
const cohereResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
res.status(200).json({ ...body, proxy: body.proxy });
};
const getModelsResponse = async () => {
// Return cache if less than 1 minute old
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
try {
// Get a Cohere key directly
const modelToUse = "command"; // Use any Cohere model here - just for key selection
const cohereKey = keyPool.get(modelToUse, "cohere") as CohereKey;
if (!cohereKey || !cohereKey.key) {
log.warn("No valid Cohere key available for model listing");
throw new Error("No valid Cohere API key available");
}
// Fetch models directly from Cohere API
const response = await axios.get("https://api.cohere.com/v1/models", {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${cohereKey.key}`,
"Cohere-Version": "2022-12-06"
},
});
if (!response.data || !response.data.models) {
throw new Error("Unexpected response format from Cohere API");
}
// Extract models and filter by those that support the chat endpoint
const filteredModels = response.data.models
.filter((model: any) => {
return model.endpoints && model.endpoints.includes("chat");
})
.map((model: any) => ({
id: model.name,
name: model.name,
// Adding additional OpenAI-compatible fields
context_window: model.context_window_size || 4096,
max_tokens: model.max_tokens || 4096
}));
log.debug({ modelCount: filteredModels.length, models: filteredModels.map((m: any) => m.id) }, "Filtered models from Cohere API");
// Format response to ensure OpenAI compatibility
const models = {
object: "list",
data: filteredModels.map((model: any) => ({
id: model.id,
object: "model",
created: Math.floor(Date.now() / 1000),
owned_by: "cohere",
permission: [],
root: model.id,
parent: null,
context_length: model.context_window,
})),
};
log.debug({ modelCount: filteredModels.length }, "Retrieved models from Cohere API");
// Cache the response
modelsCache = models;
modelsCacheTime = new Date().getTime();
return models;
} catch (error) {
// Provide detailed logging for better troubleshooting
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error fetching Cohere models"
);
} else {
log.error({ error }, "Unknown error fetching Cohere models");
}
// Return empty list as fallback
return {
object: "list",
data: [],
};
}
};
const handleModelRequest: RequestHandler = async (_req, res) => {
try {
const models = await getModelsResponse();
res.status(200).json(models);
} catch (error) {
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error handling model request"
);
} else {
log.error({ error }, "Unknown error handling model request");
}
res.status(500).json({ error: "Failed to fetch models" });
}
};
// Function to prepare messages for Cohere API
function prepareMessages(req: Request) {
if (req.body.messages && Array.isArray(req.body.messages)) {
req.body.messages = normalizeMessages(req.body.messages);
}
}
// Function to remove parameters not supported by Cohere models
function removeUnsupportedParameters(req: Request) {
const model = req.body.model;
// Remove parameters that Cohere doesn't support
if (req.body.logit_bias !== undefined) {
delete req.body.logit_bias;
}
if (req.body.top_logprobs !== undefined) {
delete req.body.top_logprobs;
}
if (req.body.max_completion_tokens !== undefined) {
delete req.body.max_completion_tokens;
}
// Handle structured output format
if (req.body.response_format && req.body.response_format.schema) {
// Transform to Cohere's format if needed
const jsonSchema = req.body.response_format.schema;
req.body.response_format = {
type: "json_object",
schema: jsonSchema
};
}
// Logging for debugging
if (process.env.NODE_ENV !== 'production') {
log.debug({ body: req.body }, "Request after parameter cleanup");
}
}
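// Illustrative structured-output rewrite (hypothetical schema):
//   { response_format: { schema: { type: "object" } } }
//     becomes
//   { response_format: { type: "json_object", schema: { type: "object" } } }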
// Set up count token functionality for Cohere models
function countCohereTokens(req: Request) {
const model = req.body.model;
if (isCohereModel(model)) {
// Count tokens using prompt tokens (simplified)
if (req.promptTokens) {
req.log.debug(
{ tokens: req.promptTokens },
"Estimated token count for Cohere prompt"
);
}
}
}
const cohereProxy = createQueuedProxyMiddleware({
mutations: [
addKey,
// Add Cohere-Version header to every request
(manager) => {
manager.setHeader("Cohere-Version", "2022-12-06");
},
finalizeBody
],
target: "https://api.cohere.ai/compatibility",
blockingResponseHandler: cohereResponseHandler,
});
const cohereRouter = Router();
cohereRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "cohere" },
{ afterTransform: [ prepareMessages, removeUnsupportedParameters, countCohereTokens ] }
),
cohereProxy
);
cohereRouter.post(
"/v1/embeddings",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "cohere" },
{ afterTransform: [] }
),
cohereProxy
);
cohereRouter.get("/v1/models", handleModelRequest);
export const cohere = cohereRouter;
src/proxy/deepseek.ts +135
@@ -0,0 +1,135 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { DeepseekKey, keyPool } from "../shared/key-management";
let modelsCache: any = null;
let modelsCacheTime = 0;
const deepseekResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const getModelsResponse = async () => {
// Return cache if less than 1 minute old
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
try {
// Get a Deepseek key directly using keyPool.get()
const modelToUse = "deepseek-chat"; // Use any Deepseek model here - just for key selection
const deepseekKey = keyPool.get(modelToUse, "deepseek") as DeepseekKey;
if (!deepseekKey || !deepseekKey.key) {
throw new Error("Failed to get valid Deepseek key");
}
// Fetch models from Deepseek API with authorization
const response = await axios.get("https://api.deepseek.com/models", {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${deepseekKey.key}`
},
});
// If successful, update the cache
if (response.data && response.data.data) {
modelsCache = {
object: "list",
data: response.data.data.map((model: any) => ({
id: model.id,
object: "model",
owned_by: "deepseek",
})),
};
} else {
throw new Error("Unexpected response format from Deepseek API");
}
} catch (error) {
console.error("Error fetching Deepseek models:", error);
throw error; // No fallback - error will be passed to caller
}
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = async (_req, res) => {
try {
const modelsResponse = await getModelsResponse();
res.status(200).json(modelsResponse);
} catch (error) {
console.error("Error in handleModelRequest:", error);
res.status(500).json({ error: "Failed to fetch models" });
}
};
const deepseekProxy = createQueuedProxyMiddleware({
mutations: [addKey, finalizeBody],
target: "https://api.deepseek.com/beta",
blockingResponseHandler: deepseekResponseHandler,
});
const deepseekRouter = Router();
// Combines all the assistant messages at the end of the context and adds the
// beta 'prefix' option, so prefills work the same way they do for Claude.
function enablePrefill(req: Request) {
// Prefill handling can be disabled by setting the NO_DEEPSEEK_PREFILL
// environment variable.
if (process.env.NO_DEEPSEEK_PREFILL) return;
const msgs = req.body.messages;
if (msgs.at(-1)?.role !== 'assistant') return;
let i = msgs.length - 1;
let content = '';
while (i >= 0 && msgs[i].role === 'assistant') {
// maybe we should also add a newline between messages? no for now.
content = msgs[i--].content + content;
}
msgs.splice(i + 1, msgs.length, { role: 'assistant', content, prefix: true });
}
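// Illustrative transformation (hypothetical conversation):
//   before: [{ role: "user", content: "Hi" },
//            { role: "assistant", content: "Sure," },
//            { role: "assistant", content: " here goes:" }]
//   after:  [{ role: "user", content: "Hi" },
//            { role: "assistant", content: "Sure, here goes:", prefix: true }]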
function removeReasonerStuff(req: Request) {
if (req.body.model === "deepseek-reasoner") {
// https://api-docs.deepseek.com/guides/reasoning_model
delete req.body.presence_penalty;
delete req.body.frequency_penalty;
delete req.body.temperature;
delete req.body.top_p;
delete req.body.logprobs;
delete req.body.top_logprobs;
}
}
deepseekRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "deepseek" },
{ afterTransform: [ enablePrefill, removeReasonerStuff ] }
),
deepseekProxy
);
deepseekRouter.get("/v1/models", handleModelRequest);
export const deepseek = deepseekRouter;
src/proxy/gatekeeper.ts +63 -8
@@ -1,6 +1,7 @@
import type { Request, RequestHandler } from "express";
import type { Request, Response, RequestHandler } from "express";
import { config } from "../config";
import { authenticate, getUser } from "../shared/users/user-store";
import { sendErrorToClient } from "./middleware/response/error-generator";
const GATEKEEPER = config.gatekeeper;
const PROXY_KEY = config.proxyKey;
@@ -11,6 +12,7 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
// pass the _proxy_ key in this header too, instead of providing it as a
// Bearer token in the Authorization header. So we need to check both.
// Prefer the Authorization header if both are present.
// Google AI uses a key querystring parameter.
if (req.headers.authorization) {
const token = req.headers.authorization?.slice("Bearer ".length);
@@ -24,6 +26,18 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
return token;
}
if (req.headers["x-goog-api-key"]) {
const token = req.headers["x-goog-api-key"]?.toString();
delete req.headers["x-goog-api-key"];
return token;
}
if (req.query.key) {
const token = req.query.key?.toString();
delete req.query.key;
return token;
}
return undefined;
}
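// With the checks above, clients can supply the proxy token in several ways,
// e.g. (hypothetical token value):
//   Authorization: Bearer pk-example
//   x-goog-api-key: pk-example
//   ...?key=pk-example   (Google AI style querystring)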
@@ -46,24 +60,65 @@ export const gatekeeper: RequestHandler = (req, res, next) => {
}
if (GATEKEEPER === "user_token" && token) {
const { user, result } = authenticate(token, req.ip);
// RisuAI users all come from a handful of aws lambda IPs so we cannot use
// IP alone to distinguish between them and prevent usertoken sharing.
// Risu sends a signed token in the request headers with an anonymous user
// ID that we can instead use to associate requests with an individual.
const ip = req.risuToken?.length
? `risu${req.risuToken}-${req.ip}`
: req.ip;
const { user, result } = authenticate(token, ip);
switch (result) {
case "success":
req.user = user;
return next();
case "limited":
return res.status(403).json({
error: `Forbidden: no more IPs can authenticate with this token`,
});
return sendError(
req,
res,
403,
`Forbidden: no more IP addresses allowed for this user token`,
{ currentIp: ip, maxIps: user?.maxIps }
);
case "disabled":
const bannedUser = getUser(token);
if (bannedUser?.disabledAt) {
const reason = bannedUser.disabledReason || "Token disabled";
return res.status(403).json({ error: `Forbidden: ${reason}` });
const reason = bannedUser.disabledReason || "User token disabled";
return sendError(req, res, 403, `Forbidden: ${reason}`);
}
}
}
res.status(401).json({ error: "Unauthorized" });
sendError(req, res, 401, "Unauthorized");
};
function sendError(
req: Request,
res: Response,
status: number,
message: string,
data: any = {}
) {
const isPost = req.method === "POST";
const hasBody = isPost && req.body;
const hasModel = hasBody && req.body.model;
if (!hasModel) {
return res.status(status).json({ error: message });
}
sendErrorToClient({
req,
res,
options: {
title: `Proxy gatekeeper error (HTTP ${status})`,
message,
format: "unknown",
statusCode: status,
reqId: req.id,
obj: data,
},
});
}
src/proxy/gcp.ts +257
@@ -0,0 +1,257 @@
import { Request, RequestHandler, Router } from "express";
import { config } from "../config";
import { transformAnthropicChatResponseToOpenAI } from "./anthropic";
import { ipLimiter } from "./rate-limit";
import {
createPreprocessorMiddleware,
finalizeSignedRequest,
signGcpRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";
const LATEST_GCP_SONNET_MINOR_VERSION = "20240229";
let modelsCache: any = null;
let modelsCacheTime = 0;
const getModelsResponse = () => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
if (!config.gcpCredentials) return { object: "list", data: [] };
// https://docs.anthropic.com/en/docs/about-claude/models
const variants = [
"claude-3-haiku@20240307",
"claude-3-5-haiku@20241022",
"claude-3-5-sonnet@20240620",
"claude-3-5-sonnet-v2@20241022",
"claude-3-7-sonnet@20250219",
"claude-sonnet-4@20250514",
"claude-opus-4@20250514",
"claude-opus-4-1@20250805",
];
const models = variants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "anthropic",
permission: [],
root: "claude",
parent: null,
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
const gcpBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
switch (`${req.inboundApi}<-${req.outboundApi}`) {
case "openai<-anthropic-chat":
req.log.info("Transforming Anthropic Chat back to OpenAI format");
newBody = transformAnthropicChatResponseToOpenAI(body);
break;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const gcpProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
mutations: [signGcpRequest, finalizeSignedRequest],
blockingResponseHandler: gcpBlockingResponseHandler,
});
const oaiToChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-chat", service: "gcp" },
{ afterTransform: [maybeReassignModel] }
);
/**
 * Routes an OpenAI-compatible prompt to the Claude chat completion endpoint.
 * (This previously chose between the legacy text completion and the chat
 * completion endpoint based on the requested model; only the chat endpoint
 * is used now.)
 */
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
oaiToChatPreprocessor(req, res, next);
};
const gcpRouter = Router();
gcpRouter.get("/v1/models", handleModelRequest);
// Native Anthropic chat completion endpoint.
gcpRouter.post(
"/v1/messages",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "gcp" },
{ afterTransform: [maybeReassignModel] }
),
gcpProxy
);
// OpenAI-to-GCP Anthropic compatibility endpoint.
gcpRouter.post(
"/v1/chat/completions",
ipLimiter,
preprocessOpenAICompatRequest,
gcpProxy
);
/**
* Tries to deal with:
* - frontends sending GCP model names even when they want to use the OpenAI-
* compatible endpoint
* - frontends sending Anthropic model names that GCP doesn't recognize
* - frontends sending OpenAI model names because they expect the proxy to
* translate them
*
* If client sends GCP model ID it will be used verbatim. Otherwise, various
* strategies are used to try to map a non-GCP model name to GCP model ID.
*/
function maybeReassignModel(req: Request) {
// Validate Claude 4.1 Opus parameters before processing
validateClaude41OpusParameters(req);
const model = req.body.model;
const DEFAULT_MODEL = "claude-3-5-sonnet-v2@20241022";
// If it looks like a GCP model, use it as-is
if (model.startsWith("claude-") && model.includes("@")) {
return;
}
// Anthropic model names can look like:
// - claude-3-sonnet
// - claude-3.5-sonnet
// - claude-3-5-haiku
// - claude-3-5-haiku-latest
// - claude-3-5-sonnet-20240620
// - claude-opus-4-1 (new format)
// - claude-4.1-opus (alternative format)
const pattern = /^claude-(?:(\d+)[.-]?(\d)?-(sonnet|opus|haiku)(?:-(latest|\d+))?|(opus|sonnet|haiku)-(\d+)[.-]?(\d)?(?:-(latest|\d+))?)/i;
const match = model.match(pattern);
if (!match) {
req.body.model = DEFAULT_MODEL;
return;
}
// Handle both formats: claude-3-5-sonnet and claude-opus-4-1
const [_, major1, minor1, flavor1, rev1, flavor2, major2, minor2, rev2] = match;
let major, minor, flavor, rev;
if (major1) {
// Old format: claude-3-5-sonnet
major = major1;
minor = minor1;
flavor = flavor1;
rev = rev1;
} else {
// New format: claude-opus-4-1
major = major2;
minor = minor2;
flavor = flavor2;
rev = rev2;
}
const ver = minor ? `${major}.${minor}` : major;
switch (ver) {
case "3":
case "3.0":
switch (flavor) {
case "haiku":
req.body.model = "claude-3-haiku@20240307";
break;
case "opus":
req.body.model = "claude-3-opus@20240229";
break;
case "sonnet":
req.body.model = "claude-3-sonnet@20240229";
break;
default:
req.body.model = "claude-3-sonnet@20240229";
}
return;
case "3.5":
switch (flavor) {
case "haiku":
req.body.model = "claude-3-5-haiku@20241022";
return;
case "opus":
// no 3.5 opus yet
req.body.model = DEFAULT_MODEL;
return;
case "sonnet":
if (rev === "20240620") {
req.body.model = "claude-3-5-sonnet@20240620";
} else {
// includes -latest, edit if anthropic actually releases 3.5 sonnet v3
req.body.model = DEFAULT_MODEL;
}
return;
default:
req.body.model = DEFAULT_MODEL;
}
return;
case "3.7":
switch (flavor) {
case "sonnet":
req.body.model = "claude-3-7-sonnet@20250219";
return;
}
break;
case "4":
case "4.0":
switch (flavor) {
case "opus":
req.body.model = "claude-opus-4@20250514";
return;
case "sonnet":
req.body.model = "claude-sonnet-4@20250514";
return;
default:
req.body.model = DEFAULT_MODEL;
}
break;
case "4.1":
switch (flavor) {
case "opus":
req.body.model = "claude-opus-4-1@20250805";
return;
default:
req.body.model = DEFAULT_MODEL;
}
break;
default:
req.body.model = DEFAULT_MODEL;
}
}
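// For illustration (hypothetical inbound names):
//   "claude-3-5-sonnet-20240620"              -> "claude-3-5-sonnet@20240620"
//   "claude-opus-4-1" and "claude-4.1-opus"   -> "claude-opus-4-1@20250805"
//   "claude-sonnet-4@20250514" is passed through unchanged (already a GCP ID)
// and most unrecognized names fall back to DEFAULT_MODEL.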
export const gcp = gcpRouter;
src/proxy/google-ai.ts +206 -50
@@ -1,26 +1,27 @@
import { Request, RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { Request, RequestHandler, Router, Response, NextFunction } from "express";
import { v4 } from "uuid";
import { GoogleAIKey, keyPool } from "../shared/key-management";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeSignedRequest,
forceModel,
} from "./middleware/request";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
import { addGoogleAIKey } from "./middleware/request/preprocessors/add-google-ai-key";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { addGoogleAIKey } from "./middleware/request/mutators/add-google-ai-key";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import axios from "axios";
let modelsCache: any = null;
let modelsCacheTime = 0;
// Cache for native Google AI models
let nativeModelsCache: any = null;
let nativeModelsCacheTime = 0;
// https://ai.google.dev/models/gemini
// TODO: list models https://ai.google.dev/tutorials/rest_quickstart#list_models
const getModelsResponse = () => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
@@ -28,10 +29,24 @@ const getModelsResponse = () => {
if (!config.googleAIKey) return { object: "list", data: [] };
const googleAIVariants = ["gemini-pro"];
const keys = keyPool
.list()
.filter((k) => k.service === "google-ai") as GoogleAIKey[];
if (keys.length === 0) {
modelsCache = { object: "list", data: [] };
modelsCacheTime = new Date().getTime();
return modelsCache;
}
const models = googleAIVariants.map((id) => ({
id,
// Get all model IDs from keys, excluding any with "bard" in the name
const modelIds = Array.from(
new Set(keys.map((k) => k.modelIds).flat())
).filter((id) => id.startsWith("models/") && !id.includes("bard"));
// Strip "models/" prefix from IDs before creating model objects
const models = modelIds.map((id) => ({
// Strip "models/" prefix from ID for consistency with request processing
id: id.startsWith("models/") ? id.slice("models/".length) : id,
object: "model",
created: new Date().getTime(),
owned_by: "google",
@@ -46,12 +61,51 @@ const getModelsResponse = () => {
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
// Builds the model list in Google's native format; no longer fetches from the
// Google AI API (see the comment below).
const getNativeModelsResponse = async () => {
// Return cached value if it was refreshed in the last minute
if (new Date().getTime() - nativeModelsCacheTime < 1000 * 60) {
return nativeModelsCache;
}
/*
* The official Google API requires an API key. However, SillyTavern only needs
* a list of model IDs and does not care about any other model metadata. We
* can therefore generate a **synthetic** response from the keys already
* loaded into the proxy (same source we use for the OpenAI-compatible
* endpoint) and completely avoid the outbound request. This removes the
* need for the frontend to supply the proxy password as an API key and
* prevents 4xx/5xx errors when the real Google API is unreachable or the key
* is missing.
*/
const openaiStyle = getModelsResponse();
const models = (openaiStyle.data || []).map((m: any) => ({
// Google AI Studio returns names in the format "models/<id>"
name: `models/${m.id}`,
supportedGenerationMethods: ["generateContent"],
}));
nativeModelsCache = { models };
nativeModelsCacheTime = new Date().getTime();
return nativeModelsCache;
};
const handleModelRequest: RequestHandler = (_req: Request, res: any) => {
res.status(200).json(getModelsResponse());
};
/** Only used for non-streaming requests. */
const googleAIResponseHandler: ProxyResHandlerWithBody = async (
// Native Gemini API model list request
const handleNativeModelRequest: RequestHandler = async (_req: Request, res: any) => {
try {
const modelsResponse = await getNativeModelsResponse();
res.status(200).json(modelsResponse);
} catch (error) {
console.error("Error in handleNativeModelRequest:", error);
res.status(500).json({ error: "Failed to fetch models" });
}
};
const googleAIBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
@@ -61,21 +115,13 @@ const googleAIResponseHandler: ProxyResHandlerWithBody = async (
throw new Error("Expected body to be an object");
}
if (config.promptLogging) {
const host = req.get("host");
body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
}
let newBody = body;
if (req.inboundApi === "openai") {
req.log.info("Transforming Google AI response to OpenAI format");
body = transformGoogleAIResponse(body, req);
newBody = transformGoogleAIResponse(body, req);
}
if (req.tokenizerInfo) {
body.proxy_tokenizer = req.tokenizerInfo;
}
res.status(200).json(body);
res.status(200).json({ ...newBody, proxy: body.proxy });
};
function transformGoogleAIResponse(
@@ -83,8 +129,30 @@ function transformGoogleAIResponse(
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
const parts = resBody.candidates[0].content?.parts ?? [{ text: "" }];
const content = parts[0].text.replace(/^(.{0,50}?): /, () => "");
// Handle the case where content might have different structures
let content = "";
// Check if the response has the expected structure
if (resBody.candidates && resBody.candidates[0]) {
const candidate = resBody.candidates[0];
// Extract content text with multiple fallbacks
if (candidate.content?.parts && candidate.content.parts[0]?.text) {
// Regular format with parts array containing text
content = candidate.content.parts[0].text;
} else if (candidate.content?.text) {
// Alternate format with direct text property
content = candidate.content.text;
} else if (typeof candidate.content?.parts?.[0] === 'string') {
// Some formats might have string parts
content = candidate.content.parts[0];
}
// Apply cleanup to the content if needed
content = content.replace(/^(.{0,50}?): /, () => "");
}
return {
id: "goo-" + v4(),
object: "chat.completion",
@@ -98,41 +166,129 @@ function transformGoogleAIResponse(
choices: [
{
message: { role: "assistant", content },
finish_reason: resBody.candidates[0].finishReason,
finish_reason: resBody.candidates?.[0]?.finishReason || "STOP",
index: 0,
},
],
};
}
const googleAIProxy = createQueueMiddleware({
beforeProxy: addGoogleAIKey,
proxyMiddleware: createProxyMiddleware({
target: "bad-target-will-be-rewritten",
router: ({ signedRequest }) => {
const { protocol, hostname, path } = signedRequest;
return `${protocol}//${hostname}${path}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([googleAIResponseHandler]),
error: handleProxyError,
},
}),
const googleAIProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }: { signedRequest: any }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
const { protocol, hostname } = signedRequest;
return `${protocol}//${hostname}`;
},
mutations: [addGoogleAIKey, finalizeSignedRequest],
blockingResponseHandler: googleAIBlockingResponseHandler,
});
const googleAIRouter = Router();
googleAIRouter.get("/v1/models", handleModelRequest);
googleAIRouter.get("/:apiVersion(v1alpha|v1beta)/models", handleNativeModelRequest);
/**
* Processes the thinking budget for Gemini 2.5 Flash model.
* Validation has been disabled - budget is passed through without limits.
*/
function processThinkingBudget(req: Request) {
// Validation disabled - budget is passed through without any range limits
// Previously enforced 0-24576 token limit
}
function setStreamFlag(req: Request) {
const isStreaming = req.url.includes("streamGenerateContent");
if (isStreaming) {
req.body.stream = true;
req.isStreaming = true;
} else {
req.body.stream = false;
req.isStreaming = false;
}
}
/**
* Strips 'models/' prefix from the beginning of model IDs if present.
* No longer forces redirection to gemini-1.5-pro-latest for non-Gemini models.
*/
function maybeReassignModel(req: Request) {
// Ensure model is on body as a lot of middleware will expect it.
const model = req.body.model || req.url.split("/").pop()?.split(":").shift();
if (!model) {
throw new Error("You must specify a model with your request.");
}
req.body.model = model;
// Only strip the 'models/' prefix if present
if (model.startsWith("models/")) {
req.body.model = model.slice("models/".length);
req.log.info({ originalModel: model, updatedModel: req.body.model }, "Stripped 'models/' prefix from model ID");
}
// No longer redirecting non-Gemini models to gemini-1.5-pro-latest
// This allows the original model to be passed through to the API
// If it's an invalid model, the Google AI API will return the appropriate error
}
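// For illustration (hypothetical IDs): "models/gemini-1.5-pro-latest" becomes
// "gemini-1.5-pro-latest", while "gemini-2.0-flash" is forwarded verbatim;
// invalid IDs are left for the Google AI API to reject.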
/**
* Middleware to check for and block requests to experimental models.
* This function is intended to be used as a RequestPreprocessor.
* It throws an error if an experimental model is detected, which should be
* caught by the proxy's onError handler.
*
* Models can be allowed through the ALLOWED_EXP_MODELS environment variable.
*/
function checkAndBlockExperimentalModels(req: Request) {
const modelId = req.body.model as string | undefined;
// Check if the model ID contains "exp" (case-insensitive)
if (modelId && modelId.toLowerCase().includes("exp")) {
// Check if this specific model is in the allowlist
const allowedModels = config.allowedExpModels
?.split(",")
.map(model => model.trim())
.filter(model => model.length > 0) || [];
const isAllowed = allowedModels.some(allowedModel =>
modelId.toLowerCase() === allowedModel.toLowerCase()
);
if (isAllowed) {
req.log.info({ modelId }, "Allowing experimental Google AI model via allowlist.");
return; // Allow the request to proceed
}
req.log.warn({ modelId }, "Blocking request to experimental Google AI model.");
const err: any = new Error("Experimental models are too unstable to be supported in proxy code. Please use preview models instead.");
err.statusCode = 400;
throw err;
}
// If no experimental model, do nothing, allowing request to proceed.
}
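// For illustration (hypothetical env value):
//   ALLOWED_EXP_MODELS="gemini-exp-1206,gemini-2.0-pro-exp"
// would let exactly those two IDs through while other "*exp*" models are
// still rejected with a 400.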
// Native Google AI chat completion endpoint
googleAIRouter.post(
"/:apiVersion(v1alpha|v1beta)/models/:modelId:(generateContent|streamGenerateContent)",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "google-ai", outApi: "google-ai", service: "google-ai" },
{
beforeTransform: [maybeReassignModel],
afterTransform: [checkAndBlockExperimentalModels, setStreamFlag, processThinkingBudget]
}
),
googleAIProxy
);
// OpenAI-to-Google AI compatibility endpoint.
googleAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "google-ai", service: "google-ai" },
{ afterTransform: [forceModel("gemini-pro")] }
{
afterTransform: [maybeReassignModel, checkAndBlockExperimentalModels, processThinkingBudget]
}
),
googleAIProxy
);
src/proxy/middleware/common.ts +124 -40
@@ -1,16 +1,24 @@
import { Request, Response } from "express";
import httpProxy from "http-proxy";
import http from "http";
import { Socket } from "net";
import { ZodError } from "zod";
import { generateErrorMessage } from "zod-error";
import { makeCompletionSSE } from "../../shared/streaming";
import { HttpError } from "../../shared/errors";
import { assertNever } from "../../shared/utils";
import { QuotaExceededError } from "./request/preprocessors/apply-quota-limits";
import { sendErrorToClient } from "./response/error-generator";
const OPENAI_CHAT_COMPLETION_ENDPOINT = "/v1/chat/completions";
const OPENAI_TEXT_COMPLETION_ENDPOINT = "/v1/completions";
const OPENAI_EMBEDDINGS_ENDPOINT = "/v1/embeddings";
const OPENAI_IMAGE_COMPLETION_ENDPOINT = "/v1/images/generations";
const OPENAI_RESPONSES_ENDPOINT = "/v1/responses";
const ANTHROPIC_COMPLETION_ENDPOINT = "/v1/complete";
const ANTHROPIC_MESSAGES_ENDPOINT = "/v1/messages";
const ANTHROPIC_SONNET_COMPAT_ENDPOINT = "/v1/sonnet";
const ANTHROPIC_OPUS_COMPAT_ENDPOINT = "/v1/opus";
const GOOGLE_AI_ALPHA_COMPLETION_ENDPOINT = "/v1alpha/models";
const GOOGLE_AI_BETA_COMPLETION_ENDPOINT = "/v1beta/models";
export function isTextGenerationRequest(req: Request) {
return (
@@ -18,7 +26,13 @@ export function isTextGenerationRequest(req: Request) {
[
OPENAI_CHAT_COMPLETION_ENDPOINT,
OPENAI_TEXT_COMPLETION_ENDPOINT,
OPENAI_RESPONSES_ENDPOINT,
ANTHROPIC_COMPLETION_ENDPOINT,
ANTHROPIC_MESSAGES_ENDPOINT,
ANTHROPIC_SONNET_COMPAT_ENDPOINT,
ANTHROPIC_OPUS_COMPAT_ENDPOINT,
GOOGLE_AI_ALPHA_COMPLETION_ENDPOINT,
GOOGLE_AI_BETA_COMPLETION_ENDPOINT,
].some((endpoint) => req.path.startsWith(endpoint))
);
}
@@ -36,7 +50,7 @@ export function isEmbeddingsRequest(req: Request) {
);
}
export function writeErrorResponse(
export function sendProxyError(
req: Request,
res: Response,
statusCode: number,
@@ -46,51 +60,48 @@ export function writeErrorResponse(
const msg =
statusCode === 500
? `The proxy encountered an error while trying to process your prompt.`
: `The proxy encountered an error while trying to send your prompt to the upstream service.`;
: `The proxy encountered an error while trying to send your prompt to the API.`;
// If we're mid-SSE stream, send a data event with the error payload and end
// the stream. Otherwise just send a normal error response.
if (
res.headersSent ||
String(res.getHeader("content-type")).startsWith("text/event-stream")
) {
const event = makeCompletionSSE({
sendErrorToClient({
options: {
format: req.inboundApi,
title: `Proxy error (HTTP ${statusCode} ${statusMessage})`,
message: `${msg} Further technical details are provided below.`,
message: `${msg} Further details are provided below.`,
obj: errorPayload,
reqId: req.id,
model: req.body?.model,
});
res.write(event);
res.write(`data: [DONE]\n\n`);
res.end();
} else {
if (req.tokenizerInfo && typeof errorPayload.error === "object") {
errorPayload.error.proxy_tokenizer = req.tokenizerInfo;
}
res.status(statusCode).json(errorPayload);
}
},
req,
res,
});
}
export const handleProxyError: httpProxy.ErrorCallback = (err, req, res) => {
req.log.error(err, `Error during http-proxy-middleware request`);
classifyErrorAndSend(err, req as Request, res as Response);
};
/**
* Handles errors thrown during preparation of a proxy request (before it is
* sent to the upstream API), typically due to validation, quota, or other
* pre-flight checks. Depending on the error class, this function will send an
* appropriate error response to the client, streaming it if necessary.
*/
export const classifyErrorAndSend = (
err: Error,
req: Request,
res: Response
res: Response | Socket
) => {
if (res instanceof Socket) {
// We should always have an Express response object here, but http-proxy's
// ErrorCallback type says it could be just a Socket.
req.log.error(err, "Caught error while proxying request to target but cannot send error response to client.");
return res.destroy();
}
try {
const { statusCode, statusMessage, userMessage, ...errorDetails } =
classifyError(err);
writeErrorResponse(req, res, statusCode, statusMessage, {
sendProxyError(req, res, statusCode, statusMessage, {
error: { message: userMessage, ...errorDetails },
});
} catch (error) {
req.log.error(error, `Error writing error response headers, giving up.`);
res.end();
}
};
@@ -113,6 +124,35 @@ function classifyError(err: Error): {
};
switch (err.constructor.name) {
case "HttpError":
const statusCode = (err as HttpError).status;
return {
statusCode,
statusMessage: `HTTP ${statusCode} ${http.STATUS_CODES[statusCode]}`,
userMessage: `Reverse proxy error: ${err.message}`,
type: "proxy_http_error",
};
case "BadRequestError":
return {
statusCode: 400,
statusMessage: "Bad Request",
userMessage: `Request is not valid. (${err.message})`,
type: "proxy_bad_request",
};
case "NotFoundError":
return {
statusCode: 404,
statusMessage: "Not Found",
userMessage: `Requested resource not found. (${err.message})`,
type: "proxy_not_found",
};
case "PaymentRequiredError":
return {
statusCode: 402,
statusMessage: "No Keys Available",
userMessage: err.message,
type: "proxy_no_keys_available",
};
case "ZodError":
const userMessage = generateErrorMessage((err as ZodError).issues, {
prefix: "Request validation failed. ",
@@ -194,14 +234,48 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
switch (format) {
case "openai":
case "mistral-ai":
return body.choices[0].message.content;
// A few possible shapes:
// - choices[0].message.content
// - choices[0].message with no content, if the model is invoking a tool
return body.choices?.[0]?.message?.content || "";
case "openai-responses":
// Handle the original Responses API format
if (body.output && Array.isArray(body.output)) {
// Look for a message type in the output array
for (const item of body.output) {
if (item.type === "message" && item.content && Array.isArray(item.content)) {
// Extract text content from each content item
return item.content
.filter((contentItem: any) => contentItem.type === "output_text")
.map((contentItem: any) => contentItem.text)
.join("");
}
}
}
// If we've been transformed to chat completion format already
return body.choices?.[0]?.message?.content || "";
case "mistral-text":
return body.outputs?.[0]?.text || "";
case "openai-text":
return body.choices[0].text;
case "anthropic":
case "anthropic-chat":
if (!body.content) {
req.log.error(
{ body: JSON.stringify(body) },
"Received empty Anthropic chat completion"
);
return "";
}
return body.content
.map(({ text, type }: { type: string; text: string }) =>
type === "text" ? text : `[Unsupported content type: ${type}]`
)
.join("\n");
case "anthropic-text":
if (!body.completion) {
req.log.error(
{ body: JSON.stringify(body) },
"Received empty Anthropic completion"
"Received empty Anthropic text completion"
);
return "";
}
@@ -210,7 +284,15 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
if ("choices" in body) {
return body.choices[0].message.content;
}
return body.candidates[0].content.parts[0].text;
const text = body.candidates[0].content?.parts?.[0]?.text;
if (!text) {
req.log.warn(
{ body: JSON.stringify(body) },
"Received empty Google AI text completion"
);
return "";
}
return text;
case "openai-image":
return body.data?.map((item: any) => item.url).join("\n");
default:
@@ -218,21 +300,23 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
}
}
export function getModelFromBody(req: Request, body: Record<string, any>) {
export function getModelFromBody(req: Request, resBody: Record<string, any>) {
const format = req.outboundApi;
switch (format) {
case "openai":
case "openai-text":
case "openai-responses":
return resBody.model;
case "mistral-ai":
return body.model;
case "mistral-text":
case "openai-image":
return req.body.model;
case "anthropic":
// Anthropic confirms the model in the response, but AWS Claude doesn't.
return body.model || req.body.model;
case "google-ai":
// Google doesn't confirm the model in the response.
// These formats don't have a model in the response body.
return req.body.model;
case "anthropic-chat":
case "anthropic-text":
// Anthropic confirms the model in the response, but AWS Claude doesn't.
return resBody.model || req.body.model;
default:
assertNever(format);
}
src/proxy/middleware/request/index.ts +28 -35
@@ -1,42 +1,38 @@
import type { Request } from "express";
import type { ClientRequest } from "http";
import type { ProxyReqCallback } from "http-proxy";
export { createOnProxyReqHandler } from "./onproxyreq-factory";
import { ProxyReqManager } from "./proxy-req-manager";
export {
createPreprocessorMiddleware,
createEmbeddingsPreprocessorMiddleware,
} from "./preprocessor-factory";
// Express middleware (runs before http-proxy-middleware, can be async)
export { addAzureKey } from "./preprocessors/add-azure-key";
// Preprocessors (runs before request is queued, usually body transformation/validation)
export { applyQuotaLimits } from "./preprocessors/apply-quota-limits";
export { validateContextSize } from "./preprocessors/validate-context-size";
export { blockZoomerOrigins } from "./preprocessors/block-zoomer-origins";
export { countPromptTokens } from "./preprocessors/count-prompt-tokens";
export { languageFilter } from "./preprocessors/language-filter";
export { setApiFormat } from "./preprocessors/set-api-format";
export { signAwsRequest } from "./preprocessors/sign-aws-request";
export { transformOutboundPayload } from "./preprocessors/transform-outbound-payload";
export { validateContextSize } from "./preprocessors/validate-context-size";
export { validateModelFamily } from "./preprocessors/validate-model-family";
export { validateVision } from "./preprocessors/validate-vision";
// http-proxy-middleware callbacks (runs on onProxyReq, cannot be async)
export { addKey, addKeyForEmbeddingsRequest } from "./onproxyreq/add-key";
export { addAnthropicPreamble } from "./onproxyreq/add-anthropic-preamble";
export { blockZoomerOrigins } from "./onproxyreq/block-zoomer-origins";
export { checkModelFamily } from "./onproxyreq/check-model-family";
export { finalizeBody } from "./onproxyreq/finalize-body";
export { finalizeSignedRequest } from "./onproxyreq/finalize-signed-request";
export { stripHeaders } from "./onproxyreq/strip-headers";
// Proxy request mutators (runs every time request is dequeued, before proxying, usually for auth/signing)
export { addKey, addKeyForEmbeddingsRequest } from "./mutators/add-key";
export { addAzureKey } from "./mutators/add-azure-key";
export { finalizeBody } from "./mutators/finalize-body";
export { finalizeSignedRequest } from "./mutators/finalize-signed-request";
export { signAwsRequest } from "./mutators/sign-aws-request";
export { signGcpRequest } from "./mutators/sign-vertex-ai-request";
export { stripHeaders } from "./mutators/strip-headers";
/**
* Middleware that runs prior to the request being handled by http-proxy-
* middleware.
* Middleware that runs prior to the request being queued or handled by
* http-proxy-middleware. You will not have access to the proxied
* request/response objects since they have not yet been sent to the API.
*
* Async functions can be used here, but you will not have access to the proxied
* request/response objects, nor the data set by ProxyRequestMiddleware
* functions as they have not yet been run.
*
* User will have been authenticated by the time this middleware runs, but your
* request won't have been assigned an API key yet.
* User will have been authenticated by the proxy's gatekeeper, but the request
* won't have been assigned an upstream API key yet.
*
* Note that these functions only run once ever per request, even if the request
* is automatically retried by the request queue middleware.
@@ -44,17 +40,14 @@ export { stripHeaders } from "./onproxyreq/strip-headers";
export type RequestPreprocessor = (req: Request) => void | Promise<void>;
/**
* Callbacks that run immediately before the request is sent to the API in
* response to http-proxy-middleware's `proxyReq` event.
* Middleware that runs immediately before the request is proxied to the
* upstream API, after dequeueing the request from the request queue.
*
* Async functions cannot be used here as HPM's event emitter is not async and
* will not wait for the promise to resolve before sending the request.
*
* Note that these functions may be run multiple times per request if the
* first attempt is rate limited and the request is automatically retried by the
* request queue middleware.
* Because these middleware may be run multiple times per request if a retryable
* error occurs and the request put back in the queue, they must be idempotent.
* A change manager is provided to allow the middleware to make changes to the
* request which can be automatically reverted.
*/
export type HPMRequestCallback = ProxyReqCallback<ClientRequest, Request>;
export const forceModel = (model: string) => (req: Request) =>
void (req.body.model = model);
export type ProxyReqMutator = (
changeManager: ProxyReqManager
) => void | Promise<void>;
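// A minimal sketch of a mutator under this contract (illustrative only; the
// header name is hypothetical, while setHeader and request are ProxyReqManager
// members used by other mutators in this changeset):
//
//   const addTraceHeader: ProxyReqMutator = (manager) => {
//     manager.setHeader("x-example-trace", String(manager.request.id));
//   };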
src/proxy/middleware/request/mutators/add-azure-key.ts +84
@@ -0,0 +1,84 @@
import {
APIFormat,
AzureOpenAIKey,
keyPool,
} from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";
export const addAzureKey: ProxyReqMutator = async (manager) => {
const req = manager.request;
const validAPIs: APIFormat[] = ["openai", "openai-image"];
const apisValid = [req.outboundApi, req.inboundApi].every((api) =>
validAPIs.includes(api)
);
const serviceValid = req.service === "azure";
if (!apisValid || !serviceValid) {
throw new Error("addAzureKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const model = req.body.model.startsWith("azure-")
? req.body.model
: `azure-${req.body.model}`;
// TODO: untracked mutation to body, I think this should just be a
// RequestPreprocessor because we don't need to do it every dequeue.
req.body.model = model;
const key = keyPool.get(model, "azure");
manager.setKey(key);
// Handles the sole Azure API deviation from the OpenAI spec (that I know of)
// TODO: this should also probably be a RequestPreprocessor
const notNullOrUndefined = (x: any) => x !== null && x !== undefined;
if ([req.body.logprobs, req.body.top_logprobs].some(notNullOrUndefined)) {
// OpenAI wants logprobs: true/false and top_logprobs: number
// Azure seems to just want to combine them into logprobs: number
// if (typeof req.body.logprobs === "boolean") {
// req.body.logprobs = req.body.top_logprobs || undefined;
// delete req.body.top_logprobs
// }
// Temporarily just disabling logprobs for Azure because their model support
// is random: `This model does not support the 'logprobs' parameter.`
delete req.body.logprobs;
delete req.body.top_logprobs;
}
req.log.info(
{ key: key.hash, model },
"Assigned Azure OpenAI key to request"
);
const cred = req.key as AzureOpenAIKey;
const { resourceName, deploymentId, apiKey } = getCredentialsFromKey(cred);
const operation =
req.outboundApi === "openai" ? "/chat/completions" : "/images/generations";
const apiVersion =
req.outboundApi === "openai" ? "2023-09-01-preview" : "2024-02-15-preview";
manager.setSignedRequest({
method: "POST",
protocol: "https:",
hostname: `${resourceName}.openai.azure.com`,
path: `/openai/deployments/${deploymentId}${operation}?api-version=${apiVersion}`,
headers: {
["host"]: `${resourceName}.openai.azure.com`,
["content-type"]: "application/json",
["api-key"]: apiKey,
},
body: JSON.stringify(req.body),
});
};
function getCredentialsFromKey(key: AzureOpenAIKey) {
const [resourceName, deploymentId, apiKey] = key.key.split(":");
if (!resourceName || !deploymentId || !apiKey) {
throw new Error("Assigned Azure OpenAI key is not in the correct format.");
}
return { resourceName, deploymentId, apiKey };
}
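// Illustrative only: how the colon-delimited Azure key format (hypothetical
// values) parsed by getCredentialsFromKey above maps to the upstream URL.
const exampleAzureKey = { key: "my-resource:gpt-4o-deployment:0123456789abcdef" } as AzureOpenAIKey;
const { resourceName, deploymentId } = getCredentialsFromKey(exampleAzureKey);
// resourceName === "my-resource", deploymentId === "gpt-4o-deployment"
// => POST https://my-resource.openai.azure.com/openai/deployments/gpt-4o-deployment/chat/completions?api-version=2023-09-01-preview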
@@ -0,0 +1,47 @@
import { keyPool } from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";
export const addGoogleAIKey: ProxyReqMutator = (manager) => {
const req = manager.request;
const inboundValid =
req.inboundApi === "openai" || req.inboundApi === "google-ai";
const outboundValid = req.outboundApi === "google-ai";
const serviceValid = req.service === "google-ai";
if (!inboundValid || !outboundValid || !serviceValid) {
throw new Error("addGoogleAIKey called on invalid request");
}
const model = req.body.model;
const key = keyPool.get(model, "google-ai");
manager.setKey(key);
req.log.info(
{ key: key.hash, model, stream: req.isStreaming },
"Assigned Google AI API key to request"
);
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:generateContent?key=$API_KEY
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}
const payload = { ...req.body, stream: undefined, model: undefined };
// For OpenAI -> Google conversion we don't actually have the API version
const apiVersion = req.params.apiVersion || "v1beta";
// TODO: this isn't actually signed, so the manager API is a little unclear.
// With the ProxyReqManager refactor, it's probably no longer necessary to
// do this because we can modify the path using Manager.setPath.
manager.setSignedRequest({
method: "POST",
protocol: "https:",
hostname: "generativelanguage.googleapis.com",
path: `/${apiVersion}/models/${model}:${
req.isStreaming ? "streamGenerateContent?alt=sse&" : "generateContent?"
}key=${key.key}`,
headers: {
["host"]: `generativelanguage.googleapis.com`,
["content-type"]: "application/json",
},
body: JSON.stringify(payload),
});
};
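// Illustrative only: the two paths built above for a hypothetical model and
// key, depending on whether the client requested streaming.
const streamingPath =
"/v1beta/models/gemini-1.5-pro:streamGenerateContent?alt=sse&key=AIza...";
const blockingPath =
"/v1beta/models/gemini-1.5-pro:generateContent?key=AIza...";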
@@ -0,0 +1,152 @@
import { AnthropicChatMessage } from "../../../../shared/api-schemas";
import { containsImageContent } from "../../../../shared/api-schemas/anthropic";
import { Key, OpenAIKey, keyPool } from "../../../../shared/key-management";
import { isEmbeddingsRequest } from "../../common";
import { assertNever } from "../../../../shared/utils";
import { ProxyReqMutator } from "../index";
export const addKey: ProxyReqMutator = (manager) => {
const req = manager.request;
let assignedKey: Key;
const { service, inboundApi, outboundApi, body } = req;
if (!inboundApi || !outboundApi) {
const err = new Error(
"Request API format missing. Did you forget to add the request preprocessor to your router?"
);
req.log.error({ inboundApi, outboundApi, path: req.path }, err.message);
throw err;
}
if (!body?.model) {
throw new Error("You must specify a model with your request.");
}
let needsMultimodal = false;
if (outboundApi === "anthropic-chat") {
needsMultimodal = containsImageContent(
body.messages as AnthropicChatMessage[]
);
}
if (inboundApi === outboundApi) {
// Pass streaming information for GPT-5 models that require verified keys for streaming
const isStreaming = body.stream === true;
assignedKey = keyPool.get(body.model, service, needsMultimodal, isStreaming);
} else {
switch (outboundApi) {
// If we are translating between API formats we may need to select a model
// for the user, because the provided model is for the inbound API.
// TODO: This whole else condition is probably no longer needed since API
// translation now reassigns the model earlier in the request pipeline.
case "anthropic-text":
case "anthropic-chat":
case "mistral-ai":
case "mistral-text":
case "google-ai":
assignedKey = keyPool.get(body.model, service);
break;
case "openai-text":
assignedKey = keyPool.get("gpt-3.5-turbo-instruct", service);
break;
case "openai-image":
// Use the actual model from the request body instead of defaulting to dall-e-3
// This ensures that gpt-image-1 requests get keys that are verified for gpt-image-1
assignedKey = keyPool.get(body.model, service);
break;
case "openai-responses":
assignedKey = keyPool.get(body.model, service);
break;
case "openai":
throw new Error(
`Outbound API ${outboundApi} is not supported for ${inboundApi}`
);
default:
assertNever(outboundApi);
}
}
manager.setKey(assignedKey);
req.log.info(
{ key: assignedKey.hash, model: body.model, inboundApi, outboundApi },
"Assigned key to request"
);
// TODO: KeyProvider should assemble all necessary headers
switch (assignedKey.service) {
case "anthropic":
manager.setHeader("X-API-Key", assignedKey.key);
if (!manager.request.headers["anthropic-version"]) {
manager.setHeader("anthropic-version", "2023-06-01");
}
break;
case "openai":
const key: OpenAIKey = assignedKey as OpenAIKey;
if (key.organizationId && !key.key.includes("svcacct")) {
manager.setHeader("OpenAI-Organization", key.organizationId);
}
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "mistral-ai":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "azure":
const azureKey = assignedKey.key;
manager.setHeader("api-key", azureKey);
break;
case "deepseek":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "xai":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "cohere":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "qwen":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "moonshot":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "aws":
case "gcp":
case "google-ai":
throw new Error("add-key should not be used for this service.");
default:
assertNever(assignedKey.service);
}
};
/**
* Special case for embeddings requests which don't go through the normal
* request pipeline.
*/
export const addKeyForEmbeddingsRequest: ProxyReqMutator = (manager) => {
const req = manager.request;
if (!isEmbeddingsRequest(req)) {
throw new Error(
"addKeyForEmbeddingsRequest called on non-embeddings request"
);
}
if (req.inboundApi !== "openai") {
throw new Error("Embeddings requests must be from OpenAI");
}
manager.setBody({ input: req.body.input, model: "text-embedding-ada-002" });
const key = keyPool.get("text-embedding-ada-002", "openai") as OpenAIKey;
manager.setKey(key);
req.log.info(
{ key: key.hash, toApi: req.outboundApi },
"Assigned Turbo key to embeddings request"
);
manager.setHeader("Authorization", `Bearer ${key.key}`);
if (key.organizationId) {
manager.setHeader("OpenAI-Organization", key.organizationId);
}
};
@@ -0,0 +1,67 @@
import type { ProxyReqMutator } from "../index";
/** Finalize the rewritten request body. Must be the last mutator. */
export const finalizeBody: ProxyReqMutator = (manager) => {
const req = manager.request;
if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
// For image generation requests, remove stream flag.
if (req.outboundApi === "openai-image") {
delete req.body.stream;
}
// For anthropic text to chat requests, remove undefined prompt.
if (req.outboundApi === "anthropic-chat") {
delete req.body.prompt;
}
// For OpenAI Responses API, ensure messages is in the correct format
if (req.outboundApi === "openai-responses") {
// Format messages for the Responses API
if (req.body.messages) {
req.log.info("Formatting messages for Responses API in finalizeBody");
// The Responses API expects input to be an array, not an object
req.body.input = req.body.messages;
delete req.body.messages;
} else if (req.body.input && req.body.input.messages) {
req.log.info("Reformatting input.messages for Responses API in finalizeBody");
// If input already exists but contains a messages object, replace input with the messages array
req.body.input = req.body.input.messages;
}
// Final check to ensure max_completion_tokens is converted to max_output_tokens
if (req.body.max_completion_tokens) {
req.log.info("Converting max_completion_tokens to max_output_tokens in finalizeBody");
if (!req.body.max_output_tokens) {
req.body.max_output_tokens = req.body.max_completion_tokens;
}
delete req.body.max_completion_tokens;
}
// Final check to ensure max_tokens is converted to max_output_tokens
if (req.body.max_tokens) {
req.log.info("Converting max_tokens to max_output_tokens in finalizeBody");
if (!req.body.max_output_tokens) {
req.body.max_output_tokens = req.body.max_tokens;
}
delete req.body.max_tokens;
}
// Remove all parameters not supported by Responses API
const unsupportedParams = [
'frequency_penalty',
'presence_penalty',
];
for (const param of unsupportedParams) {
if (req.body[param] !== undefined) {
req.log.info(`Removing unsupported parameter for Responses API: ${param}`);
delete req.body[param];
}
}
}
const serialized =
typeof req.body === "string" ? req.body : JSON.stringify(req.body);
manager.setHeader("Content-Length", String(Buffer.byteLength(serialized)));
manager.setBody(serialized);
}
};
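// Illustrative only: a before/after sketch of the Responses API fixups
// performed in finalizeBody above (values are hypothetical).
const before = {
model: "gpt-5",
messages: [{ role: "user", content: "Hi" }],
max_tokens: 256,
frequency_penalty: 0.5,
};
const after = {
model: "gpt-5",
input: [{ role: "user", content: "Hi" }],
max_output_tokens: 256,
// frequency_penalty removed; it is on the unsupported-parameter list above
};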
@@ -0,0 +1,32 @@
import { ProxyReqMutator } from "../index";
/**
* For AWS/GCP/Azure/Google requests, the body is signed earlier in the request
* pipeline, before the proxy middleware. This function just assigns the path
* and headers to the proxy request.
*/
export const finalizeSignedRequest: ProxyReqMutator = (manager) => {
const req = manager.request;
if (!req.signedRequest) {
throw new Error("Expected req.signedRequest to be set");
}
// The path depends on the selected model and the assigned key's region.
manager.setPath(req.signedRequest.path);
// Amazon doesn't want extra headers, so we need to remove all of them and
// reassign only the ones specified in the signed request.
const headers = req.signedRequest.headers;
Object.keys(headers).forEach((key) => {
manager.removeHeader(key);
});
Object.entries(req.signedRequest.headers).forEach(([key, value]) => {
manager.setHeader(key, value);
});
const serialized =
typeof req.signedRequest.body === "string"
? req.signedRequest.body
: JSON.stringify(req.signedRequest.body);
manager.setHeader("Content-Length", String(Buffer.byteLength(serialized)));
manager.setBody(serialized);
};
@@ -0,0 +1,159 @@
import express, { Request } from "express";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
import {
AnthropicV1TextSchema,
AnthropicV1MessagesSchema,
} from "../../../../shared/api-schemas";
import { AwsBedrockKey, keyPool } from "../../../../shared/key-management";
import {
AWSMistralV1ChatCompletionsSchema,
AWSMistralV1TextCompletionsSchema,
} from "../../../../shared/api-schemas/mistral-ai";
import { ProxyReqMutator } from "../index";
const AMZ_HOST =
process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";
/**
* Signs an outgoing AWS request with the appropriate headers and modifies the
* request object in place to fix the path.
* This happens AFTER request transformation.
*/
export const signAwsRequest: ProxyReqMutator = async (manager) => {
const req = manager.request;
const { model, stream } = req.body;
const key = keyPool.get(model, "aws") as AwsBedrockKey;
manager.setKey(key);
let system = req.body.system ?? "";
if (Array.isArray(system)) {
system = system
.map((m: { type: string; text: string }) => m.text)
.join("\n");
req.body.system = system;
}
const credential = getCredentialParts(req);
const host = AMZ_HOST.replace("%REGION%", credential.region);
// AWS only uses 2023-06-01 and does not actually check this header, but we
// set it so that the stream adapter always selects the correct transformer.
manager.setHeader("anthropic-version", "2023-06-01");
// If our key has an inference profile compatible with the requested model,
// we want to use the inference profile instead of the model ID when calling
// InvokeModel as that will give us higher rate limits.
const profile =
key.inferenceProfileIds.find((p) => p.includes(model)) || model;
// Uses the AWS SDK to sign a request, then modifies our HPM proxy request
// with the headers generated by the SDK.
const newRequest = new HttpRequest({
method: "POST",
protocol: "https:",
hostname: host,
path: `/model/${profile}/invoke${stream ? "-with-response-stream" : ""}`,
headers: {
["Host"]: host,
["content-type"]: "application/json",
},
body: JSON.stringify(getStrictlyValidatedBodyForAws(req)),
});
if (stream) {
newRequest.headers["x-amzn-bedrock-accept"] = "application/json";
} else {
newRequest.headers["accept"] = "*/*";
}
const { body, inboundApi, outboundApi } = req;
req.log.info(
{ key: key.hash, model: body.model, profile, inboundApi, outboundApi },
"Assigned AWS credentials to request"
);
manager.setSignedRequest(await sign(newRequest, getCredentialParts(req)));
};
type Credential = {
accessKeyId: string;
secretAccessKey: string;
region: string;
};
function getCredentialParts(req: express.Request): Credential {
const [accessKeyId, secretAccessKey, region] = req.key!.key.split(":");
if (!accessKeyId || !secretAccessKey || !region) {
req.log.error(
{ key: req.key!.hash },
"AWS_CREDENTIALS isn't correctly formatted; refer to the docs"
);
throw new Error("The key assigned to this request is invalid.");
}
return { accessKeyId, secretAccessKey, region };
}
async function sign(request: HttpRequest, credential: Credential) {
const { accessKeyId, secretAccessKey, region } = credential;
const signer = new SignatureV4({
sha256: Sha256,
credentials: { accessKeyId, secretAccessKey },
region,
service: "bedrock",
});
return signer.sign(request);
}
function getStrictlyValidatedBodyForAws(req: Readonly<Request>): unknown {
// AWS uses vendor API formats but imposes additional (more strict) validation
// rules, namely that extraneous parameters are not allowed. We will validate
// using the vendor's zod schema but apply `.strip` to ensure that any
// extraneous parameters are removed.
let strippedParams: Record<string, unknown> = {};
switch (req.outboundApi) {
case "anthropic-text":
strippedParams = AnthropicV1TextSchema.pick({
prompt: true,
max_tokens_to_sample: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
})
.strip()
.parse(req.body);
break;
case "anthropic-chat":
strippedParams = AnthropicV1MessagesSchema.pick({
messages: true,
system: true,
max_tokens: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
tools: true,
tool_choice: true,
thinking: true
})
.strip()
.parse(req.body);
strippedParams.anthropic_version = "bedrock-2023-05-31";
break;
case "mistral-ai":
strippedParams = AWSMistralV1ChatCompletionsSchema.parse(req.body);
break;
case "mistral-text":
strippedParams = AWSMistralV1TextCompletionsSchema.parse(req.body);
break;
default:
throw new Error("Unexpected outbound API for AWS.");
}
return strippedParams;
}
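// Illustrative only: the colon-delimited AWS credential format expected by
// getCredentialParts above (values are AWS's documentation placeholders).
const exampleAwsKey =
"AKIAIOSFODNN7EXAMPLE:wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY:us-east-1";
const [accessKeyId, secretAccessKey, region] = exampleAwsKey.split(":");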
@@ -0,0 +1,78 @@
import { AnthropicV1MessagesSchema } from "../../../../shared/api-schemas";
import { GcpKey, keyPool } from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";
import {
getCredentialsFromGcpKey,
refreshGcpAccessToken,
} from "../../../../shared/key-management/gcp/oauth";
const GCP_HOST = process.env.GCP_HOST || "%REGION%-aiplatform.googleapis.com";
export const signGcpRequest: ProxyReqMutator = async (manager) => {
const req = manager.request;
const serviceValid = req.service === "gcp";
if (!serviceValid) {
throw new Error("addVertexAIKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const { model } = req.body;
const key: GcpKey = keyPool.get(model, "gcp") as GcpKey;
if (!key.accessToken || Date.now() > key.accessTokenExpiresAt) {
const [token, durationSec] = await refreshGcpAccessToken(key);
keyPool.update(key, {
accessToken: token,
accessTokenExpiresAt: Date.now() + durationSec * 1000 * 0.95,
} as GcpKey);
// nb: key received by `get` is a clone and will not have the new access
// token we just set, so it must be manually updated.
key.accessToken = token;
}
manager.setKey(key);
req.log.info({ key: key.hash, model }, "Assigned GCP key to request");
// TODO: This should happen in transform-outbound-payload.ts
// TODO: Support tools
let strippedParams: Record<string, unknown>;
strippedParams = AnthropicV1MessagesSchema.pick({
messages: true,
system: true,
max_tokens: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
stream: true,
tools: true,
tool_choice: true,
thinking: true
})
.strip()
.parse(req.body);
strippedParams.anthropic_version = "vertex-2023-10-16";
const credential = await getCredentialsFromGcpKey(key);
const host = GCP_HOST.replace("%REGION%", credential.region);
// GCP doesn't use the anthropic-version header, but we set it to ensure the
// stream adapter selects the correct transformer.
manager.setHeader("anthropic-version", "2023-06-01");
manager.setSignedRequest({
method: "POST",
protocol: "https:",
hostname: host,
path: `/v1/projects/${credential.projectId}/locations/${credential.region}/publishers/anthropic/models/${model}:streamRawPredict`,
headers: {
["host"]: host,
["content-type"]: "application/json",
["authorization"]: `Bearer ${key.accessToken}`,
},
body: JSON.stringify(strippedParams),
});
};
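// Illustrative only: for a token valid for 3600 seconds, the expiry recorded
// above is Date.now() + 3600 * 1000 * 0.95, i.e. the token is refreshed about
// three minutes before it would actually lapse.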
@@ -0,0 +1,33 @@
import { ProxyReqMutator } from "../index";
/**
* Removes origin and referer headers before sending the request to the API for
* privacy reasons.
*/
export const stripHeaders: ProxyReqMutator = (manager) => {
manager.removeHeader("origin");
manager.removeHeader("referer");
// Some APIs refuse requests coming from browsers to discourage embedding
// API keys in client-side code, so we must remove all CORS/fetch headers.
Object.keys(manager.request.headers).forEach((key) => {
if (key.startsWith("sec-")) {
manager.removeHeader(key);
}
});
manager.removeHeader("tailscale-user-login");
manager.removeHeader("tailscale-user-name");
manager.removeHeader("tailscale-headers-info");
manager.removeHeader("tailscale-user-profile-pic");
manager.removeHeader("cf-connecting-ip");
manager.removeHeader("cf-ray");
manager.removeHeader("cf-visitor");
manager.removeHeader("cf-warp-tag-id");
manager.removeHeader("forwarded");
manager.removeHeader("true-client-ip");
manager.removeHeader("x-forwarded-for");
manager.removeHeader("x-forwarded-host");
manager.removeHeader("x-forwarded-proto");
manager.removeHeader("x-real-ip");
};
@@ -1,45 +0,0 @@
import {
applyQuotaLimits,
blockZoomerOrigins,
checkModelFamily,
HPMRequestCallback,
stripHeaders,
} from "./index";
type ProxyReqHandlerFactoryOptions = { pipeline: HPMRequestCallback[] };
/**
* Returns an http-proxy-middleware request handler that runs the given set of
* onProxyReq callback functions in sequence.
*
* These will run each time a request is proxied, including on automatic retries
* by the queue after encountering a rate limit.
*/
export const createOnProxyReqHandler = ({
pipeline,
}: ProxyReqHandlerFactoryOptions): HPMRequestCallback => {
const callbackPipeline = [
checkModelFamily,
applyQuotaLimits,
blockZoomerOrigins,
stripHeaders,
...pipeline,
];
return (proxyReq, req, res, options) => {
// The streaming flag must be set before any other onProxyReq handler runs,
// as it may influence the behavior of subsequent handlers.
// Image generation requests can't be streamed.
// TODO: this flag is set in too many places
req.isStreaming =
req.isStreaming || req.body.stream === true || req.body.stream === "true";
req.body.stream = req.isStreaming;
try {
for (const fn of callbackPipeline) {
fn(proxyReq, req, res, options);
}
} catch (error) {
proxyReq.destroy(error);
}
};
};
@@ -1,32 +0,0 @@
import { AnthropicKey, Key } from "../../../../shared/key-management";
import { isTextGenerationRequest } from "../../common";
import { HPMRequestCallback } from "../index";
/**
* Some keys require the prompt to start with `\n\nHuman:`. There is no way to
* know this without trying to send the request and seeing if it fails. If a
* key is marked as requiring a preamble, it will be added here.
*/
export const addAnthropicPreamble: HPMRequestCallback = (
_proxyReq,
req
) => {
if (!isTextGenerationRequest(req) || req.key?.service !== "anthropic") {
return;
}
let preamble = "";
let prompt = req.body.prompt;
assertAnthropicKey(req.key);
if (req.key.requiresPreamble) {
preamble = prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
req.log.debug({ key: req.key.hash, preamble }, "Adding preamble to prompt");
}
req.body.prompt = preamble + prompt;
};
function assertAnthropicKey(key: Key): asserts key is AnthropicKey {
if (key.service !== "anthropic") {
throw new Error(`Expected an Anthropic key, got '${key.service}'`);
}
}
@@ -1,121 +0,0 @@
import { Key, OpenAIKey, keyPool } from "../../../../shared/key-management";
import { isEmbeddingsRequest } from "../../common";
import { HPMRequestCallback } from "../index";
import { assertNever } from "../../../../shared/utils";
/** Add a key that can service this request to the request object. */
export const addKey: HPMRequestCallback = (proxyReq, req) => {
let assignedKey: Key;
if (!req.inboundApi || !req.outboundApi) {
const err = new Error(
"Request API format missing. Did you forget to add the request preprocessor to your router?"
);
req.log.error(
{ in: req.inboundApi, out: req.outboundApi, path: req.path },
err.message
);
throw err;
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
if (req.inboundApi === req.outboundApi) {
assignedKey = keyPool.get(req.body.model);
} else {
switch (req.outboundApi) {
// If we are translating between API formats we may need to select a model
// for the user, because the provided model is for the inbound API.
case "anthropic":
assignedKey = keyPool.get("claude-v1");
break;
case "openai-text":
assignedKey = keyPool.get("gpt-3.5-turbo-instruct");
break;
case "openai":
throw new Error(
"OpenAI Chat as an API translation target is not supported"
);
case "google-ai":
throw new Error("add-key should not be used for this model.");
case "mistral-ai":
throw new Error("Mistral AI should never be translated");
case "openai-image":
assignedKey = keyPool.get("dall-e-3");
break;
default:
assertNever(req.outboundApi);
}
}
req.key = assignedKey;
req.log.info(
{
key: assignedKey.hash,
model: req.body?.model,
fromApi: req.inboundApi,
toApi: req.outboundApi,
},
"Assigned key to request"
);
// TODO: KeyProvider should assemble all necessary headers
switch (assignedKey.service) {
case "anthropic":
proxyReq.setHeader("X-API-Key", assignedKey.key);
break;
case "openai":
const key: OpenAIKey = assignedKey as OpenAIKey;
if (key.organizationId) {
proxyReq.setHeader("OpenAI-Organization", key.organizationId);
}
case "mistral-ai":
proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "azure":
const azureKey = assignedKey.key;
proxyReq.setHeader("api-key", azureKey);
break;
case "aws":
case "google-ai":
throw new Error("add-key should not be used for this service.");
default:
assertNever(assignedKey.service);
}
};
/**
* Special case for embeddings requests which don't go through the normal
* request pipeline.
*/
export const addKeyForEmbeddingsRequest: HPMRequestCallback = (
proxyReq,
req
) => {
if (!isEmbeddingsRequest(req)) {
throw new Error(
"addKeyForEmbeddingsRequest called on non-embeddings request"
);
}
if (req.inboundApi !== "openai") {
throw new Error("Embeddings requests must be from OpenAI");
}
req.body = { input: req.body.input, model: "text-embedding-ada-002" };
const key = keyPool.get("text-embedding-ada-002") as OpenAIKey;
req.key = key;
req.log.info(
{ key: key.hash, toApi: req.outboundApi },
"Assigned Turbo key to embeddings request"
);
proxyReq.setHeader("Authorization", `Bearer ${key.key}`);
if (key.organizationId) {
proxyReq.setHeader("OpenAI-Organization", key.organizationId);
}
};
@@ -1,19 +0,0 @@
import { fixRequestBody } from "http-proxy-middleware";
import type { HPMRequestCallback } from "../index";
/** Finalize the rewritten request body. Must be the last rewriter. */
export const finalizeBody: HPMRequestCallback = (proxyReq, req) => {
if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
// For image generation requests, remove stream flag.
if (req.outboundApi === "openai-image") {
delete req.body.stream;
}
const updatedBody = JSON.stringify(req.body);
proxyReq.setHeader("Content-Length", Buffer.byteLength(updatedBody));
(req as any).rawBody = Buffer.from(updatedBody);
// body-parser and http-proxy-middleware don't play nice together
fixRequestBody(proxyReq, req);
}
};
@@ -1,26 +0,0 @@
import type { HPMRequestCallback } from "../index";
/**
* For AWS/Azure/Google requests, the body is signed earlier in the request
* pipeline, before the proxy middleware. This function just assigns the path
* and headers to the proxy request.
*/
export const finalizeSignedRequest: HPMRequestCallback = (proxyReq, req) => {
if (!req.signedRequest) {
throw new Error("Expected req.signedRequest to be set");
}
// The path depends on the selected model and the assigned key's region.
proxyReq.path = req.signedRequest.path;
// Amazon doesn't want extra headers, so we need to remove all of them and
// reassign only the ones specified in the signed request.
proxyReq.getRawHeaderNames().forEach(proxyReq.removeHeader.bind(proxyReq));
Object.entries(req.signedRequest.headers).forEach(([key, value]) => {
proxyReq.setHeader(key, value);
});
// Don't use fixRequestBody here because it adds a content-length header.
// Amazon doesn't want that and it breaks the signature.
proxyReq.write(req.signedRequest.body);
};
@@ -1,16 +0,0 @@
import { HPMRequestCallback } from "../index";
/**
* Removes origin and referer headers before sending the request to the API for
* privacy reasons.
**/
export const stripHeaders: HPMRequestCallback = (proxyReq) => {
proxyReq.setHeader("origin", "");
proxyReq.setHeader("referer", "");
proxyReq.removeHeader("cf-connecting-ip");
proxyReq.removeHeader("forwarded");
proxyReq.removeHeader("true-client-ip");
proxyReq.removeHeader("x-forwarded-for");
proxyReq.removeHeader("x-real-ip");
};
@@ -1,15 +1,19 @@
import { RequestHandler } from "express";
import { ZodIssue } from "zod";
import { initializeSseStream } from "../../../shared/streaming";
import { classifyErrorAndSend } from "../common";
import {
RequestPreprocessor,
applyQuotaLimits,
blockZoomerOrigins,
countPromptTokens,
languageFilter,
setApiFormat,
transformOutboundPayload,
validateContextSize,
validateModelFamily,
validateVision,
} from ".";
type RequestPreprocessorOptions = {
/**
@@ -29,14 +33,15 @@ type RequestPreprocessorOptions = {
/**
* Returns a middleware function that processes the request body into the given
* API format, and then sequentially runs the given additional preprocessors.
* These should be used for validation and transformations that only need to
* happen once per request.
*
* These run first in the request lifecycle, a single time per request before it
* is added to the request queue. They aren't run again if the request is
* re-attempted after a rate limit.
*
* To run functions against requests every time they are re-attempted, write a
* ProxyReqMutator and pass it to createQueuedProxyMiddleware instead.
*/
export const createPreprocessorMiddleware = (
apiFormat: Parameters<typeof setApiFormat>[0],
@@ -44,12 +49,16 @@ export const createPreprocessorMiddleware = (
): RequestHandler => {
const preprocessors: RequestPreprocessor[] = [
setApiFormat(apiFormat),
blockZoomerOrigins,
...(beforeTransform ?? []),
transformOutboundPayload,
countPromptTokens,
languageFilter,
...(afterTransform ?? []),
validateContextSize,
validateVision,
validateModelFamily,
applyQuotaLimits,
];
return async (...args) => executePreprocessors(preprocessors, args);
};
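// Illustrative usage, assuming an express router and the API format values
// used in this project (the route and formats here are hypothetical):
//
// router.post(
//   "/v1/chat/completions",
//   createPreprocessorMiddleware({
//     inApi: "openai",
//     outApi: "anthropic-chat",
//     service: "anthropic",
//   })
// );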
@@ -71,6 +80,9 @@ async function executePreprocessors(
preprocessors: RequestPreprocessor[],
[req, res, next]: Parameters<RequestHandler>
) {
handleTestMessage(req, res, next);
if (res.headersSent) return;
try {
for (const preprocessor of preprocessors) {
await preprocessor(req);
@@ -78,10 +90,10 @@ async function executePreprocessors(
next();
} catch (error) {
if (error.constructor.name === "ZodError") {
const issues = error?.issues
?.map((issue: ZodIssue) => `${issue.path.join(".")}: ${issue.message}`)
.join("; ");
req.log.warn({ issues }, "Prompt failed preprocessor validation.");
} else {
req.log.error(error, "Error while executing request preprocessor");
}
@@ -99,3 +111,66 @@ async function executePreprocessors(
classifyErrorAndSend(error as Error, req, res);
}
}
/**
* Bypasses the API call and returns a test message response if the request body
* is a known test message from SillyTavern. Otherwise these messages just waste
* API request quota and confuse users when the proxy is busy, because ST always
* makes them with `stream: false` (which is not allowed when the proxy is busy).
*/
const handleTestMessage: RequestHandler = (req, res) => {
const { method, body } = req;
if (method !== "POST") {
return;
}
if (isTestMessage(body)) {
req.log.info({ body }, "Received test message. Skipping API call.");
res.json({
id: "test-message",
object: "chat.completion",
created: Date.now(),
model: body.model,
// openai chat
choices: [
{
message: { role: "assistant", content: "Hello!" },
finish_reason: "stop",
index: 0,
},
],
// anthropic text
completion: "Hello!",
// anthropic chat
content: [{ type: "text", text: "Hello!" }],
// gemini
candidates: [
{
content: { parts: [{ text: "Hello!" }] },
finishReason: "stop",
},
],
proxy_note:
"SillyTavern connection test detected. Your prompt was not sent to the actual model and this response was generated by the proxy.",
});
}
};
function isTestMessage(body: any) {
const { messages, prompt, contents } = body;
if (messages) {
return (
messages.length === 1 &&
messages[0].role === "user" &&
messages[0].content === "Hi"
);
} else if (contents) {
return contents.length === 1 && contents[0].parts[0]?.text === "Hi";
} else {
return (
prompt?.trim() === "Human: Hi\n\nAssistant:" ||
prompt?.startsWith("Hi\n\n")
);
}
}
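// Illustrative only: the OpenAI-format connection test SillyTavern sends,
// which handleTestMessage above intercepts without spending API quota.
const exampleTestBody = {
model: "gpt-4o",
messages: [{ role: "user", content: "Hi" }],
stream: false,
};
// isTestMessage(exampleTestBody) === true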
@@ -1,50 +0,0 @@
import { AzureOpenAIKey, keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
export const addAzureKey: RequestPreprocessor = (req) => {
const apisValid = req.inboundApi === "openai" && req.outboundApi === "openai";
const serviceValid = req.service === "azure";
if (!apisValid || !serviceValid) {
throw new Error("addAzureKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const model = req.body.model.startsWith("azure-")
? req.body.model
: `azure-${req.body.model}`;
req.key = keyPool.get(model);
req.body.model = model;
req.log.info(
{ key: req.key.hash, model },
"Assigned Azure OpenAI key to request"
);
const cred = req.key as AzureOpenAIKey;
const { resourceName, deploymentId, apiKey } = getCredentialsFromKey(cred);
req.signedRequest = {
method: "POST",
protocol: "https:",
hostname: `${resourceName}.openai.azure.com`,
path: `/openai/deployments/${deploymentId}/chat/completions?api-version=2023-09-01-preview`,
headers: {
["host"]: `${resourceName}.openai.azure.com`,
["content-type"]: "application/json",
["api-key"]: apiKey,
},
body: JSON.stringify(req.body),
};
};
function getCredentialsFromKey(key: AzureOpenAIKey) {
const [resourceName, deploymentId, apiKey] = key.key.split(":");
if (!resourceName || !deploymentId || !apiKey) {
throw new Error("Assigned Azure OpenAI key is not in the correct format.");
}
return { resourceName, deploymentId, apiKey };
}
@@ -1,40 +0,0 @@
import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
export const addGoogleAIKey: RequestPreprocessor = (req) => {
const apisValid = req.inboundApi === "openai" && req.outboundApi === "google-ai";
const serviceValid = req.service === "google-ai";
if (!apisValid || !serviceValid) {
throw new Error("addGoogleAIKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const model = req.body.model;
req.key = keyPool.get(model);
req.log.info(
{ key: req.key.hash, model },
"Assigned Google AI API key to request"
);
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:generateContent?key=$API_KEY
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}
req.isStreaming = req.isStreaming || req.body.stream;
delete req.body.stream;
req.signedRequest = {
method: "POST",
protocol: "https:",
hostname: "generativelanguage.googleapis.com",
path: `/v1beta/models/${model}:${req.isStreaming ? "streamGenerateContent" : "generateContent"}?key=${req.key.key}`,
headers: {
["host"]: `generativelanguage.googleapis.com`,
["content-type"]: "application/json",
},
body: JSON.stringify(req.body),
};
};
@@ -1,6 +1,6 @@
import { hasAvailableQuota } from "../../../../shared/users/user-store";
import { isImageGenerationRequest, isTextGenerationRequest } from "../../common";
import { RequestPreprocessor } from "../index";
export class QuotaExceededError extends Error {
public quotaInfo: any;
@@ -11,7 +11,7 @@ export class QuotaExceededError extends Error {
}
}
export const applyQuotaLimits: RequestPreprocessor = (req) => {
const subjectToQuota =
isTextGenerationRequest(req) || isImageGenerationRequest(req);
if (!subjectToQuota || !req.user) return;
@@ -34,4 +34,4 @@ export const applyQuotaLimits: HPMRequestCallback = (_proxyReq, req) => {
}
);
}
};
@@ -1,6 +1,6 @@
import { RequestPreprocessor } from "../index";
const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai,vip.jewproxy.tech,jewproxy.tech".split(",");
class ZoomerForbiddenError extends Error {
constructor(message: string) {
@@ -13,8 +13,8 @@ class ZoomerForbiddenError extends Error {
* Blocks requests from Janitor AI users with a fake, scary error message so I
* stop getting emails asking for tech support.
*/
export const blockZoomerOrigins: RequestPreprocessor = (req) => {
const origin = req.headers.origin || req.headers.referer || req.headers.host;
if (origin && DISALLOWED_ORIGIN_SUBSTRINGS.some((s) => origin.includes(s))) {
// Venus-derivatives send a test prompt to check if the proxy is working.
// We don't want to block that just yet.
@@ -1,11 +1,18 @@
import { RequestPreprocessor } from "../index";
import { countTokens } from "../../../../shared/tokenization";
import { assertNever } from "../../../../shared/utils";
import { OpenAIChatMessage } from "../../../../shared/api-schemas";
import { GoogleAIChatMessage } from "../../../../shared/api-schemas/google-ai";
import {
AnthropicChatMessage,
flattenAnthropicMessages,
} from "../../../../shared/api-schemas/anthropic";
import {
MistralAIChatMessage,
ContentItem,
isMistralVisionModel
} from "../../../../shared/api-schemas/mistral-ai";
import { isGrokVisionModel } from "../../../../shared/api-schemas/xai";
/**
* Given a request with an already-transformed body, counts the number of
@@ -17,7 +24,13 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
switch (service) {
case "openai": {
req.outputTokens = req.body.max_completion_tokens || req.body.max_tokens;
const prompt: OpenAIChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
case "openai-responses": {
req.outputTokens = req.body.max_completion_tokens || req.body.max_tokens;
const prompt: OpenAIChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
@@ -28,7 +41,19 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
result = await countTokens({ req, prompt, service });
break;
}
case "anthropic": {
case "anthropic-chat": {
req.outputTokens = req.body.max_tokens;
let system = req.body.system ?? "";
if (Array.isArray(system)) {
system = system
.map((m: { type: string; text: string }) => m.text)
.join("\n");
}
const prompt = { system, messages: req.body.messages };
result = await countTokens({ req, prompt, service });
break;
}
case "anthropic-text": {
req.outputTokens = req.body.max_tokens_to_sample;
const prompt: string = req.body.prompt;
result = await countTokens({ req, prompt, service });
@@ -40,10 +65,50 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
result = await countTokens({ req, prompt, service });
break;
}
case "mistral-ai": {
case "mistral-ai":
case "mistral-text": {
req.outputTokens = req.body.max_tokens;
const prompt: MistralAIChatMessage[] = req.body.messages;
// Handle multimodal content (vision) in Mistral models
const isVisionModel = isMistralVisionModel(req.body.model);
const messages = req.body.messages;
// Check if this is a vision request with images
const hasImageContent = Array.isArray(messages) && messages.some(
(msg: MistralAIChatMessage) => Array.isArray(msg.content) &&
msg.content.some((item: ContentItem) => item.type === "image_url")
);
// For vision content, we add a fixed token count per image
// This is an estimate as the actual token count depends on image size and complexity
const TOKENS_PER_IMAGE = 1200; // Conservative estimate
let imageTokens = 0;
if (hasImageContent && Array.isArray(messages)) {
// Count images in the request
for (const msg of messages) {
if (Array.isArray(msg.content)) {
const imageCount = msg.content.filter(
(item: ContentItem) => item.type === "image_url"
).length;
imageTokens += imageCount * TOKENS_PER_IMAGE;
}
}
req.log.debug(
{ imageCount: imageTokens / TOKENS_PER_IMAGE, tokenEstimate: imageTokens },
"Estimated token count for Mistral vision images"
);
}
const prompt: string | MistralAIChatMessage[] = messages ?? req.body.prompt;
result = await countTokens({ req, prompt, service });
// Add the image tokens to the total count
if (imageTokens > 0) {
result.token_count += imageTokens;
}
break;
}
case "openai-image": {
@@ -51,6 +116,10 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
result = await countTokens({ req, service });
break;
}
// Handle XAI (Grok) vision models
// Since it uses the OpenAI API format, it's caught in the "openai" case,
// but we need to add additional handling for image tokens after that
default:
assertNever(service);
}
@@ -1,12 +1,15 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../../config";
import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
import { BadRequestError } from "../../../../shared/errors";
import {
MistralAIChatMessage,
OpenAIChatMessage,
flattenAnthropicMessages,
} from "../../../../shared/api-schemas";
import { GoogleAIV1GenerateContentSchema } from "../../../../shared/api-schemas/google-ai";
const rejectedClients = new Map<string, number>();
@@ -45,16 +48,20 @@ export const languageFilter: RequestPreprocessor = async (req) => {
req.res!.once("close", resolve);
setTimeout(resolve, delay);
});
throw new BadRequestError(config.rejectMessage);
}
};
/*
TODO: this is not type safe and does not raise errors if request body zod schema
is changed.
*/
function getPromptFromRequest(req: Request) {
const service = req.outboundApi;
const body = req.body;
switch (service) {
case "anthropic":
return body.prompt;
case "anthropic-chat":
return flattenAnthropicMessages(body.messages);
case "openai":
case "mistral-ai":
return body.messages
@@ -69,11 +76,19 @@ function getPromptFromRequest(req: Request) {
return `${msg.role}: ${text}`;
})
.join("\n\n");
case "anthropic-text":
case "openai-text":
case "openai-responses":
case "openai-image":
case "mistral-text":
return body.prompt;
case "google-ai":
return body.prompt.text;
case "google-ai": {
const b = body as z.infer<typeof GoogleAIV1GenerateContentSchema>;
return [
b.systemInstruction?.parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text),
...b.contents.flatMap((c) => c.parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text)),
].join("\n");
}
default:
assertNever(service);
}
@@ -4,8 +4,22 @@ import { LLMService } from "../../../../shared/models";
import { RequestPreprocessor } from "../index";
export const setApiFormat = (api: {
/**
* The API format the user made the request in and expects the response to be
* in.
*/
inApi: Request["inboundApi"];
/**
* The API format the proxy will make the request in and expects the response
* to be in. If different from `inApi`, the proxy will transform the user's
* request body to this format, and will transform the response body or stream
* events from this format.
*/
outApi: APIFormat;
/**
* The service the request will be sent to, which determines authentication
* and possibly the streaming transport.
*/
service: LLMService;
}): RequestPreprocessor => {
return function configureRequestApiFormat(req) {
@@ -1,96 +0,0 @@
import express from "express";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
import { AnthropicV1CompleteSchema } from "./transform-outbound-payload";
const AMZ_HOST =
process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";
/**
* Signs an outgoing AWS request with the appropriate headers and modifies the
* request object in place to fix the path.
*/
export const signAwsRequest: RequestPreprocessor = async (req) => {
req.key = keyPool.get("anthropic.claude-v2");
const { model, stream } = req.body;
req.isStreaming = stream === true || stream === "true";
let preamble = req.body.prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
req.body.prompt = preamble + req.body.prompt;
// AWS supports only a subset of Anthropic's parameters and is more strict
// about unknown parameters.
// TODO: This should happen in transform-outbound-payload.ts
const strippedParams = AnthropicV1CompleteSchema.pick({
prompt: true,
max_tokens_to_sample: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
}).strip().parse(req.body);
const credential = getCredentialParts(req);
const host = AMZ_HOST.replace("%REGION%", credential.region);
// AWS only uses 2023-06-01 and does not actually check this header, but we
// set it so that the stream adapter always selects the correct transformer.
req.headers["anthropic-version"] = "2023-06-01";
// Uses the AWS SDK to sign a request, then modifies our HPM proxy request
// with the headers generated by the SDK.
const newRequest = new HttpRequest({
method: "POST",
protocol: "https:",
hostname: host,
path: `/model/${model}/invoke${stream ? "-with-response-stream" : ""}`,
headers: {
["Host"]: host,
["content-type"]: "application/json",
},
body: JSON.stringify(strippedParams),
});
if (stream) {
newRequest.headers["x-amzn-bedrock-accept"] = "application/json";
} else {
newRequest.headers["accept"] = "*/*";
}
req.signedRequest = await sign(newRequest, getCredentialParts(req));
};
type Credential = {
accessKeyId: string;
secretAccessKey: string;
region: string;
};
function getCredentialParts(req: express.Request): Credential {
const [accessKeyId, secretAccessKey, region] = req.key!.key.split(":");
if (!accessKeyId || !secretAccessKey || !region) {
req.log.error(
{ key: req.key!.hash },
"AWS_CREDENTIALS isn't correctly formatted; refer to the docs"
);
throw new Error("The key assigned to this request is invalid.");
}
return { accessKeyId, secretAccessKey, region };
}
async function sign(request: HttpRequest, credential: Credential) {
const { accessKeyId, secretAccessKey, region } = credential;
const signer = new SignatureV4({
sha256: Sha256,
credentials: { accessKeyId, secretAccessKey },
region,
service: "bedrock",
});
return signer.sign(request);
}
@@ -1,524 +1,237 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../../config";
import {
API_REQUEST_VALIDATORS,
API_REQUEST_TRANSFORMERS,
} from "../../../../shared/api-schemas";
import { BadRequestError } from "../../../../shared/errors";
import { fixMistralPrompt, isMistralVisionModel } from "../../../../shared/api-schemas/mistral-ai";
import {
isImageGenerationRequest,
isTextGenerationRequest,
} from "../../common";
import { RequestPreprocessor } from "../index";
import { APIFormat } from "../../../../shared/key-management";
const CLAUDE_OUTPUT_MAX = config.maxOutputTokensAnthropic;
const OPENAI_OUTPUT_MAX = config.maxOutputTokensOpenAI;
// TODO: move schemas to shared
// https://console.anthropic.com/docs/api/reference#-v1-complete
export const AnthropicV1CompleteSchema = z
.object({
model: z.string().max(100),
prompt: z.string({
required_error:
"No prompt found. Are you sending an OpenAI-formatted request to the Claude endpoint?",
}),
max_tokens_to_sample: z.coerce
.number()
.int()
.transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
stop_sequences: z.array(z.string().max(500)).optional(),
stream: z.boolean().optional().default(false),
temperature: z.coerce.number().optional().default(1),
top_k: z.coerce.number().optional(),
top_p: z.coerce.number().optional(),
})
.strip();
// https://platform.openai.com/docs/api-reference/chat/create
const OpenAIV1ChatContentArraySchema = z.array(
z.union([
z.object({ type: z.literal("text"), text: z.string() }),
z.object({
type: z.literal("image_url"),
image_url: z.object({
url: z.string().url(),
detail: z.enum(["low", "auto", "high"]).optional().default("auto"),
}),
}),
])
);
export const OpenAIV1ChatCompletionSchema = z
.object({
model: z.string().max(100),
messages: z.array(
z.object({
role: z.enum(["system", "user", "assistant"]),
content: z.union([z.string(), OpenAIV1ChatContentArraySchema]),
name: z.string().optional(),
}),
{
required_error:
"No `messages` found. Ensure you've set the correct completion endpoint.",
invalid_type_error:
"Messages were not formatted correctly. Refer to the OpenAI Chat API documentation for more information.",
}
),
temperature: z.number().optional().default(1),
top_p: z.number().optional().default(1),
n: z
.literal(1, {
errorMap: () => ({
message: "You may only request a single completion at a time.",
}),
})
.optional(),
stream: z.boolean().optional().default(false),
stop: z
.union([z.string().max(500), z.array(z.string().max(500))])
.optional(),
max_tokens: z.coerce
.number()
.int()
.nullish()
.default(16)
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
frequency_penalty: z.number().optional().default(0),
presence_penalty: z.number().optional().default(0),
logit_bias: z.any().optional(),
user: z.string().max(500).optional(),
seed: z.number().int().optional(),
})
.strip();
export type OpenAIChatMessage = z.infer<
typeof OpenAIV1ChatCompletionSchema
>["messages"][0];
const OpenAIV1TextCompletionSchema = z
.object({
model: z
.string()
.max(100)
.regex(
/^gpt-3.5-turbo-instruct/,
"Model must start with 'gpt-3.5-turbo-instruct'"
),
prompt: z.string({
required_error:
"No `prompt` found. Ensure you've set the correct completion endpoint.",
}),
logprobs: z.number().int().nullish().default(null),
echo: z.boolean().optional().default(false),
best_of: z.literal(1).optional(),
stop: z
.union([z.string().max(500), z.array(z.string().max(500)).max(4)])
.optional(),
suffix: z.string().max(1000).optional(),
})
.strip()
.merge(OpenAIV1ChatCompletionSchema.omit({ messages: true }));
// https://platform.openai.com/docs/api-reference/images/create
const OpenAIV1ImagesGenerationSchema = z
.object({
prompt: z.string().max(4000),
model: z.string().max(100).optional(),
quality: z.enum(["standard", "hd"]).optional().default("standard"),
n: z.number().int().min(1).max(4).optional().default(1),
response_format: z.enum(["url", "b64_json"]).optional(),
size: z
.enum(["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"])
.optional()
.default("1024x1024"),
style: z.enum(["vivid", "natural"]).optional().default("vivid"),
user: z.string().max(500).optional(),
})
.strip();
// https://developers.generativeai.google/api/rest/generativelanguage/models/generateContent
const GoogleAIV1GenerateContentSchema = z
.object({
model: z.string().max(100), //actually specified in path but we need it for the router
stream: z.boolean().optional().default(false), // also used for router
contents: z.array(
z.object({
parts: z.array(z.object({ text: z.string() })),
role: z.enum(["user", "model"]),
})
),
tools: z.array(z.object({})).max(0).optional(),
safetySettings: z.array(z.object({})).max(0).optional(),
generationConfig: z.object({
temperature: z.number().optional(),
maxOutputTokens: z.coerce
.number()
.int()
.optional()
.default(16)
.transform((v) => Math.min(v, 1024)), // TODO: Add config
candidateCount: z.literal(1).optional(),
topP: z.number().optional(),
topK: z.number().optional(),
stopSequences: z.array(z.string().max(500)).max(5).optional(),
}),
})
.strip();
export type GoogleAIChatMessage = z.infer<
typeof GoogleAIV1GenerateContentSchema
>["contents"][0];
// https://docs.mistral.ai/api#operation/createChatCompletion
const MistralAIV1ChatCompletionsSchema = z.object({
model: z.string(),
messages: z.array(
z.object({
role: z.enum(["system", "user", "assistant"]),
content: z.string(),
})
),
temperature: z.number().optional().default(0.7),
top_p: z.number().optional().default(1),
max_tokens: z.coerce
.number()
.int()
.nullish()
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
stream: z.boolean().optional().default(false),
safe_mode: z.boolean().optional().default(false),
random_seed: z.number().int().optional(),
});
export type MistralAIChatMessage = z.infer<
typeof MistralAIV1ChatCompletionsSchema
>["messages"][0];
const VALIDATORS: Record<APIFormat, z.ZodSchema<any>> = {
anthropic: AnthropicV1CompleteSchema,
openai: OpenAIV1ChatCompletionSchema,
"openai-text": OpenAIV1TextCompletionSchema,
"openai-image": OpenAIV1ImagesGenerationSchema,
"google-ai": GoogleAIV1GenerateContentSchema,
"mistral-ai": MistralAIV1ChatCompletionsSchema,
};
/** Transforms an incoming request body to one that matches the target API. */
export const transformOutboundPayload: RequestPreprocessor = async (req) => {
const alreadyTransformed = req.retryCount > 0;
const notTransformable =
!isTextGenerationRequest(req) && !isImageGenerationRequest(req);
if (alreadyTransformed) {
return;
} else if (notTransformable) {
// This is probably an indication of a bug in the proxy.
const { inboundApi, outboundApi, method, path } = req;
req.log.warn(
{ inboundApi, outboundApi, method, path },
"`transformOutboundPayload` called on a non-transformable request."
);
return;
}
applyMistralPromptFixes(req);
applyGoogleAIKeyTransforms(req);
applyOpenAIResponsesTransform(req);
// Native prompts are those which were already provided by the client in the
// target API format. We don't need to transform them.
const isNativePrompt = req.inboundApi === req.outboundApi;
if (isNativePrompt) {
const result = API_REQUEST_VALIDATORS[req.inboundApi].parse(req.body);
req.body = result;
return;
}
// Prompt requires translation from one API format to another.
const transformation = `${req.inboundApi}->${req.outboundApi}` as const;
const transFn = API_REQUEST_TRANSFORMERS[transformation];
if (transFn) {
req.log.info({ transformation }, "Transforming request...");
req.body = await transFn(req);
return;
}
throw new BadRequestError(
`${transformation} proxying is not supported. Make sure your client is configured to send requests in the correct format and to the correct endpoint.`
);
};
function openaiToAnthropic(req: Request) {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-Anthropic request"
);
throw result.error;
}
// Handle OpenAI Responses API transformation
function applyOpenAIResponsesTransform(req: Request): void {
if (req.outboundApi === "openai-responses") {
req.log.info("Transforming request to OpenAI Responses API format");
req.headers["anthropic-version"] = "2023-06-01";
// Store the original body for reference if needed
const originalBody = { ...req.body };
const { messages, ...rest } = result.data;
const prompt = openAIMessagesToClaudePrompt(messages);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
// Recommended by Anthropic
stops.push("\n\nHuman:");
// Helps with jailbreak prompts that send fake system messages and multi-bot
// chats that prefix bot messages with "System: Respond as <bot name>".
stops.push("\n\nSystem:");
// Remove duplicates
stops = [...new Set(stops)];
return {
// Model may be overridden in `calculate-context-size.ts` to avoid having
// a circular dependency (`calculate-context-size.ts` needs an already-
// transformed request body to count tokens, but this function would like
// to know the count to select a model).
model: process.env.CLAUDE_SMALL_MODEL || "claude-v1",
prompt: prompt,
max_tokens_to_sample: rest.max_tokens,
stop_sequences: stops,
stream: rest.stream,
temperature: rest.temperature,
top_p: rest.top_p,
};
}
function openaiToOpenaiText(req: Request) {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-OpenAI-text request"
);
throw result.error;
}
const { messages, ...rest } = result.data;
const prompt = flattenOpenAIChatMessages(messages);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
stops.push("\n\nUser:");
stops = [...new Set(stops)];
const transformed = { ...rest, prompt: prompt, stop: stops };
return OpenAIV1TextCompletionSchema.parse(transformed);
}
// Takes the last chat message and uses it verbatim as the image prompt.
function openaiToOpenaiImage(req: Request) {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-OpenAI-image request"
);
throw result.error;
}
const { messages } = result.data;
const prompt = messages.filter((m) => m.role === "user").pop()?.content;
if (Array.isArray(prompt)) {
throw new Error("Image generation prompt must be a text message.");
}
if (body.stream) {
throw new Error(
"Streaming is not supported for image generation requests."
);
}
// Some frontends do weird things with the prompt, like prefixing it with a
// character name or wrapping the entire thing in quotes. We will look for
// the index of "Image:" and use everything after that as the prompt.
const index = prompt?.toLowerCase().indexOf("image:");
if (index === -1 || !prompt) {
throw new Error(
`Start your prompt with 'Image:' followed by a description of the image you want to generate (received: ${prompt}).`
);
}
// TODO: Add some way to specify parameters via chat message
const transformed = {
model: body.model.includes("dall-e") ? body.model : "dall-e-3",
quality: "standard",
size: "1024x1024",
response_format: "url",
prompt: prompt.slice(index! + 6).trim(),
};
return OpenAIV1ImagesGenerationSchema.parse(transformed);
}
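// For example (a sketch): a final user message of
//   "Aiko: Image: a watercolor fox in a forest"
// yields the prompt "a watercolor fox in a forest" (everything after the
// first "Image:", trimmed), and the model falls back to "dall-e-3" unless the
// client already requested a dall-e model.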
function openaiToGoogleAI(
req: Request
): z.infer<typeof GoogleAIV1GenerateContentSchema> {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse({
...body,
model: "gpt-3.5-turbo",
});
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-Google AI request"
);
throw result.error;
}
const { messages, ...rest } = result.data;
const foundNames = new Set<string>();
const contents = messages
.map((m) => {
const role = m.role === "assistant" ? "model" : "user";
// Detects character names so we can set stop sequences for them as Gemini
// is prone to continuing as the next character.
// If names are not available, we'll still try to prefix the message
// with generic names so we can set stops for them but they don't work
// as well as real names.
const text = flattenOpenAIMessageContent(m.content);
const propName = m.name?.trim();
const textName =
m.role === "system" ? "" : text.match(/^(.{0,50}?): /)?.[1]?.trim();
const name =
propName || textName || (role === "model" ? "Character" : "User");
foundNames.add(name);
// Prefixing messages with their character name seems to help avoid
// Gemini trying to continue as the next character, or at the very least
// ensures it will hit the stop sequence. Otherwise it will start a new
// paragraph and switch perspectives.
// The response will be very likely to include this prefix so frontends
// will need to strip it out.
const textPrefix = textName ? "" : `${name}: `;
return {
parts: [{ text: textPrefix + text }],
role: m.role === "assistant" ? ("model" as const) : ("user" as const),
      };
    })
.reduce<GoogleAIChatMessage[]>((acc, msg) => {
const last = acc[acc.length - 1];
if (last?.role === msg.role) {
last.parts[0].text += "\n\n" + msg.parts[0].text;
} else {
acc.push(msg);
}
      return acc;
    }, []);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
stops.push(...Array.from(foundNames).map((name) => `\n${name}:`));
stops = [...new Set(stops)].slice(0, 5);
return {
model: "gemini-pro",
stream: rest.stream,
contents,
tools: [],
generationConfig: {
maxOutputTokens: rest.max_tokens,
stopSequences: stops,
topP: rest.top_p,
topK: 40, // openai schema doesn't have this, google ai defaults to 40
temperature: rest.temperature,
},
safetySettings: [
{ category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_HATE_SPEECH", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "BLOCK_NONE" },
],
};
}
export function openAIMessagesToClaudePrompt(messages: OpenAIChatMessage[]) {
return (
messages
.map((m) => {
let role: string = m.role;
if (role === "assistant") {
role = "Assistant";
} else if (role === "system") {
role = "System";
} else if (role === "user") {
role = "Human";
}
const name = m.name?.trim();
const content = flattenOpenAIMessageContent(m.content);
// https://console.anthropic.com/docs/prompt-design
// `name` isn't supported by Anthropic but we can still try to use it.
return `\n\n${role}: ${name ? `(as ${name}) ` : ""}${content}`;
})
.join("") + "\n\nAssistant:"
);
}
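// For example (a sketch): the messages
//   [{ role: "user", name: "Aiko", content: "Hello" },
//    { role: "assistant", content: "Hi!" }]
// flatten to "\n\nHuman: (as Aiko) Hello\n\nAssistant: Hi!\n\nAssistant:",
// priming Claude to continue from the final "Assistant:" turn.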
function flattenOpenAIChatMessages(messages: OpenAIChatMessage[]) {
// Temporary to allow experimenting with prompt strategies
const PROMPT_VERSION: number = 1;
switch (PROMPT_VERSION) {
case 1:
return (
messages
.map((m) => {
// Claude-style human/assistant turns
let role: string = m.role;
if (role === "assistant") {
role = "Assistant";
} else if (role === "system") {
role = "System";
} else if (role === "user") {
role = "User";
}
return `\n\n${role}: ${flattenOpenAIMessageContent(m.content)}`;
})
.join("") + "\n\nAssistant:"
);
case 2:
return messages
.map((m) => {
// Claude without prefixes (except system) and no Assistant priming
let role: string = "";
if (m.role === "system") {
role = "System: ";
}
return `\n\n${role}${flattenOpenAIMessageContent(m.content)}`;
})
.join("");
default:
throw new Error(`Unknown prompt version: ${PROMPT_VERSION}`);
}
}
function flattenOpenAIMessageContent(
content: OpenAIChatMessage["content"]
): string {
return Array.isArray(content)
? content
.map((contentItem) => {
if ("text" in contentItem) return contentItem.text;
if ("image_url" in contentItem) return "[ Uploaded Image Omitted ]";
})
.join("\n")
: content;
}
// handles weird cases that don't fit into our abstractions
function applyMistralPromptFixes(req: Request): void {
if (req.inboundApi === "mistral-ai") {
// Mistral Chat is very similar to OpenAI but not identical and many clients
// don't properly handle the differences. We will try to validate the
// mistral prompt and try to fix it if it fails. It will be re-validated
// after this function returns.
const result = API_REQUEST_VALIDATORS["mistral-ai"].parse(req.body);
// Check if this is a vision model request
const isVisionModel = isMistralVisionModel(req.body.model);
// Check if the request contains image content
const hasImageContent = result.messages?.some((msg: {content: string | any[]}) =>
Array.isArray(msg.content) &&
msg.content.some((item: any) => item.type === "image_url")
);
// For vision requests, normalize the image_url format
if (hasImageContent && Array.isArray(result.messages)) {
// Process each message with image content
result.messages.forEach((msg: any) => {
if (Array.isArray(msg.content)) {
// Process each content item
msg.content.forEach((item: any) => {
if (item.type === "image_url") {
// Normalize the image_url field to a string format that Mistral expects
if (typeof item.image_url === "object") {
// If it's an object, extract the URL or base64 data
if (item.image_url.url) {
item.image_url = item.image_url.url;
} else if (item.image_url.data) {
item.image_url = item.image_url.data;
}
req.log.info(
{ model: req.body.model },
"Normalized object-format image_url to string format"
);
}
}
});
}
});
}
// Apply Mistral prompt fixes while preserving multimodal content
req.body.messages = fixMistralPrompt(result.messages);
req.log.info(
{
n: req.body.messages.length,
prev: result.messages.length,
isVisionModel,
hasImageContent
},
"Applied Mistral chat prompt fixes."
);
// If this is a vision model with image content, it MUST use the chat API
// and cannot be converted to text completions
if (hasImageContent) {
req.log.info(
{ model: req.body.model },
"Detected Mistral vision request with image content. Keeping as chat format."
);
return;
}
// If the prompt relies on `prefix: true` for the last message, we need to
// convert it to a text completions request because AWS Mistral support for
// this feature is broken.
// On Mistral La Plateforme, we can't do this because they don't expose
// a text completions endpoint.
const { messages } = req.body;
const lastMessage = messages && messages[messages.length - 1];
if (lastMessage?.role === "assistant" && req.service === "aws") {
// enable prefix if client forgot, otherwise the template will insert an
// eos token which is very unlikely to be what the client wants.
lastMessage.prefix = true;
req.outboundApi = "mistral-text";
req.log.info(
"Native Mistral chat prompt relies on assistant message prefix. Converting to text completions request."
);
}
}
}
function toCamelCase(str: string): string {
return str.replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
}
function transformKeysToCamelCase(obj: any, hasTransformed = { value: false }): any {
if (Array.isArray(obj)) {
return obj.map(item => transformKeysToCamelCase(item, hasTransformed));
}
if (obj !== null && typeof obj === 'object') {
return Object.fromEntries(
Object.entries(obj).map(([key, value]) => {
const camelKey = toCamelCase(key);
if (camelKey !== key) {
hasTransformed.value = true;
}
return [
camelKey,
transformKeysToCamelCase(value, hasTransformed)
];
})
);
}
return obj;
}
function applyGoogleAIKeyTransforms(req: Request): void {
// Google (Gemini) API in their infinite wisdom accepts both snake_case and camelCase
// for some params even though in the docs they use snake_case.
// Some frontends (e.g. ST) mix snake_case and camelCase, so we normalize all keys to camelCase
if (req.outboundApi === "google-ai") {
const hasTransformed = { value: false };
req.body = transformKeysToCamelCase(req.body, hasTransformed);
if (hasTransformed.value) {
req.log.info("Applied Gemini camelCase -> snake_case transform");
}
}
}
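// For example (a sketch): a Gemini body of
//   { generation_config: { max_output_tokens: 256 } }
// is normalized to { generationConfig: { maxOutputTokens: 256 } } before
// being forwarded upstream.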
@@ -6,8 +6,9 @@ import { RequestPreprocessor } from "../index";
const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
// todo: make configurable
const GOOGLE_AI_MAX_CONTEXT = 2048000;
const MISTRAL_AI_MAX_CONTENT = 131072;
/**
* Assigns `req.promptTokens` and `req.outputTokens` based on the request body
@@ -27,16 +28,20 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
switch (req.outboundApi) {
case "openai":
case "openai-text":
case "openai-responses":
proxyMax = OPENAI_MAX_CONTEXT;
break;
case "anthropic":
case "anthropic-chat":
case "anthropic-text":
proxyMax = CLAUDE_MAX_CONTEXT;
break;
case "google-ai":
proxyMax = GOOGLE_AI_MAX_CONTEXT;
break;
case "mistral-ai":
case "mistral-text":
proxyMax = MISTRAL_AI_MAX_CONTENT;
break;
case "openai-image":
return;
default:
@@ -44,15 +49,62 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
}
proxyMax ||= Number.MAX_SAFE_INTEGER;
if (req.user?.type === "special") {
req.log.debug("Special user, not enforcing proxy context limit.");
proxyMax = Number.MAX_SAFE_INTEGER;
}
let modelMax: number;
if (model.match(/gpt-3.5-turbo-16k/)) {
modelMax = 16384;
} else if (model.match(/^gpt-4o/)) {
modelMax = 128000;
} else if (model.match(/^gpt-4.5/)) {
modelMax = 128000;
} else if (model.match(/^gpt-4\.1(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 1000000;
} else if (model.match(/^gpt-4\.1-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 1000000;
} else if (model.match(/^gpt-4\.1-nano(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 1000000;
} else if (model.match(/^gpt-5(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 400000;
} else if (model.match(/^gpt-5-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 400000;
} else if (model.match(/^gpt-5-nano(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 400000;
} else if (model.match(/^gpt-5-chat-latest$/)) {
modelMax = 400000;
} else if (model.match(/^chatgpt-4o/)) {
modelMax = 128000;
} else if (model.match(/gpt-4-turbo(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 131072;
} else if (model.match(/gpt-4-turbo(-preview)?$/)) {
modelMax = 131072;
} else if (model.match(/gpt-4-(0125|1106)(-preview)?$/)) {
modelMax = 131072;
} else if (model.match(/^gpt-4(-\d{4})?-vision(-preview)?$/)) {
modelMax = 131072;
} else if (model.match(/^o3-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o3(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o4-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^codex-mini(-latest|-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000; // 200k context window for codex-mini-latest
} else if (model.match(/^o1(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o1-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 128000;
} else if (model.match(/^o1-pro(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o3-pro(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o1-preview(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 128000;
} else if (model.match(/gpt-3.5-turbo/)) {
modelMax = 16384;
} else if (model.match(/gpt-4-32k/)) {
modelMax = 32768;
} else if (model.match(/gpt-4/)) {
@@ -65,13 +117,45 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
modelMax = 100000;
} else if (model.match(/^claude-2/)) {
modelMax = 200000;
} else if (model.match(/^gemini-\d{3}$/)) {
modelMax = GOOGLE_AI_MAX_CONTEXT;
} else if (model.match(/^mistral-(tiny|small|medium)$/)) {
modelMax = MISTRAL_AI_MAX_CONTENT;
} else if (model.match(/^claude-3/)) {
modelMax = 200000;
} else if (model.match(/^claude-(?:sonnet|opus)-4/)) {
modelMax = 200000;
} else if (model.match(/^gemini-/)) {
modelMax = 1024000;
} else if (model.match(/^anthropic\.claude-3/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude-(?:sonnet|opus)-4/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude-v2:\d/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude/)) {
// Not sure if AWS Claude has the same context limit as Anthropic Claude.
modelMax = 100000;
} else if (model.match(/^deepseek/)) {
modelMax = 64000;
} else if (model.match(/^kimi-k2/)) {
// Kimi K2 models have 131k context window
modelMax = 131000;
} else if (model.match(/moonshot/)) {
// Moonshot models typically have 200k context window
modelMax = 200000;
} else if (model.match(/command[\w-]*-03-202[0-9]/)) {
// Cohere's command-a-03 models have 256k context window
modelMax = 256000;
} else if (model.match(/command/) || model.match(/cohere/)) {
// Default for all other Cohere models
modelMax = 128000;
} else if (model.match(/^grok-4/)) {
modelMax = 256000;
} else if (model.match(/^grok/)) {
modelMax = 128000;
} else if (model.match(/^magistral/)) {
modelMax = 40000;
} else if (model.match(/tral/)) {
// catches mistral, mixtral, codestral, mathstral, etc. mistral models have
// no name convention and wildly different context windows so this is a
// catch-all
modelMax = MISTRAL_AI_MAX_CONTENT;
} else {
req.log.warn({ model }, "Unknown model, using 200k token limit.");
modelMax = 200000;
@@ -107,4 +191,4 @@ function assertRequestHasTokenCounts(
})
.nonstrict()
.parse({ promptTokens: req.promptTokens, outputTokens: req.outputTokens });
}
}
@@ -1,14 +1,16 @@
import { config } from "../../../../config";
import { ForbiddenError } from "../../../../shared/errors";
import { getModelFamilyForRequest } from "../../../../shared/models";
import { RequestPreprocessor } from "../index";
/**
* Ensures the selected model family is enabled by the proxy configuration.
*/
export const validateModelFamily: RequestPreprocessor = (req) => {
const family = getModelFamilyForRequest(req);
if (!config.allowedModelFamilies.includes(family)) {
throw new ForbiddenError(
`Model family '${family}' is not enabled on this proxy`
);
}
};
@@ -0,0 +1,50 @@
import { config } from "../../../../config";
import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
import { containsImageContent as containsImageContentOpenAI } from "../../../../shared/api-schemas/openai";
import { containsImageContent as containsImageContentAnthropic } from "../../../../shared/api-schemas/anthropic";
import { containsImageContent as containsImageContentGoogleAI } from "../../../../shared/api-schemas/google-ai";
import { ForbiddenError } from "../../../../shared/errors";
/**
* Rejects prompts containing images if multimodal prompts are disabled.
*/
export const validateVision: RequestPreprocessor = async (req) => {
if (req.service === undefined) {
throw new Error("Request service must be set before validateVision");
}
if (req.user?.type === "special") return;
if (config.allowedVisionServices.includes(req.service)) return;
// vision not allowed for req's service, block prompts with images
let hasImage = false;
switch (req.outboundApi) {
case "openai":
hasImage = containsImageContentOpenAI(req.body.messages);
break;
case "openai-responses":
hasImage = containsImageContentOpenAI(req.body.messages);
break;
case "anthropic-chat":
hasImage = containsImageContentAnthropic(req.body.messages);
break;
case "google-ai":
hasImage = containsImageContentGoogleAI(req.body.contents);
break;
case "anthropic-text":
case "mistral-ai":
case "mistral-text":
case "openai-image":
case "openai-text":
return;
default:
assertNever(req.outboundApi);
}
if (hasImage) {
throw new ForbiddenError(
"Prompts containing images are not permitted. Disable 'Send Inline Images' in your client and try again."
);
}
};
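// For example (a sketch): if config.allowedVisionServices is ["openai"], an
// anthropic-chat request whose messages contain an image block is rejected
// with the ForbiddenError above, while the same image prompt sent in OpenAI
// format passes through.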
@@ -0,0 +1,135 @@
import { Request, Response } from "express";
import http from "http";
import ProxyServer from "http-proxy";
import { Readable } from "stream";
import {
createProxyMiddleware,
Options,
debugProxyErrorsPlugin,
proxyEventsPlugin,
} from "http-proxy-middleware";
import { ProxyReqMutator, stripHeaders } from "./index";
import { createOnProxyResHandler, ProxyResHandlerWithBody } from "../response";
import { createQueueMiddleware } from "../../queue";
import { getHttpAgents } from "../../../shared/network";
import { classifyErrorAndSend } from "../common";
/**
* Options for the `createQueuedProxyMiddleware` factory function.
*/
type ProxyMiddlewareFactoryOptions = {
/**
* Functions which receive a ProxyReqManager and can modify the request before
* it is proxied. The modifications will be automatically reverted if the
* request needs to be returned to the queue.
*/
mutations?: ProxyReqMutator[];
/**
* The target URL to proxy requests to. This can be a string or a function
* which accepts the request and returns a string.
*/
target: string | Options<Request>["router"];
/**
* A function which receives the proxy response and the JSON-decoded request
* body. Only fired for non-streaming responses; streaming responses are
* handled in `handle-streaming-response.ts`.
*/
blockingResponseHandler?: ProxyResHandlerWithBody;
};
/**
* Returns a middleware function that accepts incoming requests and places them
* into the request queue. When the request is dequeued, it is proxied to the
* target URL using the given options and middleware. Non-streaming responses
* are handled by the given `blockingResponseHandler`.
*/
export function createQueuedProxyMiddleware({
target,
mutations,
blockingResponseHandler,
}: ProxyMiddlewareFactoryOptions) {
const hpmTarget = typeof target === "string" ? target : "https://setbyrouter";
const hpmRouter = typeof target === "function" ? target : undefined;
const [httpAgent, httpsAgent] = getHttpAgents();
const agent = hpmTarget.startsWith("http:") ? httpAgent : httpsAgent;
const proxyMiddleware = createProxyMiddleware<Request, Response>({
target: hpmTarget,
router: hpmRouter,
agent,
changeOrigin: true,
toProxy: true,
selfHandleResponse: typeof blockingResponseHandler === "function",
// Disable HPM logger plugin (requires re-adding the other default plugins).
// Contrary to name, debugProxyErrorsPlugin is not just for debugging and
// fixes several error handling/connection close issues in http-proxy core.
ejectPlugins: true,
// Inferred (via Options<express.Request>) as Plugin<express.Request>, but
// the default plugins only allow http.IncomingMessage for TReq. They are
// compatible with express.Request, so we can use them. `Plugin` type is not
// exported for some reason.
plugins: [
debugProxyErrorsPlugin,
pinoLoggerPlugin,
proxyEventsPlugin,
] as any,
on: {
proxyRes: createOnProxyResHandler(
blockingResponseHandler ? [blockingResponseHandler] : []
),
error: classifyErrorAndSend,
},
buffer: ((req: Request) => {
// This is a hack/monkey patch and is not part of the official
// http-proxy-middleware package. See patches/http-proxy+1.18.1.patch.
let payload = req.body;
if (typeof payload === "string") {
payload = Buffer.from(payload);
}
const stream = new Readable();
stream.push(payload);
stream.push(null);
return stream;
}) as any,
});
return createQueueMiddleware({
mutations: [stripHeaders, ...(mutations ?? [])],
proxyMiddleware,
});
}
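// Usage sketch (the mutator and handler names below are illustrative, not
// part of this file):
//   const proxy = createQueuedProxyMiddleware({
//     target: "https://api.example-upstream.com",
//     mutations: [addApiKey],              // hypothetical ProxyReqMutator
//     blockingResponseHandler: trackUsage, // hypothetical handler
//   });
//   router.post("/v1/chat/completions", proxy);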
type ProxiedResponse = http.IncomingMessage & Response & any;
function pinoLoggerPlugin(proxyServer: ProxyServer<Request>) {
proxyServer.on("error", (err, req, res, target) => {
req.log.error(
{ originalUrl: req.originalUrl, targetUrl: String(target), err },
"Error occurred while proxying request to target"
);
});
proxyServer.on("proxyReq", (proxyReq, req) => {
const { protocol, host, path } = proxyReq;
req.log.info(
{
from: req.originalUrl,
to: `${protocol}//${host}${path}`,
},
"Sending request to upstream API..."
);
});
proxyServer.on("proxyRes", (proxyRes: ProxiedResponse, req, _res) => {
const { protocol, host, path } = proxyRes.req;
req.log.info(
{
target: `${protocol}//${host}${path}`,
status: proxyRes.statusCode,
contentType: proxyRes.headers["content-type"],
contentEncoding: proxyRes.headers["content-encoding"],
contentLength: proxyRes.headers["content-length"],
transferEncoding: proxyRes.headers["transfer-encoding"],
},
"Got response from upstream API."
);
});
}
@@ -0,0 +1,112 @@
import { Request } from "express";
import { Key } from "../../../shared/key-management";
import { assertNever } from "../../../shared/utils";
/**
* Represents a change to the request that will be reverted if the request
* fails.
*/
interface ProxyReqMutation {
target: "header" | "path" | "body" | "api-key" | "signed-request";
key?: string;
originalValue: any | undefined;
}
/**
* Manages a request's headers, body, and path, allowing them to be modified
* before the request is proxied and automatically reverted if the request
* needs to be retried.
*/
export class ProxyReqManager {
private req: Request;
private mutations: ProxyReqMutation[] = [];
/**
* A read-only proxy of the request object. Avoid changing any properties
* here as they will persist across retries.
*/
public readonly request: Readonly<Request>;
constructor(req: Request) {
this.req = req;
this.request = new Proxy(req, {
get: (target, prop) => {
if (typeof prop === "string") return target[prop as keyof Request];
return undefined;
},
});
}
setHeader(name: string, newValue: string): void {
const originalValue = this.req.get(name);
this.mutations.push({ target: "header", key: name, originalValue });
this.req.headers[name.toLowerCase()] = newValue;
}
removeHeader(name: string): void {
const originalValue = this.req.get(name);
this.mutations.push({ target: "header", key: name, originalValue });
delete this.req.headers[name.toLowerCase()];
}
setBody(newBody: any): void {
const originalValue = this.req.body;
this.mutations.push({ target: "body", key: "body", originalValue });
this.req.body = newBody;
}
setKey(newKey: Key): void {
const originalValue = this.req.key;
this.mutations.push({ target: "api-key", key: "key", originalValue });
this.req.key = newKey;
}
setPath(newPath: string): void {
const originalValue = this.req.path;
this.mutations.push({ target: "path", key: "path", originalValue });
this.req.url = newPath;
}
setSignedRequest(newSignedRequest: typeof this.req.signedRequest): void {
const originalValue = this.req.signedRequest;
this.mutations.push({ target: "signed-request", key: "signedRequest", originalValue });
this.req.signedRequest = newSignedRequest;
}
hasChanged(): boolean {
return this.mutations.length > 0;
}
revert(): void {
for (const mutation of this.mutations.reverse()) {
switch (mutation.target) {
case "header":
if (mutation.originalValue === undefined) {
delete this.req.headers[mutation.key!.toLowerCase()];
continue;
} else {
this.req.headers[mutation.key!.toLowerCase()] =
mutation.originalValue;
}
break;
case "path":
this.req.url = mutation.originalValue;
break;
case "body":
this.req.body = mutation.originalValue;
break;
case "api-key":
// We don't reset the key here because it's not a property of the
// inbound request, so we'd only ever be reverting it to null.
break;
case "signed-request":
this.req.signedRequest = mutation.originalValue;
break;
default:
assertNever(mutation.target);
}
}
this.mutations = [];
}
}
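// Usage sketch (illustrative): stage changes before proxying, then undo them
// if the request must be returned to the queue.
//   const changes = new ProxyReqManager(req);
//   changes.setHeader("Authorization", `Bearer ${apiKey}`); // hypothetical key
//   changes.setPath("/v1/messages");
//   if (needsRetry) changes.revert(); // restores the header and path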
@@ -0,0 +1,36 @@
import util from "util";
import zlib from "zlib";
import { PassThrough } from "stream";
const BUFFER_DECODER_MAP = {
gzip: util.promisify(zlib.gunzip),
deflate: util.promisify(zlib.inflate),
br: util.promisify(zlib.brotliDecompress),
text: (data: Buffer) => data,
};
const STREAM_DECODER_MAP = {
gzip: zlib.createGunzip,
deflate: zlib.createInflate,
br: zlib.createBrotliDecompress,
text: () => new PassThrough(),
};
type SupportedContentEncoding = keyof typeof BUFFER_DECODER_MAP;
const isSupportedContentEncoding = (
encoding: string
): encoding is SupportedContentEncoding => encoding in BUFFER_DECODER_MAP;
export async function decompressBuffer(buf: Buffer, encoding: string = "text") {
if (isSupportedContentEncoding(encoding)) {
return (await BUFFER_DECODER_MAP[encoding](buf)).toString();
}
throw new Error(`Unsupported content-encoding: ${encoding}`);
}
export function getStreamDecompressor(encoding: string = "text") {
if (isSupportedContentEncoding(encoding)) {
return STREAM_DECODER_MAP[encoding]();
}
throw new Error(`Unsupported content-encoding: ${encoding}`);
}
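// Usage sketch: decode an upstream body according to its content-encoding
// header, falling back to the identity "text" decoder when none is set.
//   const text = await decompressBuffer(rawBody, "gzip"); // rawBody: Buffer (hypothetical)
//   proxyRes.pipe(getStreamDecompressor(proxyRes.headers["content-encoding"]));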
@@ -0,0 +1,429 @@
import express from "express";
import { APIFormat } from "../../../shared/key-management";
import { assertNever } from "../../../shared/utils";
import { initializeSseStream } from "../../../shared/streaming";
import http from "http";
/**
* Returns a Markdown-formatted message that renders semi-nicely in most chat
* frontends. For example:
*
* **Proxy error (HTTP 404 Not Found)**
* The proxy encountered an error while trying to send your prompt to the upstream service. Further technical details are provided below.
* ***
* *The requested Claude model might not exist, or the key might not be provisioned for it.*
* ```
* {
* "type": "error",
* "error": {
* "type": "not_found_error",
* "message": "model: some-invalid-model-id",
* },
* "proxy_note": "The requested Claude model might not exist, or the key might not be provisioned for it."
* }
* ```
*/
function getMessageContent(params: {
title: string;
message: string;
obj?: Record<string, any>;
}) {
const { title, message, obj } = params;
const note = obj?.proxy_note || obj?.error?.message || "";
const header = `### **${title}**`;
const friendlyMessage = note ? `${message}\n\n----\n\n*${note}*` : message;
const serializedObj = obj
? ["```", JSON.stringify(obj, null, 2), "```"].join("\n")
: "";
const { stack } = JSON.parse(JSON.stringify(obj ?? {}));
let prettyTrace = "";
if (stack && obj) {
prettyTrace = [
"Include this trace when reporting an issue.",
"```",
stack,
"```",
].join("\n");
delete obj.stack;
}
return [
header,
friendlyMessage,
serializedObj,
prettyTrace,
"<!-- oai-proxy-error -->",
].join("\n\n");
}
type ErrorGeneratorOptions = {
format: APIFormat | "unknown";
title: string;
message: string;
obj?: Record<string, any>;
reqId: string | number | object;
model?: string;
statusCode?: number;
};
/**
* Very crude inference of the request format based on the request body. Don't
* rely on this to be very accurate.
*/
function tryInferFormat(body: any): APIFormat | "unknown" {
if (typeof body !== "object" || !body.model) {
return "unknown";
}
if (body.model.includes("gpt")) {
return "openai";
}
if (body.model.includes("mistral")) {
return "mistral-ai";
}
if (body.model.includes("claude")) {
return body.messages?.length ? "anthropic-chat" : "anthropic-text";
}
if (body.model.includes("gemini")) {
return "google-ai";
}
return "unknown";
}
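// For example (a sketch): { model: "claude-3-opus", messages: [...] } infers
// "anthropic-chat", { model: "claude-2.1", prompt: "..." } infers
// "anthropic-text", and a body with no recognizable model stays "unknown".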
/**
* Redacts the hostname from the error message if it contains a DNS resolution
* error. This is to avoid leaking upstream hostnames on DNS resolution errors,
* as those may contain sensitive information about the proxy's configuration.
*/
function redactHostname(options: ErrorGeneratorOptions): ErrorGeneratorOptions {
if (!options.message.includes("getaddrinfo")) return options;
const redacted = { ...options };
redacted.message = "Could not resolve hostname";
if (typeof redacted.obj?.error === "object") {
redacted.obj = {
...redacted.obj,
error: { message: "Could not resolve hostname" },
};
}
return redacted;
}
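// For example (a sketch): a failure message containing
//   "getaddrinfo ENOTFOUND upstream.internal.example"
// is rewritten to the generic "Could not resolve hostname" so the upstream
// hostname never reaches the client.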
/**
* Generates an appropriately-formatted error response and sends it to the
* client over their requested transport (blocking or SSE stream).
*/
export function sendErrorToClient(params: {
options: ErrorGeneratorOptions;
req: express.Request;
res: express.Response;
}) {
const { req, res } = params;
const options = redactHostname(params.options);
const { statusCode, message, title, obj: details } = options;
// Since we want to send the error in a format the client understands, we
// need to know the request format. `setApiFormat` might not have been called
// yet, so we'll try to infer it from the request body.
const format =
options.format === "unknown" ? tryInferFormat(req.body) : options.format;
if (format === "unknown") {
// Early middleware error (auth, rate limit) so we can only send something
// generic.
const code = statusCode || 400;
const hasDetails = details && Object.keys(details).length > 0;
return res.status(code).json({
error: {
message,
type: http.STATUS_CODES[code]!.replace(/\s+/g, "_").toLowerCase(),
},
...(hasDetails ? { details } : {}),
});
}
// Cannot modify headers if client opted into streaming and made it into the
// proxy request queue, because that immediately starts an SSE stream.
if (!res.headersSent) {
res.setHeader("x-oai-proxy-error", title);
res.setHeader("x-oai-proxy-error-status", statusCode || 500);
}
// By this point, we know the request format. To get the error to display in
// chat clients' UIs, we'll send it as a 200 response as a spoofed completion
// from the language model. Depending on whether the client is streaming, we
// will either send an SSE event or a JSON response.
const isStreaming = req.isStreaming || String(req.body.stream) === "true";
if (isStreaming) {
// User can have opted into streaming but not made it into the queue yet,
// in which case the stream must be started first.
if (!res.headersSent) {
initializeSseStream(res);
}
res.write(buildSpoofedSSE({ ...options, format }));
res.write(`data: [DONE]\n\n`);
res.end();
} else {
res.status(200).json(buildSpoofedCompletion({ ...options, format }));
}
}
/**
* Returns a non-streaming completion object that looks like it came from the
* service that the request is being proxied to. Used to send error messages to
* the client and have them look like normal responses, for clients with poor
* error handling.
*/
export function buildSpoofedCompletion({
format,
title,
message,
obj,
reqId,
model = "unknown",
}: ErrorGeneratorOptions & { format: Exclude<APIFormat, "unknown"> }) {
const id = String(reqId);
const content = getMessageContent({ title, message, obj });
switch (format) {
case "openai":
case "openai-responses":
return {
id: "error-" + id,
object: "chat.completion",
created: Date.now(),
model,
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
choices: [
{
message: { role: "assistant", content },
finish_reason: title,
index: 0,
},
],
};
case "mistral-ai":
return {
id: "error-" + id,
object: "chat.completion",
created: Date.now(),
model,
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
choices: [
{
message: { role: "assistant", content },
finish_reason: title,
index: 0,
},
],
};
case "mistral-text":
return {
outputs: [{ text: content, stop_reason: title }],
model,
};
case "openai-text":
return {
id: "error-" + id,
object: "text_completion",
created: Date.now(),
model,
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
choices: [
{ text: content, index: 0, logprobs: null, finish_reason: title },
],
};
case "anthropic-text":
return {
id: "error-" + id,
type: "completion",
completion: content,
stop_reason: title,
stop: null,
model,
};
case "anthropic-chat":
return {
id: "error-" + id,
type: "message",
role: "assistant",
content: [{ type: "text", text: content }],
model,
stop_reason: title,
stop_sequence: null,
};
case "google-ai":
return {
candidates: [
{
content: { parts: [{ text: content }], role: "model" },
finishReason: title,
index: 0,
tokenCount: null,
safetyRatings: [],
},
],
};
case "openai-image":
return obj;
default:
assertNever(format);
}
}
/**
* Returns an SSE message that looks like a completion event for the service
* that the request is being proxied to. Used to send error messages to the
* client in the middle of a streaming request.
*/
export function buildSpoofedSSE({
format,
title,
message,
obj,
reqId,
model = "unknown",
}: ErrorGeneratorOptions & { format: Exclude<APIFormat, "unknown"> }) {
const id = String(reqId);
const content = getMessageContent({ title, message, obj });
let event;
switch (format) {
case "openai":
case "openai-responses":
event = {
id: "chatcmpl-" + id,
object: "chat.completion.chunk",
created: Date.now(),
model,
choices: [{ delta: { content }, index: 0, finish_reason: title }],
};
break;
case "mistral-ai":
event = {
id: "chatcmpl-" + id,
object: "chat.completion.chunk",
created: Date.now(),
model,
choices: [{ delta: { content }, index: 0, finish_reason: title }],
};
break;
case "mistral-text":
event = {
outputs: [{ text: content, stop_reason: title }],
};
break;
case "openai-text":
event = {
id: "cmpl-" + id,
object: "text_completion",
created: Date.now(),
choices: [
{ text: content, index: 0, logprobs: null, finish_reason: title },
],
model,
};
break;
case "anthropic-text":
event = {
completion: content,
stop_reason: title,
truncated: false,
stop: null,
model,
log_id: "proxy-req-" + id,
};
break;
case "anthropic-chat":
event = {
type: "content_block_delta",
index: 0,
delta: { type: "text_delta", text: content },
};
break;
case "google-ai":
// TODO: google ai supports two streaming transports, SSE and JSON.
// we currently only support SSE.
// return JSON.stringify({
event = {
candidates: [
{
content: { parts: [{ text: content }], role: "model" },
finishReason: title,
index: 0,
tokenCount: null,
safetyRatings: [],
},
],
};
break;
case "openai-image":
return JSON.stringify(obj);
default:
assertNever(format);
}
if (format === "anthropic-text") {
return (
["event: completion", `data: ${JSON.stringify(event)}`].join("\n") +
"\n\n"
);
}
// ugh.
if (format === "anthropic-chat") {
return (
[
[
"event: message_start",
`data: ${JSON.stringify({
type: "message_start",
message: {
id: "error-" + id,
type: "message",
role: "assistant",
content: [],
model,
},
})}`,
].join("\n"),
[
"event: content_block_start",
`data: ${JSON.stringify({
type: "content_block_start",
index: 0,
content_block: { type: "text", text: "" },
})}`,
].join("\n"),
["event: content_block_delta", `data: ${JSON.stringify(event)}`].join(
"\n"
),
[
"event: content_block_stop",
`data: ${JSON.stringify({ type: "content_block_stop", index: 0 })}`,
].join("\n"),
[
"event: message_delta",
`data: ${JSON.stringify({
type: "message_delta",
delta: { stop_reason: title, stop_sequence: null, usage: null },
})}`,
],
[
"event: message_stop",
`data: ${JSON.stringify({ type: "message_stop" })}`,
].join("\n"),
].join("\n\n") + "\n\n"
);
}
return `data: ${JSON.stringify(event)}\n\n`;
}
@@ -0,0 +1,70 @@
import { sendProxyError } from "../common";
import type { RawResponseBodyHandler } from "./index";
import { decompressBuffer } from "./compression";
/**
* Handles the response from the upstream service and decodes the body if
* necessary. If the response is JSON, it will be parsed and returned as an
* object. Otherwise, it will be returned as a string. Does not handle streaming
* responses.
* @throws {Error} Unsupported content-encoding or invalid application/json body
*/
export const handleBlockingResponse: RawResponseBodyHandler = async (
proxyRes,
req,
res
) => {
if (req.isStreaming) {
const err = new Error(
"handleBlockingResponse called for a streaming request."
);
req.log.error({ stack: err.stack, api: req.inboundApi }, err.message);
throw err;
}
return new Promise((resolve, reject) => {
let chunks: Buffer[] = [];
proxyRes.on("data", (chunk) => chunks.push(chunk));
proxyRes.on("end", async () => {
const contentEncoding = proxyRes.headers["content-encoding"];
const contentType = proxyRes.headers["content-type"];
let body: string | Buffer = Buffer.concat(chunks);
const rejectWithMessage = function (msg: string, err: Error) {
const error = `${msg} (${err.message})`;
req.log.warn(
{ msg: error, stack: err.stack },
"Error in blocking response handler"
);
sendProxyError(req, res, 500, "Internal Server Error", { error });
return reject(error);
};
try {
body = await decompressBuffer(body, contentEncoding);
} catch (e) {
return rejectWithMessage(`Could not decode response body`, e);
}
try {
return resolve(tryParseAsJson(body, contentType));
} catch (e) {
return rejectWithMessage("API responded with invalid JSON", e);
}
});
});
};
function tryParseAsJson(body: string, contentType?: string) {
// If the response is declared as JSON, it must parse or we will throw
if (contentType?.includes("application/json")) {
return JSON.parse(body);
}
// If it's not declared as JSON, some APIs we'll try to parse it as JSON
// anyway since some APIs return the wrong content-type header in some cases.
// If it fails to parse, we'll just return the raw body without throwing.
try {
return JSON.parse(body);
} catch (e) {
return body;
}
}
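// For example (a sketch): tryParseAsJson('{"ok":true}', "text/plain") returns
// the parsed object despite the wrong content-type, while tryParseAsJson("oops")
// returns the raw string "oops" instead of throwing.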
@@ -1,67 +1,95 @@
import express from "express";
import { pipeline, Readable, Transform } from "stream";
import { StringDecoder } from "string_decoder";
import { promisify } from "util";
import type { logger } from "../../../logger";
import { BadRequestError, RetryableError } from "../../../shared/errors";
import { APIFormat, keyPool } from "../../../shared/key-management";
import {
copySseResponseHeaders,
initializeSseStream,
} from "../../../shared/streaming";
import { reenqueueRequest } from "../../queue";
import type { RawResponseBodyHandler } from ".";
import { handleBlockingResponse } from "./handle-blocking-response";
import { buildSpoofedSSE, sendErrorToClient } from "./error-generator";
import { getAwsEventStreamDecoder } from "./streaming/aws-event-stream-decoder";
import { EventAggregator } from "./streaming/event-aggregator";
import { SSEMessageTransformer } from "./streaming/sse-message-transformer";
import { SSEStreamAdapter } from "./streaming/sse-stream-adapter";
import { getStreamDecompressor } from "./compression";
const pipelineAsync = promisify(pipeline);
/**
* `handleStreamedResponse` consumes a streamed response from the upstream API,
* decodes chunk-by-chunk into a stream of events, transforms those events into
* the client's requested format, and forwards the result to the client.
*
* After the entire stream has been consumed, it resolves with the full response
* body so that subsequent middleware in the chain can process it as if it were
* a non-streaming response (to count output tokens, track usage, etc).
*
* In the event of an error, the request's streaming flag is unset and the
* request is bounced back to the non-streaming response handler. If the error
* is retryable, that handler will re-enqueue the request and also reset the
* streaming flag. Unfortunately the streaming flag is set and unset in multiple
* places, so it's hard to keep track of.
*/
export const handleStreamedResponse: RawResponseBodyHandler = async (
proxyRes,
req,
res
) => {
const { headers, statusCode } = proxyRes;
if (!req.isStreaming) {
throw new Error("handleStreamedResponse called for non-streaming request.");
}
if (statusCode! > 201) {
req.isStreaming = false;
req.log.warn(
{ statusCode },
`Streaming request returned error status code. Falling back to non-streaming response handler.`
);
return handleBlockingResponse(proxyRes, req, res);
}
req.log.debug({ headers }, `Starting to proxy SSE stream.`);
// Typically, streaming will have already been initialized by the request
// queue to send heartbeat pings.
if (!res.headersSent) {
copySseResponseHeaders(proxyRes, res);
initializeSseStream(res);
}
const streamOptions = {
contentType: headers["content-type"],
api: req.outboundApi,
logger: req.log,
};
// While the request is streaming, aggregator collects all events so that we
// can compile them into a single response object and publish that to the
// remaining middleware. Because we have an OpenAI transformer for every
// supported format, EventAggregator always consumes OpenAI events so that we
// only have to write one aggregator (OpenAI input) for each output format.
const aggregator = new EventAggregator(req);
const decompressor = getStreamDecompressor(headers["content-encoding"]);
// Decoder reads from the response bytes to produce a stream of plaintext.
const decoder = getDecoder({ ...streamOptions, input: proxyRes });
// Adapter consumes the decoded text and produces server-sent events so we
// have a standard event format for the client and to translate between API
// message formats.
const adapter = new SSEStreamAdapter(streamOptions);
// Transformer converts server-sent events from one vendor's API message
// format to another.
const transformer = new SSEMessageTransformer({
inputFormat: req.outboundApi, // The format of the upstream service's events
outputFormat: req.inboundApi, // The format the client requested
inputApiVersion: String(req.headers["anthropic-version"]),
logger: req.log,
requestId: String(req.id),
@@ -76,23 +104,33 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
});
try {
await Promise.race([
handleAbortedStream(req, res),
pipelineAsync(proxyRes, decompressor, decoder, adapter, transformer),
]);
req.log.debug(`Finished proxying SSE stream.`);
res.end();
return aggregator.getFinalResponse();
} catch (err) {
if (err instanceof RetryableError) {
keyPool.markRateLimited(req.key!);
req.log.warn(
{ key: req.key!.hash, retryCount: req.retryCount },
`Re-enqueueing request due to retryable error during streaming response.`
);
await reenqueueRequest(req);
} else if (err instanceof BadRequestError) {
sendErrorToClient({
req,
res,
options: {
format: req.inboundApi,
title: "Proxy streaming error (Bad Request)",
message: `The API returned an error while streaming your request. Your prompt might not be formatted correctly.\n\n*${err.message}*`,
reqId: req.id,
model: req.body?.model,
},
});
} else {
const { message, stack, lastEvent } = err;
const eventText = JSON.stringify(lastEvent, null, 2) ?? "undefined";
const errorEvent = buildSpoofedSSE({
format: req.inboundApi,
title: "Proxy stream error",
message: "An unexpected error occurred while streaming the response.",
@@ -104,6 +142,53 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
res.write(`data: [DONE]\n\n`);
res.end();
}
// At this point the response is closed. If the request resulted in any
// tokens being consumed (suggesting a mid-stream error), we will resolve
// and continue the middleware chain so tokens can be counted.
if (aggregator.hasEvents()) {
return aggregator.getFinalResponse();
} else {
// If there is nothing, then this was a completely failed prompt that
// will not have billed any tokens. Throw to stop the middleware chain.
throw err;
}
}
};
function handleAbortedStream(req: express.Request, res: express.Response) {
return new Promise<void>((resolve) =>
res.on("close", () => {
if (!res.writableEnded) {
req.log.info("Client prematurely closed connection during stream.");
}
resolve();
})
);
}
function getDecoder(options: {
input: Readable;
api: APIFormat;
logger: typeof logger;
contentType?: string;
}) {
const { contentType, input, logger } = options;
if (contentType?.includes("application/vnd.amazon.eventstream")) {
return getAwsEventStreamDecoder({ input, logger });
} else if (contentType?.includes("application/json")) {
throw new Error("JSON streaming not supported, request SSE instead");
} else {
// Ensures split chunks across multi-byte characters are handled correctly.
const stringDecoder = new StringDecoder("utf8");
return new Transform({
readableObjectMode: true,
writableObjectMode: false,
transform(chunk, _encoding, callback) {
const text = stringDecoder.write(chunk);
if (text) this.push(text);
callback();
},
});
}
}
@@ -0,0 +1,81 @@
import { createHash } from "crypto";
import { config } from "../../../config";
import { eventLogger } from "../../../shared/prompt-logging";
import { getModelFromBody, isTextGenerationRequest } from "../common";
import { ProxyResHandlerWithBody } from ".";
import {
OpenAIChatMessage,
AnthropicChatMessage,
} from "../../../shared/api-schemas";
/** If event logging is enabled, logs a chat completion event. */
export const logEvent: ProxyResHandlerWithBody = async (
_proxyRes,
req,
_res,
responseBody
) => {
if (!config.eventLogging) {
return;
}
if (typeof responseBody !== "object") {
throw new Error("Expected body to be an object");
}
if (!["openai", "anthropic-chat"].includes(req.outboundApi)) {
// only chat apis are supported
return;
}
if (!req.user) {
return;
}
const loggable = isTextGenerationRequest(req);
if (!loggable) return;
const messages = req.body.messages as
| OpenAIChatMessage[]
| AnthropicChatMessage[];
let hashes = [];
hashes.push(hashMessages(messages));
for (
let i = 1;
i <= Math.min(config.eventLoggingTrim!, messages.length);
i++
) {
hashes.push(hashMessages(messages.slice(0, -i)));
}
const model = getModelFromBody(req, responseBody);
const userToken = req.user!.token;
const family = req.modelFamily!;
eventLogger.logEvent({
ip: req.ip,
type: "chat_completion",
model,
family,
hashes,
userToken,
inputTokens: req.promptTokens ?? 0,
outputTokens: req.outputTokens ?? 0,
});
};
const hashMessages = (
messages: OpenAIChatMessage[] | AnthropicChatMessage[]
): string => {
let hasher = createHash("sha256");
let messageTexts = [];
for (const msg of messages) {
if (!["system", "user", "assistant"].includes(msg.role)) continue;
if (typeof msg.content === "string") {
messageTexts.push(msg.content);
} else if (Array.isArray(msg.content)) {
if (msg.content[0].type === "text") {
messageTexts.push(msg.content[0].text);
}
}
}
hasher.update(messageTexts.join("<|im_sep|>"));
return hasher.digest("hex");
};
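// For example (a sketch): with config.eventLoggingTrim = 2 and a five-message
// chat, the logged hashes cover [all five messages, the first four, the first
// three], so a later event can be matched to an earlier one even after the
// newest turns change.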
@@ -10,9 +10,12 @@ import {
import { ProxyResHandlerWithBody } from ".";
import { assertNever } from "../../../shared/utils";
import {
AnthropicChatMessage,
flattenAnthropicMessages,
GoogleAIChatMessage,
MistralAIChatMessage,
OpenAIChatMessage,
} from "../request/preprocessors/transform-outbound-payload";
} from "../../../shared/api-schemas";
/** If prompt logging is enabled, enqueues the prompt for logging. */
export const logPrompt: ProxyResHandlerWithBody = async (
@@ -57,15 +60,33 @@ type OaiImageResult = {
const getPromptForRequest = (
req: Request,
responseBody: Record<string, any>
): string | OpenAIChatMessage[] | MistralAIChatMessage[] | OaiImageResult => {
):
| string
| OpenAIChatMessage[]
| { contents: GoogleAIChatMessage[] }
| { system: string; messages: AnthropicChatMessage[] }
| MistralAIChatMessage[]
| OaiImageResult => {
// Since the prompt logger only runs after the request has been proxied, we
// can assume the body has already been transformed to the target API's
// format.
switch (req.outboundApi) {
case "openai":
case "openai-responses":
return req.body.messages;
case "mistral-ai":
return req.body.messages;
case "anthropic-chat":
let system = req.body.system;
if (Array.isArray(system)) {
system = system
.map((m: { type: string; text: string }) => m.text)
.join("\n");
}
return { system, messages: req.body.messages };
case "openai-text":
case "anthropic-text":
case "mistral-text":
return req.body.prompt;
case "openai-image":
return {
@@ -75,21 +96,37 @@ const getPromptForRequest = (
quality: req.body.quality,
revisedPrompt: responseBody.data[0].revised_prompt,
};
case "anthropic":
return req.body.prompt;
case "google-ai":
return { contents: req.body.contents };
default:
assertNever(req.outboundApi);
}
};
const flattenMessages = (
val:
| string
| OaiImageResult
| OpenAIChatMessage[]
| { contents: GoogleAIChatMessage[] }
| { system: string; messages: AnthropicChatMessage[] }
| MistralAIChatMessage[]
): string => {
if (typeof val === "string") {
return val.trim();
}
if (isAnthropicChatPrompt(val)) {
const { system, messages } = val;
return `System: ${system}\n\n${flattenAnthropicMessages(messages)}`;
}
if (isGoogleAIChatPrompt(val)) {
return val.contents
.map(({ parts, role }) => {
const text = parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text).join("\n");
return `${role}: ${text}`;
})
.join("\n");
}
if (Array.isArray(val)) {
return val
.map(({ content, role }) => {
@@ -98,6 +135,8 @@ const flattenMessages = (
.map((c) => {
if ("text" in c) return c.text;
if ("image_url" in c) return "(( Attached Image ))";
if ("source" in c) return "(( Attached Image ))";
return "(( Unsupported Content ))";
})
.join("\n")
: content;
@@ -107,3 +146,20 @@ const flattenMessages = (
}
return val.prompt.trim();
};
function isGoogleAIChatPrompt(
val: unknown
): val is { contents: GoogleAIChatMessage[] } {
return typeof val === "object" && val !== null && "contents" in val;
}
function isAnthropicChatPrompt(
val: unknown
): val is { system: string; messages: AnthropicChatMessage[] } {
return (
typeof val === "object" &&
val !== null &&
"system" in val &&
"messages" in val
);
}
@@ -1,11 +1,14 @@
import { ProxyResHandlerWithBody } from "./index";
import { mirrorGeneratedImage, OpenAIImageGenerationResult } from "../../../shared/file-storage/mirror-generated-image";
import {
mirrorGeneratedImage,
OpenAIImageGenerationResult,
} from "../../../shared/file-storage/mirror-generated-image";
export const saveImage: ProxyResHandlerWithBody = async (
_proxyRes,
req,
_res,
body
) => {
if (req.outboundApi !== "openai-image") {
return;
@@ -16,12 +19,15 @@ export const saveImage: ProxyResHandlerWithBody = async (
}
if (body.data) {
const prompt = body.data[0].revised_prompt ?? req.body.prompt;
const res = await mirrorGeneratedImage(
req,
prompt,
body as OpenAIImageGenerationResult
);
req.log.info(
{ urls: res.data.map((item) => item.url) },
"Saved generated image to user_content"
);
}
};
@@ -0,0 +1,49 @@
import { OpenAIChatCompletionStreamEvent } from "../index";
export type AnthropicChatCompletionResponse = {
id: string;
type: "message";
role: "assistant";
content: { type: "text"; text: string }[];
model: string;
stop_reason: string | null;
stop_sequence: string | null;
usage: { input_tokens: number; output_tokens: number };
};
/**
* Given a list of OpenAI chat completion events, compiles them into a single
* finalized Anthropic chat completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForAnthropicChat(
events: OpenAIChatCompletionStreamEvent[]
): AnthropicChatCompletionResponse {
let merged: AnthropicChatCompletionResponse = {
id: "",
type: "message",
role: "assistant",
content: [],
model: "",
stop_reason: null,
stop_sequence: null,
usage: { input_tokens: 0, output_tokens: 0 },
};
merged = events.reduce((acc, event, i) => {
// The first event will only contain role assignment and response metadata
if (i === 0) {
acc.id = event.id;
acc.model = event.model;
acc.content = [{ type: "text", text: "" }];
return acc;
}
acc.stop_reason = event.choices[0].finish_reason ?? "";
if (event.choices[0].delta.content) {
acc.content[0].text += event.choices[0].delta.content;
}
return acc;
}, merged);
return merged;
}
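// For example (a sketch): a role-assignment chunk followed by deltas "Hel"
// and "lo" (the latter with finish_reason "end_turn") merges into a message
// whose content is [{ type: "text", text: "Hello" }] with stop_reason
// "end_turn".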
@@ -1,6 +1,6 @@
import { OpenAIChatCompletionStreamEvent } from "../index";
export type AnthropicTextCompletionResponse = {
completion: string;
stop_reason: string;
truncated: boolean;
@@ -15,10 +15,10 @@ export type AnthropicCompletionResponse = {
* finalized Anthropic completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForAnthropicText(
events: OpenAIChatCompletionStreamEvent[]
): AnthropicTextCompletionResponse {
let merged: AnthropicTextCompletionResponse = {
log_id: "",
exception: null,
model: "",
@@ -0,0 +1,39 @@
import { OpenAIChatCompletionStreamEvent } from "../index";
export type MistralChatCompletionResponse = {
choices: {
index: number;
message: { role: string; content: string };
finish_reason: string | null;
}[];
};
/**
* Given a list of OpenAI chat completion events, compiles them into a single
* finalized Mistral chat completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForMistralChat(
events: OpenAIChatCompletionStreamEvent[]
): MistralChatCompletionResponse {
let merged: MistralChatCompletionResponse = {
choices: [
{ index: 0, message: { role: "", content: "" }, finish_reason: "" },
],
};
merged = events.reduce((acc, event, i) => {
// The first event will only contain role assignment and response metadata
if (i === 0) {
acc.choices[0].message.role = event.choices[0].delta.role ?? "assistant";
return acc;
}
acc.choices[0].finish_reason = event.choices[0].finish_reason ?? "";
if (event.choices[0].delta.content) {
acc.choices[0].message.content += event.choices[0].delta.content;
}
return acc;
}, merged);
return merged;
}
@@ -0,0 +1,33 @@
import { OpenAIChatCompletionStreamEvent } from "../index";
export type MistralTextCompletionResponse = {
outputs: {
text: string;
stop_reason: string | null;
}[];
};
/**
* Given a list of OpenAI chat completion events, compiles them into a single
* finalized Mistral text completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForMistralText(
events: OpenAIChatCompletionStreamEvent[]
): MistralTextCompletionResponse {
let merged: MistralTextCompletionResponse = {
outputs: [{ text: "", stop_reason: "" }],
};
merged = events.reduce((acc, event, i) => {
// The first event will only contain role assignment and response metadata
if (i === 0) {
return acc;
}
acc.outputs[0].text += event.choices[0].delta.content ?? "";
acc.outputs[0].stop_reason = event.choices[0].finish_reason ?? "";
return acc;
}, merged);
return merged;
}
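Unlike the chat aggregator, the text variant skips the first (role-only) event entirely. A sketch, with the import path assumed:

import {
  mergeEventsForMistralText,
  OpenAIChatCompletionStreamEvent,
} from "./index"; // path assumed

declare const events: OpenAIChatCompletionStreamEvent[];

const merged = mergeEventsForMistralText(events);
// merged.outputs[0].text is every delta after the first event, concatenated;
// merged.outputs[0].stop_reason is whatever the final event reported.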
@@ -0,0 +1,93 @@
import pino from "pino";
import { Duplex, Readable } from "stream";
import { EventStreamMarshaller } from "@smithy/eventstream-serde-node";
import { fromUtf8, toUtf8 } from "@smithy/util-utf8";
import { Message } from "@smithy/eventstream-codec";
/**
* Decodes a Readable stream, such as a proxied HTTP response, into a stream of
* Message objects using the AWS SDK's EventStreamMarshaller. Error events in
 * the Amazon eventstream protocol are decoded as Message objects and will not
* emit an error event on the decoder stream.
*/
export function getAwsEventStreamDecoder(params: {
input: Readable;
logger: pino.Logger;
}): Duplex {
const { input, logger } = params;
const config = { utf8Encoder: toUtf8, utf8Decoder: fromUtf8 };
const eventStream = new EventStreamMarshaller(config).deserialize(
input,
async (input: Record<string, Message>) => {
const eventType = Object.keys(input)[0];
let result;
if (eventType === "chunk") {
result = input[eventType];
} else {
        // Non-chunk events (errors and exceptions) aren't extracted like
        // chunks; re-wrap them under their event type so downstream consumers
        // can still identify them.
result = { [eventType]: input[eventType] } as any;
}
return result;
}
);
return new AWSEventStreamDecoder(eventStream, { logger });
}
class AWSEventStreamDecoder extends Duplex {
private readonly asyncIterable: AsyncIterable<Message>;
private iterator: AsyncIterator<Message>;
private reading: boolean;
private logger: pino.Logger;
constructor(
asyncIterable: AsyncIterable<Message>,
options: { logger: pino.Logger }
) {
super({ ...options, objectMode: true });
this.asyncIterable = asyncIterable;
this.iterator = this.asyncIterable[Symbol.asyncIterator]();
this.reading = false;
this.logger = options.logger.child({ module: "aws-eventstream-decoder" });
}
async _read(_size: number) {
if (this.reading) return;
this.reading = true;
try {
while (true) {
const { value, done } = await this.iterator.next();
if (done) {
this.push(null);
break;
}
if (!this.push(value)) break;
}
} catch (err) {
// AWS SDK's EventStreamMarshaller emits errors in the stream itself as
// whatever our deserializer returns, which will not be Error objects
// because we want to pass the Message to the next stream for processing.
// Any actual Error thrown here is some failure during deserialization.
      const isAwsError = !(err instanceof Error);
      if (isAwsError) {
        const message = err as Message;
        this.logger.warn({ err: message.headers }, "Received AWS error event");
        this.push(message);
this.push(null);
} else {
this.logger.error(err, "Error during AWS stream deserialization");
this.destroy(err);
}
} finally {
this.reading = false;
}
}
_write(_chunk: any, _encoding: string, callback: () => void) {
callback();
}
_final(callback: () => void) {
callback();
}
}
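Sketch of wiring the decoder into a proxied response; upstreamResponse and the import path are assumptions:

import pino from "pino";
import { Readable } from "stream";
import { getAwsEventStreamDecoder } from "./aws-eventstream-decoder"; // path assumed

declare const upstreamResponse: Readable; // e.g. an http.IncomingMessage from Bedrock

const decoder = getAwsEventStreamDecoder({
  input: upstreamResponse,
  logger: pino(),
});

decoder.on("data", (message) => {
  // Every decoded Message lands here, including re-wrapped AWS error events.
});
decoder.on("error", (err) => {
  // Only genuine deserialization failures surface as stream errors.
});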
@@ -1,10 +1,19 @@
import express from "express";
import { APIFormat } from "../../../../shared/key-management";
import { assertNever } from "../../../../shared/utils";
import {
mergeEventsForAnthropic,
anthropicV2ToOpenAI,
mergeEventsForAnthropicChat,
mergeEventsForAnthropicText,
mergeEventsForOpenAIChat,
mergeEventsForOpenAIText,
mergeEventsForMistralChat,
mergeEventsForMistralText,
AnthropicV2StreamEvent,
OpenAIChatCompletionStreamEvent,
mistralAIToOpenAI,
MistralAIStreamEvent,
MistralChatCompletionEvent,
} from "./index";
/**
@@ -12,32 +21,112 @@ import {
* compiles them into a single finalized response for downstream middleware.
*/
export class EventAggregator {
private readonly format: APIFormat;
private readonly model: string;
private readonly requestFormat: APIFormat;
private readonly responseFormat: APIFormat;
private readonly events: OpenAIChatCompletionStreamEvent[];
constructor({ format }: { format: APIFormat }) {
constructor({ body, inboundApi, outboundApi }: express.Request) {
this.events = [];
this.format = format;
this.requestFormat = inboundApi;
this.responseFormat = outboundApi;
this.model = body.model;
}
addEvent(event: OpenAIChatCompletionStreamEvent) {
this.events.push(event);
addEvent(
event:
| OpenAIChatCompletionStreamEvent
| AnthropicV2StreamEvent
| MistralAIStreamEvent
) {
if (eventIsOpenAIEvent(event)) {
this.events.push(event);
} else {
      // Horrible special case: previously, every transformer's target format
      // was OpenAI, so the aggregator could assume all incoming events were
      // OpenAI SSEs. Now that some transformers convert between non-OpenAI
      // formats, the aggregator would need collapsing logic for each output
      // format. Rather than write an aggregator per format, we convert any
      // non-OpenAI event to OpenAI format here (even if the client never
      // requested OpenAI) so we still only need aggregators for OpenAI SSEs.
let openAIEvent: OpenAIChatCompletionStreamEvent | undefined;
switch (this.requestFormat) {
case "anthropic-text":
assertIsAnthropicV2Event(event);
openAIEvent = anthropicV2ToOpenAI({
data: `event: completion\ndata: ${JSON.stringify(event)}\n\n`,
lastPosition: -1,
index: 0,
fallbackId: event.log_id || "fallback-" + Date.now(),
fallbackModel: event.model || this.model || "fallback-claude-3",
})?.event;
break;
case "mistral-ai":
assertIsMistralChatEvent(event);
openAIEvent = mistralAIToOpenAI({
data: `data: ${JSON.stringify(event)}\n\n`,
lastPosition: -1,
index: 0,
fallbackId: "fallback-" + Date.now(),
fallbackModel: this.model || "fallback-mistral",
})?.event;
break;
}
if (openAIEvent) {
this.events.push(openAIEvent);
}
}
}
getFinalResponse() {
switch (this.format) {
switch (this.responseFormat) {
case "openai":
case "openai-responses":
case "google-ai":
case "mistral-ai":
return mergeEventsForOpenAIChat(this.events);
case "openai-text":
return mergeEventsForOpenAIText(this.events);
case "anthropic":
return mergeEventsForAnthropic(this.events);
case "anthropic-text":
return mergeEventsForAnthropicText(this.events);
case "anthropic-chat":
return mergeEventsForAnthropicChat(this.events);
case "mistral-ai":
return mergeEventsForMistralChat(this.events);
case "mistral-text":
return mergeEventsForMistralText(this.events);
case "openai-image":
throw new Error(`SSE aggregation not supported for ${this.format}`);
throw new Error(
`SSE aggregation not supported for ${this.responseFormat}`
);
default:
assertNever(this.format);
assertNever(this.responseFormat);
}
}
hasEvents() {
return this.events.length > 0;
}
}
function eventIsOpenAIEvent(
event: any
): event is OpenAIChatCompletionStreamEvent {
return event?.object === "chat.completion.chunk";
}
function assertIsAnthropicV2Event(
  event: any
): asserts event is AnthropicV2StreamEvent {
  // Check the field's type rather than truthiness: an empty-string completion
  // delta is valid and must not be rejected.
  if (typeof event?.completion !== "string") {
    throw new Error(`Bad event for Anthropic V2 SSE aggregation`);
  }
}
function assertIsMistralChatEvent(
event: any
): asserts event is MistralChatCompletionEvent {
if (!event?.choices) {
throw new Error(`Bad event for Mistral SSE aggregation`);
}
}
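Sketch of how a streaming handler might drive the aggregator; req is assumed to be an express.Request already decorated with inboundApi/outboundApi and a parsed body by earlier middleware:

import express from "express";
import { EventAggregator } from "./event-aggregator"; // path assumed

declare const req: express.Request; // inboundApi, outboundApi, body.model set
declare const streamedEvents: any[]; // events emitted by the SSE transformers

const aggregator = new EventAggregator(req);
for (const event of streamedEvents) {
  aggregator.addEvent(event); // non-OpenAI events are converted internally
}
if (aggregator.hasEvents()) {
  const finalBody = aggregator.getFinalResponse();
  // finalBody is shaped like a blocking response in the outbound API format,
  // ready for the non-streaming middleware chain.
}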
@@ -1,9 +1,36 @@
export type SSEResponseTransformArgs = {
export type SSEResponseTransformArgs<S = Record<string, any>> = {
data: string;
lastPosition: number;
index: number;
fallbackId: string;
fallbackModel: string;
state?: S;
};
export type MistralChatCompletionEvent = {
choices: {
index: number;
message: { role: string; content: string };
stop_reason: string | null;
}[];
};
export type MistralTextCompletionEvent = {
outputs: { text: string; stop_reason: string | null }[];
};
export type MistralAIStreamEvent = {
"amazon-bedrock-invocationMetrics"?: {
inputTokenCount: number;
outputTokenCount: number;
invocationLatency: number;
firstByteLatency: number;
};
} & (MistralChatCompletionEvent | MistralTextCompletionEvent);
export type AnthropicV2StreamEvent = {
log_id?: string;
model?: string;
completion: string;
stop_reason: string | null;
};
export type OpenAIChatCompletionStreamEvent = {
@@ -16,17 +43,29 @@ export type OpenAIChatCompletionStreamEvent = {
delta: { role?: string; content?: string };
finish_reason: string | null;
}[];
}
};
export type StreamingCompletionTransformer = (
params: SSEResponseTransformArgs
) => { position: number; event?: OpenAIChatCompletionStreamEvent };
export type StreamingCompletionTransformer<
T = OpenAIChatCompletionStreamEvent,
S = any,
> = (params: SSEResponseTransformArgs<S>) => {
position: number;
event?: T;
state?: S;
};
export { openAITextToOpenAIChat } from "./transformers/openai-text-to-openai";
export { anthropicV1ToOpenAI } from "./transformers/anthropic-v1-to-openai";
export { anthropicV2ToOpenAI } from "./transformers/anthropic-v2-to-openai";
export { anthropicChatToAnthropicV2 } from "./transformers/anthropic-chat-to-anthropic-v2";
export { anthropicChatToOpenAI } from "./transformers/anthropic-chat-to-openai";
export { googleAIToOpenAI } from "./transformers/google-ai-to-openai";
export { mistralAIToOpenAI } from "./transformers/mistral-ai-to-openai";
export { mistralTextToMistralChat } from "./transformers/mistral-text-to-mistral-chat";
export { passthroughToOpenAI } from "./transformers/passthrough-to-openai";
export { mergeEventsForOpenAIChat } from "./aggregators/openai-chat";
export { mergeEventsForOpenAIText } from "./aggregators/openai-text";
export { mergeEventsForAnthropic } from "./aggregators/anthropic";
export { mergeEventsForAnthropicText } from "./aggregators/anthropic-text";
export { mergeEventsForAnthropicChat } from "./aggregators/anthropic-chat";
export { mergeEventsForMistralChat } from "./aggregators/mistral-chat";
export { mergeEventsForMistralText } from "./aggregators/mistral-text";
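A minimal transformer sketch exercising the new generic signature. The position bookkeeping and SSE payload parsing shown here are assumptions about the contract, not taken from a real transformer:

import {
  OpenAIChatCompletionStreamEvent,
  StreamingCompletionTransformer,
} from "./index"; // path assumed

type CountState = { eventsSeen: number };

const countingPassthrough: StreamingCompletionTransformer<
  OpenAIChatCompletionStreamEvent,
  CountState
> = ({ data, lastPosition, state }) => {
  // Strip the SSE "data:" prefix and skip empty/terminal payloads.
  const payload = data.replace(/^data: ?/, "").trim();
  if (!payload || payload === "[DONE]") {
    return { position: lastPosition + data.length, state };
  }
  return {
    position: lastPosition + data.length,
    event: JSON.parse(payload) as OpenAIChatCompletionStreamEvent,
    state: { eventsSeen: (state?.eventsSeen ?? 0) + 1 },
  };
};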
