317 Commits

Author SHA1 Message Date
penurin 2fbe0bff0d Check for o1 instead of o1-preview for the o1 family 2024-12-27 17:17:39 +00:00
nai-degen 36e2430a8f adjusts gemini keychecker to trigger real generation for better rate limit detection 2024-12-07 01:31:10 -06:00
nai-degen 28447d0811 resolves server-side error when a Gemini prompt is blocked due to safety 2024-12-07 00:54:13 -06:00
nai-degen 6d54cbc785 maybe handles gemini daily key block idk 2024-12-05 15:06:29 -06:00
nai-degen 9d7a4f4b51 maybe fixes gemini's fucked error messages idk 2024-12-05 14:54:35 -06:00
nai-degen 3496a2a9bd fixes incorrect 3.5 sonnet v2 model id in gcp model reassignment 2024-12-03 19:53:56 -06:00
nai-degen 5072638ec2 attempt at fixing persistent 'invalid csrf token' error on some browsers 2024-12-03 19:49:47 -06:00
Nopm 8a325a1e0b Add 2 million Google AI context (khanon/oai-reverse-proxy!82) 2024-12-04 01:27:21 +00:00
khanon 5eeb2875b4 adds haiku 3.5 2024-11-04 18:20:19 +00:00
khanon c67dad1617 fixes AWS claude v2 2024-10-25 18:10:26 +00:00
khanon fe61745e24 fixes issue with AWS model assignment when requesting legacy claude (claude-2.1, etc) 2024-10-25 17:49:24 +00:00
nai-degen 251ea6d412 fixes typo in AWS ThrottlingException 2024-10-23 15:36:21 -05:00
nai-degen 55f7337ea4 adjusts AWS keychecker to treat rate limited models as available models 2024-10-23 15:35:32 -05:00
nai-degen f3b876887e fixes issue with AWS model name reassignment 2024-10-23 13:59:10 -05:00
nai-degen 49c578f4dc adds Sonnet 3.5v2 AWS model ID and adjusts AWS model assignment to raise error on no match 2024-10-23 13:39:34 -05:00
khanon 4190d5fef6 fixes missing comma....... 2024-10-22 16:06:39 +00:00
khanon 1644e82f25 adds Sonnet 20241022 snapshot IDs 2024-10-22 15:52:52 +00:00
nai-degen 0bbdc0b841 fixes google ai language filter and updates readme 2024-10-18 23:15:02 -05:00
nai-degen c4a633a5d6 fixes gcp oauth2 token refresh not updating cloned key instance 2024-10-18 22:41:24 -05:00
nai-degen 0c6ec3254f finally DOES something about broken GCP streaming, hopefully 2024-10-12 20:10:59 -05:00
nai-degen 13aa55cd3d handles gemini ai test message from sillytavern 2024-10-12 09:01:08 -05:00
nai-degen ba4532b38d more fixes for annoying gemini API design that allows arrays or single objects for contents parts 2024-10-09 17:11:53 -05:00
nai-degen b57627e69b adds stripHeaders to global mutators in createQueuedProxyMiddleware 2024-10-09 16:59:12 -05:00
nai-degen 536803853a uses removeHeader instead of setHeader to empty string 2024-10-09 16:44:53 -05:00
nai-degen ad0a3c0936 removes cors/sec-fetch headers to fix venus chub with anthropic api 2024-10-09 16:33:54 -05:00
nai-degen 161f5aba3e handles sillytavern using both camel and snake-cased parameters for gemini api 2024-10-06 11:03:48 -05:00
nai-degen 514d1b7e31 fixes azure 2024-10-01 16:15:04 -05:00
nai-degen 22d7f966c6 fixes for gemini api streaming 2024-09-29 12:44:18 -05:00
nai-degen cfb6353c65 updates google ai safety settings schema 2024-09-25 21:19:17 -05:00
nai-degen a7fed3136e fixes google ai gemini 2024-09-25 15:58:52 -05:00
nai-degen 29638cf26e minor cleanup to pow challenge 2024-09-22 11:28:33 -05:00
nai-degen ee26e7be65 various improvements and fixes to PoW challenge UI and token refresh 2024-09-22 11:11:30 -05:00
nai-degen ff0d3dfdcd prevents overwriting anthropic-version header if it's already provided 2024-09-19 00:55:17 -05:00
nai-degen 81a3ae1746 maybe fixes missing anthropic version header in some cases 2024-09-19 00:50:17 -05:00
nai-degen 4dfd57fcb4 updates render dockerfile to correctly copy patches dir into build context 2024-09-16 23:39:43 -05:00
khanon d21e274358 Add configurable network interface or SOCKS/HTTP proxy for outgoing requests (khanon/oai-reverse-proxy!80) 2024-09-16 15:17:57 +00:00
nai-degen 6e97e036b2 fixes refreshed PoW tokens not actually being reactivated 2024-09-15 18:01:23 -05:00
nai-degen 7a4a16dd2f fixes chatgpt-latest missing from models endpoint 2024-09-15 06:02:35 -05:00
nai-degen f1cfa644c5 maybe fixes openai sk-svcacct keys 2024-09-13 00:55:29 -05:00
nai-degen 6a908b09cb adds preliminary openai o1 support and some improvements to openai keychecker 2024-09-12 23:03:33 -05:00
nai-degen 86772ab32a adds 503 as a 'successful' AWS keychecker response to deal with temporary outages 2024-09-11 02:42:59 -05:00
honeytree bd87ca60f7 Implement priority queue by tokens (khanon/oai-reverse-proxy!79) 2024-09-09 16:48:46 +00:00
nai-degen ac1897fd17 returns more clear proxy_note hint on AWS 503 error 2024-09-09 09:56:18 -05:00
nai-degen 2a6f85e2e2 Revert "handles AWS HTTP 503 ServiceUnavailableException similarly to 429s" (reverts commit ffcaa23511) 2024-09-09 09:43:59 -05:00
nai-degen ffcaa23511 handles AWS HTTP 503 ServiceUnavailableException similarly to 429s 2024-09-09 08:07:53 -05:00
nai-degen 1d5b8efa23 reduces key lockout period to more quickly drain queue after AWS rate limit resolves 2024-09-08 17:17:22 -05:00
nai-degen 905273abf2 fixes aws mistral token cost estimation 2024-09-08 17:15:59 -05:00
nai-degen ac92a19946 improves reliability of inference profile detection for AWS keychecker 2024-09-07 17:36:29 -05:00
khanon 96fe974ad0 Use AWS Inference Profiles for higher rate limits (khanon/oai-reverse-proxy!78) 2024-09-01 22:55:07 +00:00
nai-degen 578615fbd2 fixes typo in new Claude system prompt schema 2024-08-30 10:23:57 -05:00
nai-degen 5dc4050e52 disable periodic GCP key rechecks to workaround keychecker bug 2024-08-29 15:25:37 -05:00
nai-degen cf615ee62c applies prettier to GCP checker 2024-08-29 15:15:56 -05:00
nai-degen ee61f9be2b removes unnecessary log from last commit 2024-08-27 23:58:32 -05:00
nai-degen 0c448cb59d fixes azure dalle using wrong rate limit and out-of-spec Retry-After header 2024-08-27 23:53:28 -05:00
nai-degen 51a9ccceb2 supports alternate claude system prompt format 2024-08-27 23:27:20 -05:00
nai-degen ce490efd7d minor adjustments to HMAC signing 2024-08-22 19:54:02 -05:00
nai-degen 5000e59a61 fix for google makersuite prompt validation/transformation 2024-08-22 14:19:48 -05:00
nai-degen d54acad6ad adds support for sonnet 8192 output tokens on anthropic api 2024-08-15 11:55:13 -05:00
nai-degen 5e1fffe07d adds chatgpt-4o-latest 2024-08-15 11:54:42 -05:00
nai-degen f7fd5f00f2 fixes response_format schema for mistral la plateforme 2024-08-14 14:41:47 -05:00
nai-degen 6d323f6ea1 do not transform mistral chat prompts to text when using la plateforme 2024-08-14 12:26:27 -05:00
nai-degen 2959ed3f7f fixes aws keychecker not detecting claude 2.1 2024-08-14 10:49:02 -05:00
nai-degen b58e7cb830 always applies Mistral prompt fixes on messages input 2024-08-14 10:48:55 -05:00
khanon f531272b00 Refactor AWS service code and add AWS Mistral support (khanon/oai-reverse-proxy!75) 2024-08-14 04:40:41 +00:00
nai-degen 6c45c92ea0 updates dependencies 2024-08-12 19:10:15 -05:00
nai-degen b7cd326d2a handles 'invalid subscription' 403 errors from Mistral API 2024-08-07 14:14:53 -05:00
nai-degen 6c9f302fb9 minor gultra fix 2024-08-06 18:46:49 -05:00
nai-degen 9ab1e7d0ce adds new gpt4o id 2024-08-06 13:08:25 -05:00
nai-degen 81f8dc2613 updates README.md 2024-08-05 11:33:16 -05:00
khanon 0c936e97fe Merge GCP Vertex AI implementation from cg-dot/oai-reverse-proxy (khanon/oai-reverse-proxy!72) 2024-08-05 14:27:51 +00:00
nai-degen 29ed07492e fixes info page display for gemini flash/ultra 2024-08-03 22:18:05 -05:00
nai-degen 2f7315379c adds gemini/makersuite keychecker, native endpoint, and streaming fixes 2024-08-03 21:53:32 -05:00
nai-degen e91532f4f7 handle dead makersuite keys triggering 400 error instead of 401/403 2024-08-03 19:09:50 -05:00
nai-degen ca58770458 fixes issue with PROXY_KEY when used together with proof-of-work captcha 2024-07-29 19:41:57 -05:00
nai-degen 9a3cca6b80 adds new mistral models and updates older model lists/context limits 2024-07-28 13:15:03 -05:00
nai-degen 584bb3fbc7 addresses minor issue with quota refresh UI 2024-07-28 11:54:38 -05:00
nai-degen 2aa19e5b09 adds user-specific overrides for daily quota refresh 2024-07-27 14:25:53 -05:00
nai-degen f242777596 fixes token index used as msg idx in anthropic chat-to-openai SSE transformer 2024-07-07 13:33:33 -05:00
nai-degen edc0d094e2 tries to disable quarantined aws keys 2024-06-30 05:08:27 -05:00
nai-degen 994b30dcce adjusts gemini pro model assignment 2024-06-26 13:37:23 -05:00
nai-degen e3d1ab51d1 improves handling of AWS regions with Sonnet 3.5 enabled but Sonnet 3.0 disabled 2024-06-20 12:20:38 -05:00
nai-degen ff38eda066 improves model detection for AWS Sydney region 2024-06-20 12:19:44 -05:00
nai-degen 84b917f726 fixes AWS Sonnet 3.5 key assignment bug 2024-06-20 12:00:11 -05:00
nai-degen 5871025245 fixes AWS keychecker failure caused by Sonnet 3.5 gradual rollout 2024-06-20 11:24:47 -05:00
nai-degen b4fb97ca5c fixes model id typo 2024-06-20 10:42:48 -05:00
nai-degen eb700d3da6 adds untested claude 3.5 model ids and model assignment 2024-06-20 10:34:48 -05:00
nai-degen d706d4c59d adds USER_CONCURRENCY_LIMIT environment variable 2024-06-14 22:52:16 -05:00
nai-degen 0ea43f61c2 fixes incorrect variable name in .env.example docs 2024-06-09 11:36:20 -05:00
nai-degen ca4321b4cb adjusts openai schema validation to allow null stop sequence 2024-06-07 14:29:18 -05:00
nai-degen 7660ed8b94 allows enabling vision prompts on a per-service basis 2024-06-07 12:09:43 -05:00
nai-degen 55f1bbed3b adds ipv6 mask to default ADMIN_WHITELIST 2024-06-02 20:49:18 -05:00
nai-degen 57fd17ede0 makes it easier for clients to detect proxy errors programmatically 2024-05-27 15:30:28 -05:00
nai-degen 9d00b8a9de adjusts max IP error message wording 2024-05-27 08:24:56 -05:00
nai-degen 155e185c6e fixes shutdown handler fuckup 2024-05-26 15:36:54 -05:00
nai-degen a59b6555e7 redacts de3u api-key from diagnostic logs 2024-05-26 15:13:21 -05:00
scrappyanon 2d82e55d72 Sqlite backend with user event logging (khanon/oai-reverse-proxy!69) 2024-05-26 17:31:12 +00:00
nai-degen 6352df5d5a fixes mixed ipv4-ipv6 handling in cidr module 2024-05-24 02:55:11 -05:00
nai-degen 7d517a4c5f fixes Refresh Token UI incorrectly discarding expired (but refreshable) temp tokens 2024-05-22 22:18:23 -05:00
nai-degen 0418951928 tries to provide better guidance on CSRF errors 2024-05-21 13:10:54 -05:00
nai-degen 3012aa651e adds slightly less-ugly global stylesheet; improves mobile compat 2024-05-21 12:56:25 -05:00
nai-degen 1b68ad7c6f docs update 2024-05-21 12:46:51 -05:00
nai-degen 68b48428de adjusts gatekeeper module to send auth errors as fake chat completions 2024-05-21 12:44:43 -05:00
nai-degen b76db652e0 adds configurable PoW timeout and iteration count 2024-05-21 12:38:41 -05:00
nai-degen 63ab1a7685 reverts debug change that broke info page 2024-05-20 07:47:46 -05:00
nai-degen a3462e21bc adds config setting for PoW verification timeout 2024-05-19 15:17:25 -05:00
nai-degen 8d2ed23522 fixes inverted refreshtoken logic 2024-05-19 12:35:15 -05:00
khanon 205ffa69ce Temporary usertokens via proof-of-work challenge (khanon/oai-reverse-proxy!68) 2024-05-19 16:31:56 +00:00
nai-degen 930bac0072 bumps ejs package version 2024-05-17 21:46:27 -05:00
nai-degen 3ad826851c adds proper GPT4o model family for separate cost/quota tracking 2024-05-14 13:51:19 -05:00
nai-degen 6dabc82bcf adds preliminary gpt4o 2024-05-13 12:43:39 -05:00
nai-degen d3e7ef3c14 prevents leaking headers to upstream API when serving via Tailscale 2024-05-01 11:26:15 -05:00
nai-degen b1062dc9b3 minor adjustments to jsonl log backend to reduce filesize 2024-04-26 15:06:12 -05:00
nai-degen 32b623d6bc partial googleai fixes; adds jsonl file backend for promptlogger stolen from fiz 2024-04-23 03:43:38 -05:00
nai-degen 0a27345c29 upgrades firebase-admin from 11.10.1 to 12.1.0 2024-04-22 12:36:41 -05:00
nai-degen c15f07c0d8 adds OpenAI-to-AWS Claude3 compat endpoint 2024-04-17 21:23:30 -05:00
nai-degen db28e90c51 adds proper Opus model check to aws claude keychecker 2024-04-17 21:09:00 -05:00
nai-degen c0cd2c7549 adds aws opus maybe, idk cannot test 2024-04-16 11:33:44 -05:00
nai-degen 9445110727 adds gpt-4-turbo stable 2024-04-09 16:31:42 -05:00
nai-degen 34a673a80a adds option to disable multimodal prompts 2024-03-23 14:30:14 -05:00
nai-degen 8cb960e174 fixes incorrect model assignment when requesting Haiku from AWS 2024-03-21 23:21:27 -05:00
nai-degen 32fea30c91 handles Anthropic keys which cannot support multimodal requests 2024-03-20 00:03:10 -05:00
nai-degen 3f9fd25004 exempt 'special' token type from context size limits 2024-03-19 11:14:51 -05:00
nai-degen e068edcf48 adds Anthropic key tier detection and trial key display 2024-03-18 15:20:34 -05:00
nai-degen 2098948b7a reduces Anthropic keychecker frequency 2024-03-18 15:19:41 -05:00
nai-degen 7705ee58a0 minor cleanup of error-generator.ts 2024-03-18 15:18:18 -05:00
nai-degen 7c64d9209e minor refactoring of response middleware handlers 2024-03-17 22:20:39 -05:00
nai-degen 59107af3d6 minor fixes for google sheets backend for anthropic-chat 2024-03-17 12:08:11 -05:00
nai-degen 435280fa04 fixes missing system prompt on AWS anthropic-chat schema 2024-03-16 16:00:59 -05:00
nai-degen d9117bf08e fixes AWS debug log 2024-03-14 21:34:07 -05:00
nai-degen 57d9791270 fixes uncounted tokens when Response stream is prematurely closed 2024-03-14 21:32:20 -05:00
nai-degen 367ac3d075 adds ?debug=true query param to have proxy respond with transformed prompt 2024-03-14 08:16:38 -05:00
nai-degen 276a1a1d44 small fix for recurring AWS logging check 2024-03-13 20:53:21 -05:00
nai-degen 6cf029112e adds Anthropic's SOTA Haiku model; misc code cleanup 2024-03-13 20:48:05 -05:00
nai-degen 4b86802eb2 adds separate model detection for gpt-4-32k-0314 2024-03-10 19:16:11 -05:00
nai-degen 7f431de98e sets cache-control on static user images 2024-03-10 15:50:40 -05:00
nai-degen e0bf10626e removes .reverse() from image history to avoid thumbnails shifting as users browse 2024-03-10 15:12:20 -05:00
nai-degen eb55f30414 adds input prompt to imagehistory 2024-03-10 15:08:44 -05:00
nai-degen e1fb53b461 pretty-prints dall-e image metadata JSON download 2024-03-10 15:04:44 -05:00
nai-degen 7610369c6d adds dall-e full history page and metadata downloader 2024-03-10 14:53:11 -05:00
nai-degen 37f17ded60 removes OpenAI max_tokens default as that isn't aligned with the real API 2024-03-10 12:32:15 -05:00
nai-degen 96b6ea9568 adds azure-image endpoint to service info; hides unavailable endpoints 2024-03-09 13:25:50 -06:00
nai-degen cec39328a2 adds azure dall-e support 2024-03-09 13:03:50 -06:00
nai-degen cab346787c fixes regression in anthropic text > anthropic chat api translation 2024-03-08 21:16:25 -06:00
nai-degen fab404b232 refactors api transformers and adds oai->anthropic chat api translation 2024-03-08 20:59:19 -06:00
nai-degen 8d84f289b2 fixes issue with mistral-large model family not being detected 2024-03-08 17:07:25 -06:00
nai-degen 9ce10b4f6a shows more helpful errors when users' prefills are invalid during AWS streaming 2024-03-07 13:28:23 -06:00
nai-degen 96756d32f3 fixes handling of DALL-E content_policy_violation errors 2024-03-07 12:56:35 -06:00
nai-degen 1fb3eac154 maybe shows clearer AWS ValidationExceptions when users have bad prefills 2024-03-06 05:12:47 -06:00
nai-degen 8f46bd4397 handles 'this organization is disabled' error from anthropic 2024-03-06 00:42:10 -06:00
nai-degen ddf34685df adds Claude 3 Vision support 2024-03-05 18:34:10 -06:00
nai-degen ea3aae5da6 allows selecting compat model via endpoint name and makes errors less confusing 2024-03-05 05:13:22 -06:00
nai-degen 055d650c5d fixes legacy compat endpoint 2024-03-05 01:38:39 -06:00
nai-degen 2643dfea61 improves aws sonnet key detection and no keys available error messaging 2024-03-05 01:04:08 -06:00
nai-degen 434445797a fixes bad handleCompatibilityRequest middleware fallthrough 2024-03-04 23:53:13 -06:00
nai-degen 03c5c473e1 improves error handling for sillytavern 2024-03-04 22:59:32 -06:00
nai-degen 068e7a834f fixes AWS legacy models for non-streaming requests 2024-03-04 21:22:43 -06:00
nai-degen 736803ad92 enables opus by default 2024-03-04 21:11:32 -06:00
nai-degen 6b22d17c50 fixes claude-opus token usage being attributed to regular claude 2024-03-04 17:03:02 -06:00
nai-degen 51ffca480a adds AWS Claude Chat Completions and Claude 3 Sonnet support 2024-03-04 16:25:06 -06:00
nai-degen 802d847cc6 enables Claude opus by default 2024-03-04 16:21:40 -06:00
nai-degen 90ddcac55b makes claude3 compat model customizable via environment variable 2024-03-04 14:21:55 -06:00
nai-degen 36923686f6 shows claude-opus key count on service info page 2024-03-04 14:12:38 -06:00
nai-degen 1edc93dc72 adds claude-opus model family 2024-03-04 14:08:59 -06:00
nai-degen f6c124c1d3 fixes issue with preamble-required claude keys and anthropic chat 2024-03-04 14:00:25 -06:00
nai-degen 90a053d0e0 detects and removes over-quota claude keys from keypool 2024-03-04 13:42:29 -06:00
khanon db318ec237 Implement Anthropic Chat Completions endpoint and Claude 3 (khanon/oai-reverse-proxy!64) 2024-03-04 19:06:46 +00:00
nai-degen b90abbda88 spoofs response for SillyTavern test messages 2024-02-28 15:57:18 -06:00
nai-degen 93cee1db9b removes claude v1 from AWS keychecker as it has been retired 2024-02-27 15:52:09 -06:00
nai-degen bd15728743 uses explicitly set keyprovider rather than inferring via requested model 2024-02-27 10:56:50 -06:00
nai-degen 627559b729 updates mistral modelids 2024-02-26 23:55:03 -06:00
nai-degen 428e103323 allows customizing the /proxy endpoint prefix 2024-02-26 18:20:34 -06:00
nai-degen fd742fc0cb Merge remote-tracking branch 'origin/main' 2024-02-26 18:12:23 -06:00
nai-degen 5e19e2756a adds mistral-large model family, untested 2024-02-26 18:12:08 -06:00
devvnull d3f7c675e3 add pricing for Azure GPT counterparts and update Claude pricing (khanon/oai-reverse-proxy!65) 2024-02-20 03:53:26 +00:00
nai-degen 59bda40bbc handles google streaming json response format variation 2024-02-19 00:12:09 -06:00
nai-degen 68d829bceb adds Claude over-quota detection 2024-02-17 15:56:22 -06:00
nai-degen 9c03290a3d detects anthropic copyright prefill pozzing 2024-02-16 10:22:45 -06:00
nai-degen 3498584a1f removes forceModel on Google AI endpoint 2024-02-15 11:41:34 -06:00
nai-degen 21d61da62b increases max image payload size for gpt4v 2024-02-12 21:59:48 -06:00
nai-degen 35dc0f4826 fixes 'Premature close' caused by fucked up AWS unmarshaller errors 2024-02-10 14:47:14 -06:00
nai-degen a2ae9f32db handles OpenAI organization check failures due to missing API scopes 2024-02-09 10:10:22 -06:00
devvnull 0ce4582f3b Improve "\n\nHuman" prefix requirement detection for Anthropic (khanon/oai-reverse-proxy!63) 2024-02-08 16:28:11 +00:00
nai-degen bbee056114 fixes Force Key Recheck admin function for azure/aws 2024-02-07 19:54:40 -06:00
nai-degen ecc804887b uses EventStreamMarshaller from AWS SDK to hopefully handle split messages 2024-02-05 19:56:41 -06:00
nai-degen a8fd3c7240 fixes AWS Claude throttlingException handling 2024-02-04 20:48:20 -06:00
nai-degen 40240601f5 refactors SSEStreamAdapter to fix leaking decoder streams 2024-02-04 18:38:06 -06:00
nai-degen 98cea2da02 replaces eventstream lib to (hopefully) fix interrupted AWS streams 2024-02-04 17:18:28 -06:00
nai-degen c88f47d0ed fixes middleware order breaking /proxy endpoint 2024-02-04 16:21:44 -06:00
nai-degen 43106d9c7f tracks Risu userid rather than IP address on usertokens 2024-02-04 14:14:36 -06:00
nai-degen fe429a7610 adds SERVICE_INFO_PASSWORD to gate infopage behind a password 2024-02-04 14:04:46 -06:00
nai-degen 235510e588 fixes incorrect AWS Claude 2.1 max context limit 2024-02-01 20:40:15 -06:00
nai-degen 7eb6eb90ad moves api schema validators from transform-outbound-payload into shared 2024-01-29 19:38:22 -06:00
nai-degen 924db33f7e attempts to auto-convert Mistral prompts for its more strict rules 2024-01-28 17:42:23 -06:00
nai-degen 3f2f30e605 updates gpt4-v tokenizer for previous Risu change 2024-01-27 13:35:46 -06:00
nai-degen c9791acd85 makes gpt4-v input validation less strict to accommodate Risu 2024-01-27 13:24:11 -06:00
nai-degen e871b8ecf1 removes logprobs default value since it breaks gpt-4-vision 2024-01-27 12:19:24 -06:00
nai-degen 37ca98ad30 adds dark mode (infopage only currently) 2024-01-25 16:24:11 -06:00
nai-degen e6dc4475e6 fixes max context size for nu-gpt4-turbo 2024-01-25 14:07:42 -06:00
nai-degen 5e646b1c86 adds gpt-4-0125-preview and gpt-4-turbo-preview alias 2024-01-25 13:27:03 -06:00
nai-degen 6f626e623e fixes OAI trial keys bricking the dall-e queue 2024-01-25 01:47:51 -06:00
nai-degen 02a54bf4e3 fixes azure openai logprobs (actually tested this time) 2024-01-25 01:17:18 -06:00
nai-degen 79b2e5b6fd adds very basic support for OpenAI function calling 2024-01-24 16:42:26 -06:00
nai-degen 935a633325 fixes typo in Azure logprob adjustment 2024-01-24 16:03:47 -06:00
nai-degen 4a4b60ebcd handles Azure deviation from OpenAI spec on logprobs param 2024-01-24 16:01:19 -06:00
nai-degen ad465be363 fixes logprobs schema validation for turbo instruct endpoint 2024-01-24 14:31:10 -06:00
nai-degen c7a351baa8 adds support for requesting logprobs from OpenAI Chat Completions API 2024-01-24 11:46:09 -06:00
nai-degen ba8b052b17 adds bindAddress to omitted config keys 2024-01-18 04:14:15 -06:00
nai-degen e813cd9d22 default claude 2.1 instead of 1.3 in openai compat endpoint since 1.3 is not accessible on all keys 2024-01-18 04:14:15 -06:00
nai-degen 4c2a2c1e6c improves handle-streamed-response comments/docs [skip-ci] 2024-01-18 04:14:15 -06:00
nai-degen f1d927fa62 updates README with building/forking info [skip-ci] 2024-01-15 11:46:09 -06:00
nai-degen ad6e5224e3 allows binding to loopback interface via app config instead of only docker 2024-01-15 11:32:26 -06:00
nai-degen 85d89bdb9f fixes CI image tagging on main branch 2024-01-15 01:37:50 -06:00
khanon f5e7195cc9 Add Gitlab CI and self-hosting instructions (khanon/oai-reverse-proxy!61) 2024-01-15 06:51:12 +00:00
nai-degen 81f1e2bc37 fixes broken GET models endpoint for openai/mistral 2024-01-14 05:33:24 -06:00
nai-degen c2a686f229 Revert "reduces max request body size for now" (reverts commit 4ffa7fb12b) 2024-01-13 18:12:16 -06:00
twinkletoes 96a0f94041 Fix Mistral safe_prompt schema property (khanon/oai-reverse-proxy!60) 2024-01-14 00:11:39 +00:00
nai-degen d56043616e adds keychecker workaround for OpenAI API bug falsely returning gpt4-32k 2024-01-12 10:33:48 -06:00
nai-degen e3e06b065d fixes sourcemap dependency in package.json 2024-01-09 00:32:34 -06:00
nai-degen 1bbb515200 updates static service info 2024-01-08 23:32:25 -06:00
nai-degen a57cc4e8d4 updates dotenv 2024-01-08 23:25:02 -06:00
nai-degen 2239bead2c updates README.md 2024-01-08 19:36:35 -06:00
nai-degen 1a585ddd32 adds TRUSTED_PROXIES to .env.example 2024-01-08 16:41:30 -06:00
nai-degen be731691a1 allows configurable trust proxy setting for Render deployments 2024-01-08 16:39:28 -06:00
nai-degen c2e442e030 long overdue removal of tired in-joke 2024-01-08 11:01:44 -06:00
nai-degen d3ac3b362b trusts only one proxy hop (AWS WAF in huggingface's case) 2024-01-07 19:18:01 -06:00
nai-degen 7b0892ddae fixes unawaited call to async enqueue 2024-01-07 16:23:53 -06:00
nai-degen 7f92565739 SSE queueing adjustments, untested 2024-01-07 16:19:22 -06:00
nai-degen 936d3c0721 corrects nodejs max heap memory config 2024-01-07 16:16:27 -06:00
nai-degen 4ffa7fb12b reduces max request body size for now 2024-01-07 13:03:24 -06:00
nai-degen 8dc7464381 strips extraneous properties on zod schemas 2024-01-07 13:00:48 -06:00
nai-degen d2cd24bfd2 suggest larger nodejs max heap 2024-01-07 12:58:50 -06:00
twinkletoes e33f778192 Change mistral-medium friendly name (khanon/oai-reverse-proxy!59) 2023-12-26 00:27:17 +00:00
twinkletoes 4a823b216f Mistral AI support (khanon/oai-reverse-proxy!58) 2023-12-25 18:33:16 +00:00
nai-degen 01e76cbb1c restores accidentally deleted line breaking infopage stats 2023-12-17 00:25:58 -06:00
nai-degen 655703e680 refactors infopage 2023-12-16 20:30:20 -06:00
nai-degen 3be2687793 tries to detect Azure GPT4-Turbo deployments more reliably 2023-12-15 12:14:23 -06:00
nai-degen 5599a83ae4 improves streaming error handling 2023-12-14 05:01:10 -06:00
nai-degen de34d41918 fixes gemini name prefixing when 'Add character names' is disabled in ST 2023-12-13 23:21:30 -06:00
nai-degen c5cd90dcef adjusts prompt transform to discourage Gemini from speaking for user 2023-12-13 23:03:57 -06:00
nai-degen 8a135a960d fixes gemini prompt reformatting for jbs; adds stop sequences 2023-12-13 21:45:53 -06:00
nai-degen 707cbbce16 fixes gemini throwing an error on JB prompts 2023-12-13 19:14:31 -06:00
khanon fad16cc268 Add Google AI API (khanon/oai-reverse-proxy!57) 2023-12-13 21:56:07 +00:00
nai-degen 0d3682197c treats 403 from anthropic as key dead 2023-12-11 09:13:53 -06:00
valadaptive e0624e30fd Fix some corner cases in SSE parsing (khanon/oai-reverse-proxy!56) 2023-12-09 06:18:01 +00:00
nai-degen 94d4efe9bb properly enforce allowedModelFamilies; refactor HPM proxyReq handlers 2023-12-05 22:07:56 -06:00
random-username-423 12276a1f59 Fix AWS Claude Model Reassigning (khanon/oai-reverse-proxy!55) 2023-12-06 03:21:27 +00:00
nai-degen fdd824f0e4 adds azure rate limit auto-retry 2023-12-04 01:24:33 -06:00
khanon fbdea30264 Azure OpenAI support (khanon/oai-reverse-proxy!48) 2023-12-04 04:21:18 +00:00
nai-degen cd1b9d0e0c don't print google api key to container logs on error 2023-12-01 11:23:56 -06:00
nai-degen 9e61d9029f adds claude-2.1 (untested) 2023-11-21 11:32:43 -06:00
nai-degen f95e24afbb fixes incorrect max model size for gpt4-v 2023-11-19 02:23:41 -06:00
khanon f29049f993 Support for GPT-4-Vision (khanon/oai-reverse-proxy!54) 2023-11-19 05:06:21 +00:00
nai-degen 7f2f324e26 fixes render dockerfile and dalle3 model detection 2023-11-18 12:27:14 -06:00
nai-degen dc61291933 adds temporary keychecker var to treat dall-e-2 the same as dall-e-3 2023-11-17 20:24:36 -06:00
nai-degen 6c02e9b265 don't enqueue requests which fail stream check 2023-11-17 14:36:47 -06:00
nai-degen e018672968 re-adds keychecker info to STATIC_INFO_PAGE 2023-11-16 02:16:24 -06:00
nai-degen bfd7e23124 encodes queue payload 2023-11-16 01:19:01 -06:00
khanon 6aa6bebf08 Scale SSE heartbeat size with traffic (khanon/oai-reverse-proxy!53) 2023-11-16 05:45:35 +00:00
nai-degen 6acdf35914 removes length from stalled request error message 2023-11-15 17:18:51 -06:00
nai-degen 3de79873e9 adds STATIC_SERVICE_INFO config 2023-11-15 17:12:07 -06:00
nai-degen 3aca9e90f0 fixes rate limiter always using IMAGE_MODEL_RATE_LIMIT 2023-11-15 13:07:58 -06:00
nai-degen 5fabe1d1f8 uses exponential moving average for wait time calculation 2023-11-14 01:36:11 -06:00
nai-degen 4a68c14477 further increases OpenAI rate limit backoff 2023-11-14 01:28:28 -06:00
khanon 20c064394a OpenAI DALL-E Image Generation (khanon/oai-reverse-proxy!52) 2023-11-14 05:41:19 +00:00
nai-degen 3ea23760c3 adjusts prompt logging to truncate huge prompts from the end 2023-11-11 20:14:32 -06:00
nai-degen 5db07404f2 fixes infopage crash when check_keys is disabled 2023-11-10 22:41:57 -06:00
nai-degen c453a5f2ad logs usertoken lookup attempts 2023-11-10 22:41:36 -06:00
nai-degen c7a095d345 removes debug log 2023-11-09 16:25:57 -06:00
nai-degen e9110611fa adds REJECT_PHRASES configuration setting 2023-11-09 16:24:49 -06:00
nai-degen 79e1fe09e4 fixes multiple enumeration on infopage 2023-11-08 12:02:23 -06:00
dllt98 08b2196bfb Update .env.example to include MAX_CONTEXT_TOKENS_OPENAI (khanon/oai-reverse-proxy!50) 2023-11-08 02:50:19 +00:00
nai-degen 350d6542cf fixes stats for non-openai models 2023-11-06 22:41:48 -06:00
nai-degen c9c24f86bb improvements to infopage key categorization 2023-11-06 22:13:34 -06:00
nai-degen b6f8f15a1f tries to prevent per-day rate limited keys from bricking the queue 2023-11-06 21:16:36 -06:00
nai-degen 5467136c1a adds gpt4-turbo to userschema; updates docs 2023-11-06 16:35:35 -06:00
nai-degen 0d5dfeccf8 adds gpt4-turbo model family and support for gpt-4-1106-preview model 2023-11-06 15:29:43 -06:00
nai-degen b615ffa433 fixes issue with local development cookies 2023-11-06 10:28:27 -06:00
nai-degen a27163a629 adds option to not disable keys when reaching IP limit 2023-11-06 10:15:57 -06:00
nai-degen 5a8fb3aff6 adds USE_INSECURE_COOKIES for hosts without SSL support 2023-11-03 15:25:06 -05:00
nai-degen 51dd0c71ba removes unused import in openai proxy 2023-10-24 13:17:46 -05:00
nai-degen 89e1ed46d5 re-signs AWS requests on every attempt to fix fucked up queueing 2023-10-24 13:10:50 -05:00
nai-degen 26dc79c8f1 fixes broken AWS rate limit backoff 2023-10-24 09:19:46 -05:00
nai-degen 89e9b67f3f fixes AWS mid-stream rate limits not actually marking key as rate-limited 2023-10-23 22:47:29 -05:00
nai-degen 52ec2ec265 fixes blank AWS responses due to reqs sometimes using wrong handler 2023-10-23 22:23:06 -05:00
nai-degen 8bd2f749c1 reduces logging severity of prompt validation errors 2023-10-23 20:30:27 -05:00
khanon ff27ca3780 Update info-page.ts 2023-10-20 00:33:57 +00:00
nai-degen 41a463d2c8 possibly fix issue with AWS keychecker due to amazon API change 2023-10-16 12:17:02 -05:00
nai-degen 3f7e50f87e follow-up 'fixes empty AWS streaming responses when under heavy load' 2023-10-15 00:06:38 -05:00
nai-degen f6cfc6e882 fixes empty AWS streaming responses when under heavy load 2023-10-15 00:05:36 -05:00
nai-degen af4d8dae40 changes default AMZ_HOST to bedrock-runtime.region.amazonaws.com 2023-10-12 15:39:06 -05:00
nai-degen 725fd6e6f1 deprioritizes queued Agnai.chat requests and limits concurrency to five across all shared IPs 2023-10-09 12:36:54 -05:00
nai-degen c87484f1ff adds AWS console screenshot to docs 2023-10-07 21:33:53 -05:00
nai-degen 15a2cb5a26 another docs correction 2023-10-07 21:10:18 -05:00
nai-degen c8182cea17 docs correction 2023-10-07 21:08:40 -05:00
nai-degen b06d48e1f8 adds better AWS docs 2023-10-07 20:58:04 -05:00
khanon 140bdea14e Implement AWS KeyChecker and auto-disable AWS logged keys (khanon/oai-reverse-proxy!47) 2023-10-08 01:17:09 +00:00
nai-degen 12f78fa1f2 exempts 'special' role from rate limiting 2023-10-06 20:29:28 -05:00
nai-degen daf6a123d5 adjusts Agnai.chat and RisuAI rate limiting 2023-10-04 09:39:59 -05:00
nai-degen 4e05b01e90 improves AWS .env.example and config.ts docs 2023-10-03 20:29:49 -05:00
nai-degen 5033d00444 improves clarity of errors sent back to streaming clients 2023-10-03 19:45:15 -05:00
nai-degen ba0b20617e ensures AWS always uses anthropic-version 2023-06-01 parser 2023-10-03 19:43:30 -05:00
nai-degen 4a5fd91da3 address npm audit; adds zod-error package 2023-10-03 19:05:46 -05:00
khanon ecf897e685 Refactor handleStreamingResponse to make it less shit (khanon/oai-reverse-proxy!46) 2023-10-03 06:14:19 +00:00
nai-degen 6a3d753f0d fixes anthropic keychecker for some keys 2023-10-02 20:32:07 -05:00
khanon 0bf2f5c123 fixes typo in .env.example 2023-10-02 20:39:30 +00:00
nai-degen ede274c117 disables AWS key on AccessDeniedException 2023-10-02 11:18:08 -05:00
nai-degen d2267beb18 adds aws-claude token cost 2023-10-02 09:43:26 -05:00
nai-degen 0837c89a42 fixes incorrect context size limit for aws claude v1 2023-10-02 03:53:04 -05:00
nai-degen f67560a17b refactors proxy routing 2023-10-01 12:12:28 -05:00
nai-degen e13361a323 removes dead koboldai code 2023-10-01 11:27:11 -05:00
khanon fa4bf468d2 Implement AWS Bedrock support (khanon/oai-reverse-proxy!45) 2023-10-01 01:40:18 +00:00
nai-degen 7e681a7bef strips OAI request parameters when translating to Claude format 2023-09-29 03:01:39 -05:00
nai-degen 1b0106a1ea strips reverse proxy originating IP headers 2023-09-29 03:00:55 -05:00
nai-degen f5521aa6c3 prevents selecting trial keys for embeddings requests due to rate limits 2023-09-26 01:26:07 -05:00
nai-degen f8b480f4c2 adds support for proxying text-embedding-ada-002 requests 2023-09-26 00:58:38 -05:00
khanon 1f35fe1ae1 updates huggingface docs to clarify gatekeeper 2023-09-24 11:00:25 +00:00
khanon 35b44e1c6b fixes issue with OpenAIV1ChatCompletionSchema and PaLM compat 2023-09-24 10:48:56 +00:00
208 changed files with 21748 additions and 5142 deletions
+122 -28
@@ -5,93 +5,187 @@
# All values have reasonable defaults, so you only need to change the ones you
# want to override.
# Use production mode unless you are developing locally.
NODE_ENV=production
# Detail level of diagnostic logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info
# ------------------------------------------------------------------------------
# General settings:
# The title displayed on the info page.
# SERVER_TITLE=Coom Tunnel
# Model requests allowed per minute per user.
# MODEL_RATE_LIMIT=4
# The route name used to proxy requests to APIs, relative to the Web site root.
# PROXY_ENDPOINT_ROUTE=/proxy
# Text model requests allowed per minute per user.
# TEXT_MODEL_RATE_LIMIT=4
# Image model requests allowed per minute per user.
# IMAGE_MODEL_RATE_LIMIT=2
# Max number of context tokens a user can request at once.
# Increase this if your proxy allows GPT 32k or 128k context models.
# MAX_CONTEXT_TOKENS_OPENAI=32768
# MAX_CONTEXT_TOKENS_ANTHROPIC=32768
# Max number of output tokens a user can request at once.
# MAX_OUTPUT_TOKENS_OPENAI=300
# MAX_OUTPUT_TOKENS_ANTHROPIC=400
# MAX_OUTPUT_TOKENS_OPENAI=1024
# MAX_OUTPUT_TOKENS_ANTHROPIC=1024
# Whether to show the estimated cost of consumed tokens on the info page.
# SHOW_TOKEN_COSTS=false
# Whether to automatically check API keys for validity.
# Note: CHECK_KEYS is disabled by default in local development mode, but enabled
# by default in production mode.
# Disabled by default in local development mode, but enabled in production.
# CHECK_KEYS=true
# Which model types users are allowed to access.
# ALLOWED_MODEL_FAMILIES=claude,turbo,gpt4,gpt4-32k
# The following model families are recognized:
# turbo | gpt4 | gpt4-32k | gpt4-turbo | gpt4o | o1 | dall-e | claude
# | claude-opus | gemini-flash | gemini-pro | gemini-ultra | mistral-tiny |
# | mistral-small | mistral-medium | mistral-large | aws-claude |
# | aws-claude-opus | gcp-claude | gcp-claude-opus | azure-turbo | azure-gpt4
# | azure-gpt4-32k | azure-gpt4-turbo | azure-gpt4o | azure-o1 | azure-dall-e
# By default, all models are allowed except for dall-e and o1.
# To allow DALL-E image generation, uncomment the line below and add 'dall-e' or
# 'azure-dall-e' to the list of allowed model families.
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,gpt4o,claude,claude-opus,gemini-flash,gemini-pro,gemini-ultra,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,gcp-claude,gcp-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o
# Which services can be used to process prompts containing images via multimodal
# models. The following services are recognized:
# openai | anthropic | aws | gcp | azure | google-ai | mistral-ai
# Do not enable this feature unless all users are trusted, as you will be liable
# for any user-submitted images containing illegal content.
# By default, no image services are allowed and image prompts are rejected.
# ALLOWED_VISION_SERVICES=
# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
# PROMPT_LOGGING=false
# Specifies the number of proxies or load balancers in front of the server.
# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
# For any other deployments, please see config.ts as the correct configuration
# depends on your setup. Misconfiguring this value can result in problems
# accurately tracking IP addresses and enforcing rate limits.
# TRUSTED_PROXIES=1
# Whether cookies should be set without the Secure flag, for hosts that don't
# support SSL. True by default in development, false in production.
# USE_INSECURE_COOKIES=false
# Reorganizes requests in the queue according to their token count, placing
# larger prompts further back. The penalty is determined by (promptTokens *
# TOKENS_PUNISHMENT_FACTOR). A value of 1.0 adds one second per 1000 tokens.
# When there is no queue or it is very short, the effect is negligible (this
# setting only reorders the queue, it does not artificially delay requests).
# TOKENS_PUNISHMENT_FACTOR=0.0
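# For example, with TOKENS_PUNISHMENT_FACTOR=1.0, a 4000-token prompt is
# ordered in the queue as if it had arrived four seconds later than it did.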
# Captcha verification settings. Refer to docs/pow-captcha.md for guidance.
# CAPTCHA_MODE=none
# POW_TOKEN_HOURS=24
# POW_TOKEN_MAX_IPS=2
# POW_DIFFICULTY_LEVEL=low
# POW_CHALLENGE_TIMEOUT=30
# -------------------------------------------------------------------------------
# Blocking settings:
# Allows blocking requests depending on content, referers, or IP addresses.
# This is a convenience feature; if you need more robust functionality it is
# highly recommended to put this application behind nginx or Cloudflare, as they
# will have better performance.
# IP addresses or CIDR blocks from which requests will be blocked.
# IP_BLACKLIST=10.0.0.1/24
# URLs from which requests will be blocked.
# BLOCKED_ORIGINS=reddit.com,9gag.com
# Message to show when requests are blocked.
# BLOCK_MESSAGE="You must be over the age of majority in your country to use this service."
# Destination to redirect blocked requests to.
# BLOCK_REDIRECT="https://roblox.com/"
# Whether to reject requests containing disallowed content.
# REJECT_DISALLOWED=false
# Comma-separated list of phrases that will be rejected. Surround phrases with
# quotes if they contain commas. You can use regular expression tokens.
# Avoid overly broad phrases, as they will trigger on any match in the entire prompt.
# REJECT_PHRASES="phrase one,phrase two,"phrase three, which has a comma",phrase four"
# Message to show when requests are rejected.
# REJECT_MESSAGE="This content violates /aicg/'s acceptable use policy."
# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
# PROMPT_LOGGING=false
# The port to listen on.
# PORT=7860
# Detail level of logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info
# REJECT_MESSAGE="You can't say that here."
# ------------------------------------------------------------------------------
# Optional settings for user management, access control, and quota enforcement:
# See `docs/user-management.md` for more information and setup instructions.
# See `docs/user-quotas.md` to learn how to set up quotas.
# Which access control method to use. (none | proxy_token | user_token)
# Which access control method to use. (none | proxy_key | user_token)
# GATEKEEPER=none
# Which persistence method to use. (memory | firebase_rtdb)
# GATEKEEPER_STORE=memory
# Maximum number of unique IPs a user can connect from. (0 for unlimited)
# MAX_IPS_PER_USER=0
# Whether user_tokens should be automatically disabled when reaching the IP limit.
# MAX_IPS_AUTO_BAN=true
# With user_token gatekeeper, whether to allow users to change their nickname.
# ALLOW_NICKNAME_CHANGES=true
# Default token quotas for each model family. (0 for unlimited)
# TOKEN_QUOTA_TURBO=0
# TOKEN_QUOTA_GPT4=0
# TOKEN_QUOTA_GPT4_32K=0
# TOKEN_QUOTA_CLAUDE=0
# Specify as TOKEN_QUOTA_MODEL_FAMILY=value (replacing dashes with underscores).
# eg. TOKEN_QUOTA_TURBO=0, TOKEN_QUOTA_GPT4=1000000, TOKEN_QUOTA_GPT4_32K=100000
# "Tokens" for image-generation models are counted at a rate of 100000 tokens
# per US$1.00 generated, which is similar to the cost of GPT-4 Turbo.
# DALL-E 3 costs around US$0.10 per image (10000 tokens).
# See `docs/dall-e-configuration.md` for more information.
# TOKEN_QUOTA_DALL_E=0
# How often to refresh token quotas. (hourly | daily)
# Leave unset to never automatically refresh quotas.
# QUOTA_REFRESH_PERIOD=daily
# -------------------------------------------------------------------------------
# HTTP agent settings:
# If you need to change how the proxy makes requests to other servers, such
# as when checking keys or forwarding users' requests to external services,
# you can configure an alternative HTTP agent. Otherwise the default OS settings
# will be used.
# The name of the network interface to use. The first external IPv4 address
# belonging to this interface will be used for outgoing requests.
# HTTP_AGENT_INTERFACE=enp0s3
# The URL of a proxy server to use. Supports SOCKS4, SOCKS5, HTTP, and HTTPS.
# Note that if your proxy server issues a self-signed certificate, you may need
# to set NODE_EXTRA_CA_CERTS to the path of your certificate. That variable must
# be set in your environment, not in this file.
# HTTP_AGENT_PROXY_URL=http://test:test@127.0.0.1:8000
# ------------------------------------------------------------------------------
# Secrets and keys:
# Do not put any passwords or API keys directly in this file.
# For Huggingface, set them via the Secrets section in your Space's config UI.
# For Huggingface, set them via the Secrets section in your Space's config UI. Do not set them in .env.
# For Render, create a "secret file" called .env using the Environment tab.
# You can add multiple API keys by separating them with a comma.
# For AWS credentials, separate the access key ID, secret key, and region with a colon.
# For GCP credentials, separate the project ID, client email, region, and private key with a colon.
OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
ANTHROPIC_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
GOOGLE_AI_KEY=AIzaxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# See `docs/aws-configuration.md` for more information; there may be additional steps required to set up AWS.
AWS_CREDENTIALS=myaccesskeyid:mysecretkey:us-east-1,anotheraccesskeyid:anothersecretkey:us-west-2
# See `docs/azure-configuration.md` for more information; there may be additional steps required to set up Azure.
AZURE_CREDENTIALS=azure-resource-name:deployment-id:api-key,another-azure-resource-name:another-deployment-id:another-api-key
GCP_CREDENTIALS=project-id:client-email:region:private-key
# With proxy_key gatekeeper, the password users must provide to access the API.
# PROXY_KEY=your-secret-key
# With user_token gatekeeper, the admin password used to manage users.
# ADMIN_KEY=your-very-secret-key
# Restrict access to the admin interface to specific IP addresses, specified
# as a comma-separated list of CIDR ranges.
# ADMIN_WHITELIST=0.0.0.0/0
# With firebase_rtdb gatekeeper storage, the Firebase project credentials.
# FIREBASE_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+6 -1
@@ -1,6 +1,11 @@
.env
.aider*
.env*
!.env.vault
.venv
.vscode
.idea
build
greeting.md
node_modules
http-client.private.env.json
+5 -5
@@ -1,13 +1,13 @@
{
"plugins": ["prettier-plugin-ejs"],
"overrides": [
{
"files": [
"*.ejs"
],
"files": "*.ejs",
"options": {
"printWidth": 160,
"printWidth": 120,
"bracketSameLine": true
}
}
]
],
"trailingComma": "es5"
}
+61 -31
@@ -1,42 +1,72 @@
# OAI Reverse Proxy
Reverse proxy server for the OpenAI and Anthropic APIs. Forwards text generation requests while rejecting administrative/billing requests. Includes optional rate limiting and prompt filtering to prevent abuse.
Reverse proxy server for various LLM APIs.
### Table of Contents
- [What is this?](#what-is-this)
- [Why?](#why)
- [Usage Instructions](#setup-instructions)
- [Deploy to Huggingface (Recommended)](#deploy-to-huggingface-recommended)
- [Deploy to Repl.it (WIP)](#deploy-to-replit-wip)
- [Local Development](#local-development)
<!-- TOC -->
* [OAI Reverse Proxy](#oai-reverse-proxy)
* [Table of Contents](#table-of-contents)
* [What is this?](#what-is-this)
* [Features](#features)
* [Usage Instructions](#usage-instructions)
* [Personal Use (single-user)](#personal-use-single-user)
* [Updating](#updating)
* [Local Development](#local-development)
* [Self-hosting](#self-hosting)
* [Building](#building)
* [Forking](#forking)
<!-- TOC -->
## What is this?
If you would like to provide a friend access to an API via keys you own, you can use this to keep your keys safe while still allowing them to generate text with the API. You can also use this if you'd like to build a client-side application which uses the OpenAI or Anthropic APIs, but don't want to build your own backend. You should never embed your real API keys in a client-side application. Instead, you can have your frontend connect to this reverse proxy and forward requests to the downstream service.
This project allows you to run a reverse proxy server for various LLM APIs.
This keeps your keys safe and allows you to use the rate limiting and prompt filtering features of the proxy to prevent abuse.
## Why?
OpenAI keys have full account permissions. They can revoke themselves, generate new keys, modify spend quotas, etc. **You absolutely should not share them, post them publicly, nor embed them in client-side applications as they can be easily stolen.**
This proxy only forwards text generation requests to the downstream service and rejects requests which would otherwise modify your account.
---
## Features
- [x] Support for multiple APIs
- [x] [OpenAI](https://openai.com/)
- [x] [Anthropic](https://www.anthropic.com/)
- [x] [AWS Bedrock](https://aws.amazon.com/bedrock/)
- [x] [Vertex AI (GCP)](https://cloud.google.com/vertex-ai/)
- [x] [Google MakerSuite/Gemini API](https://ai.google.dev/)
- [x] [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
- [x] Translation from OpenAI-formatted prompts to any other API, including streaming responses
- [x] Multiple API keys with rotation and rate limit handling
- [x] Basic user management
- [x] Simple role-based permissions
- [x] Per-model token quotas
- [x] Temporary user accounts
- [x] Event audit logging
- [x] Optional full logging of prompts and completions
- [x] Abuse detection and prevention
- [x] IP address and user token model invocation rate limits
- [x] IP blacklists
- [x] Proof-of-work challenge for access by anonymous users
## Usage Instructions
If you'd like to run your own instance of this proxy, you'll need to deploy it somewhere and configure it with your API keys. A few easy options are provided below, though you can also deploy it to any other service you'd like.
If you'd like to run your own instance of this server, you'll need to deploy it somewhere and configure it with your API keys. A few easy options are provided below, though you can also deploy it to any other service you'd like if you know what you're doing and the service supports Node.js.
### Deploy to Huggingface (Recommended)
[See here for instructions on how to deploy to a Huggingface Space.](./docs/deploy-huggingface.md)
### Deploy to Render
[See here for instructions on how to deploy to Render.com.](./docs/deploy-render.md)
## Local Development
To run the proxy locally for development or testing, install Node.js >= 18.0.0 and follow the steps below.
1. Clone the repo
2. Install dependencies with `npm install`
### Personal Use (single-user)
If you just want to run the proxy server to use yourself without hosting it for others:
1. Install [Node.js](https://nodejs.org/en/download/) >= 18.0.0
2. Clone this repository
3. Create a `.env` file in the root of the project and add your API keys. See the [.env.example](./.env.example) file for an example.
4. Start the server in development mode with `npm run start:dev`.
4. Install dependencies with `npm install`
5. Run `npm run build`
6. Run `npm start`
You can also use `npm run start:dev:tsc` to enable project-wide type checking at the cost of slower startup times. `npm run type-check` can be used to run type checking without starting the server.
#### Updating
You must re-run `npm install` and `npm run build` whenever you pull new changes from the repository.
#### Local Development
Use `npm run start:dev` to run the proxy in development mode with watch mode enabled. Use `npm run type-check` to run the type checker across the project.
### Self-hosting
[See here for instructions on how to self-host the application on your own VPS or local machine and expose it to the internet for others to use.](./docs/self-hosting.md)
**Ensure you set the `TRUSTED_PROXIES` environment variable according to your deployment.** Refer to [.env.example](./.env.example) and [config.ts](./src/config.ts) for more information.
## Building
To build the project, run `npm run build`. This will compile the TypeScript code to JavaScript and output it to the `build` directory. You should run this whenever you pull new changes from the repository.
Note that if you are trying to build the server on a very memory-constrained (<= 1GB) VPS, you may need to run the build with `NODE_OPTIONS=--max_old_space_size=2048 npm run build` to avoid running out of memory during the build process, assuming you have swap enabled. The application itself should run fine on a 512MB VPS for most reasonable traffic levels.
## Forking
If you are forking the repository on GitGud, you may wish to disable GitLab CI/CD or you will be spammed with emails about failed builds due to not having any CI runners. You can do this by going to *Settings > General > Visibility, project features, permissions* and then disabling the "CI/CD" feature.
+2
@@ -0,0 +1,2 @@
*
!.gitkeep
+21
@@ -0,0 +1,21 @@
stages:
- build
build_image:
stage: build
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
script:
- |
if [ "$CI_COMMIT_REF_NAME" = "main" ]; then
TAG="latest"
else
TAG=$CI_COMMIT_REF_NAME
fi
- echo "Building image with tag $TAG"
- BASE64_AUTH=$(echo -n "$DOCKER_HUB_USERNAME:$DOCKER_HUB_ACCESS_TOKEN" | base64)
- echo "{\"auths\":{\"https://index.docker.io/v1/\":{\"auth\":\"$BASE64_AUTH\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/docker/ci/Dockerfile --destination docker.io/khanonci/oai-reverse-proxy:$TAG --build-arg CI_COMMIT_REF_NAME=$CI_COMMIT_REF_NAME --build-arg CI_COMMIT_SHA=$CI_COMMIT_SHA --build-arg CI_PROJECT_PATH=$CI_PROJECT_PATH
only:
- main
+22
@@ -0,0 +1,22 @@
FROM node:18-bullseye-slim
WORKDIR /app
COPY . .
RUN npm ci
RUN npm run build
RUN npm prune --production
EXPOSE 7860
ENV PORT=7860
ENV NODE_ENV=production
ARG CI_COMMIT_REF_NAME
ARG CI_COMMIT_SHA
ARG CI_PROJECT_PATH
ENV GITGUD_BRANCH=$CI_COMMIT_REF_NAME
ENV GITGUD_COMMIT=$CI_COMMIT_SHA
ENV GITGUD_PROJECT=$CI_PROJECT_PATH
CMD [ "npm", "start" ]
+17
@@ -0,0 +1,17 @@
# Before running this, create a .env and greeting.md file.
# Refer to .env.example for the required environment variables.
# User-generated content is stored in the data directory.
# When self-hosting, it's recommended to run this behind a reverse proxy like
# nginx or Caddy to handle SSL/TLS and rate limiting. Refer to
# docs/self-hosting.md for more information and an example nginx config.
version: '3.8'
services:
oai-reverse-proxy:
image: khanonci/oai-reverse-proxy:latest
ports:
- "127.0.0.1:7860:7860"
env_file:
- ./.env
volumes:
- ./greeting.md:/app/greeting.md
- ./data:/app/data
+4
@@ -3,9 +3,13 @@ RUN apt-get update && \
apt-get install -y git
RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
WORKDIR /app
RUN chown -R 1000:1000 /app
USER 1000
RUN npm install
COPY Dockerfile greeting.md* .env* ./
RUN npm run build
EXPOSE 7860
ENV NODE_ENV=production
# Huggingface free VMs have 16GB of RAM so we can be greedy
ENV NODE_OPTIONS="--max-old-space-size=12882"
CMD [ "npm", "start" ]
+1 -2
@@ -17,9 +17,8 @@ ARG GREETING_URL
RUN if [ -n "$GREETING_URL" ]; then \
curl -sL "$GREETING_URL" > greeting.md; \
fi
COPY package*.json greeting.md* ./
RUN npm install
COPY . .
RUN npm install
RUN npm run build
RUN --mount=type=secret,id=_env,dst=/etc/secrets/.env cat /etc/secrets/.env >> .env
EXPOSE 10000
Binary file not shown (new image, 4.2 KiB).
+58
@@ -0,0 +1,58 @@
# Configuring the proxy for AWS Bedrock
The proxy supports AWS Bedrock models via the `/proxy/aws/claude` endpoint. There are a few extra steps necessary to use AWS Bedrock compared to the other supported APIs.
- [Setting keys](#setting-keys)
- [Attaching policies](#attaching-policies)
- [Provisioning models](#provisioning-models)
- [Note regarding logging](#note-regarding-logging)
## Setting keys
Use the `AWS_CREDENTIALS` environment variable to set the AWS API keys.
Like other APIs, you can provide multiple keys separated by commas. Each AWS key, however, is a set of credentials including the access key, secret key, and region. These are separated by a colon (`:`).
For example:
```
AWS_CREDENTIALS=AKIA000000000000000:somesecretkey:us-east-1,AKIA111111111111111:anothersecretkey:us-west-2
```
## Attaching policies
Unless your credentials belong to the root account, the principal will need to be granted the following permissions:
- `bedrock:InvokeModel`
- `bedrock:InvokeModelWithResponseStream`
- `bedrock:GetModelInvocationLoggingConfiguration`
- The proxy needs this to determine whether prompt/response logging is enabled. By default, the proxy won't use credentials unless it can conclusively determine that logging is disabled, for privacy reasons.
Use the IAM console or the AWS CLI to attach these policies to the principal associated with the credentials.
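For reference, a minimal identity policy granting these permissions might look like the sketch below (the wildcard `Resource` is illustrative; scope it to your Bedrock model ARNs if you prefer):
```
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "bedrock:InvokeModel",
        "bedrock:InvokeModelWithResponseStream",
        "bedrock:GetModelInvocationLoggingConfiguration"
      ],
      "Resource": "*"
    }
  ]
}
```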
## Provisioning models
AWS does not automatically provide accounts with access to every model. You will need to provision the models you want to use, in the regions you want to use them in. You can do this from the AWS console.
⚠️ **Models are region-specific.** Currently AWS only offers Claude in a small number of regions. Switch to the AWS region you want to use, then go to the models page and request access to **Anthropic / Claude**.
![](./assets/aws-request-model-access.png)
Access is generally granted more or less instantly. Once your account has access, you can enable the model by checking the box next to it.
You can also request Claude Instant, but support for this isn't fully implemented yet.
### Supported model IDs
Users can send these model IDs to the proxy to invoke the corresponding models.
- **Claude**
- `anthropic.claude-v1` (~18k context, claude 1.3 -- EOL 2024-02-28)
- `anthropic.claude-v2` (~100k context, claude 2.0)
- `anthropic.claude-v2:1` (~200k context, claude 2.1)
- **Claude Instant**
- `anthropic.claude-instant-v1` (~100k context, claude instant 1.2)
## Note regarding logging
By default, the proxy will refuse to use keys if it finds that logging is enabled, or if it doesn't have permission to check logging status.
If you can't attach the `bedrock:GetModelInvocationLoggingConfiguration` policy to the principal, you can set the `ALLOW_AWS_LOGGING` environment variable to `true` to force the proxy to use the keys anyway. A warning will appear on the info page when this is enabled.
+30
@@ -0,0 +1,30 @@
# Configuring the proxy for Azure
The proxy supports Azure OpenAI Service via the `/proxy/azure/openai` endpoint. The process of setting it up is slightly different from regular OpenAI.
- [Setting keys](#setting-keys)
- [Model assignment](#model-assignment)
## Setting keys
Use the `AZURE_CREDENTIALS` environment variable to set the Azure API keys.
Like other APIs, you can provide multiple keys separated by commas. Each Azure key, however, is a set of values including the Resource Name, Deployment ID, and API key. These are separated by a colon (`:`).
For example:
```
AZURE_CREDENTIALS=contoso-ml:gpt4-8k:0123456789abcdef0123456789abcdef,northwind-corp:testdeployment:0123456789abcdef0123456789abcdef
```
## Model assignment
Note that each Azure deployment is assigned a model when you create it in the Azure OpenAI Service portal. If you want to use a different model, you'll need to create a new deployment, and therefore a new key to be added to the AZURE_CREDENTIALS environment variable. Each credential only grants access to one model.
### Supported model IDs
Users can send normal OpenAI model IDs to the proxy to invoke the corresponding models. For the most part they work the same with Azure. GPT-3.5 Turbo has an ID of "gpt-35-turbo" because Azure doesn't allow periods in model names, but the proxy should automatically convert this to the correct ID.
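As a rough sketch of the conversion involved (illustrative only, not the proxy's actual implementation):
```
// Hypothetical helper: Azure deployment model names cannot contain periods,
// so OpenAI's "gpt-3.5-turbo" corresponds to Azure's "gpt-35-turbo".
function toAzureModelId(openaiModelId: string): string {
  return openaiModelId.replace("gpt-3.5", "gpt-35");
}
```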
As noted above, you can only use model IDs for which a deployment has been created and added to the proxy.
## On content filtering
Be aware that all Azure OpenAI Service deployments have content filtering enabled by default at a Medium level. Prompts or responses which are deemed to be inappropriate will be rejected by the API. This is a feature of the Azure OpenAI Service and not the proxy.
You can disable this from deployment's settings within Azure, but you would need to request an exemption from Microsoft for your organization first. See [this page](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/content-filters) for more information.
+71
@@ -0,0 +1,71 @@
# Configuring the proxy for DALL-E
The proxy supports DALL-E 2 and DALL-E 3 image generation via the `/proxy/openai-images` endpoint. By default it is disabled as it is somewhat expensive and potentially more open to abuse than text generation.
- [Updating your Dockerfile](#updating-your-dockerfile)
- [Enabling DALL-E](#enabling-dall-e)
- [Setting quotas](#setting-quotas)
- [Rate limiting](#rate-limiting)
## Updating your Dockerfile
If you are using a previous version of the Dockerfile supplied with the proxy, it doesn't have the necessary permissions to let the proxy save temporary files.
You can replace the entire thing with the new Dockerfile at [./docker/huggingface/Dockerfile](../docker/huggingface/Dockerfile) (or the equivalent for Render deployments).
You can also modify your existing Dockerfile; just add the following lines after the `WORKDIR` line:
```Dockerfile
# Existing
RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
WORKDIR /app
# Take ownership of the app directory and switch to the non-root user
RUN chown -R 1000:1000 /app
USER 1000
# Existing
RUN npm install
```
## Enabling DALL-E
Add `dall-e` to the `ALLOWED_MODEL_FAMILIES` environment variable to enable DALL-E. For example:
```
# GPT3.5 Turbo, GPT-4, GPT-4 Turbo, and DALL-E
ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-turbo,dall-e
# All models as of this writing
ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,claude,gemini-pro,aws-claude,dall-e
```
Refer to [.env.example](../.env.example) for a full list of supported model families; add `dall-e` to the full list if you want every model enabled.
## Setting quotas
DALL-E doesn't bill by token like text generation models. Instead there is a fixed cost per image generated, depending on the model, image size, and selected quality.
The proxy still uses tokens to set quotas for users. The cost for each generated image will be converted to "tokens" at a rate of 100000 tokens per US$1.00. This works out to a similar cost-per-token as GPT-4 Turbo, so you can use similar token quotas for both.
Use `TOKEN_QUOTA_DALL_E` to set the default quota for image generation. Otherwise it works the same as token quotas for other models.
```
# ~50 standard DALL-E images per refresh period, or US$2.00
TOKEN_QUOTA_DALL_E=200000
```
Refer to [https://openai.com/pricing](https://openai.com/pricing) for the latest pricing information. As of this writing, the cheapest DALL-E 3 image costs $0.04 per generation, which works out to 4000 tokens. Higher resolution and quality settings can cost up to $0.12 per image, or 12000 tokens.
## Rate limiting
The old `MODEL_RATE_LIMIT` setting has been split into `TEXT_MODEL_RATE_LIMIT` and `IMAGE_MODEL_RATE_LIMIT`. Whatever value you previously set for `MODEL_RATE_LIMIT` will be used for text models.
If you don't specify an `IMAGE_MODEL_RATE_LIMIT`, it defaults to half of the `TEXT_MODEL_RATE_LIMIT`, with a minimum of 1 image per minute.
```
# 4 text generations per minute, 2 images per minute
TEXT_MODEL_RATE_LIMIT=4
IMAGE_MODEL_RATE_LIMIT=2
```
If a prompt is filtered by OpenAI's content filter, it won't count towards the rate limit.
## Hiding recent images
By default, the proxy shows the last 12 recently generated images by users. You can hide this section by setting `SHOW_RECENT_IMAGES` to `false`.
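For example:
```
SHOW_RECENT_IMAGES=false
```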
+12 -1
@@ -1,5 +1,7 @@
# Deploy to Huggingface Space
**⚠️ This method is no longer recommended. Please use the [self-hosting instructions](./self-hosting.md) instead.**
This repository can be deployed to a [Huggingface Space](https://huggingface.co/spaces). This is a free service that allows you to run a simple server in the cloud. You can use it to safely share your OpenAI API key with a friend.
### 1. Get an API key
@@ -25,11 +27,14 @@ RUN apt-get update && \
apt-get install -y git
RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
WORKDIR /app
RUN chown -R 1000:1000 /app
USER 1000
RUN npm install
COPY Dockerfile greeting.md* .env* ./
RUN npm run build
EXPOSE 7860
ENV NODE_ENV=production
ENV NODE_OPTIONS="--max-old-space-size=12882"
CMD [ "npm", "start" ]
```
- Click "Commit new file to `main`" to save the Dockerfile.
@@ -88,6 +93,12 @@ See `.env.example` for a full list of available settings, or check `config.ts` f
## Restricting access to the server
If you want to restrict access to the server, you can set a `PROXY_KEY` secret. This key will need to be passed in the Authentication header of every request to the server, just like an OpenAI API key. Set the `GATEKEEPER` mode to `proxy_key`, and then set the `PROXY_KEY` variable to whatever password you want.
Add this using the same method as the OPENAI_KEY secret above. Don't add this to your `.env` file because that file is public and anyone can see it.
Example:
```
GATEKEEPER=proxy_key
PROXY_KEY=your_secret_password
```
+5
@@ -1,4 +1,7 @@
# Deploy to Render.com
**⚠️ This method is no longer supported or recommended and may not work. Please use the [self-hosting instructions](./self-hosting.md) instead.**
Render.com offers a free tier that includes 750 hours of compute time per month. This is enough to run a single proxy instance 24/7. Instances shut down after 15 minutes without traffic but start up again automatically when a request is received. You can use something like https://app.checklyhq.com/ to ping your proxy every 15 minutes to keep it alive.
### 1. Create account
@@ -28,6 +31,8 @@ The service will be created according to the instructions in the `render.yaml` f
- For example, `OPENAI_KEY=sk-abc123`.
- Click **Save Changes**.
**IMPORTANT:** Set `TRUSTED_PROXIES=3`, otherwise users' IP addresses will not be recorded correctly (the server will see the IP address of Render's load balancer instead of the user's real IP address).
The service will automatically rebuild and deploy with the new environment variables. This will take a few minutes. The link to your deployed proxy will appear at the top of the page.
If you want to change the URL, go to the **Settings** tab of your Web Service and click the **Edit** button next to **Name**. You can also set a custom domain, though I haven't tried this yet.
+35
@@ -0,0 +1,35 @@
# Configuring the proxy for Vertex AI (GCP)
The proxy supports GCP models via the `/proxy/gcp/claude` endpoint. There are a few extra steps necessary to use GCP compared to the other supported APIs.
- [Setting keys](#setting-keys)
- [Setup Vertex AI](#setup-vertex-ai)
- [Supported model IDs](#supported-model-ids)
## Setting keys
Use the `GCP_CREDENTIALS` environment variable to set the GCP API keys.
Like other APIs, you can provide multiple keys separated by commas. Each GCP key, however, is a set of credentials including the project ID, client email, region, and private key, separated by colons (`:`).
For example:
```
GCP_CREDENTIALS=my-first-project:xxx@yyy.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----,my-first-project2:xxx2@yyy.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----
```
## Setup Vertex AI
1. Go to [https://cloud.google.com/vertex-ai](https://cloud.google.com/vertex-ai) and sign up for a GCP account. (You get $150 in free credits without a credit card, or $300 with one; credits expire after 90 days.)
2. Go to [https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com](https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com) to enable Vertex AI API.
3. Go to [https://console.cloud.google.com/vertex-ai](https://console.cloud.google.com/vertex-ai) and navigate to Model Garden to apply for access to the Claude models.
4. Create a [Service Account](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts/create?walkthrough_id=iam--create-service-account#step_index=1), and make sure to grant it the "Vertex AI User" or "Vertex AI Administrator" role.
5. On the service account page you just created, create a new key and select "JSON". The JSON file will be downloaded automatically.
6. The credential values required above (project ID, client email, and private key) are all in the JSON file you just downloaded.
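For reference, the `project_id`, `client_email`, and `private_key` fields of that JSON map onto the credential format described above; the region is not in the file and is whichever GCP region you want to use (placeholder values shown):
```
# project_id : client_email : region : private_key
GCP_CREDENTIALS=my-project:sa-name@my-project.iam.gserviceaccount.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----
```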
## Supported model IDs
Users can send these model IDs to the proxy to invoke the corresponding models.
- **Claude**
- `claude-3-haiku@20240307`
- `claude-3-sonnet@20240229`
- `claude-3-opus@20240229`
- `claude-3-5-sonnet@20240620`
+135
@@ -0,0 +1,135 @@
# Proof-of-work Verification
You can require users to complete a proof-of-work before they can access the
proxy. This can increase the cost of denial-of-service attacks and slow down
automated abuse.
When configured, users access the challenge UI and request a token. The server
sends a challenge to the client, which asks the user's browser to find a
solution to the challenge that meets a certain constraint (the difficulty
level). Once the user has found a solution, they can submit it to the server
and get a user token valid for a period you specify.
The proof-of-work challenge uses the argon2id hash function.
## Configuration
To enable proof-of-work verification, set the following environment variables:
```
GATEKEEPER=user_token
CAPTCHA_MODE=proof_of_work
# Validity of the token in hours
POW_TOKEN_HOURS=24
# Max number of IPs that can use a user_token issued via proof-of-work
POW_TOKEN_MAX_IPS=2
# The difficulty level of the proof-of-work challenge. You can use one of the
# predefined levels specified below, or you can specify a custom number of
# expected hash iterations.
POW_DIFFICULTY_LEVEL=low
# The time limit for solving the challenge, in minutes
POW_CHALLENGE_TIMEOUT=30
```
## Difficulty Levels
The difficulty level controls how long, on average, it will take for a user to
solve the proof-of-work challenge. Due to randomness, the actual time can vary
significantly; lucky users may solve the challenge in a fraction of the average
time, while unlucky users may take much longer.
The difficulty level doesn't affect the speed of the hash function itself, only
the number of hashes that will need to be computed. Therefore, the time required
to complete the challenge scales linearly with the difficulty level's iteration
count.
You can adjust the difficulty level while the proxy is running from the admin
interface.
Be aware that there is a time limit for solving the challenge, by default set to
30 minutes. Above 'high' difficulty, you will probably need to increase this
limit, or users with slow devices may struggle to find a solution in time.
### Low
- Average of 200 iterations required
- Default setting.
### Medium
- Average of 900 iterations required
### High
- Average of 1900 iterations required
### Extreme
- Average of 4000 iterations required
- Not recommended unless you are expecting very high levels of abuse
- May require increasing `POW_CHALLENGE_TIMEOUT`
### Custom
Setting `POW_DIFFICULTY_LEVEL` to an integer will use that number of iterations
as the difficulty level.
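For example, to use a difficulty between the predefined 'medium' and 'high' levels:
```
# Average of 1500 iterations required
POW_DIFFICULTY_LEVEL=1500
```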
## Other challenge settings
- `POW_CHALLENGE_TIMEOUT`: The time limit for solving the challenge, in minutes.
Default is 30.
- `POW_TOKEN_HOURS`: The period of time for which a user token issued via proof-
of-work can be used. Default is 24 hours. Starts when the challenge is solved.
- `POW_TOKEN_MAX_IPS`: The maximum number of unique IPs that can use a single
user token issued via proof-of-work. Default is 2.
- `POW_TOKEN_PURGE_HOURS`: The period of time after which an expired user token
issued via proof-of-work will be removed from the database. Until it is
purged, users can refresh expired tokens by completing a half-difficulty
challenge. Default is 48 hours.
- `POW_MAX_TOKENS_PER_IP`: The maximum number of active user tokens that can
be associated with a single IP address. After this limit is reached, the
oldest token will be forcibly expired when a new token is issued. Set to 0
to disable this feature. Default is 0.
## Custom argon2id parameters
You can set custom argon2id parameters for the proof-of-work challenge.
Generally, you should not need to change these unless you have a specific
reason to do so.
The listed values are the defaults.
```
ARGON2_TIME_COST=8
ARGON2_MEMORY_KB=65536
ARGON2_PARALLELISM=1
ARGON2_HASH_LENGTH=32
```
Increasing parallelism will not do much except increase memory consumption for
both the client and server, because browser proof-of-work implementations are
single-threaded. It's better to increase the time cost if you want to increase
the difficulty.
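For example, a sketch of raising the per-hash cost by doubling the time cost from its default:
```
# Each hash now takes roughly twice as long as with the default of 8
ARGON2_TIME_COST=16
```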
Increasing memory too much may cause memory exhaustion on some mobile devices,
particularly on iOS due to the way Safari handles WebAssembly memory allocation.
## Tested hash rates
These were measured with the default argon2id parameters listed above. These
tests were not at all scientific, so take them with a grain of salt.
Safari does not like large WASM memory usage, so concurrency is limited to 4 to
avoid overallocating memory on mobile WebKit browsers. Thermal throttling can
also significantly reduce hash rates on mobile devices.
- Intel Core i9-13900K (Chrome): 33-35 H/s
- Intel Core i9-13900K (Firefox): 29-32 H/s
- Intel Core i9-13900K (Chrome, in VM limited to 4 cores): 12.2 - 13.0 H/s
- iPad Pro (M2) (Safari, 6 workers): 8.0 - 10 H/s
- Thermal throttles early. 8 cores is normal concurrency, but unstable.
- iPhone 15 Pro Max (Safari): 4.0 - 4.6 H/s
- Samsung Galaxy S10e (Chrome): 3.6 - 3.8 H/s
- This is a 2019 phone almost matching an iPhone five years newer because of
bad Safari performance.
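To put these figures in context: at the default 'low' difficulty (200 iterations on average), a device hashing at 4 H/s would need around 50 seconds on average to solve the challenge, while at 'extreme' (4000 iterations) the same device would average roughly 17 minutes.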
+150
@@ -0,0 +1,150 @@
# Quick self-hosting guide
Temporary guide for self-hosting. This will be improved in the future to provide more robust instructions and options. Provided commands are for Ubuntu.
This uses prebuilt Docker images for convenience. If you want to make adjustments to the code you can instead clone the repo and follow the Local Development guide in the [README](../README.md).
## Table of Contents
- [Requirements](#requirements)
- [Running the application](#running-the-application)
- [Setting up a reverse proxy](#setting-up-a-reverse-proxy)
- [trycloudflare](#trycloudflare)
- [nginx](#nginx)
- [Example basic nginx configuration (no SSL)](#example-basic-nginx-configuration-no-ssl)
- [Example with Cloudflare SSL](#example-with-cloudflare-ssl)
- [Updating/Restarting the application](#updatingrestarting-the-application)
## Requirements
- Docker
- Docker Compose
- A VPS with at least 512MB of RAM (1GB recommended)
- A domain name
If you don't have a VPS and domain name you can use TryCloudflare to set up a temporary URL that you can share with others. See [trycloudflare](#trycloudflare) for more information.
## Running the application
- Install Docker and Docker Compose
- Create a new directory for the application
- This will contain your .env file, greeting file, and any user-generated files
- Execute the following commands:
- ```
touch .env
touch greeting.md
echo "OPENAI_KEY=your-openai-key" >> .env
curl https://gitgud.io/khanon/oai-reverse-proxy/-/raw/main/docker/docker-compose-selfhost.yml -o docker-compose.yml
```
- You can set further environment variables and keys in the `.env` file. See [.env.example](../.env.example) for a list of available options.
- You can set a custom greeting in `greeting.md`. This will be displayed on the homepage.
- Run `docker compose up -d`
You can check logs with `docker compose logs -n 100 -f`.
The provided docker-compose file listens on port 7860 but binds to localhost only. You should use a reverse proxy to expose the application to the internet as described in the next section.
## Setting up a reverse proxy
Rather than exposing the application directly to the internet, it is recommended to set up a reverse proxy. This will allow you to use HTTPS and add additional security measures.
### trycloudflare
This will give you a temporary (72 hours) URL that you can use to let others connect to your instance securely, without having to set up a reverse proxy. If you are running the server on your home network, this is probably the best option.
- Install `cloudflared` following the instructions at [try.cloudflare.com](https://try.cloudflare.com/).
- Run `cloudflared tunnel --url http://localhost:7860`
- You will be given a temporary URL that you can share with others.
If you have a VPS, you should instead use a proper reverse proxy like nginx for a more permanent solution, which will allow you to use your own domain name, handle SSL, and add additional security/anti-abuse measures.
### nginx
First, install nginx.
- `sudo apt update && sudo apt install nginx`
#### Example basic nginx configuration (no SSL)
- `sudo nano /etc/nginx/sites-available/oai.conf`
- ```
server {
listen 80;
server_name example.com;
location / {
proxy_pass http://localhost:7860;
}
}
```
- Replace `example.com` with your domain name.
- Ctrl+X to exit, Y to save, Enter to confirm.
- `sudo ln -s /etc/nginx/sites-available/oai.conf /etc/nginx/sites-enabled`
- `sudo nginx -t`
- This will check the configuration file for errors.
- `sudo systemctl restart nginx`
- This will restart nginx and apply the new configuration.
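Note that the minimal configuration above doesn't forward the client's real IP address, so the proxy will see every request as coming from nginx. A sketch of the same `location` block with forwarding headers added (pair this with an appropriate `TRUSTED_PROXIES` value in your `.env`):
```
location / {
    proxy_pass http://localhost:7860;
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
```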
#### Example with Cloudflare SSL
This allows you to use a self-signed certificate on the server, and have Cloudflare handle client SSL. You need to have a Cloudflare account and have your domain set up with Cloudflare already, pointing to your server's IP address.
- Set Cloudflare to use Full SSL mode. Since we are using a self-signed certificate, don't use Full (strict) mode.
- Create a self-signed certificate:
- `sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/ssl/private/nginx-selfsigned.key -out /etc/ssl/certs/nginx-selfsigned.crt`
- `sudo nano /etc/nginx/sites-available/oai.conf`
- ```
server {
listen 443 ssl;
server_name yourdomain.com www.yourdomain.com;
ssl_certificate /etc/ssl/certs/nginx-selfsigned.crt;
ssl_certificate_key /etc/ssl/private/nginx-selfsigned.key;
# Only allow inbound traffic from Cloudflare
allow 173.245.48.0/20;
allow 103.21.244.0/22;
allow 103.22.200.0/22;
allow 103.31.4.0/22;
allow 141.101.64.0/18;
allow 108.162.192.0/18;
allow 190.93.240.0/20;
allow 188.114.96.0/20;
allow 197.234.240.0/22;
allow 198.41.128.0/17;
allow 162.158.0.0/15;
allow 104.16.0.0/13;
allow 104.24.0.0/14;
allow 172.64.0.0/13;
allow 131.0.72.0/22;
deny all;
location / {
proxy_pass http://localhost:7860;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256';
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
}
```
- Replace `yourdomain.com` with your domain name.
- Ctrl+X to exit, Y to save, Enter to confirm.
- `sudo ln -s /etc/nginx/sites-available/oai.conf /etc/nginx/sites-enabled`
## Updating/Restarting the application
After changing your `.env` file, you need to restart the application for the change to take effect.
- `docker compose down`
- `docker compose up -d`
To update the application to the latest version:
- `docker compose pull`
- `docker compose down`
- `docker compose up -d`
- `docker image prune -f`
+10
@@ -12,6 +12,7 @@ Several of these features require you to set secrets in your environment. If usi
- [Memory](#memory)
- [Firebase Realtime Database](#firebase-realtime-database)
- [Firebase setup instructions](#firebase-setup-instructions)
- [Whitelisting admin IP addresses](#whitelisting-admin-ip-addresses)
## No user management (`GATEKEEPER=none`)
@@ -61,3 +62,12 @@ To use Firebase Realtime Database to persist user data, set the following enviro
8. Set `GATEKEEPER_STORE` to `firebase_rtdb` in your environment if you haven't already.
The proxy server will attempt to connect to your Firebase Realtime Database at startup and will throw an error if it cannot connect. If you see this error, check that your `FIREBASE_RTDB_URL` and `FIREBASE_KEY` secrets are set correctly.
## Whitelisting admin IP addresses
You can add your own IP ranges to the `ADMIN_WHITELIST` environment variable for additional security.
You can provide a comma-separated list containing individual IPv4 or IPv6 addresses, or CIDR ranges.
To whitelist an entire IP range, use CIDR notation. For example, `192.168.0.0/24` would whitelist all addresses from `192.168.0.0` to `192.168.0.255`.
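For example (placeholder addresses):
```
# A single IPv4 address plus an IPv4 range and an IPv6 address
ADMIN_WHITELIST=203.0.113.7,192.168.0.0/24,2001:db8::1
```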
To disable the whitelist, set `ADMIN_WHITELIST=0.0.0.0/0,::0`, which will allow access from any IPv4 or IPv6 address. This is the default behavior.
+9
@@ -0,0 +1,9 @@
{
"dev": {
"proxy-host": "http://localhost:7860",
"oai-key-1": "override in http-client.private.env.json",
"proxy-key": "override in http-client.private.env.json",
"azu-resource-name": "override in http-client.private.env.json",
"azu-deployment-id": "override in http-client.private.env.json"
}
}
+2959 -847
File diff suppressed because it is too large
+44 -15
@@ -4,12 +4,14 @@
"description": "Reverse proxy for the OpenAI API",
"scripts": {
"build": "tsc && copyfiles -u 1 src/**/*.ejs build",
"database:migrate": "ts-node scripts/migrate.ts",
"postinstall": "patch-package",
"prepare": "husky install",
"start": "node --trace-deprecation --trace-warnings build/server.js",
"start:dev": "nodemon --watch src --exec ts-node --transpile-only src/server.ts",
"start:debug": "ts-node --inspect --transpile-only src/server.ts",
"start:watch": "nodemon --require source-map-support/register build/server.js",
"start:replit": "tsc && node build/server.js",
"start": "node build/server.js",
"type-check": "tsc --noEmit",
"prepare": "husky install"
"type-check": "tsc --noEmit"
},
"engines": {
"node": ">=18.0.0"
@@ -18,31 +20,53 @@
"license": "MIT",
"dependencies": {
"@anthropic-ai/tokenizer": "^0.0.4",
"axios": "^1.3.5",
"@aws-crypto/sha256-js": "^5.2.0",
"@huggingface/jinja": "^0.3.0",
"@node-rs/argon2": "^1.8.3",
"@smithy/eventstream-codec": "^2.1.3",
"@smithy/eventstream-serde-node": "^2.1.3",
"@smithy/protocol-http": "^3.2.1",
"@smithy/signature-v4": "^2.1.3",
"@smithy/util-utf8": "^2.1.1",
"axios": "^1.7.4",
"better-sqlite3": "^10.0.0",
"check-disk-space": "^3.4.0",
"cookie-parser": "^1.4.6",
"copyfiles": "^2.4.1",
"cors": "^2.8.5",
"csrf-csrf": "^2.3.0",
"dotenv": "^16.0.3",
"ejs": "^3.1.9",
"express": "^4.18.2",
"dotenv": "^16.3.1",
"ejs": "^3.1.10",
"express": "^4.19.3",
"express-session": "^1.17.3",
"firebase-admin": "^11.10.1",
"firebase-admin": "^12.5.0",
"glob": "^10.3.12",
"googleapis": "^122.0.0",
"http-proxy-middleware": "^3.0.0-beta.1",
"http-proxy": "1.18.1",
"http-proxy-middleware": "^3.0.2",
"ipaddr.js": "^2.1.0",
"memorystore": "^1.6.7",
"multer": "^1.4.5-lts.1",
"node-schedule": "^2.1.1",
"patch-package": "^8.0.0",
"pino": "^8.11.0",
"pino-http": "^8.3.3",
"sanitize-html": "^2.11.0",
"proxy-agent": "^6.4.0",
"sanitize-html": "^2.13.0",
"sharp": "^0.32.6",
"showdown": "^2.1.0",
"source-map-support": "^0.5.21",
"stream-json": "^1.8.0",
"tiktoken": "^1.0.10",
"tinyws": "^0.1.0",
"uuid": "^9.0.0",
"zlib": "^1.0.5",
"zod": "^3.21.4"
"zod": "^3.22.3",
"zod-error": "^1.5.0"
},
"devDependencies": {
"@smithy/types": "^3.3.0",
"@types/better-sqlite3": "^7.6.10",
"@types/cookie-parser": "^1.4.3",
"@types/cors": "^2.8.13",
"@types/express": "^4.17.17",
@@ -51,17 +75,22 @@
"@types/node-schedule": "^2.1.0",
"@types/sanitize-html": "^2.9.0",
"@types/showdown": "^2.0.0",
"@types/stream-json": "^1.7.7",
"@types/uuid": "^9.0.1",
"concurrently": "^8.0.1",
"esbuild": "^0.17.16",
"esbuild-register": "^3.4.2",
"husky": "^8.0.3",
"nodemon": "^3.0.1",
"source-map-support": "^0.5.21",
"pino-pretty": "^10.2.3",
"prettier": "^3.0.3",
"prettier-plugin-ejs": "^1.0.3",
"ts-node": "^10.9.1",
"typescript": "^5.0.4"
"typescript": "^5.4.2"
},
"overrides": {
"google-gax": "^3.6.1"
"node-fetch@2.x": {
"whatwg-url": "14.x"
}
}
}
+23
@@ -0,0 +1,23 @@
# Patches
Contains monkey patches for certain packages, applied using `patch-package`.
## `http-proxy+1.18.1.patch`
Modifies the `http-proxy` package to work around an incompatibility with
body-parser and SOCKS5 proxies due to some esoteric stream handling behavior
when `socks-proxy-agent` is used instead of a generic http.Agent.
Modification involves adjusting the `buffer` property on ProxyServer's `options`
object to be a function that returns a stream instead of a stream itself. This
allows us to give it a function which produces a new Readable from the already-
parsed request body.
With the old implementation we would need to create an entirely new ProxyServer
instance for each request, which is not ideal under heavy load.
`http-proxy` hasn't been updated in six years so it's unlikely that this patch
will be broken by future updates, but it's still pinned to 1.18.1 for now.
### See also
https://github.com/chimurai/http-proxy-middleware/issues/40
https://github.com/chimurai/http-proxy-middleware/issues/299
https://github.com/http-party/node-http-proxy/pull/1027
+13
@@ -0,0 +1,13 @@
diff --git a/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js b/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
index 7ae7355..c825c27 100644
--- a/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
+++ b/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
@@ -167,7 +167,7 @@ module.exports = {
}
}
- (options.buffer || req).pipe(proxyReq);
+ (options.buffer(req) || req).pipe(proxyReq);
proxyReq.on('response', function(proxyRes) {
if(server) { server.emit('proxyRes', proxyRes, req, res); }
+349
@@ -0,0 +1,349 @@
/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */
/* Document
========================================================================== */
/**
* 1. Correct the line height in all browsers.
* 2. Prevent adjustments of font size after orientation changes in iOS.
*/
html {
line-height: 1.15; /* 1 */
-webkit-text-size-adjust: 100%; /* 2 */
}
/* Sections
========================================================================== */
/**
* Remove the margin in all browsers.
*/
body {
margin: 0;
}
/**
* Render the `main` element consistently in IE.
*/
main {
display: block;
}
/**
* Correct the font size and margin on `h1` elements within `section` and
* `article` contexts in Chrome, Firefox, and Safari.
*/
h1 {
font-size: 2em;
margin: 0.67em 0;
}
/* Grouping content
========================================================================== */
/**
* 1. Add the correct box sizing in Firefox.
* 2. Show the overflow in Edge and IE.
*/
hr {
box-sizing: content-box; /* 1 */
height: 0; /* 1 */
overflow: visible; /* 2 */
}
/**
* 1. Correct the inheritance and scaling of font size in all browsers.
* 2. Correct the odd `em` font sizing in all browsers.
*/
pre {
font-family: monospace, monospace; /* 1 */
font-size: 1em; /* 2 */
}
/* Text-level semantics
========================================================================== */
/**
* Remove the gray background on active links in IE 10.
*/
a {
background-color: transparent;
}
/**
* 1. Remove the bottom border in Chrome 57-
* 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.
*/
abbr[title] {
border-bottom: none; /* 1 */
text-decoration: underline; /* 2 */
text-decoration: underline dotted; /* 2 */
}
/**
* Add the correct font weight in Chrome, Edge, and Safari.
*/
b,
strong {
font-weight: bolder;
}
/**
* 1. Correct the inheritance and scaling of font size in all browsers.
* 2. Correct the odd `em` font sizing in all browsers.
*/
code,
kbd,
samp {
font-family: monospace, monospace; /* 1 */
font-size: 1em; /* 2 */
}
/**
* Add the correct font size in all browsers.
*/
small {
font-size: 80%;
}
/**
* Prevent `sub` and `sup` elements from affecting the line height in
* all browsers.
*/
sub,
sup {
font-size: 75%;
line-height: 0;
position: relative;
vertical-align: baseline;
}
sub {
bottom: -0.25em;
}
sup {
top: -0.5em;
}
/* Embedded content
========================================================================== */
/**
* Remove the border on images inside links in IE 10.
*/
img {
border-style: none;
}
/* Forms
========================================================================== */
/**
* 1. Change the font styles in all browsers.
* 2. Remove the margin in Firefox and Safari.
*/
button,
input,
optgroup,
select,
textarea {
font-family: inherit; /* 1 */
font-size: 100%; /* 1 */
line-height: 1.15; /* 1 */
margin: 0; /* 2 */
}
/**
* Show the overflow in IE.
* 1. Show the overflow in Edge.
*/
button,
input { /* 1 */
overflow: visible;
}
/**
* Remove the inheritance of text transform in Edge, Firefox, and IE.
* 1. Remove the inheritance of text transform in Firefox.
*/
button,
select { /* 1 */
text-transform: none;
}
/**
* Correct the inability to style clickable types in iOS and Safari.
*/
button,
[type="button"],
[type="reset"],
[type="submit"] {
-webkit-appearance: button;
}
/**
* Remove the inner border and padding in Firefox.
*/
button::-moz-focus-inner,
[type="button"]::-moz-focus-inner,
[type="reset"]::-moz-focus-inner,
[type="submit"]::-moz-focus-inner {
border-style: none;
padding: 0;
}
/**
* Restore the focus styles unset by the previous rule.
*/
button:-moz-focusring,
[type="button"]:-moz-focusring,
[type="reset"]:-moz-focusring,
[type="submit"]:-moz-focusring {
outline: 1px dotted ButtonText;
}
/**
* Correct the padding in Firefox.
*/
fieldset {
padding: 0.35em 0.75em 0.625em;
}
/**
* 1. Correct the text wrapping in Edge and IE.
* 2. Correct the color inheritance from `fieldset` elements in IE.
* 3. Remove the padding so developers are not caught out when they zero out
* `fieldset` elements in all browsers.
*/
legend {
box-sizing: border-box; /* 1 */
color: inherit; /* 2 */
display: table; /* 1 */
max-width: 100%; /* 1 */
padding: 0; /* 3 */
white-space: normal; /* 1 */
}
/**
* Add the correct vertical alignment in Chrome, Firefox, and Opera.
*/
progress {
vertical-align: baseline;
}
/**
* Remove the default vertical scrollbar in IE 10+.
*/
textarea {
overflow: auto;
}
/**
* 1. Add the correct box sizing in IE 10.
* 2. Remove the padding in IE 10.
*/
[type="checkbox"],
[type="radio"] {
box-sizing: border-box; /* 1 */
padding: 0; /* 2 */
}
/**
* Correct the cursor style of increment and decrement buttons in Chrome.
*/
[type="number"]::-webkit-inner-spin-button,
[type="number"]::-webkit-outer-spin-button {
height: auto;
}
/**
* 1. Correct the odd appearance in Chrome and Safari.
* 2. Correct the outline style in Safari.
*/
[type="search"] {
-webkit-appearance: textfield; /* 1 */
outline-offset: -2px; /* 2 */
}
/**
* Remove the inner padding in Chrome and Safari on macOS.
*/
[type="search"]::-webkit-search-decoration {
-webkit-appearance: none;
}
/**
* 1. Correct the inability to style clickable types in iOS and Safari.
* 2. Change font properties to `inherit` in Safari.
*/
::-webkit-file-upload-button {
-webkit-appearance: button; /* 1 */
font: inherit; /* 2 */
}
/* Interactive
========================================================================== */
/*
* Add the correct display in Edge, IE 10+, and Firefox.
*/
details {
display: block;
}
/*
* Add the correct display in all browsers.
*/
summary {
display: list-item;
}
/* Misc
========================================================================== */
/**
* Add the correct display in IE 10+.
*/
template {
display: none;
}
/**
* Add the correct display in IE 10.
*/
[hidden] {
display: none;
}
+231
@@ -0,0 +1,231 @@
/* modified https://github.com/oxalorg/sakura */
html {
font-size: 62.5%;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
"Helvetica Neue", Arial, "Noto Sans", sans-serif;
}
body {
font-size: 1.8rem;
line-height: 1.618;
max-width: 38em;
margin: auto;
color: #c9c9c9;
background-color: #222222;
padding: 13px;
}
@media (max-width: 684px) {
body {
font-size: 1.53rem;
}
}
@media (max-width: 382px) {
body {
font-size: 1.35rem;
}
}
h1,
h2,
h3,
h4,
h5,
h6 {
line-height: 1.1;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
"Helvetica Neue", Arial, "Noto Sans", sans-serif;
font-weight: 700;
margin-top: 3rem;
margin-bottom: 1.5rem;
overflow-wrap: break-word;
word-wrap: break-word;
-ms-word-break: break-all;
word-break: break-word;
}
h1 {
font-size: 2.35em;
}
h2 {
font-size: 2em;
}
h3 {
font-size: 1.75em;
}
h4 {
font-size: 1.5em;
}
h5 {
font-size: 1.25em;
}
h6 {
font-size: 1em;
}
p {
margin-top: 0px;
margin-bottom: 2.5rem;
}
small,
sub,
sup {
font-size: 75%;
}
hr {
border-color: #ffffff;
}
a {
text-decoration: none;
color: #ffffff;
}
a:visited {
color: #e6e6e6;
}
a:hover {
color: #c9c9c9;
text-decoration: underline;
}
ul {
padding-left: 1.4em;
margin-top: 0px;
margin-bottom: 2.5rem;
}
li {
margin-bottom: 0.4em;
}
blockquote {
margin-left: 0px;
margin-right: 0px;
padding-left: 1em;
padding-top: 0.8em;
padding-bottom: 0.8em;
padding-right: 0.8em;
border-left: 5px solid #ffffff;
margin-bottom: 2.5rem;
background-color: #4a4a4a;
}
blockquote p {
margin-bottom: 0;
}
img,
video {
height: auto;
max-width: 100%;
margin-top: 0px;
margin-bottom: 2.5rem;
}
pre {
background-color: #4a4a4a;
display: block;
padding: 1em;
overflow-x: auto;
margin-top: 0px;
margin-bottom: 2.5rem;
font-size: 0.9em;
}
code,
kbd,
samp {
font-size: 0.9em;
padding: 0 0.5em;
background-color: #4a4a4a;
white-space: pre-wrap;
}
pre > code {
padding: 0;
background-color: transparent;
white-space: pre;
font-size: 1em;
}
table {
text-align: justify;
width: 100%;
border-collapse: collapse;
margin-bottom: 2rem;
}
td,
th {
padding: 0.5em;
border-bottom: 1px solid #4a4a4a;
}
input,
textarea {
border: 1px solid #c9c9c9;
}
input:focus,
textarea:focus {
border: 1px solid #ffffff;
}
textarea {
width: 100%;
}
.button,
button,
input[type="submit"],
input[type="reset"],
input[type="button"],
input[type="file"]::file-selector-button {
display: inline-block;
padding: 5px 10px;
text-align: center;
text-decoration: none;
white-space: nowrap;
background-color: #ffffff;
color: #222222;
border-radius: 1px;
border: 1px solid #ffffff;
cursor: pointer;
box-sizing: border-box;
}
.button[disabled],
button[disabled],
input[type="submit"][disabled],
input[type="reset"][disabled],
input[type="button"][disabled],
input[type="file"][disabled] {
cursor: default;
opacity: 0.5;
}
.button:hover,
button:hover,
input[type="submit"]:hover,
input[type="reset"]:hover,
input[type="button"]:hover,
input[type="file"]::file-selector-button:hover {
background-color: #c9c9c9;
color: #222222;
outline: 0;
}
.button:focus-visible,
button:focus-visible,
input[type="submit"]:focus-visible,
input[type="reset"]:focus-visible,
input[type="button"]:focus-visible,
input[type="file"]::file-selector-button:focus-visible {
outline-style: solid;
outline-width: 2px;
}
textarea,
select,
input {
color: #c9c9c9;
padding: 6px 10px;
margin-bottom: 10px;
background-color: #4a4a4a;
border: 1px solid #4a4a4a;
border-radius: 4px;
box-shadow: none;
box-sizing: border-box;
}
textarea:focus,
select:focus,
input:focus {
border: 1px solid #ffffff;
outline: 0;
}
input[type="checkbox"]:focus {
outline: 1px dotted #ffffff;
}
label,
legend,
fieldset {
display: block;
margin-bottom: 0.5rem;
font-weight: 600;
}
+237
@@ -0,0 +1,237 @@
/* modified https://github.com/oxalorg/sakura */
:root {
--accent-color: #4a4a4a;
--accent-color-hover: #5a5a5a;
--link-color: #58739c;
--link-visited-color: #6f5e6f;
}
html {
font-size: 62.5%;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
"Helvetica Neue", Arial, "Noto Sans", sans-serif;
}
body {
font-size: 1.8rem;
line-height: 1.618;
max-width: 38em;
margin: auto;
color: #4a4a4a;
background-color: #f9f9f9;
padding: 13px;
}
@media (max-width: 684px) {
body {
font-size: 1.53rem;
}
}
@media (max-width: 382px) {
body {
font-size: 1.35rem;
}
}
h1,
h2,
h3,
h4,
h5,
h6 {
line-height: 1.1;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
"Helvetica Neue", Arial, "Noto Sans", sans-serif;
font-weight: 700;
margin-top: 3rem;
margin-bottom: 1.5rem;
overflow-wrap: break-word;
word-wrap: break-word;
-ms-word-break: break-all;
word-break: break-word;
}
h1 {
font-size: 2.35em;
}
h2 {
font-size: 2em;
}
h3 {
font-size: 1.75em;
}
h4 {
font-size: 1.5em;
}
h5 {
font-size: 1.25em;
}
h6 {
font-size: 1em;
}
p {
margin-top: 0;
margin-bottom: 2.5rem;
}
small,
sub,
sup {
font-size: 75%;
}
hr {
border-color: var(--accent-color);
}
a {
text-decoration: none;
color: var(--link-color);
}
a:visited {
color: var(--link-visited-color);
}
a:hover {
color: var(--accent-color-hover);
text-decoration: underline;
}
ul {
padding-left: 1.4em;
margin-top: 0;
margin-bottom: 2.5rem;
}
li {
margin-bottom: 0.4em;
}
blockquote {
margin-left: 0;
margin-right: 0;
padding-left: 1em;
padding-top: 0.8em;
padding-bottom: 0.8em;
padding-right: 0.8em;
border-left: 5px solid var(--accent-color);
margin-bottom: 2.5rem;
background-color: #f1f1f1;
}
blockquote p {
margin-bottom: 0;
}
img,
video {
height: auto;
max-width: 100%;
margin-top: 0;
margin-bottom: 2.5rem;
}
pre {
background-color: #f1f1f1;
display: block;
padding: 1em;
overflow-x: auto;
margin-top: 0;
margin-bottom: 2.5rem;
font-size: 0.9em;
}
code,
kbd,
samp {
font-size: 0.9em;
padding: 0 0.5em;
background-color: #f1f1f1;
white-space: pre-wrap;
}
pre > code {
padding: 0;
background-color: transparent;
white-space: pre;
font-size: 1em;
}
table {
text-align: justify;
width: 100%;
border-collapse: collapse;
margin-bottom: 2rem;
}
td,
th {
padding: 0.5em;
border-bottom: 1px solid #f1f1f1;
}
input,
textarea {
border: 1px solid #4a4a4a;
}
input:focus,
textarea:focus {
border: 1px solid var(--accent-color);
}
textarea {
width: 100%;
}
.button,
button,
input[type="submit"],
input[type="reset"],
input[type="button"],
input[type="file"]::file-selector-button {
display: inline-block;
padding: 5px 10px;
text-align: center;
text-decoration: none;
white-space: nowrap;
background-color: var(--accent-color);
color: #f9f9f9;
border-radius: 2px;
border: 1px solid var(--accent-color);
cursor: pointer;
box-sizing: border-box;
}
.button[disabled],
button[disabled],
input[type="submit"][disabled],
input[type="reset"][disabled],
input[type="button"][disabled],
input[type="file"][disabled] {
cursor: default;
opacity: 0.5;
}
.button:hover,
button:hover,
input[type="submit"]:hover,
input[type="reset"]:hover,
input[type="button"]:hover,
input[type="file"]::file-selector-button:hover {
background-color: var(--accent-color-hover);
color: #f9f9f9;
outline: 0;
}
.button:focus-visible,
button:focus-visible,
input[type="submit"]:focus-visible,
input[type="reset"]:focus-visible,
input[type="button"]:focus-visible,
input[type="file"]::file-selector-button:focus-visible {
outline-style: solid;
outline-width: 2px;
}
textarea,
select,
input {
color: #4a4a4a;
padding: 6px 10px;
margin-bottom: 10px;
background-color: #f1f1f1;
border: 1px solid #f1f1f1;
border-radius: 4px;
box-shadow: none;
box-sizing: border-box;
}
textarea:focus,
select:focus,
input:focus {
border: 1px solid var(--accent-color);
outline: 0;
}
input[type="checkbox"]:focus {
outline: 1px dotted var(--accent-color);
}
label,
legend,
fieldset {
display: block;
margin-bottom: 0.5rem;
font-weight: 600;
}
+120
@@ -0,0 +1,120 @@
importScripts(
"https://cdn.jsdelivr.net/npm/hash-wasm@4.11.0/dist/argon2.umd.min.js"
);
let active = false;
let nonce = 0;
let signature = "";
let lastNotify = 0;
let hashesSinceLastNotify = 0;
let params = {
salt: null,
hashLength: 0,
iterations: 0,
memorySize: 0,
parallelism: 0,
targetValue: BigInt(0),
safariFix: false,
};
self.onmessage = async (event) => {
const { data } = event;
switch (data.type) {
case "stop":
active = false;
self.postMessage({ type: "paused", hashes: hashesSinceLastNotify });
return;
case "start":
active = true;
signature = data.signature;
nonce = data.nonce;
const c = data.challenge;
// decode the hex-encoded salt from the challenge into raw bytes
const salt = new Uint8Array(c.s.length / 2);
for (let i = 0; i < c.s.length; i += 2) {
salt[i / 2] = parseInt(c.s.slice(i, i + 2), 16);
}
params = {
salt: salt,
hashLength: c.hl,
iterations: c.t,
memorySize: c.m,
parallelism: c.p,
targetValue: BigInt(c.d.slice(0, -1)),
safariFix: data.isMobileWebkit,
};
console.log("Started", params);
self.postMessage({ type: "started" });
setTimeout(solve, 0);
break;
}
};
const doHash = async (password) => {
const { salt, hashLength, iterations, memorySize, parallelism } = params;
return await self.hashwasm.argon2id({
password,
salt,
hashLength,
iterations,
memorySize,
parallelism,
});
};
const checkHash = (hash) => {
const { targetValue } = params;
const hashValue = BigInt(`0x${hash}`);
return hashValue <= targetValue;
};
const solve = async () => {
if (!active) {
console.log("Stopped solver", nonce);
return;
}
// Safari WASM doesn't like multiple calls in one worker
const batchSize = 1;
const batch = [];
for (let i = 0; i < batchSize; i++) {
batch.push(nonce++);
}
try {
const results = await Promise.all(
batch.map(async (nonce) => {
const hash = await doHash(String(nonce));
return { hash, nonce };
})
);
hashesSinceLastNotify += batchSize;
const solution = results.find(({ hash }) => checkHash(hash));
if (solution) {
console.log("Solution found", solution, params.salt);
self.postMessage({ type: "solved", nonce: solution.nonce });
active = false;
} else {
if (Date.now() - lastNotify >= 500) {
console.log("Last nonce", nonce, "Hashes", hashesSinceLastNotify);
self.postMessage({ type: "progress", hashes: hashesSinceLastNotify });
lastNotify = Date.now();
hashesSinceLastNotify = 0;
}
setTimeout(solve, 10);
}
} catch (error) {
console.error("Error", error);
const stack = error.stack;
const debug = {
stack,
lastNonce: nonce,
targetValue: params.targetValue,
};
self.postMessage({ type: "error", error: error.message, debug });
active = false;
}
};
+39
@@ -0,0 +1,39 @@
import Database from "better-sqlite3";
import { DATABASE_VERSION, migrateDatabase } from "../src/shared/database";
import { logger } from "../src/logger";
import { config } from "../src/config";
const log = logger.child({ module: "scripts/migrate" });
async function runMigration() {
let targetVersion = Number(process.argv[2]) || undefined;
if (!targetVersion) {
log.info("Enter target version or leave empty to use the latest version.");
process.stdin.resume();
process.stdin.setEncoding("utf8");
const input = await new Promise<string>((resolve) => {
process.stdin.on("data", (text) => {
resolve((String(text) || "").trim());
});
});
process.stdin.pause();
targetVersion = Number(input);
if (!targetVersion) {
targetVersion = DATABASE_VERSION;
}
}
const db = new Database(config.sqliteDataPath, {
verbose: (msg, ...args) => log.debug({ args }, String(msg)),
});
const currentVersion = db.pragma("user_version", { simple: true });
log.info({ currentVersion, targetVersion }, "Running migrations.");
migrateDatabase(targetVersion, db);
}
runMigration().catch((error) => {
log.error(error, "Migration failed.");
process.exit(1);
});
+309
@@ -0,0 +1,309 @@
# OAI Reverse Proxy
###
# @name OpenAI -- Chat Completions
POST https://api.openai.com/v1/chat/completions
Authorization: Bearer {{oai-key-1}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo",
"max_tokens": 30,
"stream": false,
"messages": [
{
"role": "user",
"content": "This is a test prompt."
}
]
}
###
# @name OpenAI -- Text Completions
POST https://api.openai.com/v1/completions
Authorization: Bearer {{oai-key-1}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo-instruct",
"max_tokens": 30,
"stream": false,
"prompt": "This is a test prompt where"
}
###
# @name OpenAI -- Create Embedding
POST https://api.openai.com/v1/embeddings
Authorization: Bearer {{oai-key-1}}
Content-Type: application/json
{
"model": "text-embedding-ada-002",
"input": "This is a test embedding input."
}
###
# @name OpenAI -- Get Organizations
GET https://api.openai.com/v1/organizations
Authorization: Bearer {{oai-key-1}}
###
# @name OpenAI -- Get Models
GET https://api.openai.com/v1/models
Authorization: Bearer {{oai-key-1}}
###
# @name Azure OpenAI -- Chat Completions
POST https://{{azu-resource-name}}.openai.azure.com/openai/deployments/{{azu-deployment-id}}/chat/completions?api-version=2023-09-01-preview
api-key: {{azu-key-1}}
Content-Type: application/json
{
"max_tokens": 1,
"stream": false,
"messages": [
{
"role": "user",
"content": "This is a test prompt."
}
]
}
###
# @name Proxy / OpenAI -- Get Models
GET {{proxy-host}}/proxy/openai/v1/models
Authorization: Bearer {{proxy-key}}
###
# @name Proxy / OpenAI -- Native Chat Completions
POST {{proxy-host}}/proxy/openai/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-4-1106-preview",
"max_tokens": 20,
"stream": true,
"temperature": 1,
"seed": 123,
"messages": [
{
"role": "user",
"content": "phrase one"
}
]
}
###
# @name Proxy / OpenAI -- Native Text Completions
POST {{proxy-host}}/proxy/openai/v1/turbo-instruct/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo-instruct",
"max_tokens": 20,
"temperature": 0,
"prompt": "Genshin Impact is a game about",
"stream": false
}
###
# @name Proxy / OpenAI -- Chat-to-Text API Translation
# Accepts a chat completion request and reformats it to work with the text completion API. `model` is ignored.
POST {{proxy-host}}/proxy/openai/turbo-instruct/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-4",
"max_tokens": 20,
"stream": true,
"messages": [
{
"role": "user",
"content": "What is the name of the fourth president of the united states?"
},
{
"role": "assistant",
"content": "That would be George Washington."
},
{
"role": "user",
"content": "I don't think that's right..."
}
]
}
###
# @name Proxy / OpenAI -- Create Embedding
POST {{proxy-host}}/proxy/openai/embeddings
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "text-embedding-ada-002",
"input": "This is a test embedding input."
}
###
# @name Proxy / Anthropic -- Native Completion (old API)
POST {{proxy-host}}/proxy/anthropic/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-01-01
Content-Type: application/json
{
"model": "claude-v1.3",
"max_tokens_to_sample": 20,
"temperature": 0.2,
"stream": true,
"prompt": "What is genshin impact\n\n:Assistant:"
}
###
# @name Proxy / Anthropic -- Native Completion (2023-06-01 API)
POST {{proxy-host}}/proxy/anthropic/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-06-01
Content-Type: application/json
{
"model": "claude-v1.3",
"max_tokens_to_sample": 20,
"temperature": 0.2,
"stream": true,
"prompt": "What is genshin impact\n\n:Assistant:"
}
###
# @name Proxy / Anthropic -- OpenAI-to-Anthropic API Translation
POST {{proxy-host}}/proxy/anthropic/v1/chat/completions
Authorization: Bearer {{proxy-key}}
#anthropic-version: 2023-06-01
Content-Type: application/json
{
"model": "gpt-3.5-turbo",
"max_tokens": 20,
"stream": false,
"temperature": 0,
"messages": [
{
"role": "user",
"content": "What is genshin impact"
}
]
}
###
# @name Proxy / AWS Claude -- Native Completion
POST {{proxy-host}}/proxy/aws/claude/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-01-01
Content-Type: application/json
{
"model": "claude-v2",
"max_tokens_to_sample": 10,
"temperature": 0,
"stream": true,
"prompt": "What is genshin impact\n\n:Assistant:"
}
###
# @name Proxy / AWS Claude -- OpenAI-to-Anthropic API Translation
POST {{proxy-host}}/proxy/aws/claude/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo",
"max_tokens": 50,
"stream": true,
"messages": [
{
"role": "user",
"content": "What is genshin impact?"
}
]
}
###
# @name Proxy / GCP Claude -- Native Completion
POST {{proxy-host}}/proxy/gcp/claude/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-01-01
Content-Type: application/json
{
"model": "claude-v2",
"max_tokens_to_sample": 10,
"temperature": 0,
"stream": true,
"prompt": "What is genshin impact\n\n:Assistant:"
}
###
# @name Proxy / GCP Claude -- OpenAI-to-Anthropic API Translation
POST {{proxy-host}}/proxy/gcp/claude/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo",
"max_tokens": 50,
"stream": true,
"messages": [
{
"role": "user",
"content": "What is genshin impact?"
}
]
}
###
# @name Proxy / Azure OpenAI -- Native Chat Completions
POST {{proxy-host}}/proxy/azure/openai/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-4",
"max_tokens": 20,
"stream": true,
"temperature": 1,
"seed": 2,
"messages": [
{
"role": "user",
"content": "Hi what is the name of the fourth president of the united states?"
},
{
"role": "assistant",
"content": "That would be George Washington."
},
{
"role": "user",
"content": "That's not right."
}
]
}
###
# @name Proxy / Google AI -- OpenAI-to-Google AI API Translation
POST {{proxy-host}}/proxy/google-ai/v1/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-4",
"max_tokens": 42,
"messages": [
{
"role": "user",
"content": "Hi what is the name of the fourth president of the united states?"
}
]
}
+102
@@ -0,0 +1,102 @@
import Database from "better-sqlite3";
import { v4 as uuidv4 } from "uuid";
import { config } from "../src/config";
function generateRandomIP() {
return (
Math.floor(Math.random() * 255) +
"." +
Math.floor(Math.random() * 255) +
"." +
Math.floor(Math.random() * 255) +
"." +
Math.floor(Math.random() * 255)
);
}
function generateRandomDate() {
const end = new Date();
const start = new Date(end);
start.setDate(end.getDate() - 90);
const randomDate = new Date(
start.getTime() + Math.random() * (end.getTime() - start.getTime())
);
return randomDate.toISOString();
}
function generateMockSHA256() {
const characters = 'abcdef0123456789';
let hash = '';
for (let i = 0; i < 64; i++) {
const randomIndex = Math.floor(Math.random() * characters.length);
hash += characters[randomIndex];
}
return hash;
}
function getRandomModelFamily() {
const modelFamilies = [
"turbo",
"gpt4",
"gpt4-32k",
"gpt4-turbo",
"claude",
"claude-opus",
"gemini-pro",
"mistral-tiny",
"mistral-small",
"mistral-medium",
"mistral-large",
"aws-claude",
"aws-claude-opus",
"gcp-claude",
"gcp-claude-opus",
"azure-turbo",
"azure-gpt4",
"azure-gpt4-32k",
"azure-gpt4-turbo",
"dall-e",
"azure-dall-e",
];
return modelFamilies[Math.floor(Math.random() * modelFamilies.length)];
}
(async () => {
const db = new Database(config.sqliteDataPath);
const numRows = 100;
const insertStatement = db.prepare(`
INSERT INTO events (type, ip, date, model, family, hashes, userToken, inputTokens, outputTokens)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
`);
const users = Array.from({ length: 10 }, () => uuidv4());
function getRandomUser() {
return users[Math.floor(Math.random() * users.length)];
}
const transaction = db.transaction(() => {
for (let i = 0; i < numRows; i++) {
insertStatement.run(
"chat_completion",
generateRandomIP(),
generateRandomDate(),
getRandomModelFamily() + "-" + Math.floor(Math.random() * 100),
getRandomModelFamily(),
Array.from(
{ length: Math.floor(Math.random() * 10) },
generateMockSHA256
).join(","),
getRandomUser(),
Math.floor(Math.random() * 500),
Math.floor(Math.random() * 6000)
);
}
});
transaction();
console.log(`Inserted ${numRows} rows into the events table.`);
db.close();
})();
+118
@@ -0,0 +1,118 @@
// uses the aws sdk to sign a request, then uses axios to send it to the bedrock REST API manually
import axios from "axios";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID!;
const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY!;
// Copied from amazon bedrock docs
// List models
// ListFoundationModels
// Service: Amazon Bedrock
// List of Bedrock foundation models that you can use. For more information, see Foundation models in the
// Bedrock User Guide.
// Request Syntax
// GET /foundation-models?
// byCustomizationType=byCustomizationType&byInferenceType=byInferenceType&byOutputModality=byOutputModality&byProvider=byProvider
// HTTP/1.1
// URI Request Parameters
// The request uses the following URI parameters.
// byCustomizationType (p. 38)
// List by customization type.
// Valid Values: FINE_TUNING
// byInferenceType (p. 38)
// List by inference type.
// Valid Values: ON_DEMAND | PROVISIONED
// byOutputModality (p. 38)
// List by output modality type.
// Valid Values: TEXT | IMAGE | EMBEDDING
// byProvider (p. 38)
// A Bedrock model provider.
// Pattern: ^[a-z0-9-]{1,63}$
// Request Body
// The request does not have a request body
// Run inference on a text model
// Send an invoke request to run inference on a Titan Text G1 - Express model. We set the accept
// parameter to accept any content type in the response.
// POST https://bedrock.us-east-1.amazonaws.com/model/amazon.titan-text-express-v1/invoke
// -H accept: */*
// -H content-type: application/json
// Payload
// {"inputText": "Hello world"}
// Example response
// Response for the above request.
// -H content-type: application/json
// Payload
// <the model response>
const AMZ_REGION = "us-east-1";
const AMZ_HOST = "invoke-bedrock.us-east-1.amazonaws.com";
async function listModels() {
const httpRequest = new HttpRequest({
method: "GET",
protocol: "https:",
hostname: AMZ_HOST,
path: "/foundation-models",
headers: { ["Host"]: AMZ_HOST },
});
const signedRequest = await signRequest(httpRequest);
const response = await axios.get(
`https://${signedRequest.hostname}${signedRequest.path}`,
{ headers: signedRequest.headers }
);
console.log(response.data);
}
async function invokeModel() {
const model = "anthropic.claude-v1";
const httpRequest = new HttpRequest({
method: "POST",
protocol: "https:",
hostname: AMZ_HOST,
path: `/model/${model}/invoke`,
headers: {
["Host"]: AMZ_HOST,
["accept"]: "*/*",
["content-type"]: "application/json",
},
body: JSON.stringify({
temperature: 0.5,
prompt: "\n\nHuman:Hello world\n\nAssistant:",
max_tokens_to_sample: 10,
}),
});
console.log("httpRequest", httpRequest);
const signedRequest = await signRequest(httpRequest);
const response = await axios.post(
`https://${signedRequest.hostname}${signedRequest.path}`,
signedRequest.body,
{ headers: signedRequest.headers }
);
console.log(response.status);
console.log(response.headers);
console.log(response.data);
console.log("full url", response.request.res.responseUrl);
}
async function signRequest(request: HttpRequest) {
const signer = new SignatureV4({
sha256: Sha256,
credentials: {
accessKeyId: AWS_ACCESS_KEY_ID,
secretAccessKey: AWS_SECRET_ACCESS_KEY,
},
region: AMZ_REGION,
service: "bedrock",
});
return await signer.sign(request, { signingDate: new Date() });
}
// listModels();
// invokeModel();
+45
@@ -0,0 +1,45 @@
const axios = require("axios");
const concurrentRequests = 75;
const headers = {
Authorization: "Bearer test",
"Content-Type": "application/json",
};
const payload = {
model: "gpt-4",
max_tokens: 1,
stream: false,
messages: [{ role: "user", content: "Hi" }],
};
const makeRequest = async (i) => {
try {
const response = await axios.post(
"http://localhost:7860/proxy/google-ai/v1/chat/completions",
payload,
{ headers }
);
console.log(
`Req ${i} finished with status code ${response.status} and response:`,
response.data
);
} catch (error) {
// log the response body if the server returned one
const msg = error.response?.data;
console.error(`Error in req ${i}:`, error.message, msg || "");
}
};
const executeRequestsConcurrently = () => {
const promises = [];
for (let i = 1; i <= concurrentRequests; i++) {
console.log(`Starting request ${i}`);
promises.push(makeRequest(i));
}
Promise.all(promises).then(() => {
console.log("All requests finished");
});
};
executeRequestsConcurrently();
+53
@@ -0,0 +1,53 @@
const axios = require("axios");
function randomInteger(max) {
return Math.floor(Math.random() * max + 1);
}
async function testQueue() {
const requests = Array(10).fill(undefined).map(async function() {
const maxTokens = randomInteger(2000);
const headers = {
"Authorization": "Bearer test",
"Content-Type": "application/json",
"X-Forwarded-For": `${randomInteger(255)}.${randomInteger(255)}.${randomInteger(255)}.${randomInteger(255)}`,
};
const payload = {
model: "gpt-4o-mini-2024-07-18",
max_tokens: 20 + maxTokens,
stream: false,
messages: [{role: "user", content: "You are being benchmarked regarding your reliability at outputting exact, machine-comprehensible data. Output the sentence \"The quick brown fox jumps over the lazy dog.\" Do not precede it with quotemarks or any form of preamble, and do not output anything after the sentence."}],
temperature: 0,
};
try {
const response = await axios.post(
"http://localhost:7860/proxy/openai/v1/chat/completions",
payload,
{ headers }
);
if (response.status !== 200) {
console.error(`Request ${maxTokens} finished with status code ${response.status} and response`, response.data);
return;
}
const content = response.data.choices[0].message.content;
console.log(
`Request ${maxTokens} `,
content === "The quick brown fox jumps over the lazy dog." ? "OK" : `mangled: ${content}`
);
} catch (error) {
const msg = error.response?.data;
console.error(`Error in req ${maxTokens}:`, error.message, msg || "");
}
});
await Promise.all(requests);
console.log("All requests finished");
}
testQueue();
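Note that the randomized X-Forwarded-For header above gives every request a distinct client identity, so per-IP rate limiting and queueing treat them as separate users; this only has an effect if the proxy honors the header (e.g. TRUSTED_PROXIES=1, an assumption about the test setup). To exercise single-user ordering instead, pin the header:
headers["X-Forwarded-For"] = "10.0.0.1"; // one synthetic client for all requests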
+49
View File
@@ -0,0 +1,49 @@
import { Router } from "express";
import { z } from "zod";
import { encodeCursor, decodeCursor } from "../../shared/utils";
import { eventsRepo } from "../../shared/database/repos/event";
const router = Router();
/**
* Returns events for the given user token.
* GET /admin/events/:token
* @query first - The number of events to return.
* @query after - The cursor to start returning events from (exclusive).
*/
router.get("/:token", (req, res) => {
const schema = z.object({
token: z.string(),
first: z.coerce.number().int().positive().max(200).default(25),
after: z
.string()
.optional()
.transform((v) => {
try {
return decodeCursor(v);
} catch {
return null;
}
})
.nullable(),
sort: z.string().optional(),
});
const args = schema.safeParse({ ...req.params, ...req.query });
if (!args.success) {
return res.status(400).json({ error: args.error });
}
const data = eventsRepo
.getUserEvents(args.data.token, {
limit: args.data.first,
cursor: args.data.after,
})
.map((e) => ({ node: e, cursor: encodeCursor(e.date) }));
res.json({
data,
endCursor: data[data.length - 1]?.cursor,
});
});
export { router as eventsApiRouter };
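A hedged sketch of a client for this endpoint, feeding each page's endCursor back as the after parameter until an empty page comes back (the localhost URL and Bearer admin-key auth are assumptions based on the authorize({ via: "header" }) middleware):
async function fetchAllEvents(token: string) {
  const events: unknown[] = [];
  let after: string | undefined;
  while (true) {
    const qs = new URLSearchParams({ first: "200" });
    if (after) qs.set("after", after);
    const res = await fetch(`http://localhost:7860/admin/events/${token}?${qs}`, {
      headers: { Authorization: "Bearer <ADMIN_KEY>" }, // assumption
    });
    const { data, endCursor } = await res.json();
    events.push(...data.map((edge: { node: unknown }) => edge.node));
    if (!data.length || !endCursor) break;
    after = endCursor;
  }
  return events;
}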
+26 -4
View File
@@ -1,15 +1,31 @@
import express, { Router } from "express";
import { authorize } from "./auth";
import { createWhitelistMiddleware } from "../shared/cidr";
import { HttpError } from "../shared/errors";
import { injectCsrfToken, checkCsrfToken } from "../shared/inject-csrf";
import { injectLocals } from "../shared/inject-locals";
import { withSession } from "../shared/with-session";
import { injectCsrfToken, checkCsrfToken } from "../shared/inject-csrf";
import { config } from "../config";
import { renderPage } from "../info-page";
import { buildInfo } from "../service-info";
import { authorize } from "./auth";
import { loginRouter } from "./login";
import { usersApiRouter as apiRouter } from "./api/users";
import { eventsApiRouter } from "./api/events";
import { usersApiRouter } from "./api/users";
import { usersWebRouter as webRouter } from "./web/manage";
import { logger } from "../logger";
const adminRouter = Router();
const whitelist = createWhitelistMiddleware(
"ADMIN_WHITELIST",
config.adminWhitelist
);
if (!whitelist.ranges.length && config.adminKey?.length) {
logger.error("ADMIN_WHITELIST is empty. No admin requests will be allowed. Set 0.0.0.0/0 to allow all.");
}
adminRouter.use(whitelist);
adminRouter.use(
express.json({ limit: "20mb" }),
express.urlencoded({ extended: true, limit: "20mb" })
@@ -17,12 +33,18 @@ adminRouter.use(
adminRouter.use(withSession);
adminRouter.use(injectCsrfToken);
adminRouter.use("/users", authorize({ via: "header" }), apiRouter);
adminRouter.use("/users", authorize({ via: "header" }), usersApiRouter);
adminRouter.use("/events", authorize({ via: "header" }), eventsApiRouter);
adminRouter.use(checkCsrfToken);
adminRouter.use(injectLocals);
adminRouter.use("/", loginRouter);
adminRouter.use("/manage", authorize({ via: "cookie" }), webRouter);
adminRouter.use("/service-info", authorize({ via: "cookie" }), (req, res) => {
return res.send(
renderPage(buildInfo(req.protocol + "://" + req.get("host"), true))
);
});
adminRouter.use(
(
+212 -7
View File
@@ -1,4 +1,5 @@
import { Router } from "express";
import ipaddr from "ipaddr.js";
import multer from "multer";
import { z } from "zod";
import { config } from "../../config";
@@ -6,7 +7,7 @@ import { HttpError } from "../../shared/errors";
import * as userStore from "../../shared/users/user-store";
import { parseSort, sortBy, paginate } from "../../shared/utils";
import { keyPool } from "../../shared/key-management";
import { MODEL_FAMILIES } from "../../shared/models";
import { LLMService, MODEL_FAMILIES } from "../../shared/models";
import { getTokenCostUsd, prettyTokens } from "../../shared/stats";
import {
User,
@@ -14,6 +15,9 @@ import {
UserSchema,
UserTokenCounts,
} from "../../shared/users/schema";
import { getLastNImages } from "../../shared/file-storage/image-history";
import { blacklists, parseCidrs, whitelists } from "../../shared/cidr";
import { invalidatePowChallenges } from "../../user/web/pow-captcha";
const router = Router();
@@ -39,6 +43,74 @@ router.get("/create-user", (req, res) => {
});
});
router.get("/anti-abuse", (_req, res) => {
const wl = [...whitelists.entries()];
const bl = [...blacklists.entries()];
res.render("admin_anti-abuse", {
captchaMode: config.captchaMode,
difficulty: config.powDifficultyLevel,
whitelists: wl.map((w) => ({
name: w[0],
mode: "whitelist",
ranges: w[1].ranges,
})),
blacklists: bl.map((b) => ({
name: b[0],
mode: "blacklist",
ranges: b[1].ranges,
})),
});
});
router.post("/cidr", (req, res) => {
const body = req.body;
const valid = z
.object({
action: z.enum(["add", "remove"]),
mode: z.enum(["whitelist", "blacklist"]),
name: z.string().min(1),
mask: z.string().min(1),
})
.safeParse(body);
if (!valid.success) {
throw new HttpError(
400,
valid.error.issues.flatMap((issue) => issue.message).join(", ")
);
}
const { mode, name, mask } = valid.data;
const list = (mode === "whitelist" ? whitelists : blacklists).get(name);
if (!list) {
throw new HttpError(404, "List not found");
}
if (valid.data.action === "remove") {
const newRanges = new Set(list.ranges);
newRanges.delete(mask);
list.updateRanges([...newRanges]);
req.session.flash = {
type: "success",
message: `${mode} ${name} updated`,
};
return res.redirect("/admin/manage/anti-abuse");
} else if (valid.data.action === "add") {
const result = parseCidrs(mask);
if (result.length === 0) {
throw new HttpError(400, "Invalid CIDR mask");
}
const newRanges = new Set([...list.ranges, mask]);
list.updateRanges([...newRanges]);
req.session.flash = {
type: "success",
message: `${mode} ${name} updated`,
};
return res.redirect("/admin/manage/anti-abuse");
}
});
router.post("/create-user", (req, res) => {
const body = req.body;
@@ -196,13 +268,21 @@ router.post("/maintenance", (req, res) => {
let flash = { type: "", message: "" };
switch (action) {
case "recheck": {
keyPool.recheck("openai");
keyPool.recheck("anthropic");
const size = keyPool
const checkable: LLMService[] = [
"openai",
"anthropic",
"aws",
"gcp",
"azure",
"google-ai"
];
checkable.forEach((s) => keyPool.recheck(s));
const keyCount = keyPool
.list()
.filter((k) => k.service !== "google-palm").length;
.filter((k) => checkable.includes(k.service)).length;
flash.type = "success";
flash.message = `Scheduled recheck of ${size} keys for OpenAI and Anthropic.`;
flash.message = `Scheduled recheck of ${keyCount} keys.`;
break;
}
case "resetQuotas": {
@@ -220,14 +300,139 @@ router.post("/maintenance", (req, res) => {
flash.message = `All users' token usage records reset.`;
break;
}
case "downloadImageMetadata": {
const data = JSON.stringify(
{
exportedAt: new Date().toISOString(),
generations: getLastNImages(),
},
null,
2
);
res.setHeader(
"Content-Disposition",
`attachment; filename=image-metadata-${new Date().toISOString()}.json`
);
res.setHeader("Content-Type", "application/json");
return res.send(data);
}
case "expireTempTokens": {
const users = userStore.getUsers();
const temps = users.filter((u) => u.type === "temporary");
temps.forEach((user) => {
user.expiresAt = Date.now();
user.disabledReason = "Admin forced expiration.";
userStore.upsertUser(user);
});
invalidatePowChallenges();
flash.type = "success";
flash.message = `${temps.length} temporary users marked for expiration.`;
break;
}
case "cleanTempTokens": {
const users = userStore.getUsers();
const disabledTempUsers = users.filter(
(u) => u.type === "temporary" && u.expiresAt && u.expiresAt < Date.now()
);
disabledTempUsers.forEach((user) => {
user.disabledAt = 1; // will be cleaned up by the next cron job
userStore.upsertUser(user);
});
flash.type = "success";
flash.message = `${disabledTempUsers.length} disabled temporary users marked for cleanup.`;
break;
}
case "setDifficulty": {
const selected = req.body["pow-difficulty"];
const valid = ["low", "medium", "high", "extreme"];
const isNumber = Number.isInteger(Number(selected));
if (!selected || (!valid.includes(selected) && !isNumber)) {
throw new HttpError(400, "Invalid difficulty " + selected);
}
config.powDifficultyLevel = isNumber ? Number(selected) : selected;
invalidatePowChallenges();
break;
}
case "generateTempIpReport": {
const tempUsers = userStore
.getUsers()
.filter((u) => u.type === "temporary");
const ipv4RangeMap = new Map<string, Set<string>>();
const ipv6RangeMap = new Map<string, Set<string>>();
tempUsers.forEach((u) => {
u.ip.forEach((ip) => {
try {
const parsed = ipaddr.parse(ip);
if (parsed.kind() === "ipv4") {
const subnet =
parsed.toNormalizedString().split(".").slice(0, 3).join(".") +
".0/24";
const userSet = ipv4RangeMap.get(subnet) || new Set();
userSet.add(u.token);
ipv4RangeMap.set(subnet, userSet);
} else if (parsed.kind() === "ipv6") {
const subnet =
parsed.toNormalizedString().split(":").slice(0, 4).join(":") +
"::/48";
const userSet = ipv6RangeMap.get(subnet) || new Set();
userSet.add(u.token);
ipv6RangeMap.set(subnet, userSet);
}
} catch (e) {
req.log.warn(
{ ip, error: e.message },
"Invalid IP address; skipping"
);
}
});
});
const ipv4Ranges = Array.from(ipv4RangeMap.entries())
.map(([subnet, userSet]) => ({
subnet,
distinctTokens: userSet.size,
}))
.sort((a, b) => b.distinctTokens - a.distinctTokens);
const ipv6Ranges = Array.from(ipv6RangeMap.entries())
.map(([subnet, userSet]) => ({
subnet,
distinctTokens: userSet.size,
}))
.sort((a, b) => {
if (a.distinctTokens === b.distinctTokens) {
return a.subnet.localeCompare(b.subnet);
}
return b.distinctTokens - a.distinctTokens;
});
const data = JSON.stringify(
{
exportedAt: new Date().toISOString(),
ipv4Ranges,
ipv6Ranges,
},
null,
2
);
res.setHeader(
"Content-Disposition",
`attachment; filename=temp-ip-report-${new Date().toISOString()}.json`
);
res.setHeader("Content-Type", "application/json");
return res.send(data);
}
default: {
throw new HttpError(400, "Invalid action");
}
}
req.session.flash = flash;
const referer = req.get("referer");
return res.redirect(`/admin/manage`);
return res.redirect(referer || "/admin/manage");
});
router.get("/download-stats", (_req, res) => {
+160
View File
@@ -0,0 +1,160 @@
<%- include("partials/shared_header", { title: "Proof of Work Verification Settings - OAI Reverse Proxy Admin" }) %>
<style>
details {
margin-top: 1em;
}
details summary {
font-weight: bold;
cursor: pointer;
}
details p {
margin-left: 1em;
}
#token-manage {
display: flex;
width: 100%;
}
#token-manage button {
flex-grow: 1;
margin: 0 0.5em;
}
</style>
<h1>Abuse Mitigation Settings</h1>
<div>
<h2>Proof-of-Work Verification</h2>
<p>
The Proof-of-Work difficulty level determines how much work a client must perform to earn a temporary user token.
Higher difficulty levels help mitigate abuse by making token generation more expensive for attackers, but they also
make it slower for legitimate users. Refer to the documentation for guidance.
</p>
<%if (captchaMode === "none") { %>
<p>
<strong>PoW verification is not enabled. Set <code>CAPTCHA_MODE=proof_of_work</code> to enable.</strong>
</p>
<% } else { %>
<h3>Difficulty Level</h3>
<div>
<label for="difficulty">Difficulty Level:</label>
<select name="difficulty" id="difficulty" onchange="difficultyChanged(event)">
<option value="low">Low</option>
<option value="medium">Medium</option>
<option value="high">High</option>
<option value="extreme">Extreme</option>
<option value="custom">Custom</option>
</select>
<div id="custom-difficulty-container" style="display: none">
<label for="customDifficulty">Hashes required (average):</label>
<input type="number" id="customDifficulty" value="0" min="1" max="1000000000" />
</div>
<button onclick='doAction("setDifficulty")'>Update Difficulty</button>
</div>
<div><span id="currentDifficulty">Current Difficulty: <%= difficulty %></span></div>
<% } %>
<form id="maintenanceForm" action="/admin/manage/maintenance" method="post">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<input id="hiddenAction" type="hidden" name="action" value="" />
<input id="hiddenDifficulty" type="hidden" name="pow-difficulty" value="" />
</form>
<h3>Manage Temporary User Tokens</h3>
<div id="token-manage">
<p><button onclick='doAction("expireTempTokens")'>🕒 Expire All Temp Tokens</button></p>
<p><button onclick='doAction("cleanTempTokens")'>🧹 Delete Expired Temp Tokens</button></p>
<p><button onclick='doAction("generateTempIpReport")'>📊 Generate Temp Token IP Report</button></p>
</div>
</div>
<div>
<h2>IP Whitelists and Blacklists</h2>
<p>
You can specify IP ranges to whitelist or blacklist from accessing the proxy. Entries can be specified as single
addresses or
<a href="https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation">CIDR notation</a>. IPv6 is
supported but not recommended for use with the current version of the proxy.
</p>
<p>
<strong>Note:</strong> Changes here are not persisted across server restarts. If you want to make changes permanent,
you can copy the values to your deployment configuration.
</p>
<% for (let i = 0; i < whitelists.length; i++) { %>
<%- include("partials/admin-cidr-widget", { list: whitelists[i] }) %>
<% } %>
<% for (let i = 0; i < blacklists.length; i++) { %>
<%- include("partials/admin-cidr-widget", { list: blacklists[i] }) %>
<% } %>
<form action="/admin/manage/cidr" method="post" id="cidrForm">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<input type="hidden" name="action" value="add" />
<input type="hidden" name="name" value="" />
<input type="hidden" name="mode" value="" />
<input type="hidden" name="mask" value="" />
</form>
<details>
<summary>Copy environment variables</summary>
<p>
If you have made changes with the UI, you can copy the values below to your deployment configuration to persist
them across server restarts.
</p>
<pre>
<% for (let i = 0; i < whitelists.length; i++) { %><%= whitelists[i].name %>=<%= whitelists[i].ranges.join(",") %><% } %>
<% for (let i = 0; i < blacklists.length; i++) { %><%= blacklists[i].name %>=<%= blacklists[i].ranges.join(",") %><% } %>
</pre>
</details>
</div>
<script>
function difficultyChanged(event) {
const value = event.target.value;
if (value === "custom") {
document.getElementById("custom-difficulty-container").style.display = "block";
} else {
document.getElementById("custom-difficulty-container").style.display = "none";
}
}
function doAction(action) {
document.getElementById("hiddenAction").value = action;
if (action === "setDifficulty") {
const selected = document.getElementById("difficulty").value;
const hiddenDifficulty = document.getElementById("hiddenDifficulty");
if (selected === "custom") {
hiddenDifficulty.value = document.getElementById("customDifficulty").value;
} else {
hiddenDifficulty.value = selected;
}
}
document.getElementById("maintenanceForm").submit();
}
function onAddCidr(event) {
const list = event.target.dataset;
const newMask = prompt("Enter the IP or CIDR range to add to the list:");
if (!newMask) {
return;
}
const form = document.getElementById("cidrForm");
form["action"].value = "add";
form["name"].value = list.name;
form["mode"].value = list.mode;
form["mask"].value = newMask;
form.submit();
}
function onRemoveCidr(event) {
const list = event.target.dataset;
const removeMask = event.target.dataset.mask;
if (!removeMask) {
return;
}
const form = document.getElementById("cidrForm");
form["action"].value = "remove";
form["name"].value = list.name;
form["mode"].value = list.mode;
form["mask"].value = removeMask;
form.submit();
}
</script>
<%- include("partials/admin-footer") %>
+2 -3
View File
@@ -51,9 +51,8 @@
<legend>Temporary User Options</legend>
<div class="temporary-user-fieldset">
<p class="full-width">
Temporary users will be disabled after the specified duration, and their records will be deleted 72 hours after that.
These options apply only to new
temporary users; existing ones use whatever options were in effect when they were created.
Temporary users will be disabled after the specified duration, and their records will be permanently deleted after some time.
These options apply only to new temporary users; existing ones use whatever options were in effect when they were created.
</p>
<label for="temporaryUserDuration" class="full-width">Access duration (in minutes)</label>
<input type="number" name="temporaryUserDuration" id="temporaryUserDuration" value="60" class="full-width" />
+27 -36
View File
@@ -5,18 +5,6 @@
flex-direction: column;
}
#statsForm div {
display: flex;
flex-direction: row;
margin-bottom: 0.5em;
}
#statsForm div label {
width: 6em;
text-align: right;
margin-right: 1em;
}
#statsForm ul {
margin: 0;
padding-left: 2em;
@@ -33,17 +21,17 @@
}
</style>
<h1>Download Stats</h1>
<p>
Download usage statistics to a Markdown document. You can paste this into a service like Rentry.org to share it.
</p>
<p>Download usage statistics to a Markdown document. You can paste this into a service like Rentry.org to share it.</p>
<div>
<h3>Options</h3>
<form id="statsForm" action="/admin/manage/generate-stats" method="post"
style="display: flex; flex-direction: column;">
<form
id="statsForm"
action="/admin/manage/generate-stats"
method="post"
style="display: flex; flex-direction: column">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<div>
<label for="anon">Anonymize</label>
<input id="anon" type="checkbox" name="anon" value="true" />
<label for="anon"><input id="anon" type="checkbox" name="anon" value="true" /> <span>Anonymize</span></label>
</div>
<div>
<label for="sort">Sort</label>
@@ -64,11 +52,12 @@
</select>
</div>
<div>
<label for="format">Custom Format <ul>
<li><code>{{header}}</code></li>
<li><code>{{stats}}</code></li>
<li><code>{{time}}</code></li>
</ul></label>
<label for="format">Custom Format</label>
<ul>
<li><code>{{header}}</code></li>
<li><code>{{stats}}</code></li>
<li><code>{{time}}</code></li>
</ul>
<textarea id="format" name="format" rows="10" cols="50" placeholder="{{stats}}">
# Stats
{{header}}
@@ -115,33 +104,35 @@
loadDefaults();
async function fetchAndCopy() {
const form = document.getElementById('statsForm');
const form = document.getElementById("statsForm");
const formData = new FormData(form);
const response = await fetch(form.action, {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
credentials: 'same-origin',
method: "POST",
headers: { "Content-Type": "application/x-www-form-urlencoded" },
credentials: "same-origin",
body: new URLSearchParams(formData),
});
if (response.ok) {
const content = await response.text();
copyToClipboard(content);
} else {
throw new Error('Failed to fetch generated stats. Try reloading the page.');
throw new Error("Failed to fetch generated stats. Try reloading the page.");
}
}
function copyToClipboard(text) {
navigator.clipboard.writeText(text).then(() => {
alert('Copied to clipboard');
}).catch(err => {
alert('Failed to copy to clipboard. Try downloading the file instead.');
});
navigator.clipboard
.writeText(text)
.then(() => {
alert("Copied to clipboard");
})
.catch((err) => {
alert("Failed to copy to clipboard. Try downloading the file instead.");
});
}
document.getElementById('copyButton').addEventListener('click', fetchAndCopy);
document.getElementById("copyButton").addEventListener("click", fetchAndCopy);
</script>
<%- include("partials/admin-footer") %>
+17 -2
View File
@@ -1,5 +1,11 @@
<%- include("partials/shared_header", { title: "OAI Reverse Proxy Admin" }) %>
<h1>OAI Reverse Proxy Admin</h1>
<% if (!usersEnabled) { %>
<p style="color: red; background-color: #eedddd; padding: 1em">
<strong>🚨 <code>user_token</code> gatekeeper is not enabled.</strong><br />
<br />None of the user management features will do anything.
</p>
<% } %>
<% if (!persistenceEnabled) { %>
<p style="color: red; background-color: #eedddd; padding: 1em">
<strong>⚠️ Users will be lost when the server restarts because persistence is not configured.</strong><br />
@@ -19,12 +25,14 @@
<li><a href="/admin/manage/import-users">Import Users</a></li>
<li><a href="/admin/manage/export-users">Export Users</a></li>
<li><a href="/admin/manage/download-stats">Download Rentry Stats</a>
<li><a href="/admin/manage/anti-abuse">Abuse Mitigation Settings</a></li>
<li><a href="/admin/service-info">Service Info</a></li>
</ul>
<h3>Maintenance</h3>
<form id="maintenanceForm" action="/admin/manage/maintenance" method="post">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<input id="hiddenAction" type="hidden" name="action" value="" />
<div display="flex" flex-direction="column">
<div>
<fieldset>
<legend>Key Recheck</legend>
<button id="recheck-keys" type="button" onclick="submitForm('recheck')">Force Key Recheck</button>
@@ -35,7 +43,7 @@
<legend>Bulk Quota Management</legend>
<p>
<button id="refresh-quotas" type="button" onclick="submitForm('resetQuotas')">Refresh All Quotas</button>
Resets all users' quotas to the values set in the <code>TOKEN_QUOTA_*</code> environment variables.
Immediately refreshes all users' quotas by the configured amounts.
</p>
<p>
<button id="clear-token-counts" type="button" onclick="submitForm('resetCounts')">Clear All Token Counts</button>
@@ -43,6 +51,13 @@
</p>
</fieldset>
<% } %>
<% if (imageGenerationEnabled) { %>
<fieldset>
<legend>Image Generation</legend>
<button id="download-image-metadata" type="button" onclick="submitForm('downloadImageMetadata')">Download Image Metadata</button>
<label for="download-image-metadata">Downloads a metadata file containing URL, prompt, and truncated user token for all cached images.</label>
</fieldset>
<% } %>
</div>
</form>
+2 -3
View File
@@ -4,9 +4,8 @@
<% if (users.length === 0) { %>
<p>No users found.</p>
<% } else { %>
<input type="checkbox" id="toggle-nicknames" onchange="toggleNicknames()" />
<label for="toggle-nicknames">Show Nicknames</label>
<table>
<label for="toggle-nicknames"><input type="checkbox" id="toggle-nicknames" onchange="toggleNicknames()" /> Show Nicknames</label>
<table class="striped full-width">
<thead>
<tr>
<th>User</th>
+33 -14
View File
@@ -55,8 +55,9 @@
<td><%- user.disabledReason %></td>
<% if (user.disabledAt) { %>
<td class="actions">
<a title="Edit" id="edit-disabledReason" href="#" data-field="disabledReason"
data-token="<%= user.token %>">✏️</a>
<a title="Edit" id="edit-disabledReason" href="#" data-field="disabledReason" data-token="<%= user.token %>"
>✏️</a
>
</td>
<% } %>
</tr>
@@ -72,7 +73,8 @@
<td colspan="2"><%- include("partials/shared_user_ip_list", { user, shouldRedact: false }) %></td>
</tr>
<tr>
<th scope="row">Admin Note <span title="Unlike nickname, this is not visible to or editable by the user">🔒</span>
<th scope="row">
Admin Note <span title="Unlike nickname, this is not visible to or editable by the user">🔒</span>
</th>
<td><%- user.adminNote ?? "none" %></td>
<td class="actions">
@@ -85,14 +87,24 @@
<td colspan="2"><%- user.expiresAt %></td>
</tr>
<% } %>
<% if (user.meta) { %>
<tr>
<th scope="row">Meta</th>
<td colspan="2"><%- JSON.stringify(user.meta) %></td>
</tr>
<% } %>
</tbody>
</table>
<form style="display:none" id="current-values">
<form style="display: none" id="current-values">
<input type="hidden" name="token" value="<%- user.token %>" />
<% ["nickname", "type", "disabledAt", "disabledReason", "maxIps", "adminNote"].forEach(function (key) { %>
<input type="hidden" name="<%- key %>" value="<%- user[key] %>" />
<% }); %>
<!-- tokenRefresh_ keys are dynamically generated -->
<% Object.entries(quota).forEach(([family]) => { %>
<input type="hidden" name="tokenRefresh_<%- family %>" value="<%- user.tokenRefresh[family] || quota[family] %>" />
<% }); %>
</form>
<h3>Quota Information</h3>
@@ -102,7 +114,8 @@
<input type="hidden" name="_csrf" value="<%- csrfToken %>" />
<button type="submit" class="btn btn-primary">Refresh Quotas for User</button>
</form>
<% } %> <%- include("partials/shared_quota-info", { quota, user }) %>
<% } %>
<%- include("partials/shared_quota-info", { quota, user, showRefreshEdit: true }) %>
<p><a href="/admin/manage/list-users">Back to User List</a></p>
@@ -113,18 +126,25 @@
const token = a.dataset.token;
const field = a.dataset.field;
const existingValue = document.querySelector(`#current-values input[name=${field}]`).value;
let value = prompt(`Enter new value for '${field}'':`, existingValue);
let value = prompt(`Enter new value for '${field}':`, existingValue);
if (value !== null) {
if (value === "") {
value = null;
}
const payload = { _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") };
if (field.startsWith("tokenRefresh_")) {
const family = field.slice("tokenRefresh_".length);
payload.tokenRefresh = { [family]: Number(value) };
} else {
payload[field] = value;
}
fetch(`/admin/manage/edit-user/${token}`, {
method: "POST",
credentials: "same-origin",
body: JSON.stringify({
[field]: value,
_csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content"),
}),
body: JSON.stringify(payload),
headers: { "Content-Type": "application/json", Accept: "application/json" },
})
.then((res) => Promise.all([res.ok, res.json()]))
@@ -132,9 +152,7 @@
const url = new URL(window.location.href);
const params = new URLSearchParams();
if (!ok) {
params.set("flash", `error: ${json.error.message}`);
} else {
params.set("flash", `success: User's ${field} updated.`);
alert(`Failed to edit user: ${json.message}`);
}
url.search = params.toString();
window.location.assign(url);
@@ -144,4 +162,5 @@
});
</script>
<%- include("partials/admin-ban-xhr-script") %> <%- include("partials/admin-footer") %>
<%- include("partials/admin-ban-xhr-script") %>
<%- include("partials/admin-footer") %>
@@ -0,0 +1,13 @@
<h3>
<%= list.name %>
(<%= list.mode %>)
</h3>
<ul>
<% list.ranges.forEach(function(mask) { %>
<li>
<%= mask %>
<button class="remove" data-mode="<%= list.mode %>" data-name="<%= list.name %>" data-mask="<%= mask %>" onclick="onRemoveCidr(event)">Remove</button>
</li>
<% }); %>
</ul>
<button class="add" data-mode="<%= list.mode %>" data-name="<%= list.name %>" onclick="onAddCidr(event)">Add</button>
+511 -83
View File
@@ -1,25 +1,68 @@
import crypto from "crypto";
import dotenv from "dotenv";
import type firebase from "firebase-admin";
import path from "path";
import pino from "pino";
import type { ModelFamily } from "./shared/models";
import type { LLMService, ModelFamily } from "./shared/models";
import { MODEL_FAMILIES } from "./shared/models";
dotenv.config();
// Can't import the usual logger here because it itself needs the config.
const startupLogger = pino({ level: "debug" }).child({ module: "startup" });
const isDev = process.env.NODE_ENV !== "production";
type PromptLoggingBackend = "google_sheets";
export const DATA_DIR = path.join(__dirname, "..", "data");
export const USER_ASSETS_DIR = path.join(DATA_DIR, "user-files");
type Config = {
/** The port the proxy server will listen on. */
port: number;
/** The network interface the proxy server will listen on. */
bindAddress: string;
/** Comma-delimited list of OpenAI API keys. */
openaiKey?: string;
/** Comma-delimited list of Anthropic API keys. */
anthropicKey?: string;
/** Comma-delimited list of Google PaLM API keys. */
googlePalmKey?: string;
/**
* Comma-delimited list of Google AI API keys. Note that these are not the
* same as the GCP keys/credentials used for Vertex AI; the models are the
* same but the APIs are different. Vertex is the GCP product for enterprise.
**/
googleAIKey?: string;
/**
* Comma-delimited list of Mistral AI API keys.
*/
mistralAIKey?: string;
/**
* Comma-delimited list of AWS credentials. Each credential item should be a
* colon-delimited list of access key, secret key, and AWS region.
*
* The credentials must have access to the actions `bedrock:InvokeModel` and
* `bedrock:InvokeModelWithResponseStream`. You must also have already
* provisioned the necessary models in your AWS account, on the specific
* regions specified for each credential. Models are region-specific.
*
* @example `AWS_CREDENTIALS=access_key_1:secret_key_1:us-east-1,access_key_2:secret_key_2:us-west-2`
*/
awsCredentials?: string;
/**
* Comma-delimited list of GCP credentials. Each credential item should be a
* colon-delimited list of access key, secret key, and GCP region.
*
* @example `GCP_CREDENTIALS=project1:1@1.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----,project2:2@2.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----`
*/
gcpCredentials?: string;
/**
* Comma-delimited list of Azure OpenAI credentials. Each credential item
* should be a colon-delimited list of Azure resource name, deployment ID, and
* API key.
*
* The resource name is the subdomain in your Azure OpenAI deployment's URL,
* e.g. `https://resource-name.openai.azure.com`.
*
* @example `AZURE_CREDENTIALS=resource_name_1:deployment_id_1:api_key_1,resource_name_2:deployment_id_2:api_key_2`
*/
azureCredentials?: string;
/**
* The proxy key to require for requests. Only applicable if the user
* management mode is set to 'proxy_key', and required if so.
@@ -30,6 +73,11 @@ type Config = {
* management mode is set to 'user_token'.
*/
adminKey?: string;
/**
* The password required to view the service info/status page. If not set, the
* info page will be publicly accessible.
*/
serviceInfoPassword?: string;
/**
* Which user management mode to use.
* - `none`: No user management. Proxy is open to all requests with basic
@@ -57,13 +105,81 @@ type Config = {
*/
firebaseKey?: string;
/**
* Maximum number of IPs per user, after which their token is disabled.
* Maximum number of IPs allowed per user token.
* Users with the manually-assigned `special` role are exempt from this limit.
* - Defaults to 0, which means that users are not IP-limited.
*/
maxIpsPerUser: number;
/** Per-IP limit for requests per minute to OpenAI's completions endpoint. */
modelRateLimit: number;
/**
* Whether a user token should be automatically disabled if it exceeds the
* `maxIpsPerUser` limit, or if only connections from new IPs should be rejected.
*/
maxIpsAutoBan: boolean;
/**
* Which captcha verification mode to use. Requires `user_token` gatekeeper.
* Allows users to automatically obtain a token by solving a captcha.
* - `none`: No captcha verification; tokens are issued manually.
* - `proof_of_work`: Users must solve an Argon2 proof of work to obtain a
* temporary user token valid for a limited period.
*/
captchaMode: "none" | "proof_of_work";
/**
* Duration (in hours) for which a PoW-issued temporary user token is valid.
*/
powTokenHours: number;
/**
* The maximum number of IPs from which a single temporary user token can be
* used. Upon reaching the limit, the `maxIpsAutoBan` behavior is triggered.
*/
powTokenMaxIps: number;
/**
* Difficulty level for the proof-of-work challenge.
* - `low`: 200 iterations
* - `medium`: 900 iterations
* - `high`: 1900 iterations
* - `extreme`: 4000 iterations
* - `number`: A custom number of iterations to use.
*
* Difficulty level only affects the number of iterations used in the PoW,
* not the complexity of the hash itself. Therefore, the average time-to-solve
* will scale linearly with the number of iterations.
*
* Refer to docs/proof-of-work.md for guidance and hashrate benchmarks.
*/
powDifficultyLevel: "low" | "medium" | "high" | "extreme" | number;
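// Editorial note (not from the referenced docs): since time-to-solve scales
// linearly with iterations, at an assumed ~100 hashes/sec on a slow device,
// "low" (200 iterations) averages ~2s and "extreme" (4000) averages ~40s.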
/**
* Duration (in minutes) before a PoW challenge expires. Users' browsers must
* solve the challenge within this time frame or it will be rejected. Should
* be kept somewhat low to prevent abusive clients from working on many
* challenges in parallel, but you may need to increase this value at higher
* difficulty levels, or older devices may not be able to solve the challenge
* in time.
*
* Defaults to 30 minutes.
*/
powChallengeTimeout: number;
/**
* Duration (in hours) before expired temporary user tokens are purged from
* the user database. Users can refresh expired tokens by solving a faster PoW
* challenge as long as the original token has not been purged. Once purged,
* the user must solve a full PoW challenge to obtain a new token.
*
* Defaults to 48 hours. At 0, tokens are purged immediately upon expiry.
*/
powTokenPurgeHours: number;
/**
* Maximum number of active temporary user tokens that can be associated with
* a single IP address. Note that this may impact users sending requests from
* hosted AI chat clients such as Agnaistic or RisuAI, as they may share IPs.
*
* When the limit is reached, the oldest token with the same IP will be
* expired. At 0, no limit is enforced. Defaults to 0.
*/
// powMaxTokensPerIp: number;
/** Per-user limit for requests per minute to text and chat models. */
textModelRateLimit: number;
/** Per-user limit for requests per minute to image generation models. */
imageModelRateLimit: number;
/**
* For OpenAI, the maximum number of context tokens (prompt + max output) a
* user can request before their request is rejected.
@@ -82,16 +198,55 @@ type Config = {
maxOutputTokensOpenAI: number;
/** For Anthropic, the maximum number of sampled tokens a user can request. */
maxOutputTokensAnthropic: number;
/** Whether requests containing disallowed characters should be rejected. */
rejectDisallowed?: boolean;
/** Whether requests containing the following phrases should be rejected. */
rejectPhrases: string[];
/** Message to return when rejecting requests. */
rejectMessage?: string;
rejectMessage: string;
/** Verbosity level of diagnostic logging. */
logLevel: "trace" | "debug" | "info" | "warn" | "error";
/**
* Whether to allow the usage of AWS credentials which could be logging users'
* model invocations. By default, such keys are treated as if they were
* disabled because users may not be aware that their usage is being logged.
*
* Some credentials do not have the policy attached that allows the proxy to
* confirm logging status, in which case the proxy assumes that logging could
* be enabled and will refuse to use the key. If you still want to use such a
* key and can't attach the policy, you can set this to true.
*/
allowAwsLogging?: boolean;
/**
* Path to the SQLite database file for storing data such as event logs. By
* default, the database will be stored at `data/database.sqlite`.
*
* Ensure target is writable by the server process, and be careful not to
* select a path that is served publicly. The default path is safe.
*/
sqliteDataPath?: string;
/**
* Whether to log events, such as generated completions, to the database.
* Events are associated with IP+user token pairs. If user_token mode is
* disabled, no events will be logged.
*
* Currently there is no pruning mechanism for the events table, so it will
* grow indefinitely. You may want to periodically prune the table manually.
*/
eventLogging?: boolean;
/**
* When hashing prompt histories, how many messages to trim from the end.
* If zero, only the full prompt hash will be stored.
* If set to some K greater than zero, then for each N from 1 to K, a hash of
* the prompt with the last N messages removed will also be stored.
*
* Experimental function, config may change in future versions.
*/
eventLoggingTrim?: number;
/** Whether prompts and responses should be logged to persistent storage. */
promptLogging?: boolean;
/** Which prompt logging backend to use. */
promptLoggingBackend?: PromptLoggingBackend;
promptLoggingBackend?: "google_sheets" | "file";
/** Prefix for prompt logging files when using the file backend. */
promptLoggingFilePrefix?: string;
/** Base64-encoded Google Sheets API key. */
googleSheetsKey?: string;
/** Google Sheets spreadsheet ID. */
@@ -110,7 +265,7 @@ type Config = {
blockedOrigins?: string;
/** Message to return when rejecting requests from blocked origins. */
blockMessage?: string;
/** Desination URL to redirect blocked requests to, for non-JSON requests. */
/** Destination URL to redirect blocked requests to, for non-JSON requests. */
blockRedirect?: string;
/** Which model families to allow requests for. Applies only to OpenAI. */
allowedModelFamilies: ModelFamily[];
@@ -133,43 +288,188 @@ type Config = {
quotaRefreshPeriod?: "hourly" | "daily" | string;
/** Whether to allow users to change their own nicknames via the UI. */
allowNicknameChanges: boolean;
/** Whether to show recent DALL-E image generations on the homepage. */
showRecentImages: boolean;
/**
* If true, cookies will be set without the `Secure` attribute, allowing
* the admin UI to be used over HTTP.
*/
useInsecureCookies: boolean;
/**
* Whether to use a more minimal public Service Info page with static content.
* Disables all stats pertaining to traffic, prompt/token usage, and queues.
* The full info page will appear if you have signed in as an admin using the
* configured ADMIN_KEY and go to /admin/service-info.
**/
staticServiceInfo?: boolean;
/**
* Trusted proxy hops. If you are deploying the server behind a reverse proxy
* (Nginx, Cloudflare Tunnel, AWS WAF, etc.) the IP address of incoming
* requests will be the IP address of the proxy, not the actual user.
*
* Depending on your hosting configuration, there may be multiple proxies/load
* balancers between your server and the user. Each one will append the
* incoming IP address to the `X-Forwarded-For` header. The user's real IP
* address will be the first one in the list, assuming the header has not been
* tampered with. Setting this value correctly ensures that the server doesn't
* trust values in `X-Forwarded-For` not added by trusted proxies.
*
* In order for the server to determine the user's real IP address, you need
* to tell it how many proxies are between the user and the server so it can
* select the correct IP address from the `X-Forwarded-For` header.
*
* *WARNING:* If you set it incorrectly, the proxy will either record the
* wrong IP address, or it will be possible for users to spoof their IP
* addresses and bypass rate limiting. Check the request logs to see what
* incoming X-Forwarded-For values look like.
*
* Examples:
* - X-Forwarded-For: "34.1.1.1, 172.1.1.1, 10.1.1.1" => trustedProxies: 3
* - X-Forwarded-For: "34.1.1.1" => trustedProxies: 1
* - no X-Forwarded-For header => trustedProxies: 0 (the actual IP of the incoming request will be used)
*
* As of 2024/01/08:
* For HuggingFace or Cloudflare Tunnel, use 1.
* For Render, use 3.
* For deployments not behind a load balancer, use 0.
*
* You should double check against your actual request logs to be sure.
*
* Defaults to 1, as most deployments are on HuggingFace or Cloudflare Tunnel.
*/
trustedProxies?: number;
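// Sketch (assumption: the server passes this to Express's standard setting):
//   app.set("trust proxy", config.trustedProxies);
// Express then derives req.ip from X-Forwarded-For using the configured
// number of trusted hops.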
/**
* Whether to allow OpenAI tool usage. The proxy doesn't implement any
* support for tools/function calling but can pass requests and responses
* as-is. Note that the proxy also cannot accurately track quota usage for
* requests involving tools, so you must opt in to this feature at your own
* risk.
*/
allowOpenAIToolUsage?: boolean;
/**
* Which services will accept prompts containing images, for use with
* multimodal models. Users with `special` role are exempt from this
* restriction.
*
* Do not enable this feature for untrusted users, as malicious users could
* send images which violate your provider's terms of service or local laws.
*
* Defaults to no services, meaning image prompts are disabled. Use a comma-
* separated list. Available services are:
* openai,anthropic,google-ai,mistral-ai,aws,gcp,azure
*/
allowedVisionServices: LLMService[];
/**
* Allows overriding the default proxy endpoint route. Defaults to /proxy.
* A leading slash is required.
*/
proxyEndpointRoute: string;
/**
* If set, only requests from these IP addresses will be permitted to use the
* admin API and UI. Provide a comma-separated list of IP addresses or CIDR
* ranges. If not set, the admin API and UI will be open to all requests.
*/
adminWhitelist: string[];
/**
* If set, requests from these IP addresses will be blocked from using the
* application. Provide a comma-separated list of IP addresses or CIDR ranges.
* If not set, no IP addresses will be blocked.
*
* Takes precedence over the adminWhitelist.
*/
ipBlacklist: string[];
/**
* If set, pushes requests further back into the queue according to their
* token costs by factor*tokens*milliseconds (or more intuitively
* factor*thousands_of_tokens*seconds).
* Accepts floats.
*/
tokensPunishmentFactor: number;
/**
* Configuration for HTTP requests made by the proxy to other servers, such
* as when checking keys or forwarding users' requests to external services.
* If not set, all requests will be made using the default agent.
*
* If set, the proxy may make requests to other servers using the specified
* settings. This is useful if you wish to route users' requests through
* another proxy or VPN, or if you have multiple network interfaces and want
* to use a specific one for outgoing requests.
*/
httpAgent?: {
/**
* The name of the network interface to use. The first external IPv4 address
* belonging to this interface will be used for outgoing requests.
*/
interface?: string;
/**
* The URL of a proxy server to use. Supports SOCKS4, SOCKS5, HTTP, and
* HTTPS. If not set, the proxy will be made using the default agent.
* - SOCKS4: `socks4://some-socks-proxy.com:9050`
* - SOCKS5: `socks5://username:password@some-socks-proxy.com:9050`
* - HTTP: `http://proxy-server-over-tcp.com:3128`
* - HTTPS: `https://proxy-server-over-tls.com:3129`
*
* **Note:** If your proxy server issues a certificate, you may need to set
* `NODE_EXTRA_CA_CERTS` to the path to your certificate, otherwise this
* application will reject TLS connections.
*/
proxyUrl?: string;
};
};
// To change configs, create a file called .env in the root directory.
// See .env.example for an example.
export const config: Config = {
port: getEnvWithDefault("PORT", 7860),
bindAddress: getEnvWithDefault("BIND_ADDRESS", "0.0.0.0"),
openaiKey: getEnvWithDefault("OPENAI_KEY", ""),
anthropicKey: getEnvWithDefault("ANTHROPIC_KEY", ""),
googlePalmKey: getEnvWithDefault("GOOGLE_PALM_KEY", ""),
googleAIKey: getEnvWithDefault("GOOGLE_AI_KEY", ""),
mistralAIKey: getEnvWithDefault("MISTRAL_AI_KEY", ""),
awsCredentials: getEnvWithDefault("AWS_CREDENTIALS", ""),
gcpCredentials: getEnvWithDefault("GCP_CREDENTIALS", ""),
azureCredentials: getEnvWithDefault("AZURE_CREDENTIALS", ""),
proxyKey: getEnvWithDefault("PROXY_KEY", ""),
adminKey: getEnvWithDefault("ADMIN_KEY", ""),
serviceInfoPassword: getEnvWithDefault("SERVICE_INFO_PASSWORD", ""),
sqliteDataPath: getEnvWithDefault(
"SQLITE_DATA_PATH",
path.join(DATA_DIR, "database.sqlite")
),
eventLogging: getEnvWithDefault("EVENT_LOGGING", false),
eventLoggingTrim: getEnvWithDefault("EVENT_LOGGING_TRIM", 5),
gatekeeper: getEnvWithDefault("GATEKEEPER", "none"),
gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory"),
maxIpsPerUser: getEnvWithDefault("MAX_IPS_PER_USER", 0),
maxIpsAutoBan: getEnvWithDefault("MAX_IPS_AUTO_BAN", false),
captchaMode: getEnvWithDefault("CAPTCHA_MODE", "none"),
powTokenHours: getEnvWithDefault("POW_TOKEN_HOURS", 24),
powTokenMaxIps: getEnvWithDefault("POW_TOKEN_MAX_IPS", 2),
powDifficultyLevel: getEnvWithDefault("POW_DIFFICULTY_LEVEL", "low"),
powChallengeTimeout: getEnvWithDefault("POW_CHALLENGE_TIMEOUT", 30),
powTokenPurgeHours: getEnvWithDefault("POW_TOKEN_PURGE_HOURS", 48),
firebaseRtdbUrl: getEnvWithDefault("FIREBASE_RTDB_URL", undefined),
firebaseKey: getEnvWithDefault("FIREBASE_KEY", undefined),
modelRateLimit: getEnvWithDefault("MODEL_RATE_LIMIT", 4),
maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 0),
textModelRateLimit: getEnvWithDefault("TEXT_MODEL_RATE_LIMIT", 4),
imageModelRateLimit: getEnvWithDefault("IMAGE_MODEL_RATE_LIMIT", 4),
maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 32768),
maxContextTokensAnthropic: getEnvWithDefault(
"MAX_CONTEXT_TOKENS_ANTHROPIC",
0
32768
),
maxOutputTokensOpenAI: getEnvWithDefault(
["MAX_OUTPUT_TOKENS_OPENAI", "MAX_OUTPUT_TOKENS"],
300
1024
),
maxOutputTokensAnthropic: getEnvWithDefault(
["MAX_OUTPUT_TOKENS_ANTHROPIC", "MAX_OUTPUT_TOKENS"],
400
1024
),
allowedModelFamilies: getEnvWithDefault("ALLOWED_MODEL_FAMILIES", [
"turbo",
"gpt4",
"gpt4-32k",
"claude",
]),
rejectDisallowed: getEnvWithDefault("REJECT_DISALLOWED", false),
allowedModelFamilies: getEnvWithDefault(
"ALLOWED_MODEL_FAMILIES",
getDefaultModelFamilies()
),
rejectPhrases: parseCsv(getEnvWithDefault("REJECT_PHRASES", "")),
rejectMessage: getEnvWithDefault(
"REJECT_MESSAGE",
"This content violates /aicg/'s acceptable use policy."
@@ -177,8 +477,13 @@ export const config: Config = {
logLevel: getEnvWithDefault("LOG_LEVEL", "info"),
checkKeys: getEnvWithDefault("CHECK_KEYS", !isDev),
showTokenCosts: getEnvWithDefault("SHOW_TOKEN_COSTS", false),
allowAwsLogging: getEnvWithDefault("ALLOW_AWS_LOGGING", false),
promptLogging: getEnvWithDefault("PROMPT_LOGGING", false),
promptLoggingBackend: getEnvWithDefault("PROMPT_LOGGING_BACKEND", undefined),
promptLoggingFilePrefix: getEnvWithDefault(
"PROMPT_LOGGING_FILE_PREFIX",
"prompt-logs"
),
googleSheetsKey: getEnvWithDefault("GOOGLE_SHEETS_KEY", undefined),
googleSheetsSpreadsheetId: getEnvWithDefault(
"GOOGLE_SHEETS_SPREADSHEET_ID",
@@ -190,36 +495,102 @@ export const config: Config = {
"You must be over the age of majority in your country to use this service."
),
blockRedirect: getEnvWithDefault("BLOCK_REDIRECT", "https://www.9gag.com"),
tokenQuota: {
turbo: getEnvWithDefault("TOKEN_QUOTA_TURBO", 0),
gpt4: getEnvWithDefault("TOKEN_QUOTA_GPT4", 0),
"gpt4-32k": getEnvWithDefault("TOKEN_QUOTA_GPT4_32K", 0),
claude: getEnvWithDefault("TOKEN_QUOTA_CLAUDE", 0),
bison: getEnvWithDefault("TOKEN_QUOTA_BISON", 0),
},
tokenQuota: MODEL_FAMILIES.reduce(
(acc, family: ModelFamily) => {
acc[family] = getEnvWithDefault(
`TOKEN_QUOTA_${family.toUpperCase().replace(/-/g, "_")}`,
0
) as number;
return acc;
},
{} as { [key in ModelFamily]: number }
),
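// e.g. the "gpt4-32k" family reads TOKEN_QUOTA_GPT4_32K and "claude-opus"
// reads TOKEN_QUOTA_CLAUDE_OPUS, per the replace() above.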
quotaRefreshPeriod: getEnvWithDefault("QUOTA_REFRESH_PERIOD", undefined),
allowNicknameChanges: getEnvWithDefault("ALLOW_NICKNAME_CHANGES", true),
showRecentImages: getEnvWithDefault("SHOW_RECENT_IMAGES", true),
useInsecureCookies: getEnvWithDefault("USE_INSECURE_COOKIES", isDev),
staticServiceInfo: getEnvWithDefault("STATIC_SERVICE_INFO", false),
trustedProxies: getEnvWithDefault("TRUSTED_PROXIES", 1),
allowOpenAIToolUsage: getEnvWithDefault("ALLOW_OPENAI_TOOL_USAGE", false),
allowedVisionServices: parseCsv(
getEnvWithDefault("ALLOWED_VISION_SERVICES", "")
) as LLMService[],
proxyEndpointRoute: getEnvWithDefault("PROXY_ENDPOINT_ROUTE", "/proxy"),
adminWhitelist: parseCsv(
getEnvWithDefault("ADMIN_WHITELIST", "0.0.0.0/0,::/0")
),
ipBlacklist: parseCsv(getEnvWithDefault("IP_BLACKLIST", "")),
tokensPunishmentFactor: getEnvWithDefault("TOKENS_PUNISHMENT_FACTOR", 0.0),
httpAgent: {
interface: getEnvWithDefault("HTTP_AGENT_INTERFACE", undefined),
proxyUrl: getEnvWithDefault("HTTP_AGENT_PROXY_URL", undefined),
},
} as const;
function generateCookieSecret() {
function generateSigningKey() {
if (process.env.COOKIE_SECRET !== undefined) {
// legacy, replaced by SIGNING_KEY
return process.env.COOKIE_SECRET;
} else if (process.env.SIGNING_KEY !== undefined) {
return process.env.SIGNING_KEY;
}
const seed = "" + config.adminKey + config.openaiKey + config.anthropicKey;
const crypto = require("crypto");
const secrets = [
config.adminKey,
config.openaiKey,
config.anthropicKey,
config.googleAIKey,
config.mistralAIKey,
config.awsCredentials,
config.gcpCredentials,
config.azureCredentials,
];
if (secrets.filter((s) => s).length === 0) {
startupLogger.warn(
"No SIGNING_KEY or secrets are set. All sessions, cookies, and proofs of work will be invalidated on restart."
);
return crypto.randomBytes(32).toString("hex");
}
startupLogger.info("No SIGNING_KEY set; one will be generated from secrets.");
startupLogger.info(
"It's recommended to set SIGNING_KEY explicitly to ensure users' sessions and cookies always persist across restarts."
);
const seed = secrets.map((s) => s || "n/a").join("");
return crypto.createHash("sha256").update(seed).digest("hex");
}
export const COOKIE_SECRET = generateCookieSecret();
const signingKey = generateSigningKey();
export const SECRET_SIGNING_KEY = signingKey;
export async function assertConfigIsValid() {
if (process.env.TURBO_ONLY === "true") {
if (process.env.MODEL_RATE_LIMIT !== undefined) {
const limit =
parseInt(process.env.MODEL_RATE_LIMIT, 10) || config.textModelRateLimit;
config.textModelRateLimit = limit;
config.imageModelRateLimit = Math.max(Math.floor(limit / 2), 1);
startupLogger.warn(
"TURBO_ONLY is deprecated. Use ALLOWED_MODEL_FAMILIES=turbo instead."
{ textLimit: limit, imageLimit: config.imageModelRateLimit },
"MODEL_RATE_LIMIT is deprecated. Use TEXT_MODEL_RATE_LIMIT and IMAGE_MODEL_RATE_LIMIT instead."
);
config.allowedModelFamilies = config.allowedModelFamilies.filter(
(f) => !f.includes("gpt4")
}
if (process.env.ALLOW_IMAGE_PROMPTS === "true") {
const hasAllowedServices = config.allowedVisionServices.length > 0;
if (!hasAllowedServices) {
config.allowedVisionServices = ["openai", "anthropic"];
startupLogger.warn(
{ allowedVisionServices: config.allowedVisionServices },
"ALLOW_IMAGE_PROMPTS is deprecated. Use ALLOWED_VISION_SERVICES instead."
);
}
}
if (config.promptLogging && !config.promptLoggingBackend) {
throw new Error(
"Prompt logging is enabled but no backend is configured. Set PROMPT_LOGGING_BACKEND to 'google_sheets' or 'file'."
);
}
@@ -235,15 +606,32 @@ export async function assertConfigIsValid() {
);
}
if (config.gatekeeper === "proxy_key" && !config.proxyKey) {
if (
config.captchaMode === "proof_of_work" &&
config.gatekeeper !== "user_token"
) {
throw new Error(
"`proxy_key` gatekeeper mode requires a `PROXY_KEY` to be set."
"Captcha mode 'proof_of_work' requires gatekeeper mode 'user_token'."
);
}
if (config.gatekeeper !== "proxy_key" && config.proxyKey) {
if (config.captchaMode === "proof_of_work") {
const val = config.powDifficultyLevel;
const isDifficulty =
typeof val === "string" &&
["low", "medium", "high", "extreme"].includes(val);
const isIterations =
typeof val === "number" && Number.isInteger(val) && val > 0;
if (!isDifficulty && !isIterations) {
throw new Error(
"Invalid POW_DIFFICULTY_LEVEL. Must be one of: low, medium, high, extreme, or a positive integer."
);
}
}
if (config.gatekeeper === "proxy_key" && !config.proxyKey) {
throw new Error(
"`PROXY_KEY` is set, but gatekeeper mode is not `proxy_key`. Make sure to set `GATEKEEPER=proxy_key`."
"`proxy_key` gatekeeper mode requires a `PROXY_KEY` to be set."
);
}
@@ -256,11 +644,22 @@ export async function assertConfigIsValid() {
);
}
if (Object.values(config.httpAgent || {}).filter(Boolean).length === 0) {
delete config.httpAgent;
} else if (config.httpAgent) {
if (config.httpAgent.interface && config.httpAgent.proxyUrl) {
throw new Error(
"Cannot set both `HTTP_AGENT_INTERFACE` and `HTTP_AGENT_PROXY_URL`."
);
}
}
// Ensure forks which add new secret-like config keys don't unwittingly expose
// them to users.
for (const key of getKeys(config)) {
const maybeSensitive = ["key", "credentials", "secret", "password"].some(
(sensitive) => key.toLowerCase().includes(sensitive)
(sensitive) =>
key.toLowerCase().includes(sensitive) && !["checkKeys"].includes(key)
);
const secured = new Set([...SENSITIVE_KEYS, ...OMITTED_KEYS]);
if (maybeSensitive && !secured.has(key))
@@ -268,67 +667,101 @@ export async function assertConfigIsValid() {
`Config key "${key}" may be sensitive but is exposed. Add it to SENSITIVE_KEYS or OMITTED_KEYS.`
);
}
await maybeInitializeFirebase();
}
/**
* Config keys that are masked on the info page, but not hidden as their
* presence may be relevant to the user due to privacy implications.
*/
export const SENSITIVE_KEYS: (keyof Config)[] = ["googleSheetsSpreadsheetId"];
export const SENSITIVE_KEYS: (keyof Config)[] = [
"googleSheetsSpreadsheetId",
"httpAgent",
];
/**
* Config keys that are not displayed on the info page at all, generally because
* they are not relevant to the user or can be inferred from other config.
*/
export const OMITTED_KEYS: (keyof Config)[] = [
export const OMITTED_KEYS = [
"port",
"bindAddress",
"logLevel",
"openaiKey",
"anthropicKey",
"googlePalmKey",
"googleAIKey",
"mistralAIKey",
"awsCredentials",
"gcpCredentials",
"azureCredentials",
"proxyKey",
"adminKey",
"checkKeys",
"serviceInfoPassword",
"rejectPhrases",
"rejectMessage",
"showTokenCosts",
"promptLoggingFilePrefix",
"googleSheetsKey",
"firebaseKey",
"firebaseRtdbUrl",
"sqliteDataPath",
"eventLogging",
"eventLoggingTrim",
"gatekeeperStore",
"maxIpsPerUser",
"blockedOrigins",
"blockMessage",
"blockRedirect",
"allowNicknameChanges",
];
"showRecentImages",
"useInsecureCookies",
"staticServiceInfo",
"checkKeys",
"allowedModelFamilies",
"trustedProxies",
"proxyEndpointRoute",
"adminWhitelist",
"ipBlacklist",
"powTokenPurgeHours",
] satisfies (keyof Config)[];
type OmitKeys = (typeof OMITTED_KEYS)[number];
type Printable<T> = {
[P in keyof T as Exclude<P, OmitKeys>]: T[P] extends object
? Printable<T[P]>
: string;
};
type PublicConfig = Printable<Config>;
const getKeys = Object.keys as <T extends object>(obj: T) => Array<keyof T>;
export function listConfig(obj: Config = config): Record<string, any> {
const result: Record<string, any> = {};
export function listConfig(obj: Config = config) {
const result: Record<string, unknown> = {};
for (const key of getKeys(obj)) {
const value = obj[key]?.toString() || "";
const shouldOmit =
OMITTED_KEYS.includes(key) || value === "" || value === "undefined";
const shouldMask = SENSITIVE_KEYS.includes(key);
const shouldOmit =
OMITTED_KEYS.includes(key as OmitKeys) ||
value === "" ||
value === "undefined";
if (shouldOmit) {
continue;
}
const validKey = key as keyof Printable<Config>;
if (value && shouldMask) {
result[key] = "********";
result[validKey] = "********";
} else {
result[key] = value;
result[validKey] = value;
}
if (typeof obj[key] === "object" && !Array.isArray(obj[key])) {
result[key] = listConfig(obj[key] as unknown as Config);
}
}
return result;
return result as PublicConfig;
}
/**
@@ -344,7 +777,14 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
}
try {
if (
["OPENAI_KEY", "ANTHROPIC_KEY", "GOOGLE_PALM_KEY"].includes(String(env))
[
"OPENAI_KEY",
"ANTHROPIC_KEY",
"GOOGLE_AI_KEY",
"AWS_CREDENTIALS",
"GCP_CREDENTIALS",
"AZURE_CREDENTIALS",
].includes(String(env))
) {
return value as unknown as T;
}
@@ -360,28 +800,16 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
}
}
let firebaseApp: firebase.app.App | undefined;
function parseCsv(val: string): string[] {
if (!val) return [];
async function maybeInitializeFirebase() {
if (!config.gatekeeperStore.startsWith("firebase")) {
return;
}
const firebase = await import("firebase-admin");
const firebaseKey = Buffer.from(config.firebaseKey!, "base64").toString();
const app = firebase.initializeApp({
credential: firebase.credential.cert(JSON.parse(firebaseKey)),
databaseURL: config.firebaseRtdbUrl,
});
await app.database().ref("connection-test").set(Date.now());
firebaseApp = app;
const regex = /(".*?"|[^",]+)(?=\s*,|\s*$)/g;
const matches = val.match(regex) || [];
return matches.map((item) => item.replace(/^"|"$/g, "").trim());
}
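// e.g. parseCsv('10.0.0.0/8,"a, b",openai') => ["10.0.0.0/8", "a, b", "openai"]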
export function getFirebaseApp(): firebase.app.App {
if (!firebaseApp) {
throw new Error("Firebase app not initialized.");
}
return firebaseApp;
function getDefaultModelFamilies(): ModelFamily[] {
return MODEL_FAMILIES.filter(
(f) => !f.includes("dall-e") && !f.includes("o1")
) as ModelFamily[];
}
+202 -359
View File
@@ -1,420 +1,178 @@
 /** This whole module kinda sucks */
 import fs from "fs";
-import { Request, Response } from "express";
+import express, { Router, Request, Response } from "express";
 import showdown from "showdown";
-import { config, listConfig } from "./config";
-import {
-  AnthropicKey,
-  GooglePalmKey,
-  OpenAIKey,
-  keyPool,
-} from "./shared/key-management";
-import { ModelFamily, OpenAIModelFamily } from "./shared/models";
-import { getUniqueIps } from "./proxy/rate-limit";
-import { getEstimatedWaitTime, getQueueLength } from "./proxy/queue";
-import { getTokenCostUsd, prettyTokens } from "./shared/stats";
-import { assertNever } from "./shared/utils";
+import { config } from "./config";
+import { buildInfo, ServiceInfo } from "./service-info";
+import { getLastNImages } from "./shared/file-storage/image-history";
+import { keyPool } from "./shared/key-management";
+import { MODEL_FAMILY_SERVICE, ModelFamily } from "./shared/models";
+import { withSession } from "./shared/with-session";
+import { checkCsrfToken, injectCsrfToken } from "./shared/inject-csrf";
 
 const INFO_PAGE_TTL = 2000;
+const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
+  turbo: "GPT-4o Mini / 3.5 Turbo",
+  gpt4: "GPT-4",
+  "gpt4-32k": "GPT-4 32k",
+  "gpt4-turbo": "GPT-4 Turbo",
+  gpt4o: "GPT-4o",
+  o1: "OpenAI o1",
+  "o1-mini": "OpenAI o1 mini",
+  "dall-e": "DALL-E",
+  claude: "Claude (Sonnet)",
+  "claude-opus": "Claude (Opus)",
+  "gemini-flash": "Gemini Flash",
+  "gemini-pro": "Gemini Pro",
+  "gemini-ultra": "Gemini Ultra",
+  "mistral-tiny": "Mistral 7B",
+  "mistral-small": "Mistral Nemo",
+  "mistral-medium": "Mistral Medium",
+  "mistral-large": "Mistral Large",
+  "aws-claude": "AWS Claude (Sonnet)",
+  "aws-claude-opus": "AWS Claude (Opus)",
+  "aws-mistral-tiny": "AWS Mistral 7B",
+  "aws-mistral-small": "AWS Mistral Nemo",
+  "aws-mistral-medium": "AWS Mistral Medium",
+  "aws-mistral-large": "AWS Mistral Large",
+  "gcp-claude": "GCP Claude (Sonnet)",
+  "gcp-claude-opus": "GCP Claude (Opus)",
+  "azure-turbo": "Azure GPT-3.5 Turbo",
+  "azure-gpt4": "Azure GPT-4",
+  "azure-gpt4-32k": "Azure GPT-4 32k",
+  "azure-gpt4-turbo": "Azure GPT-4 Turbo",
+  "azure-gpt4o": "Azure GPT-4o",
+  "azure-o1": "Azure o1",
+  "azure-o1-mini": "Azure o1 mini",
+  "azure-dall-e": "Azure DALL-E",
+};
+
+const converter = new showdown.Converter();
-const customGreeting = fs.existsSync("greeting.md")
-  ? `<div id="servergreeting">${fs.readFileSync("greeting.md", "utf8")}</div>`
-  : "";
 
 let infoPageHtml: string | undefined;
 let infoPageLastUpdated = 0;
-
-type KeyPoolKey = ReturnType<typeof keyPool.list>[0];
-const keyIsOpenAIKey = (k: KeyPoolKey): k is OpenAIKey =>
-  k.service === "openai";
-const keyIsAnthropicKey = (k: KeyPoolKey): k is AnthropicKey =>
-  k.service === "anthropic";
-const keyIsGooglePalmKey = (k: KeyPoolKey): k is GooglePalmKey =>
-  k.service === "google-palm";
-
-type ModelAggregates = {
-  active: number;
-  trial?: number;
-  revoked?: number;
-  overQuota?: number;
-  pozzed?: number;
-  queued: number;
-  queueTime: string;
-  tokens: number;
-};
-type ModelAggregateKey = `${ModelFamily}__${keyof ModelAggregates}`;
-
-type ServiceAggregates = {
-  status?: string;
-  openaiKeys?: number;
-  openaiOrgs?: number;
-  anthropicKeys?: number;
-  palmKeys?: number;
-  proompts: number;
-  tokens: number;
-  tokenCost: number;
-  openAiUncheckedKeys?: number;
-  anthropicUncheckedKeys?: number;
-} & {
-  [modelFamily in ModelFamily]?: ModelAggregates;
-};
-
-const modelStats = new Map<ModelAggregateKey, number>();
-const serviceStats = new Map<keyof ServiceAggregates, number>();
 export const handleInfoPage = (req: Request, res: Response) => {
   if (infoPageLastUpdated + INFO_PAGE_TTL > Date.now()) {
-    res.send(infoPageHtml);
-    return;
+    return res.send(infoPageHtml);
   }
 
   // Sometimes huggingface doesn't send the host header and makes us guess.
   const baseUrl =
     process.env.SPACE_ID && !req.get("host")?.includes("hf.space")
       ? getExternalUrlForHuggingfaceSpaceId(process.env.SPACE_ID)
       : req.protocol + "://" + req.get("host");
 
-  res.send(cacheInfoPageHtml(baseUrl));
+  const info = buildInfo(baseUrl + config.proxyEndpointRoute);
+  infoPageHtml = renderPage(info);
+  infoPageLastUpdated = Date.now();
+  res.send(infoPageHtml);
 };
 
-function getCostString(cost: number) {
-  if (!config.showTokenCosts) return "";
-  return ` ($${cost.toFixed(2)})`;
-}
-
-function cacheInfoPageHtml(baseUrl: string) {
-  const keys = keyPool.list();
-  modelStats.clear();
-  serviceStats.clear();
-  keys.forEach(addKeyToAggregates);
-
-  const openaiKeys = serviceStats.get("openaiKeys") || 0;
-  const anthropicKeys = serviceStats.get("anthropicKeys") || 0;
-  const palmKeys = serviceStats.get("palmKeys") || 0;
-  const proompts = serviceStats.get("proompts") || 0;
-  const tokens = serviceStats.get("tokens") || 0;
-  const tokenCost = serviceStats.get("tokenCost") || 0;
-
-  const info = {
-    uptime: Math.floor(process.uptime()),
-    endpoints: {
-      ...(openaiKeys ? { openai: baseUrl + "/proxy/openai" } : {}),
-      ...(openaiKeys
-        ? { ["openai2"]: baseUrl + "/proxy/openai/turbo-instruct" }
-        : {}),
-      ...(anthropicKeys ? { anthropic: baseUrl + "/proxy/anthropic" } : {}),
-      ...(palmKeys ? { "google-palm": baseUrl + "/proxy/google-palm" } : {}),
-    },
-    proompts,
-    tookens: `${prettyTokens(tokens)}${getCostString(tokenCost)}`,
-    ...(config.modelRateLimit ? { proomptersNow: getUniqueIps() } : {}),
-    openaiKeys,
-    anthropicKeys,
-    palmKeys,
-    ...(openaiKeys ? getOpenAIInfo() : {}),
-    ...(anthropicKeys ? getAnthropicInfo() : {}),
-    ...(palmKeys ? { "palm-bison": getPalmInfo() } : {}),
-    config: listConfig(),
-    build: process.env.BUILD_INFO || "dev",
-  };
+export function renderPage(info: ServiceInfo) {
   const title = getServerTitle();
-  const headerHtml = buildInfoPageHeader(new showdown.Converter(), title);
+  const headerHtml = buildInfoPageHeader(info);
 
-  const pageBody = `<!DOCTYPE html>
+  return `<!doctype html>
 <html lang="en">
   <head>
     <meta charset="utf-8" />
     <meta name="robots" content="noindex" />
     <title>${title}</title>
+    <link rel="stylesheet" href="/res/css/reset.css" media="screen" />
+    <link rel="stylesheet" href="/res/css/sakura.css" media="screen" />
+    <link rel="stylesheet" href="/res/css/sakura-dark.css" media="screen and (prefers-color-scheme: dark)" />
+    <style>
+      body {
+        font-family: sans-serif;
+        padding: 1em;
+        max-width: 900px;
+        margin: 0;
+      }
+
+      .self-service-links {
+        display: flex;
+        justify-content: center;
+        margin-bottom: 1em;
+        padding: 0.5em;
+        font-size: 0.8em;
+      }
+
+      .self-service-links a {
+        margin: 0 0.5em;
+      }
+    </style>
   </head>
-  <body style="font-family: sans-serif; background-color: #f0f0f0; padding: 1em;">
+  <body>
     ${headerHtml}
     <hr />
+    ${getSelfServiceLinks()}
     <h2>Service Info</h2>
     <pre>${JSON.stringify(info, null, 2)}</pre>
-    ${getSelfServiceLinks()}
   </body>
 </html>`;
-  infoPageHtml = pageBody;
-  infoPageLastUpdated = Date.now();
-  return pageBody;
 }
-function getUniqueOpenAIOrgs(keys: KeyPoolKey[]) {
-  const orgIds = new Set(
-    keys.filter((k) => k.service === "openai").map((k: any) => k.organizationId)
-  );
-  return orgIds.size;
-}
-
-function increment<T extends keyof ServiceAggregates | ModelAggregateKey>(
-  map: Map<T, number>,
-  key: T,
-  delta = 1
-) {
-  map.set(key, (map.get(key) || 0) + delta);
-}
-
-function addKeyToAggregates(k: KeyPoolKey) {
-  increment(serviceStats, "proompts", k.promptCount);
-  increment(serviceStats, "openaiKeys", k.service === "openai" ? 1 : 0);
-  increment(serviceStats, "anthropicKeys", k.service === "anthropic" ? 1 : 0);
-  increment(serviceStats, "palmKeys", k.service === "google-palm" ? 1 : 0);
-
-  let sumTokens = 0;
-  let sumCost = 0;
-  let family: ModelFamily;
-  const families = k.modelFamilies.filter((f) =>
-    config.allowedModelFamilies.includes(f)
-  );
-
-  switch (k.service) {
-    case "openai":
-    case "openai-text":
-      if (!keyIsOpenAIKey(k)) throw new Error("Invalid key type");
-      increment(
-        serviceStats,
-        "openAiUncheckedKeys",
-        Boolean(k.lastChecked) ? 0 : 1
-      );
-      // Technically this would not account for keys that have tokens recorded
-      // on models they aren't provisioned for, but that would be strange
-      k.modelFamilies.forEach((f) => {
-        const tokens = k[`${f}Tokens`];
-        sumTokens += tokens;
-        sumCost += getTokenCostUsd(f, tokens);
-        increment(modelStats, `${f}__tokens`, tokens);
-      });
-      if (families.includes("gpt4-32k")) {
-        family = "gpt4-32k";
-      } else if (families.includes("gpt4")) {
-        family = "gpt4";
-      } else {
-        family = "turbo";
-      }
-      break;
-    case "anthropic":
-      if (!keyIsAnthropicKey(k)) throw new Error("Invalid key type");
-      family = "claude";
-      sumTokens += k.claudeTokens;
-      sumCost += getTokenCostUsd(family, k.claudeTokens);
-      increment(modelStats, `${family}__tokens`, k.claudeTokens);
-      increment(modelStats, `${family}__pozzed`, k.isPozzed ? 1 : 0);
-      increment(
-        serviceStats,
-        "anthropicUncheckedKeys",
-        Boolean(k.lastChecked) ? 0 : 1
-      );
-      break;
-    case "google-palm":
-      if (!keyIsGooglePalmKey(k)) throw new Error("Invalid key type");
-      family = "bison";
-      sumTokens += k.bisonTokens;
-      sumCost += getTokenCostUsd(family, k.bisonTokens);
-      increment(modelStats, `${family}__tokens`, k.bisonTokens);
-      break;
-    default:
-      assertNever(k.service);
-  }
-
-  increment(serviceStats, "tokens", sumTokens);
-  increment(serviceStats, "tokenCost", sumCost);
-  increment(modelStats, `${family}__active`, k.isDisabled ? 0 : 1);
-  increment(modelStats, `${family}__trial`, k.isTrial ? 1 : 0);
-  if ("isRevoked" in k) {
-    increment(modelStats, `${family}__revoked`, k.isRevoked ? 1 : 0);
-  }
-  if ("isOverQuota" in k) {
-    increment(modelStats, `${family}__overQuota`, k.isOverQuota ? 1 : 0);
-  }
-}
-
-function getOpenAIInfo() {
-  const info: { status?: string; openaiKeys?: number; openaiOrgs?: number } & {
-    [modelFamily in OpenAIModelFamily]?: {
-      usage?: string;
-      activeKeys: number;
-      trialKeys?: number;
-      revokedKeys?: number;
-      overQuotaKeys?: number;
-      proomptersInQueue?: number;
-      estimatedQueueTime?: string;
-    };
-  } = {};
-
-  const allowedFamilies = new Set(config.allowedModelFamilies);
-  let families = new Set<OpenAIModelFamily>();
-  const keys = keyPool.list().filter((k) => {
-    const isOpenAI = keyIsOpenAIKey(k);
-    if (isOpenAI) k.modelFamilies.forEach((f) => families.add(f));
-    return isOpenAI;
-  }) as Omit<OpenAIKey, "key">[];
-  families = new Set([...families].filter((f) => allowedFamilies.has(f)));
-
-  if (config.checkKeys) {
-    const unchecked = serviceStats.get("openAiUncheckedKeys") || 0;
-    if (unchecked > 0) {
-      info.status = `Checking ${unchecked} keys...`;
-    }
-    info.openaiKeys = keys.length;
-    info.openaiOrgs = getUniqueOpenAIOrgs(keys);
-
-    families.forEach((f) => {
-      const tokens = modelStats.get(`${f}__tokens`) || 0;
-      const cost = getTokenCostUsd(f, tokens);
-      info[f] = {
-        usage: `${prettyTokens(tokens)} tokens${getCostString(cost)}`,
-        activeKeys: modelStats.get(`${f}__active`) || 0,
-        trialKeys: modelStats.get(`${f}__trial`) || 0,
-        revokedKeys: modelStats.get(`${f}__revoked`) || 0,
-        overQuotaKeys: modelStats.get(`${f}__overQuota`) || 0,
-      };
-    });
-  } else {
-    info.status = "Key checking is disabled.";
-    info.turbo = { activeKeys: keys.filter((k) => !k.isDisabled).length };
-    info.gpt4 = {
-      activeKeys: keys.filter(
-        (k) => !k.isDisabled && k.modelFamilies.includes("gpt4")
-      ).length,
-    };
-  }
-
-  families.forEach((f) => {
-    if (info[f]) {
-      const { estimatedQueueTime, proomptersInQueue } = getQueueInformation(f);
-      info[f]!.proomptersInQueue = proomptersInQueue;
-      info[f]!.estimatedQueueTime = estimatedQueueTime;
-    }
-  });
-
-  return info;
-}
-
-function getAnthropicInfo() {
-  const claudeInfo: Partial<ModelAggregates> = {
-    active: modelStats.get("claude__active") || 0,
-    pozzed: modelStats.get("claude__pozzed") || 0,
-  };
-  const queue = getQueueInformation("claude");
-  claudeInfo.queued = queue.proomptersInQueue;
-  claudeInfo.queueTime = queue.estimatedQueueTime;
-  const tokens = modelStats.get("claude__tokens") || 0;
-  const cost = getTokenCostUsd("claude", tokens);
-  const unchecked =
-    (config.checkKeys && serviceStats.get("anthropicUncheckedKeys")) || 0;
-  return {
-    claude: {
-      usage: `${prettyTokens(tokens)} tokens${getCostString(cost)}`,
-      ...(unchecked > 0 ? { status: `Checking ${unchecked} keys...` } : {}),
-      activeKeys: claudeInfo.active,
-      ...(config.checkKeys ? { pozzedKeys: claudeInfo.pozzed } : {}),
-      proomptersInQueue: claudeInfo.queued,
-      estimatedQueueTime: claudeInfo.queueTime,
-    },
-  };
-}
-
-function getPalmInfo() {
-  const bisonInfo: Partial<ModelAggregates> = {
-    active: modelStats.get("bison__active") || 0,
-  };
-  const queue = getQueueInformation("bison");
-  bisonInfo.queued = queue.proomptersInQueue;
-  bisonInfo.queueTime = queue.estimatedQueueTime;
-  const tokens = modelStats.get("bison__tokens") || 0;
-  const cost = getTokenCostUsd("bison", tokens);
-  return {
-    usage: `${prettyTokens(tokens)} tokens${getCostString(cost)}`,
-    activeKeys: bisonInfo.active,
-    proomptersInQueue: bisonInfo.queued,
-    estimatedQueueTime: bisonInfo.queueTime,
-  };
-}
+const customGreeting = fs.existsSync("greeting.md")
+  ? fs.readFileSync("greeting.md", "utf8")
+  : null;
 /**
  * If the server operator provides a `greeting.md` file, it will be included in
  * the rendered info page.
  **/
-function buildInfoPageHeader(converter: showdown.Converter, title: string) {
+function buildInfoPageHeader(info: ServiceInfo) {
+  const title = getServerTitle();
   // TODO: use some templating engine instead of this mess
-  let infoBody = `<!-- Header for Showdown's parser, don't remove this line -->
-# ${title}`;
+  let infoBody = `# ${title}`;
   if (config.promptLogging) {
-    infoBody += `\n## Prompt logging is enabled!
-The server operator has enabled prompt logging. The prompts you send to this proxy and the AI responses you receive may be saved.
-
-Logs are anonymous and do not contain IP addresses or timestamps. [You can see the type of data logged here, along with the rest of the code.](https://gitgud.io/khanon/oai-reverse-proxy/-/blob/main/src/prompt-logging/index.ts).
+    infoBody += `\n## Prompt Logging Enabled
+This proxy keeps full logs of all prompts and AI responses. Prompt logs are anonymous and do not contain IP addresses or timestamps.
+
+[You can see the type of data logged here, along with the rest of the code.](https://gitgud.io/khanon/oai-reverse-proxy/-/blob/main/src/shared/prompt-logging/index.ts).
 
 **If you are uncomfortable with this, don't send prompts to this proxy!**`;
   }
 
   if (config.staticServiceInfo) {
     return converter.makeHtml(infoBody + customGreeting);
   }
 
   const waits: string[] = [];
   infoBody += `\n## Estimated Wait Times\nIf the AI is busy, your prompt will be processed when a slot frees up.`;
-
-  if (config.openaiKey) {
-    const keys = keyPool.list().filter((k) => k.service === "openai");
-
-    const turboWait = getQueueInformation("turbo").estimatedQueueTime;
-    waits.push(`**Turbo:** ${turboWait}`);
-
-    const gpt4Wait = getQueueInformation("gpt4").estimatedQueueTime;
-    const hasGpt4 = keys.some((k) => k.modelFamilies.includes("gpt4"));
-    const allowedGpt4 = config.allowedModelFamilies.includes("gpt4");
-    if (hasGpt4 && allowedGpt4) {
-      waits.push(`**GPT-4:** ${gpt4Wait}`);
-    }
-
-    const gpt432kWait = getQueueInformation("gpt4-32k").estimatedQueueTime;
-    const hasGpt432k = keys.some((k) => k.modelFamilies.includes("gpt4-32k"));
-    const allowedGpt432k = config.allowedModelFamilies.includes("gpt4-32k");
-    if (hasGpt432k && allowedGpt432k) {
-      waits.push(`**GPT-4-32k:** ${gpt432kWait}`);
-    }
-  }
-
-  if (config.anthropicKey) {
-    const claudeWait = getQueueInformation("claude").estimatedQueueTime;
-    waits.push(`**Claude:** ${claudeWait}`);
-  }
+  // TODO: un-fuck this
+  for (const modelFamily of config.allowedModelFamilies) {
+    const service = MODEL_FAMILY_SERVICE[modelFamily];
+    const hasKeys = keyPool.list().some((k) => {
+      return k.service === service && k.modelFamilies.includes(modelFamily);
+    });
+    const wait = info[modelFamily]?.estimatedQueueTime;
+    if (hasKeys && wait) {
+      waits.push(
+        `**${MODEL_FAMILY_FRIENDLY_NAME[modelFamily] || modelFamily}**: ${wait}`
+      );
+    }
+  }
 
   infoBody += "\n\n" + waits.join(" / ");
 
-  infoBody += customGreeting;
+  if (customGreeting) {
+    infoBody += `\n## Server Greeting\n${customGreeting}`;
+  }
+
+  infoBody += buildRecentImageSection();
+
   return converter.makeHtml(infoBody);
 }
 function getSelfServiceLinks() {
   if (config.gatekeeper !== "user_token") return "";
-  return `<footer style="font-size: 0.8em;"><hr /><a target="_blank" href="/user/lookup">Check your user token info</a></footer>`;
-}
-
-/** Returns queue time in seconds, or minutes + seconds if over 60 seconds. */
-function getQueueInformation(partition: ModelFamily) {
-  const waitMs = getEstimatedWaitTime(partition);
-  const waitTime =
-    waitMs < 60000
-      ? `${Math.round(waitMs / 1000)}sec`
-      : `${Math.round(waitMs / 60000)}min, ${Math.round(
-          (waitMs % 60000) / 1000
-        )}sec`;
-  return {
-    proomptersInQueue: getQueueLength(partition),
-    estimatedQueueTime: waitMs > 2000 ? waitTime : "no wait",
-  };
-}
+  const links = [["Check your user token", "/user/lookup"]];
+  if (config.captchaMode !== "none") {
+    links.unshift(["Request a user token", "/user/captcha"]);
+  }
+  return `<div class="self-service-links">${links
+    .map(([text, link]) => `<a href="${link}">${text}</a>`)
+    .join(" | ")}</div>`;
+}
 function getServerTitle() {
@@ -436,9 +194,48 @@ function getServerTitle() {
   return "OAI Reverse Proxy";
 }
 
+function buildRecentImageSection() {
+  const dalleModels: ModelFamily[] = ["azure-dall-e", "dall-e"];
+  if (
+    !config.showRecentImages ||
+    dalleModels.every((f) => !config.allowedModelFamilies.includes(f))
+  ) {
+    return "";
+  }
+
+  let html = `<h2>Recent DALL-E Generations</h2>`;
+  const recentImages = getLastNImages(12).reverse();
+  if (recentImages.length === 0) {
+    html += `<p>No images yet.</p>`;
+    return html;
+  }
+
+  html += `<div style="display: flex; flex-wrap: wrap;" id="recent-images">`;
+  for (const { url, prompt } of recentImages) {
+    const thumbUrl = url.replace(/\.png$/, "_t.jpg");
+    const escapedPrompt = escapeHtml(prompt);
+    html += `<div style="margin: 0.5em;" class="recent-image">
+<a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}" alt="${escapedPrompt}" style="max-width: 150px; max-height: 150px;" /></a>
+</div>`;
+  }
+  html += `</div>`;
+
+  html += `<p style="clear: both; text-align: center;"><a href="/user/image-history">View all recent images</a></p>`;
+  return html;
+}
+
+function escapeHtml(unsafe: string) {
+  return unsafe
+    .replace(/&/g, "&amp;")
+    .replace(/</g, "&lt;")
+    .replace(/>/g, "&gt;")
+    .replace(/"/g, "&quot;")
+    .replace(/'/g, "&#39;")
+    .replace(/\[/g, "&#91;")
+    .replace(/]/g, "&#93;");
+}
+
 function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
   // Huggingface broke their amazon elb config and no longer sends the
   // x-forwarded-host header. This is a workaround.
   try {
     const [username, spacename] = spaceId.split("/");
     return `https://${username}-${spacename.replace(/_/g, "-")}.hf.space`;
@@ -446,3 +243,49 @@ function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
     return "";
   }
 }
+
+function checkIfUnlocked(
+  req: Request,
+  res: Response,
+  next: express.NextFunction
+) {
+  if (config.serviceInfoPassword?.length && !req.session?.unlocked) {
+    return res.redirect("/unlock-info");
+  }
+  next();
+}
+
+const infoPageRouter = Router();
+if (config.serviceInfoPassword?.length) {
+  infoPageRouter.use(
+    express.json({ limit: "1mb" }),
+    express.urlencoded({ extended: true, limit: "1mb" })
+  );
+  infoPageRouter.use(withSession);
+  infoPageRouter.use(injectCsrfToken, checkCsrfToken);
+  infoPageRouter.post("/unlock-info", (req, res) => {
+    if (req.body.password !== config.serviceInfoPassword) {
+      return res.status(403).send("Incorrect password");
+    }
+    req.session!.unlocked = true;
+    res.redirect("/");
+  });
+  infoPageRouter.get("/unlock-info", (_req, res) => {
+    if (_req.session?.unlocked) return res.redirect("/");
+    res.send(`
+      <form method="post" action="/unlock-info">
+        <h1>Unlock Service Info</h1>
+        <input type="hidden" name="_csrf" value="${res.locals.csrfToken}" />
+        <input type="password" name="password" placeholder="Password" />
+        <button type="submit">Unlock</button>
+      </form>
+    `);
+  });
+  infoPageRouter.use(checkIfUnlocked);
+}
+infoPageRouter.get("/", handleInfoPage);
+infoPageRouter.get("/status", (req, res) => {
+  res.json(buildInfo(req.protocol + "://" + req.get("host"), false));
+});
+export { infoPageRouter };
+14
@@ -1,6 +1,20 @@
 import pino from "pino";
 import { config } from "./config";
 
+const transport =
+  process.env.NODE_ENV === "production"
+    ? undefined
+    : {
+        target: "pino-pretty",
+        options: {
+          singleLine: true,
+          messageFormat: "{if module}\x1b[90m[{module}] \x1b[39m{end}{msg}",
+          ignore: "module",
+        },
+      };
+
 export const logger = pino({
   level: config.logLevel,
+  base: { pid: process.pid, module: "server" },
+  transport,
 });
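With this transport, development logs are pretty-printed on a single line and the module binding is pulled out as a gray [module] prefix instead of a JSON field. A sketch of typical usage (the module name here is illustrative):

import { logger } from "./logger";

const log = logger.child({ module: "proxy" });
log.info("request queued");
// dev output via pino-pretty: [proxy] request queued
// production output: plain JSON on stdout, no pretty transport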
+9
@@ -0,0 +1,9 @@
import { NextFunction, Request, Response } from "express";
export function addV1(req: Request, res: Response, next: NextFunction) {
// Clients don't consistently use the /v1 prefix so we'll add it for them.
if (!req.path.startsWith("/v1/") && !req.path.startsWith("/v1beta/")) {
req.url = `/v1${req.url}`;
}
next();
}
+190 -98
@@ -1,25 +1,14 @@
 import { Request, RequestHandler, Router } from "express";
-import * as http from "http";
-import { createProxyMiddleware } from "http-proxy-middleware";
 import { config } from "../config";
-import { logger } from "../logger";
-import { createQueueMiddleware } from "./queue";
 import { ipLimiter } from "./rate-limit";
-import { handleProxyError } from "./middleware/common";
-import {
-  addKey,
-  applyQuotaLimits,
-  addAnthropicPreamble,
-  blockZoomerOrigins,
-  createPreprocessorMiddleware,
-  finalizeBody,
-  languageFilter,
-  removeOriginHeaders,
-} from "./middleware/request";
-import {
-  ProxyResHandlerWithBody,
-  createOnProxyResHandler,
-} from "./middleware/response";
+import { ProxyResHandlerWithBody } from "./middleware/response";
+import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
+import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
 
 let modelsCache: any = null;
 let modelsCacheTime = 0;
@@ -43,8 +32,17 @@ const getModelsResponse = () => {
"claude-instant-v1.1",
"claude-instant-v1.1-100k",
"claude-instant-v1.0",
"claude-2", // claude-2 is 100k by default it seems
"claude-2",
"claude-2.0",
"claude-2.1",
"claude-3-haiku-20240307",
"claude-3-5-haiku-20241022",
"claude-3-opus-20240229",
"claude-3-opus-latest",
"claude-3-sonnet-20240229",
"claude-3-5-sonnet-20240620",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-latest",
];
const models = claudeVariants.map((id) => ({
@@ -67,33 +65,7 @@ const handleModelRequest: RequestHandler = (_req, res) => {
   res.status(200).json(getModelsResponse());
 };
 
-const rewriteAnthropicRequest = (
-  proxyReq: http.ClientRequest,
-  req: Request,
-  res: http.ServerResponse
-) => {
-  const rewriterPipeline = [
-    applyQuotaLimits,
-    addKey,
-    addAnthropicPreamble,
-    languageFilter,
-    blockZoomerOrigins,
-    removeOriginHeaders,
-    finalizeBody,
-  ];
-
-  try {
-    for (const rewriter of rewriterPipeline) {
-      rewriter(proxyReq, req, res, {});
-    }
-  } catch (error) {
-    req.log.error(error, "Error while executing proxy rewriter");
-    proxyReq.destroy(error as Error);
-  }
-};
-
 /** Only used for non-streaming requests. */
-const anthropicResponseHandler: ProxyResHandlerWithBody = async (
+const anthropicBlockingResponseHandler: ProxyResHandlerWithBody = async (
   _proxyRes,
   req,
   res,
@@ -103,31 +75,50 @@ const anthropicResponseHandler: ProxyResHandlerWithBody = async (
     throw new Error("Expected body to be an object");
   }
 
-  if (config.promptLogging) {
-    const host = req.get("host");
-    body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
-  }
-
-  if (req.inboundApi === "openai") {
-    req.log.info("Transforming Anthropic response to OpenAI format");
-    body = transformAnthropicResponse(body, req);
-  }
+  let newBody = body;
+  switch (`${req.inboundApi}<-${req.outboundApi}`) {
+    case "openai<-anthropic-text":
+      req.log.info("Transforming Anthropic Text back to OpenAI format");
+      newBody = transformAnthropicTextResponseToOpenAI(body, req);
+      break;
+    case "openai<-anthropic-chat":
+      req.log.info("Transforming Anthropic Chat back to OpenAI format");
+      newBody = transformAnthropicChatResponseToOpenAI(body);
+      break;
+    case "anthropic-text<-anthropic-chat":
+      req.log.info("Transforming Anthropic Chat back to Anthropic Text format");
+      newBody = transformAnthropicChatResponseToAnthropicText(body);
+      break;
+  }
 
-  // TODO: Remove once tokenization is stable
-  if (req.debug) {
-    body.proxy_tokenizer_debug_info = req.debug;
-  }
-
-  res.status(200).json(body);
+  res.status(200).json({ ...newBody, proxy: body.proxy });
 };
 /**
  * Transforms a model response from the Anthropic API to match those from the
  * OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
  * is only used for non-streaming requests as streaming requests are handled
  * on-the-fly.
  */
-function transformAnthropicResponse(
+function flattenChatResponse(
+  content: { type: string; text: string }[]
+): string {
+  return content
+    .map((part: { type: string; text: string }) =>
+      part.type === "text" ? part.text : ""
+    )
+    .join("\n");
+}
+
+export function transformAnthropicChatResponseToAnthropicText(
+  anthropicBody: Record<string, any>
+): Record<string, any> {
+  return {
+    type: "completion",
+    id: "ant-" + anthropicBody.id,
+    completion: flattenChatResponse(anthropicBody.content),
+    stop_reason: anthropicBody.stop_reason,
+    stop: anthropicBody.stop_sequence,
+    model: anthropicBody.model,
+    usage: anthropicBody.usage,
+  };
+}
+
+function transformAnthropicTextResponseToOpenAI(
   anthropicBody: Record<string, any>,
   req: Request
 ): Record<string, any> {
@@ -155,54 +146,155 @@ function transformAnthropicResponse(
   };
 }
 
-const anthropicProxy = createQueueMiddleware(
-  createProxyMiddleware({
-    target: "https://api.anthropic.com",
-    changeOrigin: true,
-    on: {
-      proxyReq: rewriteAnthropicRequest,
-      proxyRes: createOnProxyResHandler([anthropicResponseHandler]),
-      error: handleProxyError,
-    },
-    selfHandleResponse: true,
-    logger,
-    pathRewrite: {
-      // Send OpenAI-compat requests to the real Anthropic endpoint.
-      "^/v1/chat/completions": "/v1/complete",
-    },
-  })
-);
+export function transformAnthropicChatResponseToOpenAI(
+  anthropicBody: Record<string, any>
+): Record<string, any> {
+  return {
+    id: "ant-" + anthropicBody.id,
+    object: "chat.completion",
+    created: Date.now(),
+    model: anthropicBody.model,
+    usage: anthropicBody.usage,
+    choices: [
+      {
+        message: {
+          role: "assistant",
+          content: flattenChatResponse(anthropicBody.content),
+        },
+        finish_reason: anthropicBody.stop_reason,
+        index: 0,
+      },
+    ],
+  };
+}
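To make the shape translation concrete, this is roughly what the chat transform yields for a trimmed Anthropic Messages response (field values made up for illustration):

const anthropicBody = {
  id: "msg_01ABC",
  model: "claude-3-5-sonnet-latest",
  content: [{ type: "text", text: "Hello!" }],
  stop_reason: "end_turn",
  stop_sequence: null,
  usage: { input_tokens: 10, output_tokens: 3 },
};
const openaiBody = transformAnthropicChatResponseToOpenAI(anthropicBody);
// openaiBody.id => "ant-msg_01ABC"
// openaiBody.choices[0].message.content => "Hello!"
// openaiBody.choices[0].finish_reason => "end_turn"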
+/**
+ * If a client using the OpenAI compatibility endpoint requests an actual OpenAI
+ * model, reassigns it to Sonnet.
+ */
+function maybeReassignModel(req: Request) {
+  const model = req.body.model;
+  if (model.includes("claude")) return; // use whatever model the user requested
+  req.body.model = "claude-3-5-sonnet-latest";
+}
+
+/**
+ * If the client requests more than 4096 output tokens, the request must have a
+ * particular version header.
+ * https://docs.anthropic.com/en/release-notes/api#july-15th-2024
+ */
+function setAnthropicBetaHeader(req: Request) {
+  const { max_tokens_to_sample } = req.body;
+  if (max_tokens_to_sample > 4096) {
+    req.headers["anthropic-beta"] = "max-tokens-3-5-sonnet-2024-07-15";
+  }
+}
+
+function selectUpstreamPath(manager: ProxyReqManager) {
+  const req = manager.request;
+  const pathname = req.url.split("?")[0];
+  req.log.debug({ pathname }, "Anthropic path filter");
+
+  const isText = req.outboundApi === "anthropic-text";
+  const isChat = req.outboundApi === "anthropic-chat";
+  if (isChat && pathname === "/v1/complete") {
+    manager.setPath("/v1/messages");
+  }
+  if (isText && pathname === "/v1/chat/completions") {
+    manager.setPath("/v1/complete");
+  }
+  if (isChat && pathname === "/v1/chat/completions") {
+    manager.setPath("/v1/messages");
+  }
+  if (isChat && ["sonnet", "opus"].includes(req.params.type)) {
+    manager.setPath("/v1/messages");
+  }
+}
+
+const anthropicProxy = createQueuedProxyMiddleware({
+  target: "https://api.anthropic.com",
+  mutations: [selectUpstreamPath, addKey, finalizeBody],
+  blockingResponseHandler: anthropicBlockingResponseHandler,
+});
+
+const nativeAnthropicChatPreprocessor = createPreprocessorMiddleware(
+  { inApi: "anthropic-chat", outApi: "anthropic-chat", service: "anthropic" },
+  { afterTransform: [setAnthropicBetaHeader] }
+);
+
-const anthropicRouter = Router();
-// Fix paths because clients don't consistently use the /v1 prefix.
-anthropicRouter.use((req, _res, next) => {
-  if (!req.path.startsWith("/v1/")) {
-    req.url = `/v1${req.url}`;
-  }
-  next();
-});
+const nativeTextPreprocessor = createPreprocessorMiddleware({
+  inApi: "anthropic-text",
+  outApi: "anthropic-text",
+  service: "anthropic",
+});
+
+const textToChatPreprocessor = createPreprocessorMiddleware({
+  inApi: "anthropic-text",
+  outApi: "anthropic-chat",
+  service: "anthropic",
+});
+
+/**
+ * Routes text completion prompts to anthropic-chat if they need translation
+ * (claude-3 based models do not support the old text completion endpoint).
+ */
+const preprocessAnthropicTextRequest: RequestHandler = (req, res, next) => {
+  if (req.body.model?.startsWith("claude-3")) {
+    textToChatPreprocessor(req, res, next);
+  } else {
+    nativeTextPreprocessor(req, res, next);
+  }
+};
+
+const oaiToTextPreprocessor = createPreprocessorMiddleware({
+  inApi: "openai",
+  outApi: "anthropic-text",
+  service: "anthropic",
+});
+
+const oaiToChatPreprocessor = createPreprocessorMiddleware({
+  inApi: "openai",
+  outApi: "anthropic-chat",
+  service: "anthropic",
+});
+
+/**
+ * Routes an OpenAI prompt to either the legacy Claude text completion endpoint
+ * or the new Claude chat completion endpoint, based on the requested model.
+ */
+const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
+  maybeReassignModel(req);
+  if (req.body.model?.includes("claude-3")) {
+    oaiToChatPreprocessor(req, res, next);
+  } else {
+    oaiToTextPreprocessor(req, res, next);
+  }
+};
+
+const anthropicRouter = Router();
 anthropicRouter.get("/v1/models", handleModelRequest);
+// Native Anthropic chat completion endpoint.
+anthropicRouter.post(
+  "/v1/messages",
+  ipLimiter,
+  nativeAnthropicChatPreprocessor,
+  anthropicProxy
+);
+// Anthropic text completion endpoint. Translates to Anthropic chat completion
+// if the requested model is a Claude 3 model.
 anthropicRouter.post(
   "/v1/complete",
   ipLimiter,
-  createPreprocessorMiddleware({ inApi: "anthropic", outApi: "anthropic" }),
+  preprocessAnthropicTextRequest,
   anthropicProxy
 );
-// OpenAI-to-Anthropic compatibility endpoint.
+// OpenAI-to-Anthropic compatibility endpoint. Accepts an OpenAI chat completion
+// request and transforms/routes it to the appropriate Anthropic format and
+// endpoint based on the requested model.
 anthropicRouter.post(
   "/v1/chat/completions",
   ipLimiter,
-  createPreprocessorMiddleware({ inApi: "openai", outApi: "anthropic" }),
+  preprocessOpenAICompatRequest,
   anthropicProxy
 );
 // Redirect browser requests to the homepage.
 anthropicRouter.get("*", (req, res, next) => {
   const isBrowser = req.headers["user-agent"]?.includes("Mozilla");
   if (isBrowser) {
     res.redirect("/");
   } else {
     next();
   }
 });
 
 export const anthropic = anthropicRouter;
+257
@@ -0,0 +1,257 @@
import { Request, RequestHandler, Router } from "express";
import { v4 } from "uuid";
import {
transformAnthropicChatResponseToAnthropicText,
transformAnthropicChatResponseToOpenAI,
} from "./anthropic";
import { ipLimiter } from "./rate-limit";
import {
createPreprocessorMiddleware,
finalizeSignedRequest,
signAwsRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
const awsBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
switch (`${req.inboundApi}<-${req.outboundApi}`) {
case "openai<-anthropic-text":
req.log.info("Transforming Anthropic Text back to OpenAI format");
newBody = transformAwsTextResponseToOpenAI(body, req);
break;
case "openai<-anthropic-chat":
req.log.info("Transforming AWS Anthropic Chat back to OpenAI format");
newBody = transformAnthropicChatResponseToOpenAI(body);
break;
case "anthropic-text<-anthropic-chat":
req.log.info("Transforming AWS Anthropic Chat back to Text format");
newBody = transformAnthropicChatResponseToAnthropicText(body);
break;
}
// AWS does not always confirm the model in the response, so we have to add it
if (!newBody.model && req.body.model) {
newBody.model = req.body.model;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
function transformAwsTextResponseToOpenAI(
awsBody: Record<string, any>,
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
return {
id: "aws-" + v4(),
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage: {
prompt_tokens: req.promptTokens,
completion_tokens: req.outputTokens,
total_tokens: totalTokens,
},
choices: [
{
message: {
role: "assistant",
content: awsBody.completion?.trim(),
},
finish_reason: awsBody.stop_reason,
index: 0,
},
],
};
}
const awsClaudeProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
mutations: [signAwsRequest, finalizeSignedRequest],
blockingResponseHandler: awsBlockingResponseHandler,
});
const nativeTextPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
const textToChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes text completion prompts to aws anthropic-chat if they need translation
* (claude-3 based models do not support the old text completion endpoint).
*/
const preprocessAwsTextRequest: RequestHandler = (req, res, next) => {
if (req.body.model?.includes("claude-3")) {
textToChatPreprocessor(req, res, next);
} else {
nativeTextPreprocessor(req, res, next);
}
};
const oaiToAwsTextPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
const oaiToAwsChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes an OpenAI prompt to either the legacy Claude text completion endpoint
* or the new Claude chat completion endpoint, based on the requested model.
*/
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
if (req.body.model?.includes("claude-3")) {
oaiToAwsChatPreprocessor(req, res, next);
} else {
oaiToAwsTextPreprocessor(req, res, next);
}
};
const awsClaudeRouter = Router();
// Native(ish) Anthropic text completion endpoint.
awsClaudeRouter.post(
"/v1/complete",
ipLimiter,
preprocessAwsTextRequest,
awsClaudeProxy
);
// Native Anthropic chat completion endpoint.
awsClaudeRouter.post(
"/v1/messages",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
),
awsClaudeProxy
);
// OpenAI-to-AWS Anthropic compatibility endpoint.
awsClaudeRouter.post(
"/v1/chat/completions",
ipLimiter,
preprocessOpenAICompatRequest,
awsClaudeProxy
);
/**
* Tries to deal with:
* - frontends sending AWS model names even when they want to use the OpenAI-
* compatible endpoint
* - frontends sending Anthropic model names that AWS doesn't recognize
* - frontends sending OpenAI model names because they expect the proxy to
* translate them
*
* If the client sends an AWS model ID it will be used verbatim. Otherwise,
* various strategies are used to try to map a non-AWS model name to an AWS model ID.
*/
function maybeReassignModel(req: Request) {
const model = req.body.model;
// If it looks like an AWS model, use it as-is
if (model.includes("anthropic.claude")) {
return;
}
// Anthropic model names can look like:
// - claude-v1
// - claude-2.1
// - claude-3-5-sonnet-20240620
// - claude-3-opus-latest
const pattern =
/^(claude-)?(instant-)?(v)?(\d+)([.-](\d))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(latest|\d*)/i;
const match = model.match(pattern);
if (!match) {
throw new Error(`Provided model name (${model}) doesn't resemble a Claude model ID.`);
}
const [_, _cl, instant, _v, major, _sep, minor, _ctx, rawName, rev] = match;
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
}
const ver = minor ? `${major}.${minor}` : major;
const name = rawName?.match(/([a-z]+)/)?.[1] || "";
switch (ver) {
case "1":
case "1.0":
req.body.model = "anthropic.claude-v1";
return;
case "2":
case "2.0":
req.body.model = "anthropic.claude-v2";
return;
case "2.1":
req.body.model = "anthropic.claude-v2:1";
return;
case "3":
case "3.0":
// there is only one snapshot for all Claude 3 models so there is no need
// to check the revision
switch (name) {
case "sonnet":
req.body.model = "anthropic.claude-3-sonnet-20240229-v1:0";
return;
case "haiku":
req.body.model = "anthropic.claude-3-haiku-20240307-v1:0";
return;
case "opus":
req.body.model = "anthropic.claude-3-opus-20240229-v1:0";
return;
}
break;
case "3.5":
switch (name) {
case "sonnet":
switch (rev) {
case "20241022":
case "latest":
req.body.model = "anthropic.claude-3-5-sonnet-20241022-v2:0";
return;
case "20240620":
req.body.model = "anthropic.claude-3-5-sonnet-20240620-v1:0";
return;
}
break;
case "haiku":
switch (rev) {
case "20241022":
case "latest":
req.body.model = "anthropic.claude-3-5-haiku-20241022-v1:0";
return;
}
case "opus":
// Add after model id is announced never
break;
}
}
throw new Error(`Provided model name (${model}) could not be mapped to a known AWS Claude model ID.`);
}
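For reference, a few of the mappings this produces — a sketch only; the stubbed request below is illustrative and not part of the module:

import type { Request } from "express";

const tryMap = (model: string) => {
  const req = { body: { model } } as unknown as Request;
  maybeReassignModel(req);
  return req.body.model;
};

tryMap("claude-2.1");               // => "anthropic.claude-v2:1"
tryMap("claude-instant-v1");        // => "anthropic.claude-instant-v1"
tryMap("claude-3-5-sonnet-latest"); // => "anthropic.claude-3-5-sonnet-20241022-v2:0"
tryMap("anthropic.claude-v2");      // AWS-style IDs pass through verbatim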
export const awsClaude = awsClaudeRouter;
+95
@@ -0,0 +1,95 @@
import { Request, Router } from "express";
import {
detectMistralInputApi,
transformMistralTextToMistralChat,
} from "./mistral-ai";
import { ipLimiter } from "./rate-limit";
import { ProxyResHandlerWithBody } from "./middleware/response";
import {
createPreprocessorMiddleware,
finalizeSignedRequest,
signAwsRequest,
} from "./middleware/request";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
const awsMistralBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
if (req.inboundApi === "mistral-ai" && req.outboundApi === "mistral-text") {
newBody = transformMistralTextToMistralChat(body);
}
// AWS does not always confirm the model in the response, so we have to add it
if (!newBody.model && req.body.model) {
newBody.model = req.body.model;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const awsMistralProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
mutations: [signAwsRequest, finalizeSignedRequest],
blockingResponseHandler: awsMistralBlockingResponseHandler,
});
function maybeReassignModel(req: Request) {
const model = req.body.model;
// If it looks like an AWS model, use it as-is
if (model.startsWith("mistral.")) {
return;
}
// Mistral 7B Instruct
else if (model.includes("7b")) {
req.body.model = "mistral.mistral-7b-instruct-v0:2";
}
// Mistral 8x7B Instruct
else if (model.includes("8x7b")) {
req.body.model = "mistral.mixtral-8x7b-instruct-v0:1";
}
// Mistral Large (Feb 2024)
else if (model.includes("large-2402")) {
req.body.model = "mistral.mistral-large-2402-v1:0";
}
// Mistral Large 2 (July 2024)
else if (model.includes("large")) {
req.body.model = "mistral.mistral-large-2407-v1:0";
}
// Mistral Small (Feb 2024)
else if (model.includes("small")) {
req.body.model = "mistral.mistral-small-2402-v1:0";
} else {
throw new Error(
`Can't map '${model}' to a supported AWS model ID; make sure you are requesting a Mistral model supported by Amazon Bedrock`
);
}
}
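Note the check order: "large-2402" must be tested before the bare "large" substring, or dated requests would lose their snapshot and land on the 2407 revision. A sketch of the resulting mappings (stub request for illustration only):

const req = { body: { model: "mistral-large" } } as any;
maybeReassignModel(req); // req.body.model => "mistral.mistral-large-2407-v1:0"
// "mistral-large-2402" => "mistral.mistral-large-2402-v1:0"
// "mistral-7b"         => "mistral.mistral-7b-instruct-v0:2"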
const nativeMistralChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "mistral-ai", outApi: "mistral-ai", service: "aws" },
{
beforeTransform: [detectMistralInputApi],
afterTransform: [maybeReassignModel],
}
);
const awsMistralRouter = Router();
awsMistralRouter.post(
"/v1/chat/completions",
ipLimiter,
nativeMistralChatPreprocessor,
awsMistralProxy
);
export const awsMistral = awsMistralRouter;
+77
@@ -0,0 +1,77 @@
/* Shared code between AWS Claude and AWS Mistral endpoints. */
import { Request, Response, Router } from "express";
import { config } from "../config";
import { addV1 } from "./add-v1";
import { awsClaude } from "./aws-claude";
import { awsMistral } from "./aws-mistral";
import { AwsBedrockKey, keyPool } from "../shared/key-management";
const awsRouter = Router();
awsRouter.get(["/:vendor?/v1/models", "/:vendor?/models"], handleModelsRequest);
awsRouter.use("/claude", addV1, awsClaude);
awsRouter.use("/mistral", addV1, awsMistral);
const MODELS_CACHE_TTL = 10000;
let modelsCache: Record<string, any> = {};
let modelsCacheTime: Record<string, number> = {};
function handleModelsRequest(req: Request, res: Response) {
if (!config.awsCredentials) return { object: "list", data: [] };
const vendor = req.params.vendor?.length
? req.params.vendor === "claude"
? "anthropic"
: req.params.vendor
: "all";
const cacheTime = modelsCacheTime[vendor] || 0;
if (new Date().getTime() - cacheTime < MODELS_CACHE_TTL) {
return res.json(modelsCache[vendor]);
}
const availableModelIds = new Set<string>();
for (const key of keyPool.list()) {
if (key.isDisabled || key.service !== "aws") continue;
(key as AwsBedrockKey).modelIds.forEach((id) => availableModelIds.add(id));
}
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
const models = [
"anthropic.claude-v2",
"anthropic.claude-v2:1",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-5-haiku-20241022-v1:0",
"anthropic.claude-3-sonnet-20240229-v1:0",
"anthropic.claude-3-5-sonnet-20240620-v1:0",
"anthropic.claude-3-5-sonnet-20241022-v2:0",
"anthropic.claude-3-opus-20240229-v1:0",
"mistral.mistral-7b-instruct-v0:2",
"mistral.mixtral-8x7b-instruct-v0:1",
"mistral.mistral-large-2402-v1:0",
"mistral.mistral-large-2407-v1:0",
"mistral.mistral-small-2402-v1:0",
]
.filter((id) => availableModelIds.has(id))
.map((id) => {
const vendor = id.match(/^(.*)\./)?.[1];
return {
id,
object: "model",
created: new Date().getTime(),
owned_by: vendor,
permission: [],
root: vendor,
parent: null,
};
});
modelsCache[vendor] = {
object: "list",
data: models.filter((m) => vendor === "all" || m.root === vendor),
};
modelsCacheTime[vendor] = new Date().getTime();
return res.json(modelsCache[vendor]);
}
export const aws = awsRouter;
+77
@@ -0,0 +1,77 @@
import { RequestHandler, Router } from "express";
import { config } from "../config";
import { generateModelList } from "./openai";
import { ipLimiter } from "./rate-limit";
import {
addAzureKey,
createPreprocessorMiddleware,
finalizeSignedRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
let modelsCache: any = null;
let modelsCacheTime = 0;
const handleModelRequest: RequestHandler = (_req, res) => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return res.status(200).json(modelsCache);
}
if (!config.azureCredentials) return { object: "list", data: [] };
const result = generateModelList("azure");
modelsCache = { object: "list", data: result };
modelsCacheTime = new Date().getTime();
res.status(200).json(modelsCache);
};
const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
res.status(200).json({ ...body, proxy: body.proxy });
};
const azureOpenAIProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
const { hostname, protocol } = signedRequest;
return `${protocol}//${hostname}`;
},
mutations: [addAzureKey, finalizeSignedRequest],
blockingResponseHandler: azureOpenaiResponseHandler,
});
const azureOpenAIRouter = Router();
azureOpenAIRouter.get("/v1/models", handleModelRequest);
azureOpenAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware({
inApi: "openai",
outApi: "openai",
service: "azure",
}),
azureOpenAIProxy
);
azureOpenAIRouter.post(
"/v1/images/generations",
ipLimiter,
createPreprocessorMiddleware({
inApi: "openai-image",
outApi: "openai-image",
service: "azure",
}),
azureOpenAIProxy
);
export const azure = azureOpenAIRouter;
+9 -4
@@ -21,7 +21,7 @@ kYuIJbnAGw5Oq0L6dXFW2DFwlcLz51kPVOmDc159FsQjyuPnta7NiZAANS8KM1CJ
 pwIDAQAB`;
 
 let IMPORTED_RISU_KEY: CryptoKey | null = null;
-type RisuToken = { id: Uint8Array; expiresIn: number };
+type RisuToken = { id: string; expiresIn: number };
 type SignedToken = { data: RisuToken; sig: string };
 
 (async () => {
@@ -54,14 +54,14 @@ export async function checkRisuToken(
   try {
     const { valid, data } = await validCheck(header);
-    if (!valid) {
+    if (!valid || !data) {
       req.log.warn(
         { token: header, data },
         "Invalid RisuAI token; using IP instead"
       );
     } else {
       req.log.info("RisuAI token validated");
-      req.risuToken = header;
+      req.risuToken = String(data.id);
     }
   } catch (err) {
     req.log.warn(
@@ -81,12 +81,13 @@ async function validCheck(header: string) {
     );
   } catch (err) {
     log.warn({ error: err.message }, "Provided unparseable RisuAI token");
-    return { valid: false, data: "[unparseable]" };
+    return { valid: false };
   }
 
   const data: RisuToken = tk.data;
   const sig = Buffer.from(tk.sig, "base64");
   if (data.expiresIn < Math.floor(Date.now() / 1000)) {
+    log.warn({ token: header }, "Provided expired RisuAI token");
     return { valid: false };
   }
@@ -97,5 +98,9 @@ async function validCheck(header: string) {
     Buffer.from(JSON.stringify(data))
   );
+  if (!valid) {
+    log.warn({ token: header }, "RisuAI token failed signature check");
+  }
   return { valid, data };
 }
+67 -15
@@ -1,6 +1,7 @@
-import type { Request, RequestHandler } from "express";
+import type { Request, Response, RequestHandler } from "express";
 import { config } from "../config";
 import { authenticate, getUser } from "../shared/users/user-store";
+import { sendErrorToClient } from "./middleware/response/error-generator";
 
 const GATEKEEPER = config.gatekeeper;
 const PROXY_KEY = config.proxyKey;
@@ -11,6 +12,7 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
   // pass the _proxy_ key in this header too, instead of providing it as a
   // Bearer token in the Authorization header. So we need to check both.
   // Prefer the Authorization header if both are present.
+  // Google AI uses a key querystring parameter.
   if (req.headers.authorization) {
     const token = req.headers.authorization?.slice("Bearer ".length);
@@ -23,6 +25,12 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
     delete req.headers["x-api-key"];
     return token;
   }
 
+  if (req.query.key) {
+    const token = req.query.key?.toString();
+    delete req.query.key;
+    return token;
+  }
+
   return undefined;
 }
@@ -46,21 +54,65 @@ export const gatekeeper: RequestHandler = (req, res, next) => {
   }
 
   if (GATEKEEPER === "user_token" && token) {
-    const user = authenticate(token, req.ip);
-    if (user) {
-      req.user = user;
-      return next();
-    } else {
-      const maybeBannedUser = getUser(token);
-      if (maybeBannedUser?.disabledAt) {
-        return res.status(403).json({
-          error: `Forbidden: ${
-            maybeBannedUser.disabledReason || "Token disabled"
-          }`,
-        });
-      }
-    }
+    // RisuAI users all come from a handful of aws lambda IPs so we cannot use
+    // IP alone to distinguish between them and prevent usertoken sharing.
+    // Risu sends a signed token in the request headers with an anonymous user
+    // ID that we can instead use to associate requests with an individual.
+    const ip = req.risuToken?.length
+      ? `risu${req.risuToken}-${req.ip}`
+      : req.ip;
+
+    const { user, result } = authenticate(token, ip);
+    switch (result) {
+      case "success":
+        req.user = user;
+        return next();
+      case "limited":
+        return sendError(
+          req,
+          res,
+          403,
+          `Forbidden: no more IP addresses allowed for this user token`,
+          { currentIp: ip, maxIps: user?.maxIps }
+        );
+      case "disabled":
+        const bannedUser = getUser(token);
+        if (bannedUser?.disabledAt) {
+          const reason = bannedUser.disabledReason || "User token disabled";
+          return sendError(req, res, 403, `Forbidden: ${reason}`);
+        }
+    }
   }
 
-  res.status(401).json({ error: "Unauthorized" });
+  sendError(req, res, 401, "Unauthorized");
 };
+
+function sendError(
+  req: Request,
+  res: Response,
+  status: number,
+  message: string,
+  data: any = {}
+) {
+  const isPost = req.method === "POST";
+  const hasBody = isPost && req.body;
+  const hasModel = hasBody && req.body.model;
+  if (!hasModel) {
+    return res.status(status).json({ error: message });
+  }
+
+  sendErrorToClient({
+    req,
+    res,
+    options: {
+      title: `Proxy gatekeeper error (HTTP ${status})`,
+      message,
+      format: "unknown",
+      statusCode: status,
+      reqId: req.id,
+      obj: data,
+    },
+  });
+}
+196
@@ -0,0 +1,196 @@
import { Request, RequestHandler, Router } from "express";
import { config } from "../config";
import { transformAnthropicChatResponseToOpenAI } from "./anthropic";
import { ipLimiter } from "./rate-limit";
import {
createPreprocessorMiddleware,
finalizeSignedRequest,
signGcpRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
const LATEST_GCP_SONNET_MINOR_VERSION = "20240229";
let modelsCache: any = null;
let modelsCacheTime = 0;
const getModelsResponse = () => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
if (!config.gcpCredentials) return { object: "list", data: [] };
// https://docs.anthropic.com/en/docs/about-claude/models
const variants = [
"claude-3-haiku@20240307",
"claude-3-5-haiku@20241022",
"claude-3-sonnet@20240229",
"claude-3-5-sonnet@20240620",
"claude-3-5-sonnet-v2@20241022",
"claude-3-opus@20240229",
];
const models = variants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "anthropic",
permission: [],
root: "claude",
parent: null,
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
const gcpBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
switch (`${req.inboundApi}<-${req.outboundApi}`) {
case "openai<-anthropic-chat":
req.log.info("Transforming Anthropic Chat back to OpenAI format");
newBody = transformAnthropicChatResponseToOpenAI(body);
break;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const gcpProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
mutations: [signGcpRequest, finalizeSignedRequest],
blockingResponseHandler: gcpBlockingResponseHandler,
});
const oaiToChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-chat", service: "gcp" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes an OpenAI prompt to either the legacy Claude text completion endpoint
* or the new Claude chat completion endpoint, based on the requested model.
*/
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
oaiToChatPreprocessor(req, res, next);
};
const gcpRouter = Router();
gcpRouter.get("/v1/models", handleModelRequest);
// Native Anthropic chat completion endpoint.
gcpRouter.post(
"/v1/messages",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "gcp" },
{ afterTransform: [maybeReassignModel] }
),
gcpProxy
);
// OpenAI-to-GCP Anthropic compatibility endpoint.
gcpRouter.post(
"/v1/chat/completions",
ipLimiter,
preprocessOpenAICompatRequest,
gcpProxy
);
/**
* Tries to deal with:
* - frontends sending GCP model names even when they want to use the OpenAI-
* compatible endpoint
* - frontends sending Anthropic model names that GCP doesn't recognize
* - frontends sending OpenAI model names because they expect the proxy to
* translate them
*
* If the client sends a GCP model ID it will be used verbatim. Otherwise,
* various strategies are used to try to map a non-GCP model name to a GCP model ID.
*/
function maybeReassignModel(req: Request) {
const model = req.body.model;
// If it looks like a GCP model, use it as-is
if (model.startsWith("claude-") && model.includes("@")) {
return;
}
// Anthropic model names can look like:
// - claude-v1
// - claude-2.1
// - claude-3-5-sonnet-20240620-v1:0
const pattern =
/^(claude-)?(instant-)?(v)?(\d+)([.-](\d{1}))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(\d*)/i;
const match = model.match(pattern);
// If there's no match, fall back to Claude 3 Sonnet as it is most likely to be
// available on GCP.
if (!match) {
req.body.model = `claude-3-sonnet@${LATEST_GCP_SONNET_MINOR_VERSION}`;
return;
}
const [_, _cl, instant, _v, major, _sep, minor, _ctx, name, rev] = match;
// TODO: rework this to function similarly to aws-claude.ts maybeReassignModel
const ver = minor ? `${major}.${minor}` : major;
switch (ver) {
case "3":
case "3.0":
if (name.includes("opus")) {
req.body.model = "claude-3-opus@20240229";
} else if (name.includes("haiku")) {
req.body.model = "claude-3-haiku@20240307";
} else {
req.body.model = "claude-3-sonnet@20240229";
}
return;
case "3.5":
switch (name) {
case "sonnet":
switch (rev) {
case "20241022":
case "latest":
req.body.model = "claude-3-5-sonnet-v2@20241022";
return;
case "20240620":
req.body.model = "claude-3-5-sonnet@20240620";
return;
}
break;
case "haiku":
req.body.model = "claude-3-5-haiku@20241022";
return;
case "opus":
// Add after model ids are announced late 2024
break;
}
}
// Fall back to Claude 3 Sonnet
req.body.model = `claude-3-sonnet@${LATEST_GCP_SONNET_MINOR_VERSION}`;
return;
}
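Unlike the AWS mapper above, this one never throws: anything it can't place falls back to Claude 3 Sonnet. A sketch of the behavior (stub request, illustrative only):

const req = { body: { model: "claude-3-opus-20240229" } } as any;
maybeReassignModel(req); // req.body.model => "claude-3-opus@20240229"
// "claude-3-5-sonnet-v2@20241022" already looks like a GCP ID and passes through
// "gpt-4" doesn't resemble a Claude model and falls back to "claude-3-sonnet@20240229"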
export const gcp = gcpRouter;
+176
@@ -0,0 +1,176 @@
import { Request, RequestHandler, Router } from "express";
import { v4 } from "uuid";
import { GoogleAIKey, keyPool } from "../shared/key-management";
import { config } from "../config";
import { ipLimiter } from "./rate-limit";
import {
createPreprocessorMiddleware,
finalizeSignedRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { addGoogleAIKey } from "./middleware/request/mutators/add-google-ai-key";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
let modelsCache: any = null;
let modelsCacheTime = 0;
// https://ai.google.dev/models/gemini
// TODO: list models https://ai.google.dev/tutorials/rest_quickstart#list_models
const getModelsResponse = () => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
if (!config.googleAIKey) return { object: "list", data: [] };
const keys = keyPool
.list()
.filter((k) => k.service === "google-ai") as GoogleAIKey[];
if (keys.length === 0) {
modelsCache = { object: "list", data: [] };
modelsCacheTime = new Date().getTime();
return modelsCache;
}
const modelIds = Array.from(
new Set(keys.map((k) => k.modelIds).flat())
).filter((id) => id.startsWith("models/gemini"));
const models = modelIds.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "google",
permission: [],
root: "google",
parent: null,
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
const googleAIBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
if (req.inboundApi === "openai") {
req.log.info("Transforming Google AI response to OpenAI format");
newBody = transformGoogleAIResponse(body, req);
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
function transformGoogleAIResponse(
resBody: Record<string, any>,
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
const parts = resBody.candidates[0].content?.parts ?? [{ text: "" }];
// Strip any leading "Name: " prefix (up to 50 characters) from the first text part.
const content = parts[0].text.replace(/^(.{0,50}?): /, () => "");
return {
id: "goo-" + v4(),
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage: {
prompt_tokens: req.promptTokens,
completion_tokens: req.outputTokens,
total_tokens: totalTokens,
},
choices: [
{
message: { role: "assistant", content },
finish_reason: resBody.candidates[0].finishReason,
index: 0,
},
],
};
}
const googleAIProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
const { protocol, hostname } = signedRequest;
return `${protocol}//${hostname}`;
},
mutations: [addGoogleAIKey, finalizeSignedRequest],
blockingResponseHandler: googleAIBlockingResponseHandler,
});
const googleAIRouter = Router();
googleAIRouter.get("/v1/models", handleModelRequest);
// Native Google AI chat completion endpoint
googleAIRouter.post(
"/v1beta/models/:modelId:(generateContent|streamGenerateContent)",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "google-ai", outApi: "google-ai", service: "google-ai" },
{ beforeTransform: [maybeReassignModel], afterTransform: [setStreamFlag] }
),
googleAIProxy
);
// OpenAI-to-Google AI compatibility endpoint.
googleAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "google-ai", service: "google-ai" },
{ afterTransform: [maybeReassignModel] }
),
googleAIProxy
);
function setStreamFlag(req: Request) {
const isStreaming = req.url.includes("streamGenerateContent");
if (isStreaming) {
req.body.stream = true;
req.isStreaming = true;
} else {
req.body.stream = false;
req.isStreaming = false;
}
}
/**
* Replaces requests for non-Google AI models with gemini-1.5-pro-latest.
* Also strips models/ from the beginning of the model IDs.
**/
function maybeReassignModel(req: Request) {
// Ensure model is on body as a lot of middleware will expect it.
const model = req.body.model || req.url.split("/").pop()?.split(":").shift();
if (!model) {
throw new Error("You must specify a model with your request.");
}
req.body.model = model;
const requested = model;
if (requested.startsWith("models/")) {
req.body.model = requested.slice("models/".length);
}
if (requested.includes("gemini")) {
return;
}
req.log.info({ requested }, "Reassigning model to gemini-1.5-pro-latest");
req.body.model = "gemini-1.5-pro-latest";
}
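// Illustrative examples (hypothetical inputs, not part of the diff):
//   "models/gemini-1.5-flash" -> "gemini-1.5-flash" (prefix stripped, kept)
//   "gemini-1.5-pro-latest"   -> unchanged
//   "gpt-4"                   -> "gemini-1.5-pro-latest" (non-Gemini, reassigned)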
export const googleAI = googleAIRouter;
@@ -1,98 +0,0 @@
/* Pretends to be a KoboldAI API endpoint and translates incoming Kobold
requests to OpenAI API equivalents. */
import { Request, Response, Router } from "express";
import http from "http";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { logger } from "../logger";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addKey,
createPreprocessorMiddleware,
finalizeBody,
languageFilter,
transformKoboldPayload,
} from "./middleware/request";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
export const handleModelRequest = (_req: Request, res: Response) => {
res.status(200).json({ result: "Connected to OpenAI reverse proxy" });
};
export const handleSoftPromptsRequest = (_req: Request, res: Response) => {
res.status(200).json({ soft_prompts_list: [] });
};
const rewriteRequest = (
proxyReq: http.ClientRequest,
req: Request,
res: Response
) => {
req.body.stream = false;
const rewriterPipeline = [
addKey,
transformKoboldPayload,
languageFilter,
finalizeBody,
];
try {
for (const rewriter of rewriterPipeline) {
rewriter(proxyReq, req, res, {});
}
} catch (error) {
logger.error(error, "Error while executing proxy rewriter");
proxyReq.destroy(error as Error);
}
};
const koboldResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
const koboldResponse = {
results: [{ text: body.choices[0].message.content }],
model: body.model,
...(config.promptLogging && {
proxy_note: `Prompt logging is enabled on this proxy instance. See ${req.get(
"host"
)} for more information.`,
}),
};
res.send(JSON.stringify(koboldResponse));
};
const koboldOaiProxy = createProxyMiddleware({
target: "https://api.openai.com",
changeOrigin: true,
pathRewrite: {
"^/api/v1/generate": "/v1/chat/completions",
},
on: {
proxyReq: rewriteRequest,
proxyRes: createOnProxyResHandler([koboldResponseHandler]),
error: handleProxyError,
},
selfHandleResponse: true,
logger,
});
const koboldRouter = Router();
koboldRouter.use((req, res) => {
logger.warn(`Unhandled kobold request: ${req.method} ${req.path}`);
res.status(404).json({ error: "Not found" });
});
export const kobold = koboldRouter;
@@ -1,199 +1,302 @@
import { Request, Response } from "express";
import httpProxy from "http-proxy";
import http from "http";
import { Socket } from "net";
import { ZodError } from "zod";
import { APIFormat } from "../../shared/key-management";
import { generateErrorMessage } from "zod-error";
import { HttpError } from "../../shared/errors";
import { assertNever } from "../../shared/utils";
import { QuotaExceededError } from "./request/apply-quota-limits";
import { QuotaExceededError } from "./request/preprocessors/apply-quota-limits";
import { sendErrorToClient } from "./response/error-generator";
const OPENAI_CHAT_COMPLETION_ENDPOINT = "/v1/chat/completions";
const OPENAI_TEXT_COMPLETION_ENDPOINT = "/v1/completions";
const OPENAI_EMBEDDINGS_ENDPOINT = "/v1/embeddings";
const OPENAI_IMAGE_COMPLETION_ENDPOINT = "/v1/images/generations";
const ANTHROPIC_COMPLETION_ENDPOINT = "/v1/complete";
const ANTHROPIC_MESSAGES_ENDPOINT = "/v1/messages";
const ANTHROPIC_SONNET_COMPAT_ENDPOINT = "/v1/sonnet";
const ANTHROPIC_OPUS_COMPAT_ENDPOINT = "/v1/opus";
const GOOGLE_AI_COMPLETION_ENDPOINT = "/v1beta/models";
/** Returns true if we're making a request to a completion endpoint. */
export function isCompletionRequest(req: Request) {
// 99% sure this function is not needed anymore
export function isTextGenerationRequest(req: Request) {
return (
req.method === "POST" &&
[
OPENAI_CHAT_COMPLETION_ENDPOINT,
OPENAI_TEXT_COMPLETION_ENDPOINT,
ANTHROPIC_COMPLETION_ENDPOINT,
ANTHROPIC_MESSAGES_ENDPOINT,
ANTHROPIC_SONNET_COMPAT_ENDPOINT,
ANTHROPIC_OPUS_COMPAT_ENDPOINT,
GOOGLE_AI_COMPLETION_ENDPOINT,
].some((endpoint) => req.path.startsWith(endpoint))
);
}
export function writeErrorResponse(
export function isImageGenerationRequest(req: Request) {
return (
req.method === "POST" &&
req.path.startsWith(OPENAI_IMAGE_COMPLETION_ENDPOINT)
);
}
export function isEmbeddingsRequest(req: Request) {
return (
req.method === "POST" && req.path.startsWith(OPENAI_EMBEDDINGS_ENDPOINT)
);
}
export function sendProxyError(
req: Request,
res: Response,
statusCode: number,
statusMessage: string,
errorPayload: Record<string, any>
) {
const errorSource = errorPayload.error?.type?.startsWith("proxy")
? "proxy"
: "upstream";
const msg =
statusCode === 500
? `The proxy encountered an error while trying to process your prompt.`
: `The proxy encountered an error while trying to send your prompt to the API.`;
// If we're mid-SSE stream, send a data event with the error payload and end
// the stream. Otherwise just send a normal error response.
if (
res.headersSent ||
res.getHeader("content-type") === "text/event-stream"
) {
const errorContent =
statusCode === 403
? JSON.stringify(errorPayload)
: JSON.stringify(errorPayload, null, 2);
const msg = buildFakeSseMessage(
`${errorSource} error (${statusCode})`,
errorContent,
req
);
res.write(msg);
res.write(`data: [DONE]\n\n`);
res.end();
} else {
if (req.debug) {
errorPayload.error.proxy_tokenizer_debug_info = req.debug;
}
res.status(statusCode).json(errorPayload);
}
sendErrorToClient({
options: {
format: req.inboundApi,
title: `Proxy error (HTTP ${statusCode} ${statusMessage})`,
message: `${msg} Further details are provided below.`,
obj: errorPayload,
reqId: req.id,
model: req.body?.model,
},
req,
res,
});
}
export const handleProxyError: httpProxy.ErrorCallback = (err, req, res) => {
req.log.error({ err }, `Error during proxy request middleware`);
handleInternalError(err, req as Request, res as Response);
};
export const handleInternalError = (
/**
* Handles errors thrown during preparation of a proxy request (before it is
* sent to the upstream API), typically due to validation, quota, or other
* pre-flight checks. Depending on the error class, this function will send an
* appropriate error response to the client, streaming it if necessary.
*/
export const classifyErrorAndSend = (
err: Error,
req: Request,
res: Response
res: Response | Socket
) => {
if (res instanceof Socket) {
// We should always have an Express response object here, but http-proxy's
// ErrorCallback type says it could be just a Socket.
req.log.error(err, "Caught error while proxying request to target but cannot send error response to client.");
return res.destroy();
}
try {
if (err instanceof ZodError) {
writeErrorResponse(req, res, 400, {
error: {
type: "proxy_validation_error",
proxy_note: `Reverse proxy couldn't validate your request when trying to transform it. Your client may be sending invalid data.`,
issues: err.issues,
stack: err.stack,
message: err.message,
},
});
} else if (err.name === "ForbiddenError") {
// Spoofs a vaguely threatening OpenAI error message. Only invoked by the
// block-zoomers rewriter to scare off tiktokers.
writeErrorResponse(req, res, 403, {
error: {
type: "organization_account_disabled",
code: "policy_violation",
param: null,
message: err.message,
},
});
} else if (err instanceof QuotaExceededError) {
writeErrorResponse(req, res, 429, {
error: {
type: "proxy_quota_exceeded",
code: "quota_exceeded",
message: `You've exceeded your token quota for this model type.`,
info: err.quotaInfo,
stack: err.stack,
},
});
} else {
writeErrorResponse(req, res, 500, {
error: {
type: "proxy_internal_error",
proxy_note: `Reverse proxy encountered an error before it could reach the upstream API.`,
message: err.message,
stack: err.stack,
},
});
}
} catch (e) {
req.log.error(
{ error: e },
`Error writing error response headers, giving up.`
);
const { statusCode, statusMessage, userMessage, ...errorDetails } =
classifyError(err);
sendProxyError(req, res, statusCode, statusMessage, {
error: { message: userMessage, ...errorDetails },
});
} catch (error) {
req.log.error(error, `Error writing error response headers, giving up.`);
res.end();
}
};
export function buildFakeSseMessage(
type: string,
string: string,
req: Request
) {
let fakeEvent;
const useBackticks = !type.includes("403");
const msgContent = useBackticks
? `\`\`\`\n[${type}: ${string}]\n\`\`\`\n`
: `[${type}: ${string}]`;
function classifyError(err: Error): {
/** HTTP status code returned to the client. */
statusCode: number;
/** HTTP status message returned to the client. */
statusMessage: string;
/** Message displayed to the user. */
userMessage: string;
/** Short error type, e.g. "proxy_validation_error". */
type: string;
} & Record<string, any> {
const defaultError = {
statusCode: 500,
statusMessage: "Internal Server Error",
userMessage: `Reverse proxy error: ${err.message}`,
type: "proxy_internal_error",
stack: err.stack,
};
switch (req.inboundApi) {
case "openai":
fakeEvent = {
id: "chatcmpl-" + req.id,
object: "chat.completion.chunk",
created: Date.now(),
model: req.body?.model,
choices: [
{
delta: { content: msgContent },
index: 0,
finish_reason: type,
},
],
switch (err.constructor.name) {
case "HttpError":
const statusCode = (err as HttpError).status;
return {
statusCode,
statusMessage: `HTTP ${statusCode} ${http.STATUS_CODES[statusCode]}`,
userMessage: `Reverse proxy error: ${err.message}`,
type: "proxy_http_error",
};
break;
case "openai-text":
fakeEvent = {
id: "cmpl-" + req.id,
object: "text_completion",
created: Date.now(),
choices: [
{ text: msgContent, index: 0, logprobs: null, finish_reason: type },
],
model: req.body?.model,
case "BadRequestError":
return {
statusCode: 400,
statusMessage: "Bad Request",
userMessage: `Request is not valid. (${err.message})`,
type: "proxy_bad_request",
};
break;
case "anthropic":
fakeEvent = {
completion: msgContent,
stop_reason: type,
truncated: false, // I've never seen this be true
stop: null,
model: req.body?.model,
log_id: "proxy-req-" + req.id,
case "NotFoundError":
return {
statusCode: 404,
statusMessage: "Not Found",
userMessage: `Requested resource not found. (${err.message})`,
type: "proxy_not_found",
};
break;
case "google-palm":
throw new Error("PaLM not supported as an inbound API format");
case "PaymentRequiredError":
return {
statusCode: 402,
statusMessage: "No Keys Available",
userMessage: err.message,
type: "proxy_no_keys_available",
};
case "ZodError":
const userMessage = generateErrorMessage((err as ZodError).issues, {
prefix: "Request validation failed. ",
path: { enabled: true, label: null, type: "breadcrumbs" },
code: { enabled: false },
maxErrors: 3,
transform: ({ issue, ...rest }) => {
return `At '${rest.pathComponent}': ${issue.message}`;
},
});
return {
statusCode: 400,
statusMessage: "Bad Request",
userMessage,
type: "proxy_validation_error",
};
case "ZoomerForbiddenError":
// Mimics a ban notice from OpenAI, thrown when blockZoomerOrigins blocks
// a request.
return {
statusCode: 403,
statusMessage: "Forbidden",
userMessage: `Your account has been disabled for violating our terms of service.`,
type: "organization_account_disabled",
code: "policy_violation",
};
case "ForbiddenError":
return {
statusCode: 403,
statusMessage: "Forbidden",
userMessage: `Request is not allowed. (${err.message})`,
type: "proxy_forbidden",
};
case "QuotaExceededError":
return {
statusCode: 429,
statusMessage: "Too Many Requests",
userMessage: `You've exceeded your token quota for this model type.`,
type: "proxy_quota_exceeded",
info: (err as QuotaExceededError).quotaInfo,
};
case "Error":
if ("code" in err) {
switch (err.code) {
case "ENOTFOUND":
return {
statusCode: 502,
statusMessage: "Bad Gateway",
userMessage: `Reverse proxy encountered a DNS error while trying to connect to the upstream service.`,
type: "proxy_network_error",
code: err.code,
};
case "ECONNREFUSED":
return {
statusCode: 502,
statusMessage: "Bad Gateway",
userMessage: `Reverse proxy couldn't connect to the upstream service.`,
type: "proxy_network_error",
code: err.code,
};
case "ECONNRESET":
return {
statusCode: 504,
statusMessage: "Gateway Timeout",
userMessage: `Reverse proxy timed out while waiting for the upstream service to respond.`,
type: "proxy_network_error",
code: err.code,
};
}
}
return defaultError;
default:
assertNever(req.inboundApi);
}
return `data: ${JSON.stringify(fakeEvent)}\n\n`;
}
export function getCompletionForService({
service,
body,
req,
}: {
service: APIFormat;
body: Record<string, any>;
req?: Request;
}): { completion: string; model: string } {
switch (service) {
case "openai":
return { completion: body.choices[0].message.content, model: body.model };
case "openai-text":
return { completion: body.choices[0].text, model: body.model };
case "anthropic":
return { completion: body.completion.trim(), model: body.model };
case "google-palm":
return { completion: body.candidates[0].output, model: req?.body.model };
default:
assertNever(service);
return defaultError;
}
}
export function getCompletionFromBody(req: Request, body: Record<string, any>) {
const format = req.outboundApi;
switch (format) {
case "openai":
case "mistral-ai":
// Few possible values:
// - choices[0].message.content
// - choices[0].message with no content if model is invoking a tool
return body.choices?.[0]?.message?.content || "";
case "mistral-text":
return body.outputs?.[0]?.text || "";
case "openai-text":
return body.choices[0].text;
case "anthropic-chat":
if (!body.content) {
req.log.error(
{ body: JSON.stringify(body) },
"Received empty Anthropic chat completion"
);
return "";
}
return body.content
.map(({ text, type }: { type: string; text: string }) =>
type === "text" ? text : `[Unsupported content type: ${type}]`
)
.join("\n");
case "anthropic-text":
if (!body.completion) {
req.log.error(
{ body: JSON.stringify(body) },
"Received empty Anthropic text completion"
);
return "";
}
return body.completion.trim();
case "google-ai":
if ("choices" in body) {
return body.choices[0].message.content;
}
const text = body.candidates[0].content?.parts?.[0]?.text;
if (!text) {
req.log.warn(
{ body: JSON.stringify(body) },
"Received empty Google AI text completion"
);
return "";
}
return text;
case "openai-image":
return body.data?.map((item: any) => item.url).join("\n");
default:
assertNever(format);
}
}
export function getModelFromBody(req: Request, resBody: Record<string, any>) {
const format = req.outboundApi;
switch (format) {
case "openai":
case "openai-text":
return resBody.model;
case "mistral-ai":
case "mistral-text":
case "openai-image":
case "google-ai":
// These formats don't have a model in the response body.
return req.body.model;
case "anthropic-chat":
case "anthropic-text":
// Anthropic confirms the model in the response, but AWS Claude doesn't.
return resBody.model || req.body.model;
default:
assertNever(format);
}
}
@@ -1,32 +0,0 @@
import { AnthropicKey, Key } from "../../../shared/key-management";
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";
/**
* Some keys require the prompt to start with `\n\nHuman:`. There is no way to
* know this without trying to send the request and seeing if it fails. If a
* key is marked as requiring a preamble, it will be added here.
*/
export const addAnthropicPreamble: ProxyRequestMiddleware = (
_proxyReq,
req
) => {
if (!isCompletionRequest(req) || req.key?.service !== "anthropic") {
return;
}
let preamble = "";
let prompt = req.body.prompt;
assertAnthropicKey(req.key);
if (req.key.requiresPreamble) {
preamble = prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
req.log.debug({ key: req.key.hash, preamble }, "Adding preamble to prompt");
}
req.body.prompt = preamble + prompt;
};
function assertAnthropicKey(key: Key): asserts key is AnthropicKey {
if (key.service !== "anthropic") {
throw new Error(`Expected an Anthropic key, got '${key.service}'`);
}
}
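// Example (not part of the diff): for a key flagged requiresPreamble, the
// prompt "It was a dark and stormy night" is sent as
// "\n\nHuman:It was a dark and stormy night", while prompts already starting
// with "\n\nHuman:" pass through unchanged.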
@@ -1,97 +0,0 @@
import { Key, OpenAIKey, keyPool } from "../../../shared/key-management";
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";
import { assertNever } from "../../../shared/utils";
/** Add a key that can service this request to the request object. */
export const addKey: ProxyRequestMiddleware = (proxyReq, req) => {
let assignedKey: Key;
if (!isCompletionRequest(req)) {
// Horrible, horrible hack to stop the proxy from complaining about clients
// not sending a model when they are requesting the list of models (which
// requires a key, but obviously not a model).
// TODO: shouldn't even proxy /models to the upstream API, just fake it
// using the models our key pool has available.
req.body.model = "gpt-3.5-turbo";
}
if (!req.inboundApi || !req.outboundApi) {
const err = new Error(
"Request API format missing. Did you forget to add the request preprocessor to your router?"
);
req.log.error(
{ in: req.inboundApi, out: req.outboundApi, path: req.path },
err.message
);
throw err;
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
// TODO: use separate middleware to deal with stream flags
req.isStreaming = req.body.stream === true || req.body.stream === "true";
req.body.stream = req.isStreaming;
if (req.inboundApi === req.outboundApi) {
assignedKey = keyPool.get(req.body.model);
} else {
switch (req.outboundApi) {
// If we are translating between API formats we may need to select a model
// for the user, because the provided model is for the inbound API.
case "anthropic":
assignedKey = keyPool.get("claude-v1");
break;
case "google-palm":
assignedKey = keyPool.get("text-bison-001");
delete req.body.stream;
break;
case "openai-text":
assignedKey = keyPool.get("gpt-3.5-turbo-instruct");
break;
case "openai":
throw new Error(
"OpenAI Chat as an API translation target is not supported"
);
default:
assertNever(req.outboundApi);
}
}
req.key = assignedKey;
req.log.info(
{
key: assignedKey.hash,
model: req.body?.model,
fromApi: req.inboundApi,
toApi: req.outboundApi,
},
"Assigned key to request"
);
// TODO: KeyProvider should assemble all necessary headers
switch (assignedKey.service) {
case "anthropic":
proxyReq.setHeader("X-API-Key", assignedKey.key);
break;
case "openai":
case "openai-text":
const key: OpenAIKey = assignedKey as OpenAIKey;
if (key.organizationId) {
proxyReq.setHeader("OpenAI-Organization", key.organizationId);
}
proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "google-palm":
const originalPath = proxyReq.path;
proxyReq.path = originalPath.replace(
/(\?.*)?$/,
`?key=${assignedKey.key}`
);
break;
default:
assertNever(assignedKey.service);
}
};
@@ -1,30 +0,0 @@
import { hasAvailableQuota } from "../../../shared/users/user-store";
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";
export class QuotaExceededError extends Error {
public quotaInfo: any;
constructor(message: string, quotaInfo: any) {
super(message);
this.name = "QuotaExceededError";
this.quotaInfo = quotaInfo;
}
}
export const applyQuotaLimits: ProxyRequestMiddleware = (_proxyReq, req) => {
if (!isCompletionRequest(req) || !req.user) {
return;
}
const requestedTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
if (!hasAvailableQuota(req.user.token, req.body.model, requestedTokens)) {
throw new QuotaExceededError(
"You have exceeded your proxy token quota for this model.",
{
quota: req.user.tokenLimits,
used: req.user.tokenCounts,
requested: requestedTokens,
}
);
}
};
@@ -1,163 +0,0 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../config";
import { OpenAIPromptMessage, countTokens } from "../../../shared/tokenization";
import { RequestPreprocessor } from ".";
import { assertNever } from "../../../shared/utils";
const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
const BISON_MAX_CONTEXT = 8100;
/**
* Assigns `req.promptTokens` and `req.outputTokens` based on the request body
* and outbound API format, which combined determine the size of the context.
* If the context is too large, an error is thrown.
* This preprocessor should run after any preprocessor that transforms the
* request body.
*/
export const checkContextSize: RequestPreprocessor = async (req) => {
const service = req.outboundApi;
let result;
switch (service) {
case "openai": {
req.outputTokens = req.body.max_tokens;
const prompt: OpenAIPromptMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
case "openai-text": {
req.outputTokens = req.body.max_tokens;
const prompt: string = req.body.prompt;
result = await countTokens({ req, prompt, service });
break;
}
case "anthropic": {
req.outputTokens = req.body.max_tokens_to_sample;
const prompt: string = req.body.prompt;
result = await countTokens({ req, prompt, service });
break;
}
case "google-palm": {
req.outputTokens = req.body.maxOutputTokens;
const prompt: string = req.body.prompt.text;
result = await countTokens({ req, prompt, service });
break;
}
default:
assertNever(service);
}
req.promptTokens = result.token_count;
// TODO: Remove once token counting is stable
req.log.debug({ result: result }, "Counted prompt tokens.");
req.debug = req.debug ?? {};
req.debug = { ...req.debug, ...result };
maybeTranslateOpenAIModel(req);
validateContextSize(req);
};
function validateContextSize(req: Request) {
assertRequestHasTokenCounts(req);
const promptTokens = req.promptTokens;
const outputTokens = req.outputTokens;
const contextTokens = promptTokens + outputTokens;
const model = req.body.model;
let proxyMax: number;
switch (req.outboundApi) {
case "openai":
case "openai-text":
proxyMax = OPENAI_MAX_CONTEXT;
break;
case "anthropic":
proxyMax = CLAUDE_MAX_CONTEXT;
break;
case "google-palm":
proxyMax = BISON_MAX_CONTEXT;
break;
default:
assertNever(req.outboundApi);
}
proxyMax ||= Number.MAX_SAFE_INTEGER;
let modelMax = 0;
if (model.match(/gpt-3.5-turbo-16k/)) {
modelMax = 16384;
} else if (model.match(/gpt-3.5-turbo/)) {
modelMax = 4096;
} else if (model.match(/gpt-4-32k/)) {
modelMax = 32768;
} else if (model.match(/gpt-4/)) {
modelMax = 8192;
} else if (model.match(/claude-(?:instant-)?v1(?:\.\d)?(?:-100k)/)) {
modelMax = 100000;
} else if (model.match(/claude-(?:instant-)?v1(?:\.\d)?$/)) {
modelMax = 9000;
} else if (model.match(/claude-2/)) {
modelMax = 100000;
} else if (model.match(/^text-bison-\d{3}$/)) {
modelMax = BISON_MAX_CONTEXT;
} else {
// Don't really want to throw here because I don't want to have to update
// this ASAP every time a new model is released.
req.log.warn({ model }, "Unknown model, using 100k token limit.");
modelMax = 100000;
}
const finalMax = Math.min(proxyMax, modelMax);
z.number()
.int()
.max(finalMax, {
message: `Your request exceeds the context size limit for this model or proxy. (max: ${finalMax} tokens, requested: ${promptTokens} prompt + ${outputTokens} output = ${contextTokens} context tokens)`,
})
.parse(contextTokens);
req.log.debug(
{ promptTokens, outputTokens, contextTokens, modelMax, proxyMax },
"Prompt size validated"
);
req.debug.prompt_tokens = promptTokens;
req.debug.completion_tokens = outputTokens;
req.debug.max_model_tokens = modelMax;
req.debug.max_proxy_tokens = proxyMax;
}
function assertRequestHasTokenCounts(
req: Request
): asserts req is Request & { promptTokens: number; outputTokens: number } {
z.object({
promptTokens: z.number().int().min(1),
outputTokens: z.number().int().min(1),
})
.nonstrict()
.parse({ promptTokens: req.promptTokens, outputTokens: req.outputTokens });
}
/**
* For OpenAI-to-Anthropic requests, users can't specify the model, so we need
* to pick one based on the final context size. Ideally this would happen in
* the `transformOutboundPayload` preprocessor, but we don't have the context
* size at that point (and need a transformed body to calculate it).
*/
function maybeTranslateOpenAIModel(req: Request) {
if (req.inboundApi !== "openai" || req.outboundApi !== "anthropic") {
return;
}
const bigModel = process.env.CLAUDE_BIG_MODEL || "claude-v1-100k";
const contextSize = req.promptTokens! + req.outputTokens!;
if (contextSize > 8500) {
req.log.debug(
{ model: bigModel, contextSize },
"Using Claude 100k model for OpenAI-to-Anthropic request"
);
req.body.model = bigModel;
}
// Small model is the default already set in `transformOutboundPayload`
}
@@ -1,14 +0,0 @@
import { fixRequestBody } from "http-proxy-middleware";
import type { ProxyRequestMiddleware } from ".";
/** Finalize the rewritten request body. Must be the last rewriter. */
export const finalizeBody: ProxyRequestMiddleware = (proxyReq, req) => {
if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
const updatedBody = JSON.stringify(req.body);
proxyReq.setHeader("Content-Length", Buffer.byteLength(updatedBody));
(req as any).rawBody = Buffer.from(updatedBody);
// body-parser and http-proxy-middleware don't play nice together
fixRequestBody(proxyReq, req);
}
};
@@ -1,34 +1,38 @@
import type { Request } from "express";
import type { ClientRequest } from "http";
import type { ProxyReqCallback } from "http-proxy";
// Express middleware (runs before http-proxy-middleware, can be async)
export { applyQuotaLimits } from "./apply-quota-limits";
export { createPreprocessorMiddleware } from "./preprocess";
export { checkContextSize } from "./check-context-size";
export { setApiFormat } from "./set-api-format";
export { transformOutboundPayload } from "./transform-outbound-payload";
import { ProxyReqManager } from "./proxy-req-manager";
export {
createPreprocessorMiddleware,
createEmbeddingsPreprocessorMiddleware,
} from "./preprocessor-factory";
// HPM middleware (runs on onProxyReq, cannot be async)
export { addKey } from "./add-key";
export { addAnthropicPreamble } from "./add-anthropic-preamble";
export { blockZoomerOrigins } from "./block-zoomer-origins";
export { finalizeBody } from "./finalize-body";
export { languageFilter } from "./language-filter";
export { limitCompletions } from "./limit-completions";
export { removeOriginHeaders } from "./remove-origin-headers";
export { transformKoboldPayload } from "./transform-kobold-payload";
// Preprocessors (runs before request is queued, usually body transformation/validation)
export { applyQuotaLimits } from "./preprocessors/apply-quota-limits";
export { blockZoomerOrigins } from "./preprocessors/block-zoomer-origins";
export { countPromptTokens } from "./preprocessors/count-prompt-tokens";
export { languageFilter } from "./preprocessors/language-filter";
export { setApiFormat } from "./preprocessors/set-api-format";
export { transformOutboundPayload } from "./preprocessors/transform-outbound-payload";
export { validateContextSize } from "./preprocessors/validate-context-size";
export { validateModelFamily } from "./preprocessors/validate-model-family";
export { validateVision } from "./preprocessors/validate-vision";
// Proxy request mutators (runs every time request is dequeued, before proxying, usually for auth/signing)
export { addKey, addKeyForEmbeddingsRequest } from "./mutators/add-key";
export { addAzureKey } from "./mutators/add-azure-key";
export { finalizeBody } from "./mutators/finalize-body";
export { finalizeSignedRequest } from "./mutators/finalize-signed-request";
export { signAwsRequest } from "./mutators/sign-aws-request";
export { signGcpRequest } from "./mutators/sign-vertex-ai-request";
export { stripHeaders } from "./mutators/strip-headers";
/**
* Middleware that runs prior to the request being handled by http-proxy-
* middleware.
* Middleware that runs prior to the request being queued or handled by
* http-proxy-middleware. You will not have access to the proxied
* request/response objects since they have not yet been sent to the API.
*
* Async functions can be used here, but you will not have access to the proxied
* request/response objects, nor the data set by ProxyRequestMiddleware
* functions as they have not yet been run.
*
* User will have been authenticated by the time this middleware runs, but your
* request won't have been assigned an API key yet.
* User will have been authenticated by the proxy's gatekeeper, but the request
* won't have been assigned an upstream API key yet.
*
* Note that these functions only run once ever per request, even if the request
* is automatically retried by the request queue middleware.
@@ -36,14 +40,14 @@ export { transformKoboldPayload } from "./transform-kobold-payload";
export type RequestPreprocessor = (req: Request) => void | Promise<void>;
/**
* Middleware that runs immediately before the request is sent to the API in
* response to http-proxy-middleware's `proxyReq` event.
* Middleware that runs immediately before the request is proxied to the
* upstream API, after dequeueing the request from the request queue.
*
* Async functions cannot be used here as HPM's event emitter is not async and
* will not wait for the promise to resolve before sending the request.
*
* Note that these functions may be run multiple times per request if the
* first attempt is rate limited and the request is automatically retried by the
* request queue middleware.
* Because these middleware may be run multiple times per request if a retryable
* error occurs and the request is put back in the queue, they must be idempotent.
* A change manager is provided to allow the middleware to make changes to the
* request which can be automatically reverted.
*/
export type ProxyRequestMiddleware = ProxyReqCallback<ClientRequest, Request>;
export type ProxyReqMutator = (
changeManager: ProxyReqManager
) => void | Promise<void>;
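// Hedged sketch (illustrative, not part of the diff): a minimal custom
// mutator. All changes go through the ProxyReqManager so they can be
// automatically reverted before the mutator runs again on a re-queued request.
const exampleTraceMutator: ProxyReqMutator = (manager) => {
  // setHeader is tracked by the manager and undone on revert; the header name
  // here is hypothetical.
  manager.setHeader("x-proxy-trace-id", String(manager.request.id));
};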
@@ -1,56 +0,0 @@
import { Request } from "express";
import { config } from "../../../config";
import { logger } from "../../../logger";
import { assertNever } from "../../../shared/utils";
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";
const DISALLOWED_REGEX =
/[\u2E80-\u2E99\u2E9B-\u2EF3\u2F00-\u2FD5\u3005\u3007\u3021-\u3029\u3038-\u303B\u3400-\u4DB5\u4E00-\u9FD5\uF900-\uFA6D\uFA70-\uFAD9]/;
// Our shitty free-tier VMs will fall over if we test every single character in
// each 15k character request ten times a second. So we'll just sample 20% of
// the characters and hope that's enough.
const containsDisallowedCharacters = (text: string) => {
const sampleSize = Math.ceil(text.length * 0.2);
const sample = text
.split("")
.sort(() => 0.5 - Math.random())
.slice(0, sampleSize)
.join("");
return DISALLOWED_REGEX.test(sample);
};
/** Block requests containing too many disallowed characters. */
export const languageFilter: ProxyRequestMiddleware = (_proxyReq, req) => {
if (!config.rejectDisallowed) {
return;
}
if (isCompletionRequest(req)) {
const combinedText = getPromptFromRequest(req);
if (containsDisallowedCharacters(combinedText)) {
logger.warn(`Blocked request containing bad characters`);
_proxyReq.destroy(new Error(config.rejectMessage));
}
}
};
function getPromptFromRequest(req: Request) {
const service = req.outboundApi;
const body = req.body;
switch (service) {
case "anthropic":
return body.prompt;
case "openai":
return body.messages
.map((m: { content: string }) => m.content)
.join("\n");
case "openai-text":
return body.prompt;
case "google-palm":
return body.prompt.text;
default:
assertNever(service);
}
}
@@ -1,16 +0,0 @@
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";
/**
* Don't allow multiple completions to be requested to prevent abuse.
* OpenAI-only, Anthropic provides no such parameter.
**/
export const limitCompletions: ProxyRequestMiddleware = (_proxyReq, req) => {
if (isCompletionRequest(req) && req.outboundApi === "openai") {
const originalN = req.body?.n || 1;
req.body.n = 1;
if (originalN !== req.body.n) {
req.log.warn(`Limiting completion choices from ${originalN} to 1`);
}
}
};
@@ -0,0 +1,84 @@
import {
APIFormat,
AzureOpenAIKey,
keyPool,
} from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";
export const addAzureKey: ProxyReqMutator = async (manager) => {
const req = manager.request;
const validAPIs: APIFormat[] = ["openai", "openai-image"];
const apisValid = [req.outboundApi, req.inboundApi].every((api) =>
validAPIs.includes(api)
);
const serviceValid = req.service === "azure";
if (!apisValid || !serviceValid) {
throw new Error("addAzureKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const model = req.body.model.startsWith("azure-")
? req.body.model
: `azure-${req.body.model}`;
// TODO: untracked mutation to body, I think this should just be a
// RequestPreprocessor because we don't need to do it every dequeue.
req.body.model = model;
const key = keyPool.get(model, "azure");
manager.setKey(key);
// Handles the sole Azure API deviation from the OpenAI spec (that I know of)
// TODO: this should also probably be a RequestPreprocessor
const notNullOrUndefined = (x: any) => x !== null && x !== undefined;
if ([req.body.logprobs, req.body.top_logprobs].some(notNullOrUndefined)) {
// OpenAI wants logprobs: true/false and top_logprobs: number
// Azure seems to just want to combine them into logprobs: number
// if (typeof req.body.logprobs === "boolean") {
// req.body.logprobs = req.body.top_logprobs || undefined;
// delete req.body.top_logprobs
// }
// Temporarily just disabling logprobs for Azure because their model support
// is random: `This model does not support the 'logprobs' parameter.`
delete req.body.logprobs;
delete req.body.top_logprobs;
}
req.log.info(
{ key: key.hash, model },
"Assigned Azure OpenAI key to request"
);
const cred = req.key as AzureOpenAIKey;
const { resourceName, deploymentId, apiKey } = getCredentialsFromKey(cred);
const operation =
req.outboundApi === "openai" ? "/chat/completions" : "/images/generations";
const apiVersion =
req.outboundApi === "openai" ? "2023-09-01-preview" : "2024-02-15-preview";
manager.setSignedRequest({
method: "POST",
protocol: "https:",
hostname: `${resourceName}.openai.azure.com`,
path: `/openai/deployments/${deploymentId}${operation}?api-version=${apiVersion}`,
headers: {
["host"]: `${resourceName}.openai.azure.com`,
["content-type"]: "application/json",
["api-key"]: apiKey,
},
body: JSON.stringify(req.body),
});
};
function getCredentialsFromKey(key: AzureOpenAIKey) {
const [resourceName, deploymentId, apiKey] = key.key.split(":");
if (!resourceName || !deploymentId || !apiKey) {
throw new Error("Assigned Azure OpenAI key is not in the correct format.");
}
return { resourceName, deploymentId, apiKey };
}
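// Example (hypothetical credentials, not part of the diff): a key stored as
//   "myresource:gpt4o-deployment:0123456789abcdef"
// splits into resourceName "myresource", deploymentId "gpt4o-deployment", and
// the API key, yielding an upstream URL like
//   https://myresource.openai.azure.com/openai/deployments/gpt4o-deployment/chat/completions?api-version=2023-09-01-preview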
@@ -0,0 +1,44 @@
import { keyPool } from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";
export const addGoogleAIKey: ProxyReqMutator = (manager) => {
const req = manager.request;
const inboundValid =
req.inboundApi === "openai" || req.inboundApi === "google-ai";
const outboundValid = req.outboundApi === "google-ai";
const serviceValid = req.service === "google-ai";
if (!inboundValid || !outboundValid || !serviceValid) {
throw new Error("addGoogleAIKey called on invalid request");
}
const model = req.body.model;
const key = keyPool.get(model, "google-ai");
manager.setKey(key);
req.log.info(
{ key: key.hash, model, stream: req.isStreaming },
"Assigned Google AI API key to request"
);
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:generateContent?key=$API_KEY
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}
const payload = { ...req.body, stream: undefined, model: undefined };
// TODO: this isn't actually signed, so the manager api is a little unclear
// with the ProxyReqManager refactor, it's probably no longer necessary to
// do this because we can modify the path using Manager.setPath.
manager.setSignedRequest({
method: "POST",
protocol: "https:",
hostname: "generativelanguage.googleapis.com",
path: `/v1beta/models/${model}:${
req.isStreaming ? "streamGenerateContent?alt=sse&" : "generateContent?"
}key=${key.key}`,
headers: {
["host"]: `generativelanguage.googleapis.com`,
["content-type"]: "application/json",
},
body: JSON.stringify(payload),
});
};
@@ -0,0 +1,130 @@
import { AnthropicChatMessage } from "../../../../shared/api-schemas";
import { containsImageContent } from "../../../../shared/api-schemas/anthropic";
import { Key, OpenAIKey, keyPool } from "../../../../shared/key-management";
import { isEmbeddingsRequest } from "../../common";
import { assertNever } from "../../../../shared/utils";
import { ProxyReqMutator } from "../index";
export const addKey: ProxyReqMutator = (manager) => {
const req = manager.request;
let assignedKey: Key;
const { service, inboundApi, outboundApi, body } = req;
if (!inboundApi || !outboundApi) {
const err = new Error(
"Request API format missing. Did you forget to add the request preprocessor to your router?"
);
req.log.error({ inboundApi, outboundApi, path: req.path }, err.message);
throw err;
}
if (!body?.model) {
throw new Error("You must specify a model with your request.");
}
let needsMultimodal = false;
if (outboundApi === "anthropic-chat") {
needsMultimodal = containsImageContent(
body.messages as AnthropicChatMessage[]
);
}
if (inboundApi === outboundApi) {
assignedKey = keyPool.get(body.model, service, needsMultimodal);
} else {
switch (outboundApi) {
// If we are translating between API formats we may need to select a model
// for the user, because the provided model is for the inbound API.
// TODO: This whole else condition is probably no longer needed since API
// translation now reassigns the model earlier in the request pipeline.
case "anthropic-text":
case "anthropic-chat":
case "mistral-ai":
case "mistral-text":
case "google-ai":
assignedKey = keyPool.get(body.model, service);
break;
case "openai-text":
assignedKey = keyPool.get("gpt-3.5-turbo-instruct", service);
break;
case "openai-image":
assignedKey = keyPool.get("dall-e-3", service);
break;
case "openai":
throw new Error(
`Outbound API ${outboundApi} is not supported for ${inboundApi}`
);
default:
assertNever(outboundApi);
}
}
manager.setKey(assignedKey);
req.log.info(
{ key: assignedKey.hash, model: body.model, inboundApi, outboundApi },
"Assigned key to request"
);
// TODO: KeyProvider should assemble all necessary headers
switch (assignedKey.service) {
case "anthropic":
manager.setHeader("X-API-Key", assignedKey.key);
if (!manager.request.headers["anthropic-version"]) {
manager.setHeader("anthropic-version", "2023-06-01");
}
break;
case "openai":
const key: OpenAIKey = assignedKey as OpenAIKey;
if (key.organizationId && !key.key.includes("svcacct")) {
manager.setHeader("OpenAI-Organization", key.organizationId);
}
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "mistral-ai":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "azure":
const azureKey = assignedKey.key;
manager.setHeader("api-key", azureKey);
break;
case "aws":
case "gcp":
case "google-ai":
throw new Error("add-key should not be used for this service.");
default:
assertNever(assignedKey.service);
}
};
/**
* Special case for embeddings requests which don't go through the normal
* request pipeline.
*/
export const addKeyForEmbeddingsRequest: ProxyReqMutator = (manager) => {
const req = manager.request;
if (!isEmbeddingsRequest(req)) {
throw new Error(
"addKeyForEmbeddingsRequest called on non-embeddings request"
);
}
if (req.inboundApi !== "openai") {
throw new Error("Embeddings requests must be from OpenAI");
}
manager.setBody({ input: req.body.input, model: "text-embedding-ada-002" });
const key = keyPool.get("text-embedding-ada-002", "openai") as OpenAIKey;
manager.setKey(key);
req.log.info(
{ key: key.hash, toApi: req.outboundApi },
"Assigned Turbo key to embeddings request"
);
manager.setHeader("Authorization", `Bearer ${key.key}`);
if (key.organizationId) {
manager.setHeader("OpenAI-Organization", key.organizationId);
}
};
@@ -0,0 +1,22 @@
import type { ProxyReqMutator } from "../index";
/** Finalize the rewritten request body. Must be the last mutator. */
export const finalizeBody: ProxyReqMutator = (manager) => {
const req = manager.request;
if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
// For image generation requests, remove stream flag.
if (req.outboundApi === "openai-image") {
delete req.body.stream;
}
// For anthropic text to chat requests, remove undefined prompt.
if (req.outboundApi === "anthropic-chat") {
delete req.body.prompt;
}
const serialized =
typeof req.body === "string" ? req.body : JSON.stringify(req.body);
manager.setHeader("Content-Length", String(Buffer.byteLength(serialized)));
manager.setBody(serialized);
}
};
@@ -0,0 +1,32 @@
import { ProxyReqMutator } from "../index";
/**
* For AWS/GCP/Azure/Google requests, the body is signed earlier in the request
* pipeline, before the proxy middleware. This function just assigns the path
* and headers to the proxy request.
*/
export const finalizeSignedRequest: ProxyReqMutator = (manager) => {
const req = manager.request;
if (!req.signedRequest) {
throw new Error("Expected req.signedRequest to be set");
}
// The path depends on the selected model and the assigned key's region.
manager.setPath(req.signedRequest.path);
// Amazon doesn't want extra headers, so we need to remove all of them and
// reassign only the ones specified in the signed request.
const headers = req.signedRequest.headers;
Object.keys(headers).forEach((key) => {
manager.removeHeader(key);
});
Object.entries(req.signedRequest.headers).forEach(([key, value]) => {
manager.setHeader(key, value);
});
const serialized =
typeof req.signedRequest.body === "string"
? req.signedRequest.body
: JSON.stringify(req.signedRequest.body);
manager.setHeader("Content-Length", String(Buffer.byteLength(serialized)));
manager.setBody(serialized);
};
@@ -0,0 +1,148 @@
import express, { Request } from "express";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
import {
AnthropicV1TextSchema,
AnthropicV1MessagesSchema,
} from "../../../../shared/api-schemas";
import { AwsBedrockKey, keyPool } from "../../../../shared/key-management";
import {
AWSMistralV1ChatCompletionsSchema,
AWSMistralV1TextCompletionsSchema,
} from "../../../../shared/api-schemas/mistral-ai";
import { ProxyReqMutator } from "../index";
const AMZ_HOST =
process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";
/**
* Signs an outgoing AWS request with the appropriate headers and modifies the
* request object in place to fix the path.
* This happens AFTER request transformation.
*/
export const signAwsRequest: ProxyReqMutator = async (manager) => {
const req = manager.request;
const { model, stream } = req.body;
const key = keyPool.get(model, "aws") as AwsBedrockKey;
manager.setKey(key);
const credential = getCredentialParts(req);
const host = AMZ_HOST.replace("%REGION%", credential.region);
// AWS only uses 2023-06-01 and does not actually check this header, but we
// set it so that the stream adapter always selects the correct transformer.
manager.setHeader("anthropic-version", "2023-06-01");
// If our key has an inference profile compatible with the requested model,
// we want to use the inference profile instead of the model ID when calling
// InvokeModel as that will give us higher rate limits.
const profile =
key.inferenceProfileIds.find((p) => p.includes(model)) || model;
// Uses the AWS SDK to sign a request, then modifies our HPM proxy request
// with the headers generated by the SDK.
const newRequest = new HttpRequest({
method: "POST",
protocol: "https:",
hostname: host,
path: `/model/${profile}/invoke${stream ? "-with-response-stream" : ""}`,
headers: {
["Host"]: host,
["content-type"]: "application/json",
},
body: JSON.stringify(getStrictlyValidatedBodyForAws(req)),
});
if (stream) {
newRequest.headers["x-amzn-bedrock-accept"] = "application/json";
} else {
newRequest.headers["accept"] = "*/*";
}
const { body, inboundApi, outboundApi } = req;
req.log.info(
{ key: key.hash, model: body.model, profile, inboundApi, outboundApi },
"Assigned AWS credentials to request"
);
manager.setSignedRequest(await sign(newRequest, getCredentialParts(req)));
};
type Credential = {
accessKeyId: string;
secretAccessKey: string;
region: string;
};
function getCredentialParts(req: express.Request): Credential {
const [accessKeyId, secretAccessKey, region] = req.key!.key.split(":");
if (!accessKeyId || !secretAccessKey || !region) {
req.log.error(
{ key: req.key!.hash },
"AWS_CREDENTIALS isn't correctly formatted; refer to the docs"
);
throw new Error("The key assigned to this request is invalid.");
}
return { accessKeyId, secretAccessKey, region };
}
async function sign(request: HttpRequest, credential: Credential) {
const { accessKeyId, secretAccessKey, region } = credential;
const signer = new SignatureV4({
sha256: Sha256,
credentials: { accessKeyId, secretAccessKey },
region,
service: "bedrock",
});
return signer.sign(request);
}
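// Example (hypothetical model ID, not part of the diff): a streaming Claude
// request with a us-east-1 credential is signed for
//   POST https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-sonnet-20240229-v1:0/invoke-with-response-stream
// with headers such as Authorization and X-Amz-Date attached by SignatureV4.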
function getStrictlyValidatedBodyForAws(req: Readonly<Request>): unknown {
// AWS uses vendor API formats but imposes additional (more strict) validation
// rules, namely that extraneous parameters are not allowed. We will validate
// using the vendor's zod schema but apply `.strip` to ensure that any
// extraneous parameters are removed.
let strippedParams: Record<string, unknown> = {};
switch (req.outboundApi) {
case "anthropic-text":
strippedParams = AnthropicV1TextSchema.pick({
prompt: true,
max_tokens_to_sample: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
})
.strip()
.parse(req.body);
break;
case "anthropic-chat":
strippedParams = AnthropicV1MessagesSchema.pick({
messages: true,
system: true,
max_tokens: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
})
.strip()
.parse(req.body);
strippedParams.anthropic_version = "bedrock-2023-05-31";
break;
case "mistral-ai":
strippedParams = AWSMistralV1ChatCompletionsSchema.parse(req.body);
break;
case "mistral-text":
strippedParams = AWSMistralV1TextCompletionsSchema.parse(req.body);
break;
default:
throw new Error("Unexpected outbound API for AWS.");
}
return strippedParams;
}
@@ -0,0 +1,75 @@
import { AnthropicV1MessagesSchema } from "../../../../shared/api-schemas";
import { GcpKey, keyPool } from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";
import {
getCredentialsFromGcpKey,
refreshGcpAccessToken,
} from "../../../../shared/key-management/gcp/oauth";
const GCP_HOST = process.env.GCP_HOST || "%REGION%-aiplatform.googleapis.com";
export const signGcpRequest: ProxyReqMutator = async (manager) => {
const req = manager.request;
const serviceValid = req.service === "gcp";
if (!serviceValid) {
throw new Error("addVertexAIKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const { model } = req.body;
const key: GcpKey = keyPool.get(model, "gcp") as GcpKey;
if (!key.accessToken || Date.now() > key.accessTokenExpiresAt) {
const [token, durationSec] = await refreshGcpAccessToken(key);
keyPool.update(key, {
accessToken: token,
accessTokenExpiresAt: Date.now() + durationSec * 1000 * 0.95,
} as GcpKey);
// nb: key received by `get` is a clone and will not have the new access
// token we just set, so it must be manually updated.
key.accessToken = token;
}
manager.setKey(key);
req.log.info({ key: key.hash, model }, "Assigned GCP key to request");
// TODO: This should happen in transform-outbound-payload.ts
// TODO: Support tools
let strippedParams: Record<string, unknown>;
strippedParams = AnthropicV1MessagesSchema.pick({
messages: true,
system: true,
max_tokens: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
stream: true,
})
.strip()
.parse(req.body);
strippedParams.anthropic_version = "vertex-2023-10-16";
const credential = await getCredentialsFromGcpKey(key);
const host = GCP_HOST.replace("%REGION%", credential.region);
// GCP doesn't use the anthropic-version header, but we set it to ensure the
// stream adapter selects the correct transformer.
manager.setHeader("anthropic-version", "2023-06-01");
manager.setSignedRequest({
method: "POST",
protocol: "https:",
hostname: host,
path: `/v1/projects/${credential.projectId}/locations/${credential.region}/publishers/anthropic/models/${model}:streamRawPredict`,
headers: {
["host"]: host,
["content-type"]: "application/json",
["authorization"]: `Bearer ${key.accessToken}`,
},
body: JSON.stringify(strippedParams),
});
};
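// Example (hypothetical project and region, not part of the diff): a request
// for claude-3-5-sonnet@20240620 with a us-east5 credential is sent to
//   https://us-east5-aiplatform.googleapis.com/v1/projects/my-project/locations/us-east5/publishers/anthropic/models/claude-3-5-sonnet@20240620:streamRawPredict
// with the key's OAuth access token as the bearer authorization header.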
@@ -0,0 +1,33 @@
import { ProxyReqMutator } from "../index";
/**
* Removes origin and referer headers before sending the request to the API for
* privacy reasons.
*/
export const stripHeaders: ProxyReqMutator = (manager) => {
manager.removeHeader("origin");
manager.removeHeader("referer");
// Some APIs refuse requests coming from browsers to discourage embedding
// API keys in client-side code, so we must remove all CORS/fetch headers.
Object.keys(manager.request.headers).forEach((key) => {
if (key.startsWith("sec-")) {
manager.removeHeader(key);
}
});
manager.removeHeader("tailscale-user-login");
manager.removeHeader("tailscale-user-name");
manager.removeHeader("tailscale-headers-info");
manager.removeHeader("tailscale-user-profile-pic");
manager.removeHeader("cf-connecting-ip");
manager.removeHeader("cf-ray");
manager.removeHeader("cf-visitor");
manager.removeHeader("cf-warp-tag-id");
manager.removeHeader("forwarded");
manager.removeHeader("true-client-ip");
manager.removeHeader("x-forwarded-for");
manager.removeHeader("x-forwarded-host");
manager.removeHeader("x-forwarded-proto");
manager.removeHeader("x-real-ip");
};
@@ -1,36 +0,0 @@
import { RequestHandler } from "express";
import { handleInternalError } from "../common";
import {
RequestPreprocessor,
checkContextSize,
setApiFormat,
transformOutboundPayload,
} from ".";
/**
* Returns a middleware function that processes the request body into the given
* API format, and then sequentially runs the given additional preprocessors.
*/
export const createPreprocessorMiddleware = (
apiFormat: Parameters<typeof setApiFormat>[0],
additionalPreprocessors?: RequestPreprocessor[]
): RequestHandler => {
const preprocessors: RequestPreprocessor[] = [
setApiFormat(apiFormat),
...(additionalPreprocessors ?? []),
transformOutboundPayload,
checkContextSize,
];
return async function executePreprocessors(req, res, next) {
try {
for (const preprocessor of preprocessors) {
await preprocessor(req);
}
next();
} catch (error) {
req.log.error(error, "Error while executing request preprocessor");
handleInternalError(error as Error, req, res);
}
};
};
@@ -0,0 +1,176 @@
import { RequestHandler } from "express";
import { ZodIssue } from "zod";
import { initializeSseStream } from "../../../shared/streaming";
import { classifyErrorAndSend } from "../common";
import {
RequestPreprocessor,
blockZoomerOrigins,
countPromptTokens,
languageFilter,
setApiFormat,
transformOutboundPayload,
validateContextSize,
validateModelFamily,
validateVision,
applyQuotaLimits,
} from ".";
type RequestPreprocessorOptions = {
/**
* Functions to run before the request body is transformed between API
* formats. Use this to change the behavior of the transformation, such as for
* endpoints which can accept multiple API formats.
*/
beforeTransform?: RequestPreprocessor[];
/**
* Functions to run after the request body is transformed and token counts are
* assigned. Use this to perform validation or other actions that depend on
* the request body being in the final API format.
*/
afterTransform?: RequestPreprocessor[];
};
/**
* Returns a middleware function that processes the request body into the given
* API format, and then sequentially runs the given additional preprocessors.
* These should be used for validation and transformations that only need to
* happen once per request.
*
* These run first in the request lifecycle, a single time per request before it
* is added to the request queue. They aren't run again if the request is
* re-attempted after a rate limit.
*
* To run functions against requests every time they are re-attempted, write a
* ProxyReqMutator and pass it to createQueuedProxyMiddleware instead.
*/
export const createPreprocessorMiddleware = (
apiFormat: Parameters<typeof setApiFormat>[0],
{ beforeTransform, afterTransform }: RequestPreprocessorOptions = {}
): RequestHandler => {
const preprocessors: RequestPreprocessor[] = [
setApiFormat(apiFormat),
blockZoomerOrigins,
...(beforeTransform ?? []),
transformOutboundPayload,
countPromptTokens,
languageFilter,
...(afterTransform ?? []),
validateContextSize,
validateVision,
validateModelFamily,
applyQuotaLimits,
];
return async (...args) => executePreprocessors(preprocessors, args);
};
/**
* Returns a middleware function that specifically prepares requests for
* OpenAI's embeddings API. Tokens are not counted because embeddings requests
* are basically free.
*/
export const createEmbeddingsPreprocessorMiddleware = (): RequestHandler => {
const preprocessors: RequestPreprocessor[] = [
setApiFormat({ inApi: "openai", outApi: "openai", service: "openai" }),
(req) => void (req.promptTokens = req.outputTokens = 0),
];
return async (...args) => executePreprocessors(preprocessors, args);
};
async function executePreprocessors(
preprocessors: RequestPreprocessor[],
[req, res, next]: Parameters<RequestHandler>
) {
handleTestMessage(req, res, next);
if (res.headersSent) return;
try {
for (const preprocessor of preprocessors) {
await preprocessor(req);
}
next();
} catch (error) {
if (error.constructor.name === "ZodError") {
const issues = error?.issues
?.map((issue: ZodIssue) => `${issue.path.join(".")}: ${issue.message}`)
.join("; ");
req.log.warn({ issues }, "Prompt failed preprocessor validation.");
} else {
req.log.error(error, "Error while executing request preprocessor");
}
// If the requester has opted into streaming, the client probably won't
// handle a non-eventstream response, but we haven't initialized the SSE
// stream yet as that is typically done later by the request queue. We'll
// do that here and then call classifyErrorAndSend to use the streaming
// error handler.
const { stream } = req.body;
const isStreaming = stream === "true" || stream === true;
if (isStreaming && !res.headersSent) {
initializeSseStream(res);
}
classifyErrorAndSend(error as Error, req, res);
}
}
/**
* Bypasses the API call and returns a test message response if the request body
* is a known test message from SillyTavern. Otherwise these messages just waste
* API request quota and confuse users when the proxy is busy, because ST always
* makes them with `stream: false` (which is not allowed when the proxy is busy)
*/
const handleTestMessage: RequestHandler = (req, res) => {
const { method, body } = req;
if (method !== "POST") {
return;
}
if (isTestMessage(body)) {
req.log.info({ body }, "Received test message. Skipping API call.");
res.json({
id: "test-message",
object: "chat.completion",
created: Date.now(),
model: body.model,
// openai chat
choices: [
{
message: { role: "assistant", content: "Hello!" },
finish_reason: "stop",
index: 0,
},
],
// anthropic text
completion: "Hello!",
// anthropic chat
content: [{ type: "text", text: "Hello!" }],
// gemini
candidates: [
{
content: { parts: [{ text: "Hello!" }] },
finishReason: "stop",
},
],
proxy_note:
"SillyTavern connection test detected. Your prompt was not sent to the actual model and this response was generated by the proxy.",
});
}
};
function isTestMessage(body: any) {
const { messages, prompt, contents } = body;
if (messages) {
return (
messages.length === 1 &&
messages[0].role === "user" &&
messages[0].content === "Hi"
);
} else if (contents) {
return contents.length === 1 && contents[0].parts[0]?.text === "Hi";
} else {
return (
prompt?.trim() === "Human: Hi\n\nAssistant:" ||
prompt?.startsWith("Hi\n\n")
);
}
}
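// Example (not part of the diff): SillyTavern's OpenAI-format connection test
//   { "model": "...", "messages": [{ "role": "user", "content": "Hi" }] }
// matches isTestMessage, so the proxy answers locally instead of spending an
// upstream API call.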
@@ -0,0 +1,37 @@
import { hasAvailableQuota } from "../../../../shared/users/user-store";
import { isImageGenerationRequest, isTextGenerationRequest } from "../../common";
import { RequestPreprocessor } from "../index";
export class QuotaExceededError extends Error {
public quotaInfo: any;
constructor(message: string, quotaInfo: any) {
super(message);
this.name = "QuotaExceededError";
this.quotaInfo = quotaInfo;
}
}
export const applyQuotaLimits: RequestPreprocessor = (req) => {
const subjectToQuota =
isTextGenerationRequest(req) || isImageGenerationRequest(req);
if (!subjectToQuota || !req.user) return;
const requestedTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
if (
!hasAvailableQuota({
userToken: req.user.token,
model: req.body.model,
api: req.outboundApi,
requested: requestedTokens,
})
) {
throw new QuotaExceededError(
"You have exceeded your proxy token quota for this model.",
{
quota: req.user.tokenLimits,
used: req.user.tokenCounts,
requested: requestedTokens,
}
);
}
};
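// Worked example (hypothetical numbers, not from the original source): a user
// with a 100_000-token limit for the model family who has already consumed
// 99_000 tokens and submits a 1_500-token prompt reserving 512 output tokens:
//
//   const requested = 1_500 + 512;                    // promptTokens + outputTokens
//   const wouldExceed = 99_000 + requested > 100_000; // => true
//
// hasAvailableQuota would be expected to return false here, so the request
// fails with QuotaExceededError before any upstream key is consumed.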
@@ -1,12 +1,11 @@
import { RequestPreprocessor } from "../index";
const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai".split(",");
class ZoomerForbiddenError extends Error {
constructor(message: string) {
super(message);
this.name = "ZoomerForbiddenError";
}
}
@@ -14,11 +13,7 @@ class ForbiddenError extends Error {
* Blocks requests from Janitor AI users with a fake, scary error message so I
* stop getting emails asking for tech support.
*/
export const blockZoomerOrigins: RequestPreprocessor = (req) => {
const origin = req.headers.origin || req.headers.referer;
if (origin && DISALLOWED_ORIGIN_SUBSTRINGS.some((s) => origin.includes(s))) {
// Venus-derivatives send a test prompt to check if the proxy is working.
@@ -27,7 +22,7 @@ export const blockZoomerOrigins: ProxyRequestMiddleware = (_proxyReq, req) => {
return;
}
throw new ZoomerForbiddenError(
`Your access was terminated due to violation of our policies, please check your email for more information. If you believe this is in error and would like to appeal, please contact us through our help center at help.openai.com.`
);
}
@@ -0,0 +1,77 @@
import { RequestPreprocessor } from "../index";
import { countTokens } from "../../../../shared/tokenization";
import { assertNever } from "../../../../shared/utils";
import {
GoogleAIChatMessage,
MistralAIChatMessage,
OpenAIChatMessage,
} from "../../../../shared/api-schemas";
/**
* Given a request with an already-transformed body, counts the number of
* tokens and assigns the count to the request.
*/
export const countPromptTokens: RequestPreprocessor = async (req) => {
const service = req.outboundApi;
let result;
switch (service) {
case "openai": {
req.outputTokens = req.body.max_completion_tokens || req.body.max_tokens;
const prompt: OpenAIChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
case "openai-text": {
req.outputTokens = req.body.max_tokens;
const prompt: string = req.body.prompt;
result = await countTokens({ req, prompt, service });
break;
}
case "anthropic-chat": {
req.outputTokens = req.body.max_tokens;
let system = req.body.system ?? "";
if (Array.isArray(system)) {
system = system
.map((m: { type: string; text: string }) => m.text)
.join("\n");
}
const prompt = { system, messages: req.body.messages };
result = await countTokens({ req, prompt, service });
break;
}
case "anthropic-text": {
req.outputTokens = req.body.max_tokens_to_sample;
const prompt: string = req.body.prompt;
result = await countTokens({ req, prompt, service });
break;
}
case "google-ai": {
req.outputTokens = req.body.generationConfig.maxOutputTokens;
const prompt: GoogleAIChatMessage[] = req.body.contents;
result = await countTokens({ req, prompt, service });
break;
}
case "mistral-ai":
case "mistral-text": {
req.outputTokens = req.body.max_tokens;
const prompt: string | MistralAIChatMessage[] =
req.body.messages ?? req.body.prompt;
result = await countTokens({ req, prompt, service });
break;
}
case "openai-image": {
req.outputTokens = 1;
result = await countTokens({ req, service });
break;
}
default:
assertNever(service);
}
req.promptTokens = result.token_count;
req.log.debug({ result: result }, "Counted prompt tokens.");
req.tokenizerInfo = req.tokenizerInfo ?? {};
req.tokenizerInfo = { ...req.tokenizerInfo, ...result };
};
@@ -0,0 +1,94 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../../config";
import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
import { BadRequestError } from "../../../../shared/errors";
import {
MistralAIChatMessage,
OpenAIChatMessage,
flattenAnthropicMessages,
} from "../../../../shared/api-schemas";
import { GoogleAIV1GenerateContentSchema } from "../../../../shared/api-schemas/google-ai";
const rejectedClients = new Map<string, number>();
setInterval(() => {
rejectedClients.forEach((count, ip) => {
if (count > 0) {
rejectedClients.set(ip, Math.floor(count / 2));
} else {
rejectedClients.delete(ip);
}
});
}, 30000);
/**
* Block requests containing blacklisted phrases. Repeated rejections from the
* same IP address will be throttled.
*/
export const languageFilter: RequestPreprocessor = async (req) => {
if (!config.rejectPhrases.length) return;
const prompt = getPromptFromRequest(req);
const match = config.rejectPhrases.find((phrase) =>
prompt.match(new RegExp(phrase, "i"))
);
if (match) {
const ip = req.ip;
const rejections = (rejectedClients.get(ip) || 0) + 1;
const delay = Math.min(60000, Math.pow(2, rejections - 1) * 1000);
rejectedClients.set(ip, rejections);
req.log.warn(
{ match, ip, rejections, delay },
"Prompt contains rejected phrase"
);
await new Promise((resolve) => {
req.res!.once("close", resolve);
setTimeout(resolve, delay);
});
throw new BadRequestError(config.rejectMessage);
}
};
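// Sketch of the throttle schedule implied above (assuming no decay between
// rejections): the delay doubles per rejection and is capped at one minute.
//
//   const delayFor = (rejections: number) =>
//     Math.min(60000, Math.pow(2, rejections - 1) * 1000);
//   delayFor(1); // 1000 ms
//   delayFor(4); // 8000 ms
//   delayFor(7); // 60000 ms (2^6 * 1000 = 64000, capped)
//
// The 30-second interval above halves each counter, so a client that stops
// sending rejected prompts decays back toward zero and is eventually evicted.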
/*
TODO: this is not type safe and does not raise errors if request body zod schema
is changed.
*/
function getPromptFromRequest(req: Request) {
const service = req.outboundApi;
const body = req.body;
switch (service) {
case "anthropic-chat":
return flattenAnthropicMessages(body.messages);
case "openai":
case "mistral-ai":
return body.messages
.map((msg: OpenAIChatMessage | MistralAIChatMessage) => {
const text = Array.isArray(msg.content)
? msg.content
.map((c) => {
if ("text" in c) return c.text;
})
.join()
: msg.content;
return `${msg.role}: ${text}`;
})
.join("\n\n");
case "anthropic-text":
case "openai-text":
case "openai-image":
case "mistral-text":
return body.prompt;
case "google-ai": {
const b = body as z.infer<typeof GoogleAIV1GenerateContentSchema>;
return [
b.systemInstruction?.parts.map((p) => p.text),
...b.contents.flatMap((c) => c.parts.map((p) => p.text)),
].join("\n");
}
default:
assertNever(service);
}
}
@@ -0,0 +1,30 @@
import { Request } from "express";
import { APIFormat } from "../../../../shared/key-management";
import { LLMService } from "../../../../shared/models";
import { RequestPreprocessor } from "../index";
export const setApiFormat = (api: {
/**
* The API format the user made the request in and expects the response to be
* in.
*/
inApi: Request["inboundApi"];
/**
* The API format the proxy will make the request in and expects the response
* to be in. If different from `inApi`, the proxy will transform the user's
* request body to this format, and will transform the response body or stream
* events from this format.
*/
outApi: APIFormat;
/**
* The service the request will be sent to, which determines authentication
* and possibly the streaming transport.
*/
service: LLMService;
}): RequestPreprocessor => {
return function configureRequestApiFormat(req) {
req.inboundApi = api.inApi;
req.outboundApi = api.outApi;
req.service = api.service;
};
};
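// Usage sketch (the argument values are illustrative, not taken from this
// file): an endpoint that accepts OpenAI-format requests but forwards them to
// the Anthropic Messages API might be configured like so.
//
//   const preprocessor = setApiFormat({
//     inApi: "openai",
//     outApi: "anthropic-chat",
//     service: "anthropic",
//   });
//   await preprocessor(req); // assigns req.inboundApi/outboundApi/service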
@@ -0,0 +1,89 @@
import { Request } from "express";
import {
API_REQUEST_VALIDATORS,
API_REQUEST_TRANSFORMERS,
} from "../../../../shared/api-schemas";
import { BadRequestError } from "../../../../shared/errors";
import { fixMistralPrompt } from "../../../../shared/api-schemas/mistral-ai";
import {
isImageGenerationRequest,
isTextGenerationRequest,
} from "../../common";
import { RequestPreprocessor } from "../index";
/** Transforms an incoming request body to one that matches the target API. */
export const transformOutboundPayload: RequestPreprocessor = async (req) => {
const alreadyTransformed = req.retryCount > 0;
const notTransformable =
!isTextGenerationRequest(req) && !isImageGenerationRequest(req);
if (alreadyTransformed) {
return;
} else if (notTransformable) {
// This is probably an indication of a bug in the proxy.
const { inboundApi, outboundApi, method, path } = req;
req.log.warn(
{ inboundApi, outboundApi, method, path },
"`transformOutboundPayload` called on a non-transformable request."
);
return;
}
applyMistralPromptFixes(req);
// Native prompts are those which were already provided by the client in the
// target API format. We don't need to transform them.
const isNativePrompt = req.inboundApi === req.outboundApi;
if (isNativePrompt) {
const result = API_REQUEST_VALIDATORS[req.inboundApi].parse(req.body);
req.body = result;
return;
}
// Prompt requires translation from one API format to another.
const transformation = `${req.inboundApi}->${req.outboundApi}` as const;
const transFn = API_REQUEST_TRANSFORMERS[transformation];
if (transFn) {
req.log.info({ transformation }, "Transforming request...");
req.body = await transFn(req);
return;
}
throw new BadRequestError(
`${transformation} proxying is not supported. Make sure your client is configured to send requests in the correct format and to the correct endpoint.`
);
};
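// For example (illustrative): an OpenAI-format request bound for the Anthropic
// Messages API resolves the transformer under the "openai->anthropic-chat"
// key, mirroring the lookup above.
//
//   const transformation = "openai->anthropic-chat" as const;
//   const transFn = API_REQUEST_TRANSFORMERS[transformation];
//   if (transFn) req.body = await transFn(req);
//
// If no transformer exists for the pair, the BadRequestError above is thrown.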
// handles weird cases that don't fit into our abstractions
function applyMistralPromptFixes(req: Request): void {
if (req.inboundApi === "mistral-ai") {
// Mistral Chat is very similar to OpenAI but not identical and many clients
// don't properly handle the differences. We will try to validate the
// mistral prompt and try to fix it if it fails. It will be re-validated
// after this function returns.
const result = API_REQUEST_VALIDATORS["mistral-ai"].parse(req.body);
req.body.messages = fixMistralPrompt(result.messages);
req.log.info(
{ n: req.body.messages.length, prev: result.messages.length },
"Applied Mistral chat prompt fixes."
);
// If the prompt relies on `prefix: true` for the last message, we need to
// convert it to a text completions request because AWS Mistral support for
// this feature is broken.
// On Mistral La Plateforme, we can't do this because they don't expose
// a text completions endpoint.
const { messages } = req.body;
const lastMessage = messages && messages[messages.length - 1];
if (lastMessage?.role === "assistant" && req.service === "aws") {
// enable prefix if client forgot, otherwise the template will insert an
// eos token which is very unlikely to be what the client wants.
lastMessage.prefix = true;
req.outboundApi = "mistral-text";
req.log.info(
"Native Mistral chat prompt relies on assistant message prefix. Converting to text completions request."
);
}
}
}
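// Illustrative example of the prefix conversion above: a native Mistral chat
// request on AWS whose last message is an assistant turn.
//
//   // before: { messages: [..., { role: "assistant", content: "The answer" }] }
//   // after applyMistralPromptFixes (when req.service === "aws"):
//   //   the last message gains prefix: true and req.outboundApi becomes
//   //   "mistral-text", so the prompt continues from the assistant's text.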
@@ -0,0 +1,139 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../../config";
import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
// todo: make configurable
const GOOGLE_AI_MAX_CONTEXT = 2048000;
const MISTRAL_AI_MAX_CONTEXT = 131072;
/**
* Assigns `req.promptTokens` and `req.outputTokens` based on the request body
* and outbound API format, which combined determine the size of the context.
* If the context is too large, an error is thrown.
* This preprocessor should run after any preprocessor that transforms the
* request body.
*/
export const validateContextSize: RequestPreprocessor = async (req) => {
assertRequestHasTokenCounts(req);
const promptTokens = req.promptTokens;
const outputTokens = req.outputTokens;
const contextTokens = promptTokens + outputTokens;
const model = req.body.model;
let proxyMax: number;
switch (req.outboundApi) {
case "openai":
case "openai-text":
proxyMax = OPENAI_MAX_CONTEXT;
break;
case "anthropic-chat":
case "anthropic-text":
proxyMax = CLAUDE_MAX_CONTEXT;
break;
case "google-ai":
proxyMax = GOOGLE_AI_MAX_CONTEXT;
break;
case "mistral-ai":
case "mistral-text":
proxyMax = MISTRAL_AI_MAX_CONTEXT;
break;
case "openai-image":
return;
default:
assertNever(req.outboundApi);
}
proxyMax ||= Number.MAX_SAFE_INTEGER;
if (req.user?.type === "special") {
req.log.debug("Special user, not enforcing proxy context limit.");
proxyMax = Number.MAX_SAFE_INTEGER;
}
let modelMax: number;
if (model.match(/gpt-3.5-turbo-16k/)) {
modelMax = 16384;
} else if (model.match(/^gpt-4o/)) {
modelMax = 128000;
} else if (model.match(/^chatgpt-4o/)) {
modelMax = 128000;
} else if (model.match(/gpt-4-turbo(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 131072;
} else if (model.match(/gpt-4-turbo(-preview)?$/)) {
modelMax = 131072;
} else if (model.match(/gpt-4-(0125|1106)(-preview)?$/)) {
modelMax = 131072;
} else if (model.match(/^gpt-4(-\d{4})?-vision(-preview)?$/)) {
modelMax = 131072;
} else if (model.match(/^o1-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 128000;
} else if (model.match(/^o1(-preview)?(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 128000;
} else if (model.match(/gpt-3.5-turbo/)) {
modelMax = 16384;
} else if (model.match(/gpt-4-32k/)) {
modelMax = 32768;
} else if (model.match(/gpt-4/)) {
modelMax = 8192;
} else if (model.match(/^claude-(?:instant-)?v1(?:\.\d)?-100k/)) {
modelMax = 100000;
} else if (model.match(/^claude-(?:instant-)?v1(?:\.\d)?$/)) {
modelMax = 9000;
} else if (model.match(/^claude-2\.0/)) {
modelMax = 100000;
} else if (model.match(/^claude-2/)) {
modelMax = 200000;
} else if (model.match(/^claude-3/)) {
modelMax = 200000;
} else if (model.match(/^gemini-/)) {
modelMax = 1024000;
} else if (model.match(/^anthropic\.claude-3/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude-v2:\d/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude/)) {
modelMax = 100000;
} else if (model.match(/tral/)) {
// catches mistral, mixtral, codestral, mathstral, etc. mistral models have
// no name convention and wildly different context windows so this is a
// catch-all
modelMax = MISTRAL_AI_MAX_CONTEXT;
} else {
req.log.warn({ model }, "Unknown model, using 200k token limit.");
modelMax = 200000;
}
const finalMax = Math.min(proxyMax, modelMax);
z.object({
tokens: z
.number()
.int()
.max(finalMax, {
message: `Your request exceeds the context size limit. (max: ${finalMax} tokens, requested: ${promptTokens} prompt + ${outputTokens} output = ${contextTokens} context tokens)`,
}),
}).parse({ tokens: contextTokens });
req.log.debug(
{ promptTokens, outputTokens, contextTokens, modelMax, proxyMax },
"Prompt size validated"
);
req.tokenizerInfo.prompt_tokens = promptTokens;
req.tokenizerInfo.completion_tokens = outputTokens;
req.tokenizerInfo.max_model_tokens = modelMax;
req.tokenizerInfo.max_proxy_tokens = proxyMax;
};
function assertRequestHasTokenCounts(
req: Request
): asserts req is Request & { promptTokens: number; outputTokens: number } {
z.object({
promptTokens: z.number().int().min(1),
outputTokens: z.number().int().min(1),
})
.nonstrict()
.parse({ promptTokens: req.promptTokens, outputTokens: req.outputTokens });
}
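// Worked example (hypothetical numbers, assuming the proxy-level OpenAI limit
// is not lower): for model "gpt-4o", modelMax is 128000. A request with
// promptTokens = 120_000 and outputTokens = 16_000 gives:
//
//   const contextTokens = 120_000 + 16_000; // 136_000
//   const finalMax = Math.min(proxyMax, 128_000);
//   // 136_000 > 128_000, so the z.object schema above rejects the request
//   // with the "exceeds the context size limit" message.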
@@ -0,0 +1,16 @@
import { config } from "../../../../config";
import { ForbiddenError } from "../../../../shared/errors";
import { getModelFamilyForRequest } from "../../../../shared/models";
import { RequestPreprocessor } from "../index";
/**
* Ensures the selected model family is enabled by the proxy configuration.
*/
export const validateModelFamily: RequestPreprocessor = (req) => {
const family = getModelFamilyForRequest(req);
if (!config.allowedModelFamilies.includes(family)) {
throw new ForbiddenError(
`Model family '${family}' is not enabled on this proxy`
);
}
};
@@ -0,0 +1,44 @@
import { config } from "../../../../config";
import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
import { containsImageContent as containsImageContentOpenAI } from "../../../../shared/api-schemas/openai";
import { containsImageContent as containsImageContentAnthropic } from "../../../../shared/api-schemas/anthropic";
import { ForbiddenError } from "../../../../shared/errors";
/**
* Rejects prompts containing images if multimodal prompts are disabled.
*/
export const validateVision: RequestPreprocessor = async (req) => {
if (req.service === undefined) {
throw new Error("Request service must be set before validateVision");
}
if (req.user?.type === "special") return;
if (config.allowedVisionServices.includes(req.service)) return;
// vision not allowed for req's service, block prompts with images
let hasImage = false;
switch (req.outboundApi) {
case "openai":
hasImage = containsImageContentOpenAI(req.body.messages);
break;
case "anthropic-chat":
hasImage = containsImageContentAnthropic(req.body.messages);
break;
case "anthropic-text":
case "google-ai":
case "mistral-ai":
case "mistral-text":
case "openai-image":
case "openai-text":
return;
default:
assertNever(req.outboundApi);
}
if (hasImage) {
throw new ForbiddenError(
"Prompts containing images are not permitted. Disable 'Send Inline Images' in your client and try again."
);
}
};
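// Illustrative case: an OpenAI-format request whose messages include an
// image_url content part would be rejected here unless req.service is listed
// in config.allowedVisionServices.
//
//   const msg = {
//     role: "user",
//     content: [{ type: "image_url", image_url: { url: "data:image/png;base64,..." } }],
//   };
//   // containsImageContentOpenAI([msg]) => true (assumed behavior of the helper)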
@@ -0,0 +1,135 @@
import { Request, Response } from "express";
import http from "http";
import ProxyServer from "http-proxy";
import { Readable } from "stream";
import {
createProxyMiddleware,
Options,
debugProxyErrorsPlugin,
proxyEventsPlugin,
} from "http-proxy-middleware";
import { ProxyReqMutator, stripHeaders } from "./index";
import { createOnProxyResHandler, ProxyResHandlerWithBody } from "../response";
import { createQueueMiddleware } from "../../queue";
import { getHttpAgents } from "../../../shared/network";
import { classifyErrorAndSend } from "../common";
/**
* Options for the `createQueuedProxyMiddleware` factory function.
*/
type ProxyMiddlewareFactoryOptions = {
/**
* Functions which receive a ProxyReqManager and can modify the request before
* it is proxied. The modifications will be automatically reverted if the
* request needs to be returned to the queue.
*/
mutations?: ProxyReqMutator[];
/**
* The target URL to proxy requests to. This can be a string or a function
* which accepts the request and returns a string.
*/
target: string | Options<Request>["router"];
/**
* A function which receives the proxy response and the JSON-decoded request
* body. Only fired for non-streaming responses; streaming responses are
* handled in `handle-streaming-response.ts`.
*/
blockingResponseHandler?: ProxyResHandlerWithBody;
};
/**
* Returns a middleware function that accepts incoming requests and places them
* into the request queue. When the request is dequeued, it is proxied to the
* target URL using the given options and middleware. Non-streaming responses
* are handled by the given `blockingResponseHandler`.
*/
export function createQueuedProxyMiddleware({
target,
mutations,
blockingResponseHandler,
}: ProxyMiddlewareFactoryOptions) {
const hpmTarget = typeof target === "string" ? target : "https://setbyrouter";
const hpmRouter = typeof target === "function" ? target : undefined;
const [httpAgent, httpsAgent] = getHttpAgents();
const agent = hpmTarget.startsWith("http:") ? httpAgent : httpsAgent;
const proxyMiddleware = createProxyMiddleware<Request, Response>({
target: hpmTarget,
router: hpmRouter,
agent,
changeOrigin: true,
toProxy: true,
selfHandleResponse: typeof blockingResponseHandler === "function",
// Disable HPM logger plugin (requires re-adding the other default plugins).
// Contrary to name, debugProxyErrorsPlugin is not just for debugging and
// fixes several error handling/connection close issues in http-proxy core.
ejectPlugins: true,
// Inferred (via Options<express.Request>) as Plugin<express.Request>, but
// the default plugins only allow http.IncomingMessage for TReq. They are
// compatible with express.Request, so we can use them. `Plugin` type is not
// exported for some reason.
plugins: [
debugProxyErrorsPlugin,
pinoLoggerPlugin,
proxyEventsPlugin,
] as any,
on: {
proxyRes: createOnProxyResHandler(
blockingResponseHandler ? [blockingResponseHandler] : []
),
error: classifyErrorAndSend,
},
buffer: ((req: Request) => {
// This is a hack/monkey patch and is not part of the official
// http-proxy-middleware package. See patches/http-proxy+1.18.1.patch.
let payload = req.body;
if (typeof payload === "string") {
payload = Buffer.from(payload);
}
const stream = new Readable();
stream.push(payload);
stream.push(null);
return stream;
}) as any,
});
return createQueueMiddleware({
mutations: [stripHeaders, ...(mutations ?? [])],
proxyMiddleware,
});
}
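// Usage sketch (target URL, mutator names, and route are illustrative, not
// from this file):
//
//   const anthropicProxy = createQueuedProxyMiddleware({
//     target: "https://api.anthropic.com",
//     mutations: [addKey, finalizeBody],
//     blockingResponseHandler: anthropicResponseHandler,
//   });
//   router.post("/v1/messages", anthropicProxy);
//
// Note that stripHeaders is always prepended to the mutation list, so callers
// only pass service-specific mutators.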
type ProxiedResponse = http.IncomingMessage & Response & any;
function pinoLoggerPlugin(proxyServer: ProxyServer<Request>) {
proxyServer.on("error", (err, req, res, target) => {
req.log.error(
{ originalUrl: req.originalUrl, targetUrl: String(target), err },
"Error occurred while proxying request to target"
);
});
proxyServer.on("proxyReq", (proxyReq, req) => {
const { protocol, host, path } = proxyReq;
req.log.info(
{
from: req.originalUrl,
to: `${protocol}//${host}${path}`,
},
"Sending request to upstream API..."
);
});
proxyServer.on("proxyRes", (proxyRes: ProxiedResponse, req, _res) => {
const { protocol, host, path } = proxyRes.req;
req.log.info(
{
target: `${protocol}//${host}${path}`,
status: proxyRes.statusCode,
contentType: proxyRes.headers["content-type"],
contentEncoding: proxyRes.headers["content-encoding"],
contentLength: proxyRes.headers["content-length"],
transferEncoding: proxyRes.headers["transfer-encoding"],
},
"Got response from upstream API."
);
});
}
@@ -0,0 +1,112 @@
import { Request } from "express";
import { Key } from "../../../shared/key-management";
import { assertNever } from "../../../shared/utils";
/**
* Represents a change to the request that will be reverted if the request
* fails.
*/
interface ProxyReqMutation {
target: "header" | "path" | "body" | "api-key" | "signed-request";
key?: string;
originalValue: any | undefined;
}
/**
* Manages a request's headers, body, and path, allowing them to be modified
* before the request is proxied and automatically reverted if the request
* needs to be retried.
*/
export class ProxyReqManager {
private req: Request;
private mutations: ProxyReqMutation[] = [];
/**
* A read-only proxy of the request object. Avoid changing any properties
* here as they will persist across retries.
*/
public readonly request: Readonly<Request>;
constructor(req: Request) {
this.req = req;
this.request = new Proxy(req, {
get: (target, prop) => {
if (typeof prop === "string") return target[prop as keyof Request];
return undefined;
},
});
}
setHeader(name: string, newValue: string): void {
const originalValue = this.req.get(name);
this.mutations.push({ target: "header", key: name, originalValue });
this.req.headers[name.toLowerCase()] = newValue;
}
removeHeader(name: string): void {
const originalValue = this.req.get(name);
this.mutations.push({ target: "header", key: name, originalValue });
delete this.req.headers[name.toLowerCase()];
}
setBody(newBody: any): void {
const originalValue = this.req.body;
this.mutations.push({ target: "body", key: "body", originalValue });
this.req.body = newBody;
}
setKey(newKey: Key): void {
const originalValue = this.req.key;
this.mutations.push({ target: "api-key", key: "key", originalValue });
this.req.key = newKey;
}
setPath(newPath: string): void {
const originalValue = this.req.path;
this.mutations.push({ target: "path", key: "path", originalValue });
this.req.url = newPath;
}
setSignedRequest(newSignedRequest: typeof this.req.signedRequest): void {
const originalValue = this.req.signedRequest;
this.mutations.push({ target: "signed-request", key: "signedRequest", originalValue });
this.req.signedRequest = newSignedRequest;
}
hasChanged(): boolean {
return this.mutations.length > 0;
}
revert(): void {
for (const mutation of this.mutations.reverse()) {
switch (mutation.target) {
case "header":
if (mutation.originalValue === undefined) {
delete this.req.headers[mutation.key!.toLowerCase()];
continue;
} else {
this.req.headers[mutation.key!.toLowerCase()] =
mutation.originalValue;
}
break;
case "path":
this.req.url = mutation.originalValue;
break;
case "body":
this.req.body = mutation.originalValue;
break;
case "api-key":
// We don't reset the key here because it's not a property of the
// inbound request, so we'd only ever be reverting it to null.
break;
case "signed-request":
this.req.signedRequest = mutation.originalValue;
break;
default:
assertNever(mutation.target);
}
}
this.mutations = [];
}
}
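// Usage sketch (illustrative): the queue creates a manager per attempt,
// applies mutations, and reverts them if the request must be re-enqueued.
//
//   const manager = new ProxyReqManager(req);
//   manager.setHeader("authorization", `Bearer ${key.key}`); // hypothetical key
//   manager.setPath("/v1/messages");
//   // ...the proxy attempt fails with a retryable error...
//   manager.revert(); // restores header and path, most recent mutation first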
@@ -1,10 +0,0 @@
import { ProxyRequestMiddleware } from ".";
/**
* Removes origin and referer headers before sending the request to the API for
* privacy reasons.
**/
export const removeOriginHeaders: ProxyRequestMiddleware = (proxyReq) => {
proxyReq.setHeader("origin", "");
proxyReq.setHeader("referer", "");
};
@@ -1,13 +0,0 @@
import { Request } from "express";
import { APIFormat } from "../../../shared/key-management";
import { RequestPreprocessor } from ".";
export const setApiFormat = (api: {
inApi: Request["inboundApi"];
outApi: APIFormat;
}): RequestPreprocessor => {
return (req) => {
req.inboundApi = api.inApi;
req.outboundApi = api.outApi;
};
};
@@ -1,112 +0,0 @@
/**
* Transforms a KoboldAI payload into an OpenAI payload.
* @deprecated Kobold input format isn't supported anymore as all popular
* frontends support reverse proxies or changing their base URL. It adds too
* many edge cases to be worth maintaining and doesn't work with newer features.
*/
import { logger } from "../../../logger";
import type { ProxyRequestMiddleware } from ".";
// Kobold requests look like this:
// body:
// {
// prompt: "Aqua is character from Konosuba anime. Aqua is a goddess, before life in the Fantasy World, she was a goddess of water who guided humans to the afterlife. Aqua looks like young woman with beauty no human could match. Aqua has light blue hair, blue eyes, slim figure, long legs, wide hips, blue waist-long hair that is partially tied into a loop with a spherical clip. Aqua's measurements are 83-56-83 cm. Aqua's height 157cm. Aqua wears sleeveless dark-blue dress with white trimmings, extremely short dark blue miniskirt, green bow around her chest with a blue gem in the middle, detached white sleeves with blue and golden trimmings, thigh-high blue heeled boots over white stockings with blue trimmings. Aqua is very strong in water magic, but a little stupid, so she does not always use it to the place. Aqua is high-spirited, cheerful, carefree. Aqua rarely thinks about the consequences of her actions and always acts or speaks on her whims. Because very easy to taunt Aqua with jeers or lure her with praises.\n" +
// "Aqua's personality: high-spirited, likes to party, carefree, cheerful.\n" +
// 'Circumstances and context of the dialogue: Aqua is standing in the city square and is looking for new followers\n' +
// 'This is how Aqua should talk\n' +
// 'You: Hi Aqua, I heard you like to spend time in the pub.\n' +
// "Aqua: *excitedly* Oh my goodness, yes! I just love spending time at the pub! It's so much fun to talk to all the adventurers and hear about their exciting adventures! And you are?\n" +
// "You: I'm a new here and I wanted to ask for your advice.\n" +
// 'Aqua: *giggles* Oh, advice! I love giving advice! And in gratitude for that, treat me to a drink! *gives signals to the bartender*\n' +
// 'This is how Aqua should talk\n' +
// 'You: Hello\n' +
// "Aqua: *excitedly* Hello there, dear! Are you new to Axel? Don't worry, I, Aqua the goddess of water, am here to help you! Do you need any assistance? And may I say, I look simply radiant today! *strikes a pose and looks at you with puppy eyes*\n" +
// '\n' +
// 'Then the roleplay chat between You and Aqua begins.\n' +
// "Aqua: *She is in the town square of a city named Axel. It's morning on a Saturday and she suddenly notices a person who looks like they don't know what they're doing. She approaches him and speaks* \n" +
// '\n' +
// `"Are you new here? Do you need help? Don't worry! I, Aqua the Goddess of Water, shall help you! Do I look beautiful?" \n` +
// '\n' +
// '*She strikes a pose and looks at him with puppy eyes.*\n' +
// 'You: test\n' +
// 'You: test\n' +
// 'You: t\n' +
// 'You: test\n',
// use_story: false,
// use_memory: false,
// use_authors_note: false,
// use_world_info: false,
// max_context_length: 2048,
// max_length: 180,
// rep_pen: 1.1,
// rep_pen_range: 1024,
// rep_pen_slope: 0.9,
// temperature: 0.65,
// tfs: 0.9,
// top_a: 0,
// top_k: 0,
// top_p: 0.9,
// typical: 1,
// sampler_order: [
// 6, 0, 1, 2,
// 3, 4, 5
// ],
// singleline: false
// }
// OpenAI expects this body:
// { model: 'gpt-3.5-turbo', temperature: 0.65, top_p: 0.9, max_tokens: 180, messages }
// there's also a frequency_penalty but it's not clear how that maps to kobold's
// rep_pen.
// messages is an array of { role: "system" | "assistant" | "user", content: ""}
// kobold only sends us the entire prompt. we can try to split the last two
// lines into user and assistant messages, but that's not always correct. For
// now it will have to do.
/**
* Transforms a KoboldAI payload into an OpenAI payload.
* @deprecated Probably doesn't work anymore, idk.
**/
export const transformKoboldPayload: ProxyRequestMiddleware = (
_proxyReq,
req
) => {
// if (req.inboundApi !== "kobold") {
// throw new Error("transformKoboldPayload called for non-kobold request.");
// }
const { body } = req;
const { prompt, max_length, rep_pen, top_p, temperature } = body;
if (!max_length) {
logger.error("KoboldAI request missing max_length.");
throw new Error("You must specify a max_length parameter.");
}
const promptLines = prompt.split("\n");
// The very last line is the contentless "Assistant: " hint to the AI.
// Tavern just leaves an empty line, Agnai includes the AI's name.
const assistantHint = promptLines.pop();
// The second-to-last line is the user's prompt, generally.
const userPrompt = promptLines.pop();
const messages = [
{ role: "system", content: promptLines.join("\n") },
{ role: "user", content: userPrompt },
{ role: "assistant", content: assistantHint },
];
// Kobold doesn't select a model, so this deprecated transformer just
// hardcodes GPT-4 regardless of which key the addKey rewriter assigned.
const model = "gpt-4";
const newBody = {
model,
temperature,
top_p,
frequency_penalty: rep_pen, // remove this if model turns schizo
max_tokens: max_length,
messages,
};
req.body = newBody;
};
@@ -1,336 +0,0 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../config";
import { OpenAIPromptMessage } from "../../../shared/tokenization";
import { isCompletionRequest } from "../common";
import { RequestPreprocessor } from ".";
import { APIFormat } from "../../../shared/key-management";
const CLAUDE_OUTPUT_MAX = config.maxOutputTokensAnthropic;
const OPENAI_OUTPUT_MAX = config.maxOutputTokensOpenAI;
// https://console.anthropic.com/docs/api/reference#-v1-complete
const AnthropicV1CompleteSchema = z.object({
model: z.string().regex(/^claude-/, "Model must start with 'claude-'"),
prompt: z.string({
required_error:
"No prompt found. Are you sending an OpenAI-formatted request to the Claude endpoint?",
}),
max_tokens_to_sample: z.coerce
.number()
.int()
.transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
stop_sequences: z.array(z.string()).optional(),
stream: z.boolean().optional().default(false),
temperature: z.coerce.number().optional().default(1),
top_k: z.coerce.number().optional().default(-1),
top_p: z.coerce.number().optional().default(-1),
metadata: z.any().optional(),
});
// https://platform.openai.com/docs/api-reference/chat/create
const OpenAIV1ChatCompletionSchema = z.object({
model: z.string().regex(/^gpt/, "Model must start with 'gpt-'"),
messages: z.array(
z.object({
role: z.enum(["system", "user", "assistant"]),
content: z.string(),
name: z.string().optional(),
}),
{
required_error:
"No `messages` found. Ensure you've set the correct completion endpoint.",
invalid_type_error:
"Messages were not formatted correctly. Refer to the OpenAI Chat API documentation for more information.",
}
),
temperature: z.number().optional().default(1),
top_p: z.number().optional().default(1),
n: z
.literal(1, {
errorMap: () => ({
message: "You may only request a single completion at a time.",
}),
})
.optional(),
stream: z.boolean().optional().default(false),
stop: z.union([z.string(), z.array(z.string())]).optional(),
max_tokens: z.coerce
.number()
.int()
.nullish()
.default(16)
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
frequency_penalty: z.number().optional().default(0),
presence_penalty: z.number().optional().default(0),
logit_bias: z.any().optional(),
user: z.string().optional(),
});
const OpenAIV1TextCompletionSchema = z
.object({
model: z
.string()
.regex(
/^gpt-3.5-turbo-instruct/,
"Model must start with 'gpt-3.5-turbo-instruct'"
),
prompt: z.string({
required_error:
"No `prompt` found. Ensure you've set the correct completion endpoint.",
}),
logprobs: z.number().int().nullish().default(null),
echo: z.boolean().optional().default(false),
best_of: z.literal(1).optional(),
stop: z.union([z.string(), z.array(z.string()).max(4)]).optional(),
suffix: z.string().optional(),
})
.merge(OpenAIV1ChatCompletionSchema.omit({ messages: true }));
// https://developers.generativeai.google/api/rest/generativelanguage/models/generateText
const PalmV1GenerateTextSchema = z.object({
model: z.string().regex(/^\w+-bison-\d{3}$/),
prompt: z.object({ text: z.string() }),
temperature: z.number().optional(),
maxOutputTokens: z.coerce
.number()
.int()
.optional()
.default(16)
.transform((v) => Math.min(v, 1024)), // TODO: Add config
candidateCount: z.literal(1).optional(),
topP: z.number().optional(),
topK: z.number().optional(),
safetySettings: z.array(z.object({})).max(0).optional(),
stopSequences: z.array(z.string()).max(5).optional(),
});
const VALIDATORS: Record<APIFormat, z.ZodSchema<any>> = {
anthropic: AnthropicV1CompleteSchema,
openai: OpenAIV1ChatCompletionSchema,
"openai-text": OpenAIV1TextCompletionSchema,
"google-palm": PalmV1GenerateTextSchema,
};
/** Transforms an incoming request body to one that matches the target API. */
export const transformOutboundPayload: RequestPreprocessor = async (req) => {
const sameService = req.inboundApi === req.outboundApi;
const alreadyTransformed = req.retryCount > 0;
const notTransformable = !isCompletionRequest(req);
if (alreadyTransformed || notTransformable) {
return;
}
if (sameService) {
const result = VALIDATORS[req.inboundApi].safeParse(req.body);
if (!result.success) {
req.log.error(
{ issues: result.error.issues, body: req.body },
"Request validation failed"
);
throw result.error;
}
req.body = result.data;
return;
}
if (req.inboundApi === "openai" && req.outboundApi === "anthropic") {
req.body = openaiToAnthropic(req);
return;
}
if (req.inboundApi === "openai" && req.outboundApi === "google-palm") {
req.body = openaiToPalm(req);
return;
}
if (req.inboundApi === "openai" && req.outboundApi === "openai-text") {
req.body = openaiToOpenaiText(req);
return;
}
throw new Error(
`'${req.inboundApi}' -> '${req.outboundApi}' request proxying is not supported. Make sure your client is configured to use the correct API.`
);
};
function openaiToAnthropic(req: Request) {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) {
req.log.error(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-Anthropic request"
);
throw result.error;
}
// Anthropic has started versioning their API, indicated by an HTTP header
// `anthropic-version`. The new June 2023 version is not backwards compatible
// with our OpenAI-to-Anthropic transformations so we need to explicitly
// request the older version for now. 2023-01-01 will be removed in September.
// https://docs.anthropic.com/claude/reference/versioning
req.headers["anthropic-version"] = "2023-01-01";
const { messages, ...rest } = result.data;
const prompt = openAIMessagesToClaudePrompt(messages);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
// Recommended by Anthropic
stops.push("\n\nHuman:");
// Helps with jailbreak prompts that send fake system messages and multi-bot
// chats that prefix bot messages with "System: Respond as <bot name>".
stops.push("\n\nSystem:");
// Remove duplicates
stops = [...new Set(stops)];
return {
...rest,
// Model may be overridden in `calculate-context-size.ts` to avoid having
// a circular dependency (`calculate-context-size.ts` needs an already-
// transformed request body to count tokens, but this function would like
// to know the count to select a model).
model: process.env.CLAUDE_SMALL_MODEL || "claude-v1",
prompt: prompt,
max_tokens_to_sample: rest.max_tokens,
stop_sequences: stops,
};
}
function openaiToOpenaiText(req: Request) {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) {
req.log.error(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-OpenAI-text request"
);
throw result.error;
}
const { messages, ...rest } = result.data;
const prompt = flattenOpenAiChatMessages(messages);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
stops.push("\n\nUser:");
stops = [...new Set(stops)];
const transformed = { ...rest, prompt: prompt, stop: stops };
const validated = OpenAIV1TextCompletionSchema.parse(transformed);
return validated;
}
function openaiToPalm(req: Request): z.infer<typeof PalmV1GenerateTextSchema> {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse({
...body,
model: "text-bison-001",
});
if (!result.success) {
req.log.error(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-Palm request"
);
throw result.error;
}
const { messages, ...rest } = result.data;
const prompt = flattenOpenAiChatMessages(messages);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
stops.push("\n\nUser:");
stops = [...new Set(stops)];
z.array(z.string()).max(5).parse(stops);
return {
prompt: { text: prompt },
maxOutputTokens: rest.max_tokens,
stopSequences: stops,
model: "text-bison-001",
topP: rest.top_p,
temperature: rest.temperature,
safetySettings: [
{ category: "HARM_CATEGORY_UNSPECIFIED", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_DEROGATORY", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_TOXICITY", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_VIOLENCE", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_SEXUAL", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_MEDICAL", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_DANGEROUS", threshold: "BLOCK_NONE" },
],
};
}
export function openAIMessagesToClaudePrompt(messages: OpenAIPromptMessage[]) {
return (
messages
.map((m) => {
let role: string = m.role;
if (role === "assistant") {
role = "Assistant";
} else if (role === "system") {
role = "System";
} else if (role === "user") {
role = "Human";
}
// https://console.anthropic.com/docs/prompt-design
// `name` isn't supported by Anthropic but we can still try to use it.
return `\n\n${role}: ${m.name?.trim() ? `(as ${m.name}) ` : ""}${
m.content
}`;
})
.join("") + "\n\nAssistant:"
);
}
function flattenOpenAiChatMessages(messages: OpenAIPromptMessage[]) {
// Temporary to allow experimenting with prompt strategies
const PROMPT_VERSION: number = 1;
switch (PROMPT_VERSION) {
case 1:
return (
messages
.map((m) => {
// Claude-style human/assistant turns
let role: string = m.role;
if (role === "assistant") {
role = "Assistant";
} else if (role === "system") {
role = "System";
} else if (role === "user") {
role = "User";
}
return `\n\n${role}: ${m.content}`;
})
.join("") + "\n\nAssistant:"
);
case 2:
return messages
.map((m) => {
// Claude without prefixes (except system) and no Assistant priming
let role: string = "";
if (role === "system") {
role = "System: ";
}
return `\n\n${role}${m.content}`;
})
.join("");
default:
throw new Error(`Unknown prompt version: ${PROMPT_VERSION}`);
}
}
@@ -0,0 +1,36 @@
import util from "util";
import zlib from "zlib";
import { PassThrough } from "stream";
const BUFFER_DECODER_MAP = {
gzip: util.promisify(zlib.gunzip),
deflate: util.promisify(zlib.inflate),
br: util.promisify(zlib.brotliDecompress),
text: (data: Buffer) => data,
};
const STREAM_DECODER_MAP = {
gzip: zlib.createGunzip,
deflate: zlib.createInflate,
br: zlib.createBrotliDecompress,
text: () => new PassThrough(),
};
type SupportedContentEncoding = keyof typeof BUFFER_DECODER_MAP;
const isSupportedContentEncoding = (
encoding: string
): encoding is SupportedContentEncoding => encoding in BUFFER_DECODER_MAP;
export async function decompressBuffer(buf: Buffer, encoding: string = "text") {
if (isSupportedContentEncoding(encoding)) {
return (await BUFFER_DECODER_MAP[encoding](buf)).toString();
}
throw new Error(`Unsupported content-encoding: ${encoding}`);
}
export function getStreamDecompressor(encoding: string = "text") {
if (isSupportedContentEncoding(encoding)) {
return STREAM_DECODER_MAP[encoding]();
}
throw new Error(`Unsupported content-encoding: ${encoding}`);
}
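// Usage sketch (assuming a gzip-encoded upstream body):
//
//   import zlib from "zlib";
//   const raw = zlib.gzipSync(Buffer.from('{"ok":true}'));
//   await decompressBuffer(raw, "gzip"); // => '{"ok":true}'
//   getStreamDecompressor("br");         // => a BrotliDecompress stream
//   getStreamDecompressor("zstd");       // throws: unsupported content-encoding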
@@ -0,0 +1,405 @@
import express from "express";
import { APIFormat } from "../../../shared/key-management";
import { assertNever } from "../../../shared/utils";
import { initializeSseStream } from "../../../shared/streaming";
import http from "http";
/**
* Returns a Markdown-formatted message that renders semi-nicely in most chat
* frontends. For example:
*
* **Proxy error (HTTP 404 Not Found)**
* The proxy encountered an error while trying to send your prompt to the upstream service. Further technical details are provided below.
* ***
* *The requested Claude model might not exist, or the key might not be provisioned for it.*
* ```
* {
* "type": "error",
* "error": {
* "type": "not_found_error",
* "message": "model: some-invalid-model-id",
* },
* "proxy_note": "The requested Claude model might not exist, or the key might not be provisioned for it."
* }
* ```
*/
function getMessageContent(params: {
title: string;
message: string;
obj?: Record<string, any>;
}) {
const { title, message, obj } = params;
const note = obj?.proxy_note || obj?.error?.message || "";
const header = `### **${title}**`;
const friendlyMessage = note ? `${message}\n\n----\n\n*${note}*` : message;
const serializedObj = obj
? ["```", JSON.stringify(obj, null, 2), "```"].join("\n")
: "";
const { stack } = JSON.parse(JSON.stringify(obj ?? {}));
let prettyTrace = "";
if (stack && obj) {
prettyTrace = [
"Include this trace when reporting an issue.",
"```",
stack,
"```",
].join("\n");
delete obj.stack;
}
return [
header,
friendlyMessage,
serializedObj,
prettyTrace,
"<!-- oai-proxy-error -->",
].join("\n\n");
}
type ErrorGeneratorOptions = {
format: APIFormat | "unknown";
title: string;
message: string;
obj?: Record<string, any>;
reqId: string | number | object;
model?: string;
statusCode?: number;
};
/**
* Very crude inference of the request format based on the request body. Don't
* rely on this to be very accurate.
*/
function tryInferFormat(body: any): APIFormat | "unknown" {
if (typeof body !== "object" || !body.model) {
return "unknown";
}
if (body.model.includes("gpt")) {
return "openai";
}
if (body.model.includes("mistral")) {
return "mistral-ai";
}
if (body.model.includes("claude")) {
return body.messages?.length ? "anthropic-chat" : "anthropic-text";
}
if (body.model.includes("gemini")) {
return "google-ai";
}
return "unknown";
}
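// Examples of the inference above (illustrative inputs):
//
//   tryInferFormat({ model: "gemini-1.5-pro" });                // "google-ai"
//   tryInferFormat({ model: "claude-3-opus", messages: [{}] }); // "anthropic-chat"
//   tryInferFormat({ model: "claude-2.1" });                    // "anthropic-text"
//   tryInferFormat("not an object");                            // "unknown"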
/**
* Redacts the hostname from the error message if it contains a DNS resolution
* error. This is to avoid leaking upstream hostnames on DNS resolution errors,
* as those may contain sensitive information about the proxy's configuration.
*/
function redactHostname(options: ErrorGeneratorOptions): ErrorGeneratorOptions {
if (!options.message.includes("getaddrinfo")) return options;
const redacted = { ...options };
redacted.message = "Could not resolve hostname";
if (typeof redacted.obj?.error === "object") {
redacted.obj = {
...redacted.obj,
error: { message: "Could not resolve hostname" },
};
}
return redacted;
}
/**
* Generates an appropriately-formatted error response and sends it to the
* client over their requested transport (blocking or SSE stream).
*/
export function sendErrorToClient(params: {
options: ErrorGeneratorOptions;
req: express.Request;
res: express.Response;
}) {
const { req, res } = params;
const options = redactHostname(params.options);
const { statusCode, message, title, obj: details } = options;
// Since we want to send the error in a format the client understands, we
// need to know the request format. `setApiFormat` might not have been called
// yet, so we'll try to infer it from the request body.
const format =
options.format === "unknown" ? tryInferFormat(req.body) : options.format;
if (format === "unknown") {
// Early middleware error (auth, rate limit) so we can only send something
// generic.
const code = statusCode || 400;
const hasDetails = details && Object.keys(details).length > 0;
return res.status(code).json({
error: {
message,
type: http.STATUS_CODES[code]!.replace(/\s+/g, "_").toLowerCase(),
},
...(hasDetails ? { details } : {}),
});
}
// Cannot modify headers if client opted into streaming and made it into the
// proxy request queue, because that immediately starts an SSE stream.
if (!res.headersSent) {
res.setHeader("x-oai-proxy-error", title);
res.setHeader("x-oai-proxy-error-status", statusCode || 500);
}
// By this point, we know the request format. To get the error to display in
// chat clients' UIs, we'll send it as a 200 response as a spoofed completion
// from the language model. Depending on whether the client is streaming, we
// will either send an SSE event or a JSON response.
const isStreaming = req.isStreaming || String(req.body.stream) === "true";
if (isStreaming) {
// User can have opted into streaming but not made it into the queue yet,
// in which case the stream must be started first.
if (!res.headersSent) {
initializeSseStream(res);
}
res.write(buildSpoofedSSE({ ...options, format }));
res.write(`data: [DONE]\n\n`);
res.end();
} else {
res.status(200).json(buildSpoofedCompletion({ ...options, format }));
}
}
/**
* Returns a non-streaming completion object that looks like it came from the
* service that the request is being proxied to. Used to send error messages to
* the client and have them look like normal responses, for clients with poor
* error handling.
*/
export function buildSpoofedCompletion({
format,
title,
message,
obj,
reqId,
model = "unknown",
}: ErrorGeneratorOptions & { format: Exclude<APIFormat, "unknown"> }) {
const id = String(reqId);
const content = getMessageContent({ title, message, obj });
switch (format) {
case "openai":
case "mistral-ai":
return {
id: "error-" + id,
object: "chat.completion",
created: Date.now(),
model,
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
choices: [
{
message: { role: "assistant", content },
finish_reason: title,
index: 0,
},
],
};
case "mistral-text":
return {
outputs: [{ text: content, stop_reason: title }],
model,
};
case "openai-text":
return {
id: "error-" + id,
object: "text_completion",
created: Date.now(),
model,
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
choices: [
{ text: content, index: 0, logprobs: null, finish_reason: title },
],
};
case "anthropic-text":
return {
id: "error-" + id,
type: "completion",
completion: content,
stop_reason: title,
stop: null,
model,
};
case "anthropic-chat":
return {
id: "error-" + id,
type: "message",
role: "assistant",
content: [{ type: "text", text: content }],
model,
stop_reason: title,
stop_sequence: null,
};
case "google-ai":
return {
candidates: [
{
content: { parts: [{ text: content }], role: "model" },
finishReason: title,
index: 0,
tokenCount: null,
safetyRatings: [],
},
],
};
case "openai-image":
return obj;
default:
assertNever(format);
}
}
/**
* Returns an SSE message that looks like a completion event for the service
* that the request is being proxied to. Used to send error messages to the
* client in the middle of a streaming request.
*/
export function buildSpoofedSSE({
format,
title,
message,
obj,
reqId,
model = "unknown",
}: ErrorGeneratorOptions & { format: Exclude<APIFormat, "unknown"> }) {
const id = String(reqId);
const content = getMessageContent({ title, message, obj });
let event;
switch (format) {
case "openai":
case "mistral-ai":
event = {
id: "chatcmpl-" + id,
object: "chat.completion.chunk",
created: Date.now(),
model,
choices: [{ delta: { content }, index: 0, finish_reason: title }],
};
break;
case "mistral-text":
event = {
outputs: [{ text: content, stop_reason: title }],
};
break;
case "openai-text":
event = {
id: "cmpl-" + id,
object: "text_completion",
created: Date.now(),
choices: [
{ text: content, index: 0, logprobs: null, finish_reason: title },
],
model,
};
break;
case "anthropic-text":
event = {
completion: content,
stop_reason: title,
truncated: false,
stop: null,
model,
log_id: "proxy-req-" + id,
};
break;
case "anthropic-chat":
event = {
type: "content_block_delta",
index: 0,
delta: { type: "text_delta", text: content },
};
break;
case "google-ai":
// TODO: google ai supports two streaming transports, SSE and JSON.
// we currently only support SSE.
event = {
candidates: [
{
content: { parts: [{ text: content }], role: "model" },
finishReason: title,
index: 0,
tokenCount: null,
safetyRatings: [],
},
],
};
break;
case "openai-image":
return JSON.stringify(obj);
default:
assertNever(format);
}
if (format === "anthropic-text") {
return (
["event: completion", `data: ${JSON.stringify(event)}`].join("\n") +
"\n\n"
);
}
// ugh.
if (format === "anthropic-chat") {
return (
[
[
"event: message_start",
`data: ${JSON.stringify({
type: "message_start",
message: {
id: "error-" + id,
type: "message",
role: "assistant",
content: [],
model,
},
})}`,
].join("\n"),
[
"event: content_block_start",
`data: ${JSON.stringify({
type: "content_block_start",
index: 0,
content_block: { type: "text", text: "" },
})}`,
].join("\n"),
["event: content_block_delta", `data: ${JSON.stringify(event)}`].join(
"\n"
),
[
"event: content_block_stop",
`data: ${JSON.stringify({ type: "content_block_stop", index: 0 })}`,
].join("\n"),
[
"event: message_delta",
`data: ${JSON.stringify({
type: "message_delta",
delta: { stop_reason: title, stop_sequence: null, usage: null },
})}`,
].join("\n"),
[
"event: message_stop",
`data: ${JSON.stringify({ type: "message_stop" })}`,
].join("\n"),
].join("\n\n") + "\n\n"
);
}
return `data: ${JSON.stringify(event)}\n\n`;
}
@@ -0,0 +1,70 @@
import { sendProxyError } from "../common";
import type { RawResponseBodyHandler } from "./index";
import { decompressBuffer } from "./compression";
/**
* Handles the response from the upstream service and decodes the body if
* necessary. If the response is JSON, it will be parsed and returned as an
* object. Otherwise, it will be returned as a string. Does not handle streaming
* responses.
* @throws {Error} Unsupported content-encoding or invalid application/json body
*/
export const handleBlockingResponse: RawResponseBodyHandler = async (
proxyRes,
req,
res
) => {
if (req.isStreaming) {
const err = new Error(
"handleBlockingResponse called for a streaming request."
);
req.log.error({ stack: err.stack, api: req.inboundApi }, err.message);
throw err;
}
return new Promise((resolve, reject) => {
let chunks: Buffer[] = [];
proxyRes.on("data", (chunk) => chunks.push(chunk));
proxyRes.on("end", async () => {
const contentEncoding = proxyRes.headers["content-encoding"];
const contentType = proxyRes.headers["content-type"];
let body: string | Buffer = Buffer.concat(chunks);
const rejectWithMessage = function (msg: string, err: Error) {
const error = `${msg} (${err.message})`;
req.log.warn(
{ msg: error, stack: err.stack },
"Error in blocking response handler"
);
sendProxyError(req, res, 500, "Internal Server Error", { error });
return reject(error);
};
try {
body = await decompressBuffer(body, contentEncoding);
} catch (e) {
return rejectWithMessage(`Could not decode response body`, e);
}
try {
return resolve(tryParseAsJson(body, contentType));
} catch (e) {
return rejectWithMessage("API responded with invalid JSON", e);
}
});
});
};
function tryParseAsJson(body: string, contentType?: string) {
// If the response is declared as JSON, it must parse or we will throw
if (contentType?.includes("application/json")) {
return JSON.parse(body);
}
// If it's not declared as JSON, we'll still try to parse it as JSON anyway,
// since some APIs return the wrong content-type header in some cases.
// If it fails to parse, we'll just return the raw body without throwing.
try {
return JSON.parse(body);
} catch (e) {
return body;
}
}
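// Behavior sketch: a declared JSON body must parse or the error propagates to
// the caller's rejectWithMessage; anything else falls back to the raw string.
//
//   tryParseAsJson('{"a":1}', "application/json"); // => { a: 1 }
//   tryParseAsJson("<html>", "text/html");         // => "<html>"
//   tryParseAsJson("<html>", "application/json");  // throws SyntaxError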
@@ -1,410 +1,194 @@
import { Request, Response } from "express";
import * as http from "http";
import { buildFakeSseMessage } from "../common";
import { RawResponseBodyHandler, decodeResponseBody } from ".";
import { assertNever } from "../../../shared/utils";
import express from "express";
import { pipeline, Readable, Transform } from "stream";
import { StringDecoder } from "string_decoder";
import { promisify } from "util";
import type { logger } from "../../../logger";
import { BadRequestError, RetryableError } from "../../../shared/errors";
import { APIFormat, keyPool } from "../../../shared/key-management";
import {
copySseResponseHeaders,
initializeSseStream,
} from "../../../shared/streaming";
import { reenqueueRequest } from "../../queue";
import type { RawResponseBodyHandler } from ".";
import { handleBlockingResponse } from "./handle-blocking-response";
import { buildSpoofedSSE, sendErrorToClient } from "./error-generator";
import { getAwsEventStreamDecoder } from "./streaming/aws-event-stream-decoder";
import { EventAggregator } from "./streaming/event-aggregator";
import { SSEMessageTransformer } from "./streaming/sse-message-transformer";
import { SSEStreamAdapter } from "./streaming/sse-stream-adapter";
import { getStreamDecompressor } from "./compression";
type OpenAiChatCompletionResponse = {
id: string;
object: string;
created: number;
model: string;
choices: {
message: { role: string; content: string };
finish_reason: string | null;
index: number;
}[];
};
type OpenAiTextCompletionResponse = {
id: string;
object: string;
created: number;
model: string;
choices: {
text: string;
finish_reason: string | null;
index: number;
logprobs: null;
}[];
};
type AnthropicCompletionResponse = {
completion: string;
stop_reason: string;
truncated: boolean;
stop: any;
model: string;
log_id: string;
exception: null;
};
const pipelineAsync = promisify(pipeline);
/**
* Consume the SSE stream and forward events to the client. Once the stream is
* stream is closed, resolve with the full response body so that subsequent
* middleware can work with it.
* `handleStreamedResponse` consumes a streamed response from the upstream API,
* decodes chunk-by-chunk into a stream of events, transforms those events into
* the client's requested format, and forwards the result to the client.
*
* Typically we would only need of the raw response handlers to execute, but
* in the event a streamed request results in a non-200 response, we need to
* fall back to the non-streaming response handler so that the error handler
* can inspect the error response.
* After the entire stream has been consumed, it resolves with the full response
* body so that subsequent middleware in the chain can process it as if it were
* a non-streaming response (to count output tokens, track usage, etc).
*
* Currently most frontends don't support Anthropic streaming, so users can opt
* to send requests for Claude models via an endpoint that accepts OpenAI-
* compatible requests and translates the received Anthropic SSE events into
* OpenAI ones, essentially pretending to be an OpenAI streaming API.
* In the event of an error, the request's streaming flag is unset and the
* request is bounced back to the non-streaming response handler. If the error
* is retryable, that handler will re-enqueue the request and also reset the
* streaming flag. Unfortunately the streaming flag is set and unset in multiple
* places, so it's hard to keep track of.
*/
export const handleStreamedResponse: RawResponseBodyHandler = async (
proxyRes,
req,
res
) => {
const { headers, statusCode } = proxyRes;
if (!req.isStreaming) {
const err = new Error(
"handleStreamedResponse called for non-streaming request."
);
req.log.error({ stack: err.stack, api: req.inboundApi }, err.message);
throw err;
}

if (statusCode! > 201) {
// Ensure we use the non-streaming middleware stack since we won't be
// getting any events.
req.isStreaming = false;
req.log.warn(
{ statusCode, key: req.key?.hash },
`Streaming request returned error status code. Falling back to non-streaming response handler.`
);
return handleBlockingResponse(proxyRes, req, res);
}
req.log.debug({ headers, key: req.key?.hash }, `Starting to proxy SSE stream.`);

// Queued streaming requests will already have a connection open and headers
// sent due to the heartbeat handler, so streaming will typically have been
// initialized by the request queue already. In that case we can just start
// streaming the response without sending headers.
if (!res.headersSent) {
copySseResponseHeaders(proxyRes, res);
initializeSseStream(res);
}

// If inbound and outbound APIs differ, the user is on the OpenAI-compatible
// endpoint and we need to translate the upstream SSE events into OpenAI
// completion events for their frontend.
const prefersNativeEvents = req.inboundApi === req.outboundApi;
const streamOptions = {
contentType: headers["content-type"],
api: req.outboundApi,
logger: req.log,
};

// While the request is streaming, the aggregator collects all events so that
// we can compile them into a single response object and publish that to the
// remaining middleware. Because we have an OpenAI transformer for every
// supported format, EventAggregator always consumes OpenAI events so that we
// only have to write one aggregator (OpenAI input) for each output format.
const aggregator = new EventAggregator(req);
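// For illustration, the aggregation flow described above looks roughly like
// this (hypothetical event shapes; the real implementation lives in
// ./streaming/event-aggregator):
//
//   const agg = new EventAggregator(req);
//   agg.addEvent({ choices: [{ index: 0, delta: { content: "Hel" } }] });
//   agg.addEvent({ choices: [{ index: 0, delta: { content: "lo" } }] });
//   agg.getFinalResponse(); // => one response whose content is "Hello"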
const decompressor = getStreamDecompressor(headers["content-encoding"]);
// Decoder reads from the response bytes to produce a stream of plaintext.
const decoder = getDecoder({ ...streamOptions, input: proxyRes });
// Adapter consumes the decoded text and produces server-sent events so we
// have a standard event format for the client and to translate between API
// message formats.
const adapter = new SSEStreamAdapter(streamOptions);
// Transformer converts server-sent events from one vendor's API message
// format to another.
const transformer = new SSEMessageTransformer({
inputFormat: req.outboundApi, // The format of the upstream service's events
outputFormat: req.inboundApi, // The format the client requested
inputApiVersion: String(req.headers["anthropic-version"]),
logger: req.log,
requestId: String(req.id),
requestedModel: req.body.model,
})
.on("originalMessage", (msg: string) => {
if (prefersNativeEvents) res.write(msg);
})
.on("data", (msg) => {
if (!prefersNativeEvents) res.write(`data: ${JSON.stringify(msg)}\n\n`);
aggregator.addEvent(msg);
});
try {
await Promise.race([
handleAbortedStream(req, res),
pipelineAsync(proxyRes, decompressor, decoder, adapter, transformer),
]);
req.log.debug(`Finished proxying SSE stream.`);
res.end();
return aggregator.getFinalResponse();
} catch (err: any) {
if (err instanceof RetryableError) {
keyPool.markRateLimited(req.key!);
await reenqueueRequest(req);
} else if (err instanceof BadRequestError) {
sendErrorToClient({
req,
res,
options: {
format: req.inboundApi,
title: "Proxy streaming error (Bad Request)",
message: `The API returned an error while streaming your request. Your prompt might not be formatted correctly.\n\n*${err.message}*`,
reqId: req.id,
model: req.body?.model,
},
});
} else {
const { message, stack, lastEvent } = err;
const eventText = JSON.stringify(lastEvent, null, 2) ?? "undefined";
const errorEvent = buildSpoofedSSE({
format: req.inboundApi,
title: "Proxy stream error",
message: "An unexpected error occurred while streaming the response.",
obj: { message, stack, lastEvent: eventText },
reqId: req.id,
model: req.body?.model,
});
res.write(errorEvent);
res.write(`data: [DONE]\n\n`);
res.end();
}

// At this point the response is closed. If the request resulted in any
// tokens being consumed (suggesting a mid-stream error), we will resolve
// and continue the middleware chain so tokens can be counted.
if (aggregator.hasEvents()) {
return aggregator.getFinalResponse();
} else {
// If there is nothing, then this was a completely failed prompt that
// will not have billed any tokens. Throw to stop the middleware chain.
throw err;
}
}
};
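// A rough sketch of this handler's contract, assuming (hypothetically) that
// it were invoked directly rather than through the proxy middleware stack:
//
//   if (req.isStreaming) {
//     const body = await handleStreamedResponse(proxyRes, req, res);
//     // subsequent middleware can count tokens/track usage against `body`
//   }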
type SSETransformationArgs = {
data: string;
requestApi: string;
responseApi: string;
lastPosition: number;
index: number;
};
/**
* Transforms SSE events from the given response API into events compatible with
* the API requested by the client.
*/
function transformEvent(params: SSETransformationArgs) {
const { data, requestApi, responseApi } = params;
if (requestApi === responseApi) {
return { position: -1, event: data };
}
const trans = `${requestApi}->${responseApi}`;
switch (trans) {
case "openai->openai-text":
return transformOpenAITextEventToOpenAIChat(params);
case "openai->anthropic":
// TODO: handle new anthropic streaming format
return transformV1AnthropicEventToOpenAI(params);
case "openai->google-palm":
return transformPalmEventToOpenAI(params);
default:
throw new Error(`Unsupported streaming API transformation. ${trans}`);
}
}
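// Example usage, assuming a hypothetical text-completion chunk: a client on
// the chat endpoint (requestApi "openai") streaming from the text-completion
// API (responseApi "openai-text") has each event rewritten into a
// chat.completion.chunk:
//
//   transformEvent({
//     data: 'data: {"id":"cmpl-1","object":"text_completion","created":0,"model":"text-davinci-003","choices":[{"text":"Hi","index":0,"finish_reason":null,"logprobs":null}]}',
//     requestApi: "openai",
//     responseApi: "openai-text",
//     lastPosition: -1,
//     index: 0,
//   });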
function transformOpenAITextEventToOpenAIChat(params: SSETransformationArgs) {
const { data, index } = params;
if (!data.startsWith("data:")) return { position: -1, event: data };
if (data.startsWith("data: [DONE]")) return { position: -1, event: data };
const event = JSON.parse(data.slice("data: ".length));
// The very first event must be a role assignment with no content.
const createEvent = () => ({
id: event.id,
object: "chat.completion.chunk",
created: event.created,
model: event.model,
choices: [
{
message: { role: "", content: "" } as {
role?: string;
content: string;
},
index: 0,
finish_reason: null,
},
],
});

let buffer = "";
if (index === 0) {
const initialEvent = createEvent();
initialEvent.choices[0].message.role = "assistant";
buffer = `data: ${JSON.stringify(initialEvent)}\n\n`;
}

const newEvent = {
...event,
choices: [
{
...event.choices[0],
delta: { content: event.choices[0].text },
text: undefined,
},
],
};
buffer += `data: ${JSON.stringify(newEvent)}`;
return { position: -1, event: buffer };
}
function handleAbortedStream(req: express.Request, res: express.Response) {
return new Promise<void>((resolve) =>
res.on("close", () => {
if (!res.writableEnded) {
req.log.info("Client prematurely closed connection during stream.");
}
resolve();
})
);
}
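// Racing handleAbortedStream against pipelineAsync (in the try block above)
// means a client disconnect resolves the race without throwing, so the
// handler can still fall through to the aggregator and count whatever tokens
// were streamed before the disconnect.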
function getDecoder(options: {
input: Readable;
api: APIFormat;
logger: typeof logger;
contentType?: string;
}) {
const { contentType, input, logger } = options;
if (contentType?.includes("application/vnd.amazon.eventstream")) {
return getAwsEventStreamDecoder({ input, logger });
} else if (contentType?.includes("application/json")) {
throw new Error("JSON streaming not supported, request SSE instead");
} else {
// Ensures split chunks across multi-byte characters are handled correctly.
const stringDecoder = new StringDecoder("utf8");
return new Transform({
readableObjectMode: true,
writableObjectMode: false,
transform(chunk, _encoding, callback) {
const text = stringDecoder.write(chunk);
if (text) this.push(text);
callback();
},
});
}
}
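// Worked example of why StringDecoder matters here: the UTF-8 encoding of
// "é" is the two bytes 0xc3 0xa9. If a chunk boundary splits them,
// stringDecoder.write(Buffer.from([0xc3])) returns "" and buffers the byte,
// and the following stringDecoder.write(Buffer.from([0xa9])) returns "é".
// A naive chunk.toString() would instead emit a replacement character.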
function transformV1AnthropicEventToOpenAI(params: SSETransformationArgs) {
const { data, lastPosition } = params;
// Anthropic sends the full completion so far with each event whereas OpenAI
// only sends the delta. To make the SSE events compatible, we remove
// everything before `lastPosition` from the completion.
if (!data.startsWith("data:")) {
return { position: lastPosition, event: data };
}
if (data.startsWith("data: [DONE]")) {
return { position: lastPosition, event: data };
}
const event = JSON.parse(data.slice("data: ".length));
const newEvent = {
id: "ant-" + event.log_id,
object: "chat.completion.chunk",
created: Date.now(),
model: event.model,
choices: [
{
index: 0,
delta: { content: event.completion?.slice(lastPosition) },
finish_reason: event.stop_reason,
},
],
};
return {
position: event.completion.length,
event: `data: ${JSON.stringify(newEvent)}`,
};
}
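// Worked example: if the previous event carried completion "Hello" (so
// lastPosition is 5) and the next event carries completion "Hello there",
// the emitted OpenAI delta is "Hello there".slice(5) === " there" and the
// new position is "Hello there".length === 11.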
function transformPalmEventToOpenAI(_params: SSETransformationArgs): never {
throw new Error("PaLM streaming not yet supported.");
}
/** Copy headers, excluding ones we're already setting for the SSE response. */
function copyHeaders(proxyRes: http.IncomingMessage, res: Response) {
const toOmit = [
"content-length",
"content-encoding",
"transfer-encoding",
"content-type",
"connection",
"cache-control",
];
for (const [key, value] of Object.entries(proxyRes.headers)) {
if (!toOmit.includes(key) && value) {
res.setHeader(key, value);
}
}
}
/**
* Converts the list of incremental SSE events into an object that resembles a
* full, non-streamed response from the API so that subsequent middleware can
* operate on it as if it were a normal response.
* Events are expected to be in the format they were received from the API.
*/
function convertEventsToFinalResponse(events: string[], req: Request) {
switch (req.outboundApi) {
case "openai": {
let merged: OpenAiChatCompletionResponse = {
id: "",
object: "",
created: 0,
model: "",
choices: [],
};
merged = events.reduce((acc, event, i) => {
if (!event.startsWith("data: ")) return acc;
if (event === "data: [DONE]") return acc;
const data = JSON.parse(event.slice("data: ".length));
// The first chat chunk only contains the role assignment and metadata
if (i === 0) {
return {
id: data.id,
object: data.object,
created: data.created,
model: data.model,
choices: [
{
message: { role: data.choices[0].delta.role, content: "" },
index: 0,
finish_reason: null,
},
],
};
}
if (data.choices[0].delta.content) {
acc.choices[0].message.content += data.choices[0].delta.content;
}
acc.choices[0].finish_reason = data.choices[0].finish_reason;
return acc;
}, merged);
return merged;
}
case "openai-text": {
let merged: OpenAiTextCompletionResponse = {
id: "",
object: "",
created: 0,
model: "",
choices: [],
// TODO: merge logprobs
};
merged = events.reduce((acc, event, i) => {
if (!event.startsWith("data: ")) return acc;
if (event === "data: [DONE]") return acc;
const data = JSON.parse(event.slice("data: ".length));
return {
id: data.id,
object: data.object,
created: data.created,
model: data.model,
choices: [
{
text: (acc.choices[0]?.text ?? "") + data.choices[0].text,
index: 0,
finish_reason: data.choices[0].finish_reason,
logprobs: null,
},
],
};
}, merged);
return merged;
}
case "anthropic": {
/*
* Full complete responses from Anthropic are conveniently just the same as
* the final SSE event before the "DONE" event, so we can reuse that
*/
const lastEvent = events[events.length - 2].toString();
const data = JSON.parse(
lastEvent.slice(lastEvent.indexOf("data: ") + "data: ".length)
);
const final: AnthropicCompletionResponse = { ...data, log_id: req.id };
return final;
}
case "google-palm": {
throw new Error("PaLM streaming not yet supported.");
}
default:
assertNever(req.outboundApi);
}
}
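// Example (openai case): given the events
//   data: {"id":"x","object":"chat.completion.chunk","created":1,"model":"m",
//          "choices":[{"delta":{"role":"assistant"},"index":0,"finish_reason":null}]}
//   data: {"id":"x","object":"chat.completion.chunk","created":1,"model":"m",
//          "choices":[{"delta":{"content":"Hi"},"index":0,"finish_reason":"stop"}]}
//   data: [DONE]
// the merged result has choices[0].message === { role: "assistant",
// content: "Hi" } and finish_reason "stop".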
File diff suppressed because it is too large
@@ -0,0 +1,81 @@
import { createHash } from "crypto";
import { config } from "../../../config";
import { eventLogger } from "../../../shared/prompt-logging";
import { getModelFromBody, isTextGenerationRequest } from "../common";
import { ProxyResHandlerWithBody } from ".";
import {
OpenAIChatMessage,
AnthropicChatMessage,
} from "../../../shared/api-schemas";
/** If event logging is enabled, logs a chat completion event. */
export const logEvent: ProxyResHandlerWithBody = async (
_proxyRes,
req,
_res,
responseBody
) => {
if (!config.eventLogging) {
return;
}
if (typeof responseBody !== "object") {
throw new Error("Expected body to be an object");
}
if (!["openai", "anthropic-chat"].includes(req.outboundApi)) {
// only chat apis are supported
return;
}
if (!req.user) {
return;
}
const loggable = isTextGenerationRequest(req);
if (!loggable) return;
const messages = req.body.messages as
| OpenAIChatMessage[]
| AnthropicChatMessage[];
const hashes: string[] = [hashMessages(messages)];
for (
let i = 1;
i <= Math.min(config.eventLoggingTrim!, messages.length);
i++
) {
hashes.push(hashMessages(messages.slice(0, -i)));
}
const model = getModelFromBody(req, responseBody);
const userToken = req.user!.token;
const family = req.modelFamily!;
eventLogger.logEvent({
ip: req.ip,
type: "chat_completion",
model,
family,
hashes,
userToken,
inputTokens: req.promptTokens ?? 0,
outputTokens: req.outputTokens ?? 0,
});
};
const hashMessages = (
messages: OpenAIChatMessage[] | AnthropicChatMessage[]
): string => {
const hasher = createHash("sha256");
const messageTexts: string[] = [];
for (const msg of messages) {
if (!["system", "user", "assistant"].includes(msg.role)) continue;
if (typeof msg.content === "string") {
messageTexts.push(msg.content);
} else if (Array.isArray(msg.content)) {
if (msg.content[0].type === "text") {
messageTexts.push(msg.content[0].text);
}
}
}
hasher.update(messageTexts.join("<|im_sep|>"));
return hasher.digest("hex");
};
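// Example: with config.eventLoggingTrim = 2 and messages [m1, m2, m3, m4],
// logEvent records hash([m1, m2, m3, m4]), hash([m1, m2, m3]), and
// hash([m1, m2]), presumably so a logged conversation can still be matched
// if the client later regenerates or trims its final messages.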

Some files were not shown because too many files have changed in this diff