234 Commits

Author SHA1 Message Date
nai-degen 84acc429d7 wip 2024-03-16 00:04:27 -05:00
nai-degen d9117bf08e fixes AWS debug log 2024-03-14 21:34:07 -05:00
nai-degen 57d9791270 fixes uncounted tokens when Response stream is prematurely closed 2024-03-14 21:32:20 -05:00
nai-degen 367ac3d075 adds ?debug=true query param to have proxy respond with transformed prompt 2024-03-14 08:16:38 -05:00
nai-degen 276a1a1d44 small fix for recurring AWS logging check 2024-03-13 20:53:21 -05:00
nai-degen 6cf029112e adds Anthropic's SOTA Haiku model; misc code cleanup 2024-03-13 20:48:05 -05:00
nai-degen 4b86802eb2 adds separate model detection for gpt-4-32k-0314 2024-03-10 19:16:11 -05:00
nai-degen 7f431de98e sets cache-control on static user images 2024-03-10 15:50:40 -05:00
nai-degen e0bf10626e removes .reverse() from image history to avoid thumbnails shifting as users browse 2024-03-10 15:12:20 -05:00
nai-degen eb55f30414 adds input prompt to imagehistory 2024-03-10 15:08:44 -05:00
nai-degen e1fb53b461 pretty-prints dall-e image metadata JSON download 2024-03-10 15:04:44 -05:00
nai-degen 7610369c6d adds dall-e full history page and metadata downloader 2024-03-10 14:53:11 -05:00
nai-degen 37f17ded60 removes OpenAI max_tokens default as that isn't aligned with the real API 2024-03-10 12:32:15 -05:00
nai-degen 96b6ea9568 adds azure-image endpoint to service info; hides unavailable endpoints 2024-03-09 13:25:50 -06:00
nai-degen cec39328a2 adds azure dall-e support 2024-03-09 13:03:50 -06:00
nai-degen cab346787c fixes regression in anthropic text > anthropic chat api translation 2024-03-08 21:16:25 -06:00
nai-degen fab404b232 refactors api transformers and adds oai->anthropic chat api translation 2024-03-08 20:59:19 -06:00
nai-degen 8d84f289b2 fixes issue with mistral-large model family not being detected 2024-03-08 17:07:25 -06:00
nai-degen 9ce10b4f6a shows more helpful errors when users' prefills are invalid during AWS streaming 2024-03-07 13:28:23 -06:00
nai-degen 96756d32f3 fixes handling of DALL-E content_policy_violation errors 2024-03-07 12:56:35 -06:00
nai-degen 1fb3eac154 maybe shows clearer AWS ValidationExceptions when users have bad prefills 2024-03-06 05:12:47 -06:00
nai-degen 8f46bd4397 handles 'this organization is disabled' error from anthropic 2024-03-06 00:42:10 -06:00
nai-degen ddf34685df adds Claude 3 Vision support 2024-03-05 18:34:10 -06:00
nai-degen ea3aae5da6 allows selecting compat model via endpoint name and makes errors less confusing 2024-03-05 05:13:22 -06:00
nai-degen 055d650c5d fixes legacy compat endpoint 2024-03-05 01:38:39 -06:00
nai-degen 2643dfea61 improves aws sonnet key detection and no keys available error messaging 2024-03-05 01:04:08 -06:00
nai-degen 434445797a fixes bad handleCompatibilityRequest middleware fallthrough 2024-03-04 23:53:13 -06:00
nai-degen 03c5c473e1 improves error handling for sillytavern 2024-03-04 22:59:32 -06:00
nai-degen 068e7a834f fixes AWS legacy models for non-streaming requests 2024-03-04 21:22:43 -06:00
nai-degen 736803ad92 enables opus by default 2024-03-04 21:11:32 -06:00
nai-degen 6b22d17c50 fixes claude-opus token usage being attributed to regular claude 2024-03-04 17:03:02 -06:00
nai-degen 51ffca480a adds AWS Claude Chat Completions and Claude 3 Sonnet support 2024-03-04 16:25:06 -06:00
nai-degen 802d847cc6 enables Claude opus by default 2024-03-04 16:21:40 -06:00
nai-degen 90ddcac55b makes claude3 compat model customizable via environment variable 2024-03-04 14:21:55 -06:00
nai-degen 36923686f6 shows claude-opus key count on service info page 2024-03-04 14:12:38 -06:00
nai-degen 1edc93dc72 adds claude-opus model family 2024-03-04 14:08:59 -06:00
nai-degen f6c124c1d3 fixes issue with preamble-required claude keys and anthropic chat 2024-03-04 14:00:25 -06:00
nai-degen 90a053d0e0 detects and removes over-quota claude keys from keypool 2024-03-04 13:42:29 -06:00
khanon db318ec237 Implement Anthropic Chat Completions endpoint and Claude 3 (khanon/oai-reverse-proxy!64) 2024-03-04 19:06:46 +00:00
nai-degen b90abbda88 spoofs response for SillyTavern test messages 2024-02-28 15:57:18 -06:00
nai-degen 93cee1db9b removes claude v1 from AWS keychecker as it has been retired 2024-02-27 15:52:09 -06:00
nai-degen bd15728743 uses explicitly set keyprovider rather than inferring via requested model 2024-02-27 10:56:50 -06:00
nai-degen 627559b729 updates mistral modelids 2024-02-26 23:55:03 -06:00
nai-degen 428e103323 allows customizing the /proxy endpoint prefix 2024-02-26 18:20:34 -06:00
nai-degen fd742fc0cb Merge remote-tracking branch 'origin/main' 2024-02-26 18:12:23 -06:00
nai-degen 5e19e2756a adds mistral-large model family, untested 2024-02-26 18:12:08 -06:00
devvnull d3f7c675e3 add pricing for Azure GPT counterparts and update Claude pricing (khanon/oai-reverse-proxy!65) 2024-02-20 03:53:26 +00:00
nai-degen 59bda40bbc handles google streaming json response format variation 2024-02-19 00:12:09 -06:00
nai-degen 68d829bceb adds Claude over-quota detection 2024-02-17 15:56:22 -06:00
nai-degen 9c03290a3d detects anthropic copyright prefill pozzing 2024-02-16 10:22:45 -06:00
nai-degen 3498584a1f removes forceModel on Google AI endpoint 2024-02-15 11:41:34 -06:00
nai-degen 21d61da62b increases max image payload size for gpt4v 2024-02-12 21:59:48 -06:00
nai-degen 35dc0f4826 fixes 'Premature close' caused by fucked up AWS unmarshaller errors 2024-02-10 14:47:14 -06:00
nai-degen a2ae9f32db handles OpenAI organization check failures due to missing API scopes 2024-02-09 10:10:22 -06:00
devvnull 0ce4582f3b Improve "\n\nHuman" prefix requirement detection for Anthropic (khanon/oai-reverse-proxy!63) 2024-02-08 16:28:11 +00:00
nai-degen bbee056114 fixes Force Key Recheck admin function for azure/aws 2024-02-07 19:54:40 -06:00
nai-degen ecc804887b uses EventStreamMarshaller from AWS SDK to hopefully handle split messages 2024-02-05 19:56:41 -06:00
nai-degen a8fd3c7240 fixes AWS Claude throttlingException handling 2024-02-04 20:48:20 -06:00
nai-degen 40240601f5 refactors SSEStreamAdapter to fix leaking decoder streams 2024-02-04 18:38:06 -06:00
nai-degen 98cea2da02 replaces eventstream lib to (hopefully) fix interrupted AWS streams 2024-02-04 17:18:28 -06:00
nai-degen c88f47d0ed fixes middleware order breaking /proxy endpoint 2024-02-04 16:21:44 -06:00
nai-degen 43106d9c7f tracks Risu userid rather than IP address on usertokens 2024-02-04 14:14:36 -06:00
nai-degen fe429a7610 adds SERVICE_INFO_PASSWORD to gate infopage behind a password 2024-02-04 14:04:46 -06:00
nai-degen 235510e588 fixes incorrect AWS Claude 2.1 max context limit 2024-02-01 20:40:15 -06:00
nai-degen 7eb6eb90ad moves api schema validators from transform-outbound-payload into shared 2024-01-29 19:38:22 -06:00
nai-degen 924db33f7e attempts to auto-convert Mistral prompts for its more strict rules 2024-01-28 17:42:23 -06:00
nai-degen 3f2f30e605 updates gpt4-v tokenizer for previous Risu change 2024-01-27 13:35:46 -06:00
nai-degen c9791acd85 makes gpt4-v input validation less strict to accommodate Risu 2024-01-27 13:24:11 -06:00
nai-degen e871b8ecf1 removes logprobs default value since it breaks gpt-4-vision 2024-01-27 12:19:24 -06:00
nai-degen 37ca98ad30 adds dark mode (infopage only currently) 2024-01-25 16:24:11 -06:00
nai-degen e6dc4475e6 fixes max context size for nu-gpt4-turbo 2024-01-25 14:07:42 -06:00
nai-degen 5e646b1c86 adds gpt-4-0125-preview and gpt-4-turbo-preview alias 2024-01-25 13:27:03 -06:00
nai-degen 6f626e623e fixes OAI trial keys bricking the dall-e queue 2024-01-25 01:47:51 -06:00
nai-degen 02a54bf4e3 fixes azure openai logprobs (actually tested this time) 2024-01-25 01:17:18 -06:00
nai-degen 79b2e5b6fd adds very basic support for OpenAI function calling 2024-01-24 16:42:26 -06:00
nai-degen 935a633325 fixes typo in Azure logprob adjustment 2024-01-24 16:03:47 -06:00
nai-degen 4a4b60ebcd handles Azure deviation from OpenAI spec on logprobs param 2024-01-24 16:01:19 -06:00
nai-degen ad465be363 fixes logprobs schema validation for turbo instruct endpoint 2024-01-24 14:31:10 -06:00
nai-degen c7a351baa8 adds support for requesting logprobs from OpenAI Chat Completions API 2024-01-24 11:46:09 -06:00
nai-degen ba8b052b17 adds bindAddress to omitted config keys 2024-01-18 04:14:15 -06:00
nai-degen e813cd9d22 default claude 2.1 instead of 1.3 in openai compat endpoint since 1.3 is not accessible on all keys 2024-01-18 04:14:15 -06:00
nai-degen 4c2a2c1e6c improves handle-streamed-response comments/docs [skip-ci] 2024-01-18 04:14:15 -06:00
nai-degen f1d927fa62 updates README with building/forking info [skip-ci] 2024-01-15 11:46:09 -06:00
nai-degen ad6e5224e3 allows binding to loopback interface via app config instead of only docker 2024-01-15 11:32:26 -06:00
nai-degen 85d89bdb9f fixes CI image tagging on main branch 2024-01-15 01:37:50 -06:00
khanon f5e7195cc9 Add Gitlab CI and self-hosting instructions (khanon/oai-reverse-proxy!61) 2024-01-15 06:51:12 +00:00
nai-degen 81f1e2bc37 fixes broken GET models endpoint for openai/mistral 2024-01-14 05:33:24 -06:00
nai-degen c2a686f229 Revert "reduces max request body size for now" (reverts commit 4ffa7fb12b) 2024-01-13 18:12:16 -06:00
twinkletoes 96a0f94041 Fix Mistral safe_prompt schema property (khanon/oai-reverse-proxy!60) 2024-01-14 00:11:39 +00:00
nai-degen d56043616e adds keychecker workaround for OpenAI API bug falsely returning gpt4-32k 2024-01-12 10:33:48 -06:00
nai-degen e3e06b065d fixes sourcemap dependency in package.json 2024-01-09 00:32:34 -06:00
nai-degen 1bbb515200 updates static service info 2024-01-08 23:32:25 -06:00
nai-degen a57cc4e8d4 updates dotenv 2024-01-08 23:25:02 -06:00
nai-degen 2239bead2c updates README.md 2024-01-08 19:36:35 -06:00
nai-degen 1a585ddd32 adds TRUSTED_PROXIES to .env.example 2024-01-08 16:41:30 -06:00
nai-degen be731691a1 allows configurable trust proxy setting for Render deployments 2024-01-08 16:39:28 -06:00
nai-degen c2e442e030 long overdue removal of tired in-joke 2024-01-08 11:01:44 -06:00
nai-degen d3ac3b362b trusts only one proxy hop (AWS WAF in huggingface's case) 2024-01-07 19:18:01 -06:00
nai-degen 7b0892ddae fixes unawaited call to async enqueue 2024-01-07 16:23:53 -06:00
nai-degen 7f92565739 SSE queueing adjustments, untested 2024-01-07 16:19:22 -06:00
nai-degen 936d3c0721 corrects nodejs max heap memory config 2024-01-07 16:16:27 -06:00
nai-degen 4ffa7fb12b reduces max request body size for now 2024-01-07 13:03:24 -06:00
nai-degen 8dc7464381 strips extraneous properties on zod schemas 2024-01-07 13:00:48 -06:00
nai-degen d2cd24bfd2 suggest larger nodejs max heap 2024-01-07 12:58:50 -06:00
twinkletoes e33f778192 Change mistral-medium friendly name (khanon/oai-reverse-proxy!59) 2023-12-26 00:27:17 +00:00
twinkletoes 4a823b216f Mistral AI support (khanon/oai-reverse-proxy!58) 2023-12-25 18:33:16 +00:00
nai-degen 01e76cbb1c restores accidentally deleted line breaking infopage stats 2023-12-17 00:25:58 -06:00
nai-degen 655703e680 refactors infopage 2023-12-16 20:30:20 -06:00
nai-degen 3be2687793 tries to detect Azure GPT4-Turbo deployments more reliably 2023-12-15 12:14:23 -06:00
nai-degen 5599a83ae4 improves streaming error handling 2023-12-14 05:01:10 -06:00
nai-degen de34d41918 fixes gemini name prefixing when 'Add character names' is disabled in ST 2023-12-13 23:21:30 -06:00
nai-degen c5cd90dcef adjusts prompt transform to discourage Gemini from speaking for user 2023-12-13 23:03:57 -06:00
nai-degen 8a135a960d fixes gemini prompt reformatting for jbs; adds stop sequences 2023-12-13 21:45:53 -06:00
nai-degen 707cbbce16 fixes gemini throwing an error on JB prompts 2023-12-13 19:14:31 -06:00
khanon fad16cc268 Add Google AI API (khanon/oai-reverse-proxy!57) 2023-12-13 21:56:07 +00:00
nai-degen 0d3682197c treats 403 from anthropic as key dead 2023-12-11 09:13:53 -06:00
valadaptive e0624e30fd Fix some corner cases in SSE parsing (khanon/oai-reverse-proxy!56) 2023-12-09 06:18:01 +00:00
nai-degen 94d4efe9bb properly enforce allowedModelFamilies; refactor HPM proxyReq handlers 2023-12-05 22:07:56 -06:00
random-username-423 12276a1f59 Fix AWS Claude Model Reassigning (khanon/oai-reverse-proxy!55) 2023-12-06 03:21:27 +00:00
nai-degen fdd824f0e4 adds azure rate limit auto-retry 2023-12-04 01:24:33 -06:00
khanon fbdea30264 Azure OpenAI support (khanon/oai-reverse-proxy!48) 2023-12-04 04:21:18 +00:00
nai-degen cd1b9d0e0c don't print google api key to container logs on error 2023-12-01 11:23:56 -06:00
nai-degen 9e61d9029f adds claude-2.1 (untested) 2023-11-21 11:32:43 -06:00
nai-degen f95e24afbb fixes incorrect max model size for gpt4-v 2023-11-19 02:23:41 -06:00
khanon f29049f993 Support for GPT-4-Vision (khanon/oai-reverse-proxy!54) 2023-11-19 05:06:21 +00:00
nai-degen 7f2f324e26 fixes render dockerfile and dalle3 model detection 2023-11-18 12:27:14 -06:00
nai-degen dc61291933 adds temporary keychecker var to treat dall-e-2 the same as dall-e-3 2023-11-17 20:24:36 -06:00
nai-degen 6c02e9b265 don't enqueue requests which fail stream check 2023-11-17 14:36:47 -06:00
nai-degen e018672968 re-adds keychecker info to STATIC_INFO_PAGE 2023-11-16 02:16:24 -06:00
nai-degen bfd7e23124 encodes queue payload 2023-11-16 01:19:01 -06:00
khanon 6aa6bebf08 Scale SSE heartbeat size with traffic (khanon/oai-reverse-proxy!53) 2023-11-16 05:45:35 +00:00
nai-degen 6acdf35914 removes length from stalled request error message 2023-11-15 17:18:51 -06:00
nai-degen 3de79873e9 adds STATIC_SERVICE_INFO config 2023-11-15 17:12:07 -06:00
nai-degen 3aca9e90f0 fixes rate limiter always using IMAGE_MODEL_RATE_LIMIT 2023-11-15 13:07:58 -06:00
nai-degen 5fabe1d1f8 uses exponential moving average for wait time calculation 2023-11-14 01:36:11 -06:00
nai-degen 4a68c14477 further increases OpenAI rate limit backoff 2023-11-14 01:28:28 -06:00
khanon 20c064394a OpenAI DALL-E Image Generation (khanon/oai-reverse-proxy!52) 2023-11-14 05:41:19 +00:00
nai-degen 3ea23760c3 adjusts prompt logging to truncate huge prompts from the end 2023-11-11 20:14:32 -06:00
nai-degen 5db07404f2 fixes infopage crash when check_keys is disabled 2023-11-10 22:41:57 -06:00
nai-degen c453a5f2ad logs usertoken lookup attempts 2023-11-10 22:41:36 -06:00
nai-degen c7a095d345 removes debug log 2023-11-09 16:25:57 -06:00
nai-degen e9110611fa adds REJECT_PHRASES configuration setting 2023-11-09 16:24:49 -06:00
nai-degen 79e1fe09e4 fixes multiple enumeration on infopage 2023-11-08 12:02:23 -06:00
dllt98 08b2196bfb Update .env.example to include MAX_CONTEXT_TOKENS_OPENAI (khanon/oai-reverse-proxy!50) 2023-11-08 02:50:19 +00:00
nai-degen 350d6542cf fixes stats for non-openai models 2023-11-06 22:41:48 -06:00
nai-degen c9c24f86bb improvements to infopage key categorization 2023-11-06 22:13:34 -06:00
nai-degen b6f8f15a1f tries to prevent per-day rate limited keys from bricking the queue 2023-11-06 21:16:36 -06:00
nai-degen 5467136c1a adds gpt4-turbo to userschema; updates docs 2023-11-06 16:35:35 -06:00
nai-degen 0d5dfeccf8 adds gpt4-turbo model family and support for gpt-4-1106-preview model 2023-11-06 15:29:43 -06:00
nai-degen b615ffa433 fixes issue with local development cookies 2023-11-06 10:28:27 -06:00
nai-degen a27163a629 adds option to not disable keys when reaching IP limit 2023-11-06 10:15:57 -06:00
nai-degen 5a8fb3aff6 adds USE_INSECURE_COOKIES for hosts without SSL support 2023-11-03 15:25:06 -05:00
nai-degen 51dd0c71ba removes unused import in openai proxy 2023-10-24 13:17:46 -05:00
nai-degen 89e1ed46d5 re-signs AWS requests on every attempt to fix fucked up queueing 2023-10-24 13:10:50 -05:00
nai-degen 26dc79c8f1 fixes broken AWS rate limit backoff 2023-10-24 09:19:46 -05:00
nai-degen 89e9b67f3f fixes AWS mid-stream rate limits not actually marking key as rate-limited 2023-10-23 22:47:29 -05:00
nai-degen 52ec2ec265 fixes blank AWS responses due to reqs sometimes using wrong handler 2023-10-23 22:23:06 -05:00
nai-degen 8bd2f749c1 reduces logging severity of prompt validation errors 2023-10-23 20:30:27 -05:00
khanon ff27ca3780 Update info-page.ts 2023-10-20 00:33:57 +00:00
nai-degen 41a463d2c8 possibly fix issue with AWS keychecker due to amazon API change 2023-10-16 12:17:02 -05:00
nai-degen 3f7e50f87e follow-up 'fixes empty AWS streaming responses when under heavy load' 2023-10-15 00:06:38 -05:00
nai-degen f6cfc6e882 fixes empty AWS streaming responses when under heavy load 2023-10-15 00:05:36 -05:00
nai-degen af4d8dae40 changes default AMZ_HOST to bedrock-runtime.region.amazonaws.com 2023-10-12 15:39:06 -05:00
nai-degen 725fd6e6f1 deprioritizes queued Agnai.chat requests and limits concurrency to five across all shared IPs 2023-10-09 12:36:54 -05:00
nai-degen c87484f1ff adds AWS console screenshot to docs 2023-10-07 21:33:53 -05:00
nai-degen 15a2cb5a26 another docs correction 2023-10-07 21:10:18 -05:00
nai-degen c8182cea17 docs correction 2023-10-07 21:08:40 -05:00
nai-degen b06d48e1f8 adds better AWS docs 2023-10-07 20:58:04 -05:00
khanon 140bdea14e Implement AWS KeyChecker and auto-disable AWS logged keys (khanon/oai-reverse-proxy!47) 2023-10-08 01:17:09 +00:00
nai-degen 12f78fa1f2 exempts 'special' role from rate limiting 2023-10-06 20:29:28 -05:00
nai-degen daf6a123d5 adjusts Agnai.chat and RisuAI rate limiting 2023-10-04 09:39:59 -05:00
nai-degen 4e05b01e90 improves AWS .env.example and config.ts docs 2023-10-03 20:29:49 -05:00
nai-degen 5033d00444 improves clarity of errors sent back to streaming clients 2023-10-03 19:45:15 -05:00
nai-degen ba0b20617e ensures AWS always uses anthropic-version 2023-06-01 parser 2023-10-03 19:43:30 -05:00
nai-degen 4a5fd91da3 address npm audit; adds zod-error package 2023-10-03 19:05:46 -05:00
khanon ecf897e685 Refactor handleStreamingResponse to make it less shit (khanon/oai-reverse-proxy!46) 2023-10-03 06:14:19 +00:00
nai-degen 6a3d753f0d fixes anthropic keychecker for some keys 2023-10-02 20:32:07 -05:00
khanon 0bf2f5c123 fixes typo in .env.example 2023-10-02 20:39:30 +00:00
nai-degen ede274c117 disables AWS key on AccessDeniedException 2023-10-02 11:18:08 -05:00
nai-degen d2267beb18 adds aws-claude token cost 2023-10-02 09:43:26 -05:00
nai-degen 0837c89a42 fixes incorrect context size limit for aws claude v1 2023-10-02 03:53:04 -05:00
nai-degen f67560a17b refactors proxy routing 2023-10-01 12:12:28 -05:00
nai-degen e13361a323 removes dead koboldai code 2023-10-01 11:27:11 -05:00
khanon fa4bf468d2 Implement AWS Bedrock support (khanon/oai-reverse-proxy!45) 2023-10-01 01:40:18 +00:00
nai-degen 7e681a7bef strips OAI request parameters when translating to Claude format 2023-09-29 03:01:39 -05:00
nai-degen 1b0106a1ea strips reverse proxy originating IP headers 2023-09-29 03:00:55 -05:00
nai-degen f5521aa6c3 prevents selecting trial keys for embeddings requests due to rate limits 2023-09-26 01:26:07 -05:00
nai-degen f8b480f4c2 adds support for proxying text-embedding-ada-002 requests 2023-09-26 00:58:38 -05:00
khanon 1f35fe1ae1 updates huggingface docs to clarify gatekeeper 2023-09-24 11:00:25 +00:00
khanon 35b44e1c6b fixes issue with OpenAIV1ChatCompletionSchema and PaLM compat 2023-09-24 10:48:56 +00:00
nai-degen 075e415343 makes incoming model name validation less strict for PaLM endpoint 2023-09-20 23:55:53 -05:00
nai-degen ec4f7e845b triggers automatic OAI key recheck three times a day 2023-09-19 21:43:16 -05:00
nai-degen 8923bb76a0 adds turbo-instruct endpoint to info page 2023-09-19 21:10:29 -05:00
khanon 35a6c393ed Add support for Google PaLM and OpenAI Turbo Instruct (khanon/oai-reverse-proxy!44) 2023-09-19 23:13:08 +00:00
nai-degen ef554f8e06 fixes user edit modal for null values 2023-09-18 23:42:08 -05:00
nai-degen 624973fc82 adds admin note 2023-09-18 23:35:29 -05:00
nai-degen c6453638e9 makes max IP limit configurable per-user 2023-09-18 23:16:06 -05:00
nai-degen 40e71435f0 partially redacts IP address on token lookup page 2023-09-17 17:53:29 -05:00
nai-degen 5e57dbb8f1 attempts to improve compatibility with BetterGPT frontend 2023-09-16 11:04:40 -05:00
khanon 201f71a989 corrects typo in anthropic key liveness test payload 2023-09-15 16:50:35 +00:00
nai-degen 66f1d809ec minor html cleanup 2023-09-10 13:31:21 -05:00
nai-degen 437fe1e720 improves rentry leaderboard function 2023-09-10 13:24:39 -05:00
nai-degen 404ce4fc80 adds ranking to markdown stats 2023-09-09 19:31:38 -05:00
nai-degen 95d2369acc adds option to anonymize rentry stats 2023-09-09 18:35:59 -05:00
khanon 2a453ab657 Add temporary user tokens (khanon/oai-reverse-proxy!42) 2023-09-09 22:21:38 +00:00
nai-degen 5728e235dc prioritizes unpozzed keys in key selection when possible 2023-09-09 13:10:33 -05:00
nai-degen 7b3d6efb02 reverts anthropic-version change as it breaks some frontends 2023-09-07 22:01:19 -05:00
nai-degen 63542bfabb adds anthropic-version header in all cases 2023-09-07 20:23:34 -05:00
nai-degen a558920ccf fixes tookens counter on infopage 2023-09-02 18:49:14 -05:00
nai-degen 6afb62fef6 opens user lookup in new tab for cookie samesite restrictions 2023-09-02 15:23:30 -05:00
nai-degen 0e325e89e0 adjusts user self-service link presentation 2023-09-02 15:09:11 -05:00
khanon f05e196994 Refactor project structure and add user self-serve UI (khanon/oai-reverse-proxy!41) 2023-09-02 19:36:44 +00:00
nai-degen 435b46ad4d adds anthropic key checker and pozzed key detection 2023-09-01 10:38:12 -05:00
nai-degen 980abcc01f fixes tsc build 2023-08-31 13:50:16 -05:00
nai-degen fe0f04ceb8 improves display of large token numbers 2023-08-31 13:23:36 -05:00
nai-degen 4b32130eaa adds maintenance function to clear all users' token records 2023-08-30 22:38:33 -05:00
nai-degen ffc0c6472e fixes claude tokens not correctly being accumulated 2023-08-30 20:48:45 -05:00
nai-degen 2c0a659b2d adds token consumption stats to infopage 2023-08-30 20:40:40 -05:00
nai-degen bed275a195 partial refactor/optimization of infopage 2023-08-30 19:25:17 -05:00
nai-degen 7cab0a5c52 fixes tsc issue breaking build 2023-08-30 14:31:47 -05:00
nai-degen 27a1181752 adds optional token quota limits for gpt4-32k 2023-08-30 13:57:10 -05:00
nai-degen 85aeeb2c05 adds unique openaiOrgs to infopage 2023-08-30 12:42:28 -05:00
nai-degen 8d557c844e adds a bunch more logging to keychecker 2023-08-30 12:30:41 -05:00
nai-degen 0a52ec478f maybe fixes keychecker bricking on disabled org keys 2023-08-30 12:30:16 -05:00
nai-degen e462ad585e improves keychecker stability with rate-limited trial keys 2023-08-30 08:33:00 -05:00
khanon 4d781e1720 Add GPT-4-32k support (khanon/oai-reverse-proxy!39) 2023-08-29 22:56:54 +00:00
nai-degen 3c56103de0 adds optional user_token nicknames 2023-08-29 14:20:28 -05:00
nai-degen bb78a399eb fixes organization key issue (via Drago/oai-reverse-proxy@714292ed) 2023-08-29 04:13:12 -05:00
nai-degen 09416c0b90 automatically rechecks keys on the 1st of every month 2023-08-29 04:08:50 -05:00
nai-degen abb30d3608 admin ui improvements; adds Force Recheck feature 2023-08-29 04:08:45 -05:00
khanon 6833736392 Clone keys assigned to multiple organizations (khanon/oai-reverse-proxy!38) 2023-08-28 21:11:49 +00:00
nai-degen 7c9c3a640c minor cleanup for user quota docs/examples 2023-08-28 14:51:28 -05:00
khanon cb780e85da Per-user token quotas and automatic quota refreshing (khanon/oai-reverse-proxy!37) 2023-08-28 19:33:14 +00:00
nai-degen 785b1f69f3 implements new local risu validation (via @kwaroran) 2023-08-28 05:28:58 -05:00
210 changed files with 16385 additions and 4150 deletions
.env.example (+128 -38)
@@ -1,56 +1,146 @@
# To customize your server, make a copy of this file to `.env` and edit any
# values you want to change. Be sure to remove the `#` at the beginning of each
# line you want to modify.
# All values have reasonable defaults, so you only need to change the ones you
# want to override.

# Use production mode unless you are developing locally.
NODE_ENV=production

# ------------------------------------------------------------------------------
# General settings:

# The title displayed on the info page.
# SERVER_TITLE=Coom Tunnel
# The route name used to proxy requests to APIs, relative to the Web site root.
# PROXY_ENDPOINT_ROUTE=/proxy
# Text model requests allowed per minute per user.
# TEXT_MODEL_RATE_LIMIT=4
# Image model requests allowed per minute per user.
# IMAGE_MODEL_RATE_LIMIT=2
# Max number of context tokens a user can request at once.
# Increase this if your proxy allows GPT 32k or 128k context.
# MAX_CONTEXT_TOKENS_OPENAI=16384
# Max number of output tokens a user can request at once.
# MAX_OUTPUT_TOKENS_OPENAI=400
# MAX_OUTPUT_TOKENS_ANTHROPIC=400
# Whether to show the estimated cost of consumed tokens on the info page.
# SHOW_TOKEN_COSTS=false
# Whether to automatically check API keys for validity.
# Note: CHECK_KEYS is disabled by default in local development mode, but enabled
# by default in production mode.
# CHECK_KEYS=true
# Which model types users are allowed to access.
# The following model families are recognized:
# turbo | gpt4 | gpt4-32k | gpt4-turbo | dall-e | claude | claude-opus | gemini-pro | mistral-tiny | mistral-small | mistral-medium | mistral-large | aws-claude | azure-turbo | azure-gpt4 | azure-gpt4-32k | azure-gpt4-turbo | azure-dall-e
# By default, all models are allowed except for 'dall-e' / 'azure-dall-e'.
# To allow DALL-E image generation, uncomment the line below and add 'dall-e' or
# 'azure-dall-e' to the list of allowed model families.
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,claude,claude-opus,gemini-pro,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo
# URLs from which requests will be blocked.
# BLOCKED_ORIGINS=reddit.com,9gag.com
# Message to show when requests are blocked.
# BLOCK_MESSAGE="You must be over the age of majority in your country to use this service."
# Destination to redirect blocked requests to.
# BLOCK_REDIRECT="https://roblox.com/"
# Comma-separated list of phrases that will be rejected. Only whole words are matched.
# Surround phrases with quotes if they contain commas.
# Avoid short or common phrases as this tests the entire prompt.
# REJECT_PHRASES="phrase one,phrase two,"phrase three, which has a comma",phrase four"
# Message to show when requests are rejected.
# REJECT_MESSAGE="This content violates /aicg/'s acceptable use policy."
# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
# PROMPT_LOGGING=false
# The port and network interface to listen on.
# PORT=7860
# BIND_ADDRESS=0.0.0.0
# Whether cookies should be set without the Secure flag, for hosts that don't support SSL.
# USE_INSECURE_COOKIES=false
# Detail level of logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info

# ------------------------------------------------------------------------------
# Optional settings for user management, access control, and quota enforcement:
# See `docs/user-management.md` for more information and setup instructions.
# See `docs/user-quotas.md` to learn how to set up quotas.

# Which access control method to use. (none | proxy_key | user_token)
# GATEKEEPER=none
# Which persistence method to use. (memory | firebase_rtdb)
# GATEKEEPER_STORE=memory
# Maximum number of unique IPs a user can connect from. (0 for unlimited)
# MAX_IPS_PER_USER=0
# Whether user_tokens should be automatically disabled when reaching the IP limit.
# MAX_IPS_AUTO_BAN=true
# With user_token gatekeeper, whether to allow users to change their nickname.
# ALLOW_NICKNAME_CHANGES=true
# Default token quotas for each model family. (0 for unlimited)
# DALL-E "tokens" are counted at a rate of 100000 tokens per US$1.00 generated,
# which is similar to the cost of GPT-4 Turbo.
# DALL-E 3 costs around US$0.10 per image (10000 tokens).
# See `docs/dall-e-configuration.md` for more information.
# TOKEN_QUOTA_TURBO=0
# TOKEN_QUOTA_GPT4=0
# TOKEN_QUOTA_GPT4_32K=0
# TOKEN_QUOTA_GPT4_TURBO=0
# TOKEN_QUOTA_DALL_E=0
# TOKEN_QUOTA_CLAUDE=0
# TOKEN_QUOTA_GEMINI_PRO=0
# TOKEN_QUOTA_AWS_CLAUDE=0
# How often to refresh token quotas. (hourly | daily)
# Leave unset to never automatically refresh quotas.
# QUOTA_REFRESH_PERIOD=daily
# Specifies the number of proxies or load balancers in front of the server.
# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
# For any other deployments, please see config.ts as the correct configuration
# depends on your setup. Misconfiguring this value can result in problems
# accurately tracking IP addresses and enforcing rate limits.
# TRUSTED_PROXIES=1

# ------------------------------------------------------------------------------
# Secrets and keys:
# For Huggingface, set these via the Secrets section in your Space's config UI. Do not set them in .env.
# For Render, create a "secret file" called .env using the Environment tab.

# You can add multiple API keys by separating them with a comma.
# For AWS credentials, separate the access key ID, secret key, and region with a colon.
OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
ANTHROPIC_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# See `docs/aws-configuration.md` for more information; there may be additional steps required to set up AWS.
AWS_CREDENTIALS=myaccesskeyid:mysecretkey:us-east-1,anotheraccesskeyid:anothersecretkey:us-west-2
# See `docs/azure-configuration.md` for more information; there may be additional steps required to set up Azure.
AZURE_CREDENTIALS=azure-resource-name:deployment-id:api-key,another-azure-resource-name:another-deployment-id:another-api-key
# With proxy_key gatekeeper, the password users must provide to access the API.
# PROXY_KEY=your-secret-key
# With user_token gatekeeper, the admin password used to manage users.
# ADMIN_KEY=your-very-secret-key
# With firebase_rtdb gatekeeper storage, the Firebase project credentials.
# FIREBASE_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# FIREBASE_RTDB_URL=https://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.firebaseio.com
# With prompt logging, the Google Sheets credentials.
# GOOGLE_SHEETS_SPREADSHEET_ID=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# GOOGLE_SHEETS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
.gitignore (+6 -1)
@@ -1,6 +1,11 @@
.aider*
.env*
!.env.vault
.venv
.vscode
.idea
build
greeting.md
node_modules
http-client.private.env.json
Husky hook script (+4, new file)
@@ -0,0 +1,4 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
npm run type-check
Prettier configuration (+14, new file)
@@ -0,0 +1,14 @@
{
  "overrides": [
    {
      "files": [
        "*.ejs"
      ],
      "options": {
        "printWidth": 160,
        "bracketSameLine": true
      }
    }
  ],
  "trailingComma": "es5"
}
README.md (+43 -15)
@@ -1,34 +1,53 @@
# OAI Reverse Proxy

Reverse proxy server for various LLM APIs.

### Table of Contents
- [What is this?](#what-is-this)
- [Features](#features)
- [Usage Instructions](#usage-instructions)
  - [Self-hosting](#self-hosting)
  - [Alternatives](#alternatives)
  - [Huggingface (outdated, not advised)](#huggingface-outdated-not-advised)
  - [Render (outdated, not advised)](#render-outdated-not-advised)
- [Local Development](#local-development)

## What is this?
This project allows you to run a reverse proxy server for various LLM APIs.

## Features
- [x] Support for multiple APIs
  - [x] [OpenAI](https://openai.com/)
  - [x] [Anthropic](https://www.anthropic.com/)
  - [x] [AWS Bedrock](https://aws.amazon.com/bedrock/)
  - [x] [Google MakerSuite/Gemini API](https://ai.google.dev/)
  - [x] [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
- [x] Translation from OpenAI-formatted prompts to any other API, including streaming responses
- [x] Multiple API keys with rotation and rate limit handling
- [x] Basic user management
  - [x] Simple role-based permissions
  - [x] Per-model token quotas
  - [x] Temporary user accounts
- [x] Prompt and completion logging
- [x] Abuse detection and prevention

---

## Usage Instructions
If you'd like to run your own instance of this server, you'll need to deploy it somewhere and configure it with your API keys. A few easy options are provided below, though you can also deploy it to any other service you'd like if you know what you're doing and the service supports Node.js.

### Self-hosting
[See here for instructions on how to self-host the application on your own VPS or local machine.](./docs/self-hosting.md)

**Ensure you set the `TRUSTED_PROXIES` environment variable according to your deployment.** Refer to [.env.example](./.env.example) and [config.ts](./src/config.ts) for more information.

### Alternatives
Fiz and Sekrit are working on some alternative ways to deploy this conveniently. While I'm not involved in this effort beyond providing technical advice regarding my code, I'll link to their work here for convenience: [Sekrit's rentry](https://rentry.org/sekrit)

### Huggingface (outdated, not advised)
[See here for instructions on how to deploy to a Huggingface Space.](./docs/deploy-huggingface.md)

### Render (outdated, not advised)
[See here for instructions on how to deploy to Render.com.](./docs/deploy-render.md)

## Local Development

@@ -40,3 +59,12 @@
4. Start the server in development mode with `npm run start:dev`.

You can also use `npm run start:dev:tsc` to enable project-wide type checking at the cost of slower startup times. `npm run type-check` can be used to run type checking without starting the server.

## Building
To build the project, run `npm run build`. This will compile the TypeScript code to JavaScript and output it to the `build` directory.

Note that if you are trying to build the server on a very memory-constrained (<= 1GB) VPS, you may need to run the build with `NODE_OPTIONS=--max_old_space_size=2048 npm run build` to avoid running out of memory during the build process, assuming you have swap enabled. The application itself should run fine on a 512MB VPS for most reasonable traffic levels.

## Forking
If you are forking the repository on GitGud, you may wish to disable GitLab CI/CD or you will be spammed with emails about failed builds due to not having any CI runners. You can do this by going to *Settings > General > Visibility, project features, permissions* and disabling the "CI/CD" feature.
.gitignore for the data directory (+2, new file)
@@ -0,0 +1,2 @@
*
!.gitkeep
.gitlab-ci.yml (+21, new file)
@@ -0,0 +1,21 @@
stages:
  - build

build_image:
  stage: build
  image:
    name: gcr.io/kaniko-project/executor:debug
    entrypoint: [""]
  script:
    - |
      if [ "$CI_COMMIT_REF_NAME" = "main" ]; then
        TAG="latest"
      else
        TAG=$CI_COMMIT_REF_NAME
      fi
    - echo "Building image with tag $TAG"
    - BASE64_AUTH=$(echo -n "$DOCKER_HUB_USERNAME:$DOCKER_HUB_ACCESS_TOKEN" | base64)
    - echo "{\"auths\":{\"https://index.docker.io/v1/\":{\"auth\":\"$BASE64_AUTH\"}}}" > /kaniko/.docker/config.json
    - /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/docker/ci/Dockerfile --destination docker.io/khanonci/oai-reverse-proxy:$TAG --build-arg CI_COMMIT_REF_NAME=$CI_COMMIT_REF_NAME --build-arg CI_COMMIT_SHA=$CI_COMMIT_SHA --build-arg CI_PROJECT_PATH=$CI_PROJECT_PATH
  only:
    - main
docker/ci/Dockerfile (+22, new file)
@@ -0,0 +1,22 @@
FROM node:18-bullseye-slim
WORKDIR /app
COPY . .
RUN npm ci
RUN npm run build
RUN npm prune --production
EXPOSE 7860
ENV PORT=7860
ENV NODE_ENV=production
ARG CI_COMMIT_REF_NAME
ARG CI_COMMIT_SHA
ARG CI_PROJECT_PATH
ENV GITGUD_BRANCH=$CI_COMMIT_REF_NAME
ENV GITGUD_COMMIT=$CI_COMMIT_SHA
ENV GITGUD_PROJECT=$CI_PROJECT_PATH
CMD [ "npm", "start" ]
docker/docker-compose-selfhost.yml (+17, new file)
@@ -0,0 +1,17 @@
# Before running this, create a .env and greeting.md file.
# Refer to .env.example for the required environment variables.
# User-generated content is stored in the data directory.
# When self-hosting, it's recommended to run this behind a reverse proxy like
# nginx or Caddy to handle SSL/TLS and rate limiting. Refer to
# docs/self-hosting.md for more information and an example nginx config.
version: '3.8'
services:
  oai-reverse-proxy:
    image: khanonci/oai-reverse-proxy:latest
    ports:
      - "127.0.0.1:7860:7860"
    env_file:
      - ./.env
    volumes:
      - ./greeting.md:/app/greeting.md
      - ./data:/app/data
docker/huggingface/Dockerfile (+4)
@@ -3,9 +3,13 @@
RUN apt-get update && \
    apt-get install -y git
RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
WORKDIR /app
RUN chown -R 1000:1000 /app
USER 1000
RUN npm install
COPY Dockerfile greeting.md* .env* ./
RUN npm run build
EXPOSE 7860
ENV NODE_ENV=production
# Huggingface free VMs have 16GB of RAM so we can be greedy
ENV NODE_OPTIONS="--max-old-space-size=12882"
CMD [ "npm", "start" ]
Binary files: one new image added (4.2 KiB); three existing images (153 KiB, 22 KiB, 36 KiB) moved with identical before/after sizes.
Admin API OpenAPI spec
@@ -1,4 +1,4 @@
openapi: 3.0.0
info:
  version: 1.0.0
@@ -26,6 +26,26 @@
    post:
      summary: Create a new user
      operationId: createUser
      requestBody:
        content:
          application/json:
            schema:
              oneOf:
                - type: object
                  properties:
                    type:
                      type: string
                      enum: ["normal", "special"]
                - type: object
                  properties:
                    type:
                      type: string
                      enum: ["temporary"]
                    expiresAt:
                      type: integer
                      format: int64
                    tokenLimits:
                      $ref: "#/components/schemas/TokenCount"
      responses:
        "200":
          description: The created user's token
@@ -173,6 +193,21 @@
              type: string
components:
  schemas:
    TokenCount:
      type: object
      properties:
        turbo:
          type: integer
          format: int32
        gpt4:
          type: integer
          format: int32
        "gpt4-32k":
          type: integer
          format: int32
        claude:
          type: integer
          format: int32
    User:
      type: object
      properties:
@@ -182,15 +217,18 @@
          type: array
          items:
            type: string
        nickname:
          type: string
        type:
          type: string
          enum: ["normal", "special"]
        promptCount:
          type: integer
          format: int32
        tokenLimits:
          $ref: "#/components/schemas/TokenCount"
        tokenCounts:
          $ref: "#/components/schemas/TokenCount"
        createdAt:
          type: integer
          format: int64
@@ -202,3 +240,6 @@
          format: int64
        disabledReason:
          type: string
        expiresAt:
          type: integer
          format: int64
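For illustration, creating a temporary user under this schema could look something like the sketch below. The `/admin/users` path, the bearer-token auth, and the epoch-millisecond `expiresAt` value are assumptions for the example; only the request body shape comes from the spec above:
```
curl -X POST https://your-proxy.example.com/admin/users \
  -H "Authorization: Bearer $ADMIN_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "type": "temporary",
    "expiresAt": 1735689600000,
    "tokenLimits": { "turbo": 100000, "gpt4": 25000 }
  }'
```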
docs/aws-configuration.md (+58, new file)
@@ -0,0 +1,58 @@
# Configuring the proxy for AWS Bedrock
The proxy supports AWS Bedrock models via the `/proxy/aws/claude` endpoint. There are a few extra steps necessary to use AWS Bedrock compared to the other supported APIs.
- [Setting keys](#setting-keys)
- [Attaching policies](#attaching-policies)
- [Provisioning models](#provisioning-models)
- [Note regarding logging](#note-regarding-logging)
## Setting keys
Use the `AWS_CREDENTIALS` environment variable to set the AWS API keys.
Like other APIs, you can provide multiple keys separated by commas. Each AWS key, however, is a set of credentials including the access key, secret key, and region. These are separated by a colon (`:`).
For example:
```
AWS_CREDENTIALS=AKIA000000000000000:somesecretkey:us-east-1,AKIA111111111111111:anothersecretkey:us-west-2
```
## Attaching policies
Unless your credentials belong to the root account, the principal will need to be granted the following permissions:
- `bedrock:InvokeModel`
- `bedrock:InvokeModelWithResponseStream`
- `bedrock:GetModelInvocationLoggingConfiguration`
- The proxy needs this to determine whether prompt/response logging is enabled. By default, the proxy won't use credentials unless it can conclusively determine that logging is disabled, for privacy reasons.
Use the IAM console or the AWS CLI to attach these policies to the principal associated with the credentials.
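For reference, a minimal identity policy granting these actions might look like the sketch below; `"Resource": "*"` is the simplest choice, though you can scope it down to specific model ARNs:
```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "bedrock:InvokeModel",
        "bedrock:InvokeModelWithResponseStream",
        "bedrock:GetModelInvocationLoggingConfiguration"
      ],
      "Resource": "*"
    }
  ]
}
```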
## Provisioning models
AWS does not automatically provide accounts with access to every model. You will need to provision the models you want to use, in the regions you want to use them in. You can do this from the AWS console.
⚠️ **Models are region-specific.** Currently AWS only offers Claude in a small number of regions. Switch to the AWS region you want to use, then go to the models page and request access to **Anthropic / Claude**.
![](./assets/aws-request-model-access.png)
Access is generally granted more or less instantly. Once your account has access, you can enable the model by checking the box next to it.
You can also request Claude Instant, but support for this isn't fully implemented yet.
### Supported model IDs
Users can send these model IDs to the proxy to invoke the corresponding models.
- **Claude**
- `anthropic.claude-v1` (~18k context, claude 1.3 -- EOL 2024-02-28)
- `anthropic.claude-v2` (~100k context, claude 2.0)
- `anthropic.claude-v2:1` (~200k context, claude 2.1)
- **Claude Instant**
- `anthropic.claude-instant-v1` (~100k context, claude instant 1.2)
## Note regarding logging
By default, the proxy will refuse to use keys if it finds that logging is enabled, or if it doesn't have permission to check logging status.
If you can't attach the `bedrock:GetModelInvocationLoggingConfiguration` policy to the principal, you can set the `ALLOW_AWS_LOGGING` environment variable to `true` to force the proxy to use the keys anyway. A warning will appear on the info page when this is enabled.
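If you want to verify ahead of time what the proxy will see, you can run the same check yourself with the AWS CLI, assuming the CLI is configured with the credentials in question:
```
aws bedrock get-model-invocation-logging-configuration --region us-east-1
```
An empty or disabled configuration means the proxy will accept the key; an `AccessDeniedException` means the proxy will refuse it unless `ALLOW_AWS_LOGGING=true` is set.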
docs/azure-configuration.md (+30, new file)
@@ -0,0 +1,30 @@
# Configuring the proxy for Azure
The proxy supports Azure OpenAI Service via the `/proxy/azure/openai` endpoint. The process of setting it up is slightly different from regular OpenAI.
- [Setting keys](#setting-keys)
- [Model assignment](#model-assignment)
## Setting keys
Use the `AZURE_CREDENTIALS` environment variable to set the Azure API keys.
Like other APIs, you can provide multiple keys separated by commas. Each Azure key, however, is a set of values including the Resource Name, Deployment ID, and API key. These are separated by a colon (`:`).
For example:
```
AZURE_CREDENTIALS=contoso-ml:gpt4-8k:0123456789abcdef0123456789abcdef,northwind-corp:testdeployment:0123456789abcdef0123456789abcdef
```
## Model assignment
Note that each Azure deployment is assigned a model when you create it in the Azure OpenAI Service portal. If you want to use a different model, you'll need to create a new deployment and add a new credential set to the `AZURE_CREDENTIALS` environment variable. Each credential only grants access to one model.
### Supported model IDs
Users can send normal OpenAI model IDs to the proxy to invoke the corresponding models. For the most part they work the same with Azure. GPT-3.5 Turbo has an ID of "gpt-35-turbo" because Azure doesn't allow periods in model names, but the proxy should automatically convert this to the correct ID.
As noted above, you can only use model IDs for which a deployment has been created and added to the proxy.
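As a quick smoke test, you can send a normal OpenAI-style request through the proxy and let it map the model ID to your deployment. This is a hypothetical example: the host is a placeholder, the password depends on your gatekeeper setup, and the exact completions path may differ depending on your `PROXY_ENDPOINT_ROUTE`:
```
curl https://your-proxy.example.com/proxy/azure/openai/v1/chat/completions \
  -H "Authorization: Bearer your-proxy-password" \
  -H "Content-Type: application/json" \
  -d '{"model": "gpt-4", "messages": [{"role": "user", "content": "Say hello."}]}'
```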
## On content filtering
Be aware that all Azure OpenAI Service deployments have content filtering enabled by default at a Medium level. Prompts or responses which are deemed to be inappropriate will be rejected by the API. This is a feature of the Azure OpenAI Service and not the proxy.
You can disable this from deployment's settings within Azure, but you would need to request an exemption from Microsoft for your organization first. See [this page](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/content-filters) for more information.
docs/dall-e-configuration.md (+71, new file)
@@ -0,0 +1,71 @@
# Configuring the proxy for DALL-E
The proxy supports DALL-E 2 and DALL-E 3 image generation via the `/proxy/openai-images` endpoint. By default it is disabled as it is somewhat expensive and potentially more open to abuse than text generation.
- [Updating your Dockerfile](#updating-your-dockerfile)
- [Enabling DALL-E](#enabling-dall-e)
- [Setting quotas](#setting-quotas)
- [Rate limiting](#rate-limiting)
## Updating your Dockerfile
If you are using a previous version of the Dockerfile supplied with the proxy, it doesn't have the necessary permissions to let the proxy save temporary files.
You can replace the entire thing with the new Dockerfile at [./docker/huggingface/Dockerfile](../docker/huggingface/Dockerfile) (or the equivalent for Render deployments).
You can also modify your existing Dockerfile; just add the following lines after the `WORKDIR` line:
```Dockerfile
# Existing
RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
WORKDIR /app
# Take ownership of the app directory and switch to the non-root user
RUN chown -R 1000:1000 /app
USER 1000
# Existing
RUN npm install
```
## Enabling DALL-E
Add `dall-e` to the `ALLOWED_MODEL_FAMILIES` environment variable to enable DALL-E. For example:
```
# GPT3.5 Turbo, GPT-4, GPT-4 Turbo, and DALL-E
ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-turbo,dall-e
# All models as of this writing
ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,claude,gemini-pro,aws-claude,dall-e
```
Refer to [.env.example](../.env.example) for a full list of supported model families. You can add `dall-e` to that list to enable all models.
## Setting quotas
DALL-E doesn't bill by token like text generation models. Instead there is a fixed cost per image generated, depending on the model, image size, and selected quality.
The proxy still uses tokens to set quotas for users. The cost for each generated image will be converted to "tokens" at a rate of 100000 tokens per US$1.00. This works out to a similar cost-per-token as GPT-4 Turbo, so you can use similar token quotas for both.
Use `TOKEN_QUOTA_DALL_E` to set the default quota for image generation. Otherwise it works the same as token quotas for other models.
```
# ~50 standard DALL-E images per refresh period, or US$2.00
TOKEN_QUOTA_DALL_E=200000
```
Refer to [https://openai.com/pricing](https://openai.com/pricing) for the latest pricing information. As of this writing, the cheapest DALL-E 3 image costs $0.04 per generation, which works out to 4000 tokens. Higher resolution and quality settings can cost up to $0.12 per image, or 12000 tokens.
## Rate limiting
The old `MODEL_RATE_LIMIT` setting has been split into `TEXT_MODEL_RATE_LIMIT` and `IMAGE_MODEL_RATE_LIMIT`. Whatever value you previously set for `MODEL_RATE_LIMIT` will be used for text models.
If you don't specify an `IMAGE_MODEL_RATE_LIMIT`, it defaults to half of the `TEXT_MODEL_RATE_LIMIT`, with a minimum of 1 image per minute.
```
# 4 text generations per minute, 2 images per minute
TEXT_MODEL_RATE_LIMIT=4
IMAGE_MODEL_RATE_LIMIT=2
```
If a prompt is filtered by OpenAI's content filter, it won't count towards the rate limit.
## Hiding recent images
By default, the proxy shows the last 12 recently generated images by users. You can hide this section by setting `SHOW_RECENT_IMAGES` to `false`.
docs/deploy-huggingface.md (+15 -6)
@@ -1,5 +1,7 @@
# Deploy to Huggingface Space

**⚠️ This method is no longer recommended. Please use the [self-hosting instructions](./self-hosting.md) instead.**

This repository can be deployed to a [Huggingface Space](https://huggingface.co/spaces). This is a free service that allows you to run a simple server in the cloud. You can use it to safely share your OpenAI API key with a friend.

### 1. Get an API key
@@ -12,12 +14,12 @@
- Provide a name for your Space and select "Docker" as the SDK. Select "Blank" for the template.
- Click "Create Space" and wait for the Space to be created.

![Create Space](assets/huggingface-createspace.png)

### 3. Create an empty Dockerfile
- Once your Space is created, you'll see an option to "Create the Dockerfile in your browser". Click that link.

![Create Dockerfile](assets/huggingface-dockerfile.png)

- Paste the following into the text editor and click "Save".
```dockerfile
FROM node:18-bullseye-slim
RUN apt-get update && \
    apt-get install -y git
RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
WORKDIR /app
RUN chown -R 1000:1000 /app
USER 1000
RUN npm install
COPY Dockerfile greeting.md* .env* ./
RUN npm run build
EXPOSE 7860
ENV NODE_ENV=production
ENV NODE_OPTIONS="--max-old-space-size=12882"
CMD [ "npm", "start" ]
```
- Click "Commit new file to `main`" to save the Dockerfile.

![Commit](assets/huggingface-savedockerfile.png)

### 4. Set your API key as a secret
- Click the Settings button in the top right corner of your repository.
@@ -82,14 +87,18 @@
```
MAX_OUTPUT_TOKENS_ANTHROPIC=512
# Block prompts containing disallowed characters
REJECT_DISALLOWED=false
REJECT_MESSAGE="This content violates /aicg/'s acceptable use policy."
# Show exact quota usage on the Server Info page
QUOTA_DISPLAY_MODE=full
```

See `.env.example` for a full list of available settings, or check `config.ts` for details on what each setting does.

## Restricting access to the server
If you want to restrict access to the server, you can set a `PROXY_KEY` secret. This key will need to be passed in the Authentication header of every request to the server, just like an OpenAI API key. Set the `GATEKEEPER` mode to `proxy_key`, and then set the `PROXY_KEY` variable to whatever password you want.

Add this using the same method as the OPENAI_KEY secret above. Don't add this to your `.env` file because that file is public and anyone can see it.

Example:
```
GATEKEEPER=proxy_key
PROXY_KEY=your_secret_password
```
docs/deploy-render.md (+6 -1)
@@ -1,5 +1,8 @@
# Deploy to Render.com # Deploy to Render.com
Render.com offers a free tier that includes 750 hours of compute time per month. This is enough to run a single proxy instance 24/7. Instances shut down after 15 minutes without traffic but start up again automatically when a request is received.
**⚠️ This method is no longer recommended. Please use the [self-hosting instructions](./self-hosting.md) instead.**
Render.com offers a free tier that includes 750 hours of compute time per month. This is enough to run a single proxy instance 24/7. Instances shut down after 15 minutes without traffic but start up again automatically when a request is received. You can use something like https://app.checklyhq.com/ to ping your proxy every 15 minutes to keep it alive.
### 1. Create account
- [Sign up for Render.com](https://render.com/) to create an account and access the dashboard.
@@ -28,6 +31,8 @@ The service will be created according to the instructions in the `render.yaml` f
- For example, `OPENAI_KEY=sk-abc123`.
- Click **Save Changes**.
**IMPORTANT:** Set `TRUSTED_PROXIES=3`, otherwise users' IP addresses will not be recorded correctly (the server will see the IP address of Render's load balancer instead of the user's real IP address).
The service will automatically rebuild and deploy with the new environment variables. This will take a few minutes. The link to your deployed proxy will appear at the top of the page.
If you want to change the URL, go to the **Settings** tab of your Web Service and click the **Edit** button next to **Name**. You can also set a custom domain, though I haven't tried this yet.
+150
@@ -0,0 +1,150 @@
# Quick self-hosting guide
Temporary guide for self-hosting. This will be improved in the future to provide more robust instructions and options. Provided commands are for Ubuntu.
This uses prebuilt Docker images for convenience. If you want to make adjustments to the code you can instead clone the repo and follow the Local Development guide in the [README](../README.md).
## Table of Contents
- [Requirements](#requirements)
- [Running the application](#running-the-application)
- [Setting up a reverse proxy](#setting-up-a-reverse-proxy)
- [trycloudflare](#trycloudflare)
- [nginx](#nginx)
- [Example basic nginx configuration (no SSL)](#example-basic-nginx-configuration-no-ssl)
- [Example with Cloudflare SSL](#example-with-cloudflare-ssl)
- [Updating/Restarting the application](#updatingrestarting-the-application)
## Requirements
- Docker
- Docker Compose
- A VPS with at least 512MB of RAM (1GB recommended)
- A domain name
If you don't have a VPS and domain name you can use TryCloudflare to set up a temporary URL that you can share with others. See [trycloudflare](#trycloudflare) for more information.
## Running the application
- Install Docker and Docker Compose
- Create a new directory for the application
- This will contain your .env file, greeting file, and any user-generated files
- Execute the following commands:
- ```
touch .env
touch greeting.md
echo "OPENAI_KEY=your-openai-key" >> .env
curl https://gitgud.io/khanon/oai-reverse-proxy/-/raw/main/docker/docker-compose-selfhost.yml -o docker-compose.yml
```
- You can set further environment variables and keys in the `.env` file. See [.env.example](../.env.example) for a list of available options.
- You can set a custom greeting in `greeting.md`. This will be displayed on the homepage.
- Run `docker compose up -d`
You can check logs with `docker compose logs -n 100 -f`.
The provided docker-compose file listens on port 7860 but binds to localhost only. You should use a reverse proxy to expose the application to the internet as described in the next section.
## Setting up a reverse proxy
Rather than exposing the application directly to the internet, it is recommended to set up a reverse proxy. This will allow you to use HTTPS and add additional security measures.
### trycloudflare
This will give you a temporary (72 hours) URL that you can use to let others connect to your instance securely, without having to set up a reverse proxy. If you are running the server on your home network, this is probably the best option.
- Install `cloudflared` following the instructions at [try.cloudflare.com](https://try.cloudflare.com/).
- Run `cloudflared tunnel --url http://localhost:7860`
- You will be given a temporary URL that you can share with others.
If you have a VPS, you should use a proper reverse proxy like nginx instead for a more permanent solution which will allow you to use your own domain name, handle SSL, and add additional security/anti-abuse measures.
### nginx
First, install nginx.
- `sudo apt update && sudo apt install nginx`
#### Example basic nginx configuration (no SSL)
- `sudo nano /etc/nginx/sites-available/oai.conf`
- ```
server {
listen 80;
server_name example.com;
location / {
proxy_pass http://localhost:7860;
}
}
```
- Replace `example.com` with your domain name.
- Ctrl+X to exit, Y to save, Enter to confirm.
- `sudo ln -s /etc/nginx/sites-available/oai.conf /etc/nginx/sites-enabled`
- `sudo nginx -t`
- This will check the configuration file for errors.
- `sudo systemctl restart nginx`
- This will restart nginx and apply the new configuration.
#### Example with Cloudflare SSL
This allows you to use a self-signed certificate on the server, and have Cloudflare handle client SSL. You need to have a Cloudflare account and have your domain set up with Cloudflare already, pointing to your server's IP address.
- Set Cloudflare to use Full SSL mode. Since we are using a self-signed certificate, don't use Full (strict) mode.
- Create a self-signed certificate:
- `sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/ssl/private/nginx-selfsigned.key -out /etc/ssl/certs/nginx-selfsigned.crt`
- `sudo nano /etc/nginx/sites-available/oai.conf`
- ```
server {
listen 443 ssl;
server_name yourdomain.com www.yourdomain.com;
ssl_certificate /etc/ssl/certs/nginx-selfsigned.crt;
ssl_certificate_key /etc/ssl/private/nginx-selfsigned.key;
# Only allow inbound traffic from Cloudflare
allow 173.245.48.0/20;
allow 103.21.244.0/22;
allow 103.22.200.0/22;
allow 103.31.4.0/22;
allow 141.101.64.0/18;
allow 108.162.192.0/18;
allow 190.93.240.0/20;
allow 188.114.96.0/20;
allow 197.234.240.0/22;
allow 198.41.128.0/17;
allow 162.158.0.0/15;
allow 104.16.0.0/13;
allow 104.24.0.0/14;
allow 172.64.0.0/13;
allow 131.0.72.0/22;
deny all;
location / {
proxy_pass http://localhost:7860;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256';
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
}
```
- Replace `yourdomain.com` with your domain name.
- Ctrl+X to exit, Y to save, Enter to confirm.
- `sudo ln -s /etc/nginx/sites-available/oai.conf /etc/nginx/sites-enabled`
- `sudo nginx -t`
- `sudo systemctl restart nginx`
## Updating/Restarting the application
After making an .env change, you need to restart the application for it to take effect.
- `docker compose down`
- `docker compose up -d`
To update the application to the latest version:
- `docker compose pull`
- `docker compose down`
- `docker compose up -d`
- `docker image prune -f`
+2
@@ -5,6 +5,7 @@ The proxy supports several different user management strategies. You can choose
Several of these features require you to set secrets in your environment. If using Huggingface Spaces to deploy, do not set these in your `.env` file because that file is public and anyone can see it.
## Table of Contents
- [No user management](#no-user-management-gatekeepernone)
- [Single-password authentication](#single-password-authentication-gatekeeperproxy_key)
- [Per-user authentication](#per-user-authentication-gatekeeperuser_token)
@@ -41,6 +42,7 @@ This is the default data store (`GATEKEEPER_STORE=memory`) User data will be st
### Firebase Realtime Database
To use Firebase Realtime Database to persist user data, set the following environment variables:
- `GATEKEEPER_STORE`: Set this to `firebase_rtdb`
- **Secret** `FIREBASE_RTDB_URL`: The URL of your Firebase Realtime Database, e.g. `https://my-project-default-rtdb.firebaseio.com`
- **Secret** `FIREBASE_KEY`: A base-64 encoded service account key for your Firebase project. Refer to the instructions below for how to create this key.
+36
@@ -0,0 +1,36 @@
# User Quotas
When using `user_token` authentication, you can set per-model token quotas for users. These quotas are enforced by the proxy server and are separate from the quotas enforced by OpenAI.
You can set the default quota via environment variables. Quotas are enforced on a per-model basis, and count both prompt tokens and completion tokens. By default, all quotas are disabled.
Set the following environment variables to set the default quotas:
- `TOKEN_QUOTA_TURBO`
- `TOKEN_QUOTA_GPT4`
- `TOKEN_QUOTA_CLAUDE`
Quotas only apply to `normal`-type users; `special`-type users are exempt from quotas. You can change users' types via the REST API.
**Note that changes to these environment variables will only apply to newly created users.** To modify existing users' quotas, use the REST API or the admin UI.
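For example, here is a sketch of adjusting one user's quota through the admin REST API, assuming the admin routes are mounted at `/admin` as in the router changes later in this diff. The token values are placeholders, and the `tokenLimits` keys match the per-model quota fields described in the import/export docs:
```ts
// Sketch: raise an existing user's Claude quota via PUT /admin/users/:token.
// Requires the admin key in the Authorization header; <...> values are placeholders.
(async () => {
  const res = await fetch("http://localhost:7860/admin/users/<user-token>", {
    method: "PUT",
    headers: {
      Authorization: "Bearer <ADMIN_KEY>",
      "Content-Type": "application/json",
    },
    body: JSON.stringify({ tokenLimits: { claude: 500_000 } }),
  });
  console.log(await res.json());
})();
```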
## Automatically refreshing quotas
You can use the `QUOTA_REFRESH_PERIOD` environment variable to automatically refresh users' quotas periodically. This is useful if you want to give users a certain number of tokens per day, for example. The entire quota will be refreshed at the start of the specified period, and any tokens a user has not used will not be carried over.
Quotas for all models and users will be refreshed. If you haven't set `TOKEN_QUOTA_*` for a particular model, quotas for that model will not be refreshed (so any manually set quotas will not be overwritten).
Set the `QUOTA_REFRESH_PERIOD` environment variable to one of the following values:
- `daily` (at midnight)
- `hourly`
- leave unset to disable automatic refreshing
You can also use a cron expression, for example:
- Every 45 seconds: `"*/45 * * * * *"`
- Every 30 minutes: `"*/30 * * * *"`
- Every 6 hours: `"0 */6 * * *"`
- Every 3 days: `"0 0 */3 * *"`
- Daily, but at mid-day: `"0 12 * * *"`
Make sure to enclose the cron expression in quotation marks.
All times are in the server's local time zone. Refer to [crontab.guru](https://crontab.guru/) for more examples.
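Under the hood, this kind of schedule maps naturally onto a cron job. A rough sketch of the idea using `node-schedule` (which appears in the project's dependencies); the `refreshAllQuotas` helper is hypothetical, standing in for whatever the proxy's real refresh logic is:
```ts
import schedule from "node-schedule";

// Sketch only: map QUOTA_REFRESH_PERIOD onto a cron job.
// refreshAllQuotas() is a hypothetical stand-in for the real refresh logic.
declare function refreshAllQuotas(): void;

const period = process.env.QUOTA_REFRESH_PERIOD ?? "";
const cron =
  period === "daily" ? "0 0 * * *" :
  period === "hourly" ? "0 * * * *" :
  period; // otherwise, treat the value as a cron expression
if (cron) {
  schedule.scheduleJob(cron, () => refreshAllQuotas());
}
```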
+9
@@ -0,0 +1,9 @@
{
"dev": {
"proxy-host": "http://localhost:7860",
"oai-key-1": "override in http-client.private.env.json",
"proxy-key": "override in http-client.private.env.json",
"azu-resource-name": "override in http-client.private.env.json",
"azu-deployment-id": "override in http-client.private.env.json"
}
}
+1318 -177
File diff suppressed because it is too large
+33 -8
@@ -4,10 +4,11 @@
"description": "Reverse proxy for the OpenAI API", "description": "Reverse proxy for the OpenAI API",
"scripts": { "scripts": {
"build": "tsc && copyfiles -u 1 src/**/*.ejs build", "build": "tsc && copyfiles -u 1 src/**/*.ejs build",
"start:dev": "nodemon --watch src --exec ts-node --transpile-only src/server.ts", "prepare": "husky install",
"start:watch": "nodemon --require source-map-support/register build/server.js",
"start:replit": "tsc && node build/server.js",
"start": "node build/server.js", "start": "node build/server.js",
"start:dev": "nodemon --watch src --exec ts-node --transpile-only src/server.ts",
"start:replit": "tsc && node build/server.js",
"start:watch": "nodemon --require source-map-support/register build/server.js",
"type-check": "tsc --noEmit" "type-check": "tsc --noEmit"
}, },
"engines": { "engines": {
@@ -17,42 +18,66 @@
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@anthropic-ai/tokenizer": "^0.0.4", "@anthropic-ai/tokenizer": "^0.0.4",
"@aws-crypto/sha256-js": "^5.2.0",
"@smithy/eventstream-codec": "^2.1.3",
"@smithy/eventstream-serde-node": "^2.1.3",
"@smithy/protocol-http": "^3.2.1",
"@smithy/signature-v4": "^2.1.3",
"@smithy/types": "^2.10.1",
"@smithy/util-utf8": "^2.1.1",
"axios": "^1.3.5", "axios": "^1.3.5",
"check-disk-space": "^3.4.0",
"cookie-parser": "^1.4.6", "cookie-parser": "^1.4.6",
"copyfiles": "^2.4.1", "copyfiles": "^2.4.1",
"cors": "^2.8.5", "cors": "^2.8.5",
"csrf-csrf": "^2.3.0", "csrf-csrf": "^2.3.0",
"dotenv": "^16.0.3", "dotenv": "^16.3.1",
"ejs": "^3.1.9", "ejs": "^3.1.9",
"express": "^4.18.2", "express": "^4.18.2",
"express-session": "^1.17.3",
"firebase-admin": "^11.10.1", "firebase-admin": "^11.10.1",
"googleapis": "^122.0.0", "googleapis": "^122.0.0",
"http-proxy-middleware": "^3.0.0-beta.1", "http-proxy-middleware": "^3.0.0-beta.1",
"memorystore": "^1.6.7",
"multer": "^1.4.5-lts.1", "multer": "^1.4.5-lts.1",
"node-schedule": "^2.1.1",
"pino": "^8.11.0", "pino": "^8.11.0",
"pino-http": "^8.3.3", "pino-http": "^8.3.3",
"sanitize-html": "2.12.1",
"sharp": "^0.32.6",
"showdown": "^2.1.0", "showdown": "^2.1.0",
"source-map-support": "^0.5.21",
"stream-json": "^1.8.0",
"tiktoken": "^1.0.10", "tiktoken": "^1.0.10",
"uuid": "^9.0.0", "uuid": "^9.0.0",
"zlib": "^1.0.5", "zlib": "^1.0.5",
"zod": "^3.21.4" "zod": "^3.22.3",
"zod-error": "^1.5.0"
}, },
"devDependencies": { "devDependencies": {
"@types/cookie-parser": "^1.4.3", "@types/cookie-parser": "^1.4.3",
"@types/cors": "^2.8.13", "@types/cors": "^2.8.13",
"@types/express": "^4.17.17", "@types/express": "^4.17.17",
"@types/express-session": "^1.17.7",
"@types/multer": "^1.4.7", "@types/multer": "^1.4.7",
"@types/node-schedule": "^2.1.0",
"@types/sanitize-html": "^2.9.0",
"@types/showdown": "^2.0.0", "@types/showdown": "^2.0.0",
"@types/stream-json": "^1.7.7",
"@types/uuid": "^9.0.1", "@types/uuid": "^9.0.1",
"concurrently": "^8.0.1", "concurrently": "^8.0.1",
"esbuild": "^0.17.16", "esbuild": "^0.17.16",
"esbuild-register": "^3.4.2", "esbuild-register": "^3.4.2",
"husky": "^8.0.3",
"nodemon": "^3.0.1", "nodemon": "^3.0.1",
"source-map-support": "^0.5.21", "pino-pretty": "^10.2.3",
"prettier": "^3.0.3",
"ts-node": "^10.9.1", "ts-node": "^10.9.1",
"typescript": "^5.0.4" "typescript": "^5.4.2"
}, },
"overrides": { "overrides": {
"google-gax": "^3.6.1" "google-gax": "^3.6.1",
"postcss": "^8.4.31",
"follow-redirects": "^1.15.4"
} }
} }
+276
@@ -0,0 +1,276 @@
# OAI Reverse Proxy
###
# @name OpenAI -- Chat Completions
POST https://api.openai.com/v1/chat/completions
Authorization: Bearer {{oai-key-1}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo",
"max_tokens": 30,
"stream": false,
"messages": [
{
"role": "user",
"content": "This is a test prompt."
}
]
}
###
# @name OpenAI -- Text Completions
POST https://api.openai.com/v1/completions
Authorization: Bearer {{oai-key-1}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo-instruct",
"max_tokens": 30,
"stream": false,
"prompt": "This is a test prompt where"
}
###
# @name OpenAI -- Create Embedding
POST https://api.openai.com/v1/embeddings
Authorization: Bearer {{oai-key-1}}
Content-Type: application/json
{
"model": "text-embedding-ada-002",
"input": "This is a test embedding input."
}
###
# @name OpenAI -- Get Organizations
GET https://api.openai.com/v1/organizations
Authorization: Bearer {{oai-key-1}}
###
# @name OpenAI -- Get Models
GET https://api.openai.com/v1/models
Authorization: Bearer {{oai-key-1}}
###
# @name Azure OpenAI -- Chat Completions
POST https://{{azu-resource-name}}.openai.azure.com/openai/deployments/{{azu-deployment-id}}/chat/completions?api-version=2023-09-01-preview
api-key: {{azu-key-1}}
Content-Type: application/json
{
"max_tokens": 1,
"stream": false,
"messages": [
{
"role": "user",
"content": "This is a test prompt."
}
]
}
###
# @name Proxy / OpenAI -- Get Models
GET {{proxy-host}}/proxy/openai/v1/models
Authorization: Bearer {{proxy-key}}
###
# @name Proxy / OpenAI -- Native Chat Completions
POST {{proxy-host}}/proxy/openai/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-4-1106-preview",
"max_tokens": 20,
"stream": true,
"temperature": 1,
"seed": 123,
"messages": [
{
"role": "user",
"content": "phrase one"
}
]
}
###
# @name Proxy / OpenAI -- Native Text Completions
POST {{proxy-host}}/proxy/openai/v1/turbo-instruct/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo-instruct",
"max_tokens": 20,
"temperature": 0,
"prompt": "Genshin Impact is a game about",
"stream": false
}
###
# @name Proxy / OpenAI -- Chat-to-Text API Translation
# Accepts a chat completion request and reformats it to work with the text completion API. `model` is ignored.
POST {{proxy-host}}/proxy/openai/turbo-instruct/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-4",
"max_tokens": 20,
"stream": true,
"messages": [
{
"role": "user",
"content": "What is the name of the fourth president of the united states?"
},
{
"role": "assistant",
"content": "That would be George Washington."
},
{
"role": "user",
"content": "I don't think that's right..."
}
]
}
###
# @name Proxy / OpenAI -- Create Embedding
POST {{proxy-host}}/proxy/openai/embeddings
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "text-embedding-ada-002",
"input": "This is a test embedding input."
}
###
# @name Proxy / Anthropic -- Native Completion (old API)
POST {{proxy-host}}/proxy/anthropic/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-01-01
Content-Type: application/json
{
"model": "claude-v1.3",
"max_tokens_to_sample": 20,
"temperature": 0.2,
"stream": true,
"prompt": "What is genshin impact\n\n:Assistant:"
}
###
# @name Proxy / Anthropic -- Native Completion (2023-06-01 API)
POST {{proxy-host}}/proxy/anthropic/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-06-01
Content-Type: application/json
{
"model": "claude-v1.3",
"max_tokens_to_sample": 20,
"temperature": 0.2,
"stream": true,
"prompt": "What is genshin impact\n\n:Assistant:"
}
###
# @name Proxy / Anthropic -- OpenAI-to-Anthropic API Translation
POST {{proxy-host}}/proxy/anthropic/v1/chat/completions
Authorization: Bearer {{proxy-key}}
#anthropic-version: 2023-06-01
Content-Type: application/json
{
"model": "gpt-3.5-turbo",
"max_tokens": 20,
"stream": false,
"temperature": 0,
"messages": [
{
"role": "user",
"content": "What is genshin impact"
}
]
}
###
# @name Proxy / AWS Claude -- Native Completion
POST {{proxy-host}}/proxy/aws/claude/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-01-01
Content-Type: application/json
{
"model": "claude-v2",
"max_tokens_to_sample": 10,
"temperature": 0,
"stream": true,
"prompt": "What is genshin impact\n\n:Assistant:"
}
###
# @name Proxy / AWS Claude -- OpenAI-to-Anthropic API Translation
POST {{proxy-host}}/proxy/aws/claude/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo",
"max_tokens": 50,
"stream": true,
"messages": [
{
"role": "user",
"content": "What is genshin impact?"
}
]
}
###
# @name Proxy / Azure OpenAI -- Native Chat Completions
POST {{proxy-host}}/proxy/azure/openai/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-4",
"max_tokens": 20,
"stream": true,
"temperature": 1,
"seed": 2,
"messages": [
{
"role": "user",
"content": "Hi what is the name of the fourth president of the united states?"
},
{
"role": "assistant",
"content": "That would be George Washington."
},
{
"role": "user",
"content": "That's not right."
}
]
}
###
# @name Proxy / Google AI -- OpenAI-to-Google AI API Translation
POST {{proxy-host}}/proxy/google-ai/v1/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-4",
"max_tokens": 42,
"messages": [
{
"role": "user",
"content": "Hi what is the name of the fourth president of the united states?"
}
]
}
+45
@@ -0,0 +1,45 @@
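// Ad-hoc load test: fires a burst of concurrent chat completion requests at a local proxy.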
const axios = require("axios");
const concurrentRequests = 75;
const headers = {
Authorization: "Bearer test",
"Content-Type": "application/json",
};
const payload = {
model: "gpt-4",
max_tokens: 1,
stream: false,
messages: [{ role: "user", content: "Hi" }],
};
const makeRequest = async (i) => {
try {
const response = await axios.post(
"http://localhost:7860/proxy/google-ai/v1/chat/completions",
payload,
{ headers }
);
console.log(
`Req ${i} finished with status code ${response.status} and response:`,
response.data
);
} catch (error) {
const msg = error.response && error.response.data;
console.error(`Error in req ${i}:`, error.message, msg || "");
}
};
const executeRequestsConcurrently = () => {
const promises = [];
for (let i = 1; i <= concurrentRequests; i++) {
console.log(`Starting request ${i}`);
promises.push(makeRequest(i));
}
Promise.all(promises).then(() => {
console.log("All requests finished");
});
};
executeRequestsConcurrently();
+32 -10
@@ -1,7 +1,8 @@
import { Router } from "express"; import { Router } from "express";
import { z } from "zod"; import { z } from "zod";
import * as userStore from "../../proxy/auth/user-store"; import * as userStore from "../../shared/users/user-store";
import { UserSchema, UserSchemaWithToken, parseSort, sortBy } from "../common"; import { parseSort, sortBy } from "../../shared/utils";
import { UserPartialSchema, UserSchema } from "../../shared/users/schema";
const router = Router(); const router = Router();
@@ -29,11 +30,32 @@ router.get("/:token", (req, res) => {
/**
* Creates a new user.
* Optionally accepts a JSON body containing `type`, and for temporary-type
* users, `tokenLimits` and `expiresAt` fields.
* Returns the created user's token.
* POST /admin/users
*/
router.post("/", (req, res) => {
const body = req.body;
const base = z.object({
type: UserSchema.shape.type.exclude(["temporary"]).default("normal"),
});
const tempUser = base
.extend({
type: z.literal("temporary"),
expiresAt: UserSchema.shape.expiresAt,
tokenLimits: UserSchema.shape.tokenLimits,
})
.required();
const schema = z.union([base, tempUser]);
const result = schema.safeParse(body);
if (!result.success) {
return res.status(400).json({ error: result.error });
}
const token = userStore.createUser({ ...result.data });
res.json({ token });
});
@@ -44,11 +66,14 @@ router.post("/", (req, res) => {
* PUT /admin/users/:token
*/
router.put("/:token", (req, res) => {
const result = UserPartialSchema.safeParse({
...req.body,
token: req.params.token,
});
if (!result.success) {
return res.status(400).json({ error: result.error });
}
userStore.upsertUser(result.data);
res.json(userStore.getUser(req.params.token));
});
@@ -59,15 +84,12 @@ router.put("/:token", (req, res) => {
* PUT /admin/users
*/
router.put("/", (req, res) => {
const result = z.array(UserPartialSchema).safeParse(req.body.users);
if (!result.success) {
return res.status(400).json({ error: result.error });
}
const upserts = result.data.map((user) => userStore.upsertUser(user));
res.json({ upserted_users: upserts, count: upserts.length });
});
/**
+9 -13
@@ -10,14 +10,10 @@ export const authorize: ({ via }: AuthorizeParams) => RequestHandler =
({ via }) =>
(req, res, next) => {
const bearerToken = req.headers.authorization?.slice("Bearer ".length);
const cookieToken = req.session.adminToken;
const token = via === "cookie" ? cookieToken : bearerToken;
const attempts = failedAttempts.get(req.ip) ?? 0;
if (!ADMIN_KEY) {
req.log.warn(
{ ip: req.ip },
@@ -34,16 +30,15 @@ export const authorize: ({ via }: AuthorizeParams) => RequestHandler =
return res.status(401).json({ error: "Too many attempts" }); return res.status(401).json({ error: "Too many attempts" });
} }
if (token !== ADMIN_KEY) { if (token && token === ADMIN_KEY) {
return next();
}
req.log.warn( req.log.warn(
{ ip: req.ip, attempts, token }, { ip: req.ip, attempts, invalidToken: String(token) },
`Attempted admin request with invalid token` `Attempted admin request with invalid token`
); );
return handleFailedLogin(req, res); return handleFailedLogin(req, res);
}
req.log.info({ ip: req.ip }, `Admin request authorized`);
next();
}; };
function handleFailedLogin(req: Request, res: Response) {
@@ -53,6 +48,7 @@ function handleFailedLogin(req: Request, res: Response) {
if (req.accepts("json", "html") === "json") {
return res.status(401).json({ error: "Unauthorized" });
}
delete req.session.adminToken;
req.session.flash = { type: "error", message: `Invalid admin key.` };
return res.redirect("/admin/login");
}
-24
@@ -1,24 +0,0 @@
import { doubleCsrf } from "csrf-csrf";
import { v4 as uuid } from "uuid";
import express from "express";
const CSRF_SECRET = uuid();
const { generateToken, doubleCsrfProtection } = doubleCsrf({
getSecret: () => CSRF_SECRET,
cookieName: "csrf",
cookieOptions: { sameSite: "strict", path: "/" },
getTokenFromRequest: (req) => req.body["_csrf"] || req.query["_csrf"],
});
const injectCsrfToken: express.RequestHandler = (req, res, next) => {
res.locals.csrfToken = generateToken(res, req);
// force generation of new token on back button
// TODO: implement session-based CSRF tokens
res.setHeader("Cache-Control", "no-cache, no-store, must-revalidate");
res.setHeader("Pragma", "no-cache");
res.setHeader("Expires", "0");
next();
};
export { injectCsrfToken, doubleCsrfProtection as checkCsrfToken };
+5 -8
@@ -2,25 +2,22 @@ import { Router } from "express";
const loginRouter = Router();
loginRouter.get("/login", (_req, res) => {
res.render("admin_login");
});
loginRouter.post("/login", (req, res) => {
req.session.adminToken = req.body.token;
res.redirect("/admin");
});
loginRouter.get("/logout", (req, res) => {
delete req.session.adminToken;
res.redirect("/admin/login");
});
loginRouter.get("/", (req, res) => {
if (req.session.adminToken) {
return res.redirect("/admin/manage");
}
res.redirect("/admin/login");
+44 -7
@@ -1,10 +1,14 @@
import express, { Router } from "express";
import { authorize } from "./auth";
import { HttpError } from "../shared/errors";
import { injectLocals } from "../shared/inject-locals";
import { withSession } from "../shared/with-session";
import { injectCsrfToken, checkCsrfToken } from "../shared/inject-csrf";
import { renderPage } from "../info-page";
import { buildInfo } from "../service-info";
import { loginRouter } from "./login";
import { usersApiRouter as apiRouter } from "./api/users";
import { usersWebRouter as webRouter } from "./web/manage";
const adminRouter = Router();
@@ -12,13 +16,46 @@ adminRouter.use(
express.json({ limit: "20mb" }),
express.urlencoded({ extended: true, limit: "20mb" })
);
adminRouter.use(withSession);
adminRouter.use(injectCsrfToken);
adminRouter.use("/users", authorize({ via: "header" }), apiRouter);
adminRouter.use(checkCsrfToken);
adminRouter.use(injectLocals);
adminRouter.use("/", loginRouter);
adminRouter.use("/manage", authorize({ via: "cookie" }), webRouter);
adminRouter.use("/service-info", authorize({ via: "cookie" }), (req, res) => {
return res.send(
renderPage(buildInfo(req.protocol + "://" + req.get("host"), true))
);
});
adminRouter.use(
(
err: Error,
req: express.Request,
res: express.Response,
_next: express.NextFunction
) => {
const data: any = { message: err.message, stack: err.stack };
if (err instanceof HttpError) {
data.status = err.status;
res.status(err.status);
if (req.accepts(["html", "json"]) === "json") {
return res.json({ error: data });
}
return res.render("admin_error", data);
} else if (err.name === "ForbiddenError") {
data.status = 403;
if (err.message === "invalid csrf token") {
data.message =
"Invalid CSRF token; try refreshing the previous page before submitting again.";
}
return res.status(403).render("admin_error", { ...data, flash: null });
}
res.status(500).json({ error: data });
}
);
export { adminRouter };
-135
@@ -1,135 +0,0 @@
import { Router } from "express";
import multer from "multer";
import { z } from "zod";
import { config } from "../../config";
import * as userStore from "../../proxy/auth/user-store";
import {
UserSchemaWithToken,
parseSort,
sortBy,
paginate,
UserSchema,
} from "../common";
const router = Router();
const upload = multer({
storage: multer.memoryStorage(),
fileFilter: (_req, file, cb) => {
if (file.mimetype !== "application/json") {
cb(new Error("Invalid file type"));
} else {
cb(null, true);
}
},
});
router.get("/create-user", (req, res) => {
const recentUsers = userStore
.getUsers()
.sort(sortBy(["createdAt"], false))
.slice(0, 5);
res.render("admin/create-user", {
recentUsers,
newToken: !!req.query.created,
});
});
router.post("/create-user", (_req, res) => {
userStore.createUser();
return res.redirect(`/admin/manage/create-user?created=true`);
});
router.get("/view-user/:token", (req, res) => {
const user = userStore.getUser(req.params.token);
if (!user) {
return res.status(404).send("User not found");
}
res.render("admin/view-user", { user });
});
router.get("/list-users", (req, res) => {
const sort = parseSort(req.query.sort) || ["promptCount", "lastUsedAt"];
const requestedPageSize =
Number(req.query.perPage) || Number(req.cookies.perPage) || 20;
const perPage = Math.max(1, Math.min(1000, requestedPageSize));
const users = userStore.getUsers().sort(sortBy(sort, false));
const page = Number(req.query.page) || 1;
const { items, ...pagination } = paginate(users, page, perPage);
return res.render("admin/list-users", {
sort: sort.join(","),
users: items,
...pagination,
});
});
router.get("/import-users", (req, res) => {
const imported = Number(req.query.imported) || 0;
res.render("admin/import-users", { imported });
});
router.post("/import-users", upload.single("users"), (req, res) => {
if (!req.file) {
return res.status(400).json({ error: "No file uploaded" });
}
const data = JSON.parse(req.file.buffer.toString());
const result = z.array(UserSchemaWithToken).safeParse(data.users);
if (!result.success) {
return res.status(400).json({ error: result.error });
}
const upserts = result.data.map((user) => userStore.upsertUser(user));
res.redirect(`/admin/manage/import-users?imported=${upserts.length}`);
});
router.get("/export-users", (_req, res) => {
res.render("admin/export-users");
});
router.get("/export-users.json", (_req, res) => {
const users = userStore.getUsers();
res.setHeader("Content-Disposition", "attachment; filename=users.json");
res.setHeader("Content-Type", "application/json");
res.send(JSON.stringify({ users }, null, 2));
});
router.get("/", (_req, res) => {
res.render("admin/index", {
isPersistenceEnabled: config.gatekeeperStore !== "memory",
});
});
router.post("/edit-user/:token", (req, res) => {
const result = UserSchema.safeParse(req.body);
if (!result.success) {
return res.status(400).send(result.error);
}
userStore.upsertUser({ ...result.data, token: req.params.token });
return res.sendStatus(204);
});
router.post("/reactivate-user/:token", (req, res) => {
const user = userStore.getUser(req.params.token);
if (!user) {
return res.status(404).send("User not found");
}
userStore.upsertUser({
token: user.token,
disabledAt: 0,
disabledReason: "",
});
return res.sendStatus(204);
});
router.post("/disable-user/:token", (req, res) => {
const user = userStore.getUser(req.params.token);
if (!user) {
return res.status(404).send("User not found");
}
userStore.disableUser(req.params.token, req.body.reason);
return res.sendStatus(204);
});
export { router as usersUiRouter };
+372
@@ -0,0 +1,372 @@
import { Router } from "express";
import multer from "multer";
import { z } from "zod";
import { config } from "../../config";
import { HttpError } from "../../shared/errors";
import * as userStore from "../../shared/users/user-store";
import { parseSort, sortBy, paginate } from "../../shared/utils";
import { keyPool } from "../../shared/key-management";
import { LLMService, MODEL_FAMILIES } from "../../shared/models";
import { getTokenCostUsd, prettyTokens } from "../../shared/stats";
import {
User,
UserPartialSchema,
UserSchema,
UserTokenCounts,
} from "../../shared/users/schema";
import { getLastNImages } from "../../shared/file-storage/image-history";
const router = Router();
const upload = multer({
storage: multer.memoryStorage(),
fileFilter: (_req, file, cb) => {
if (file.mimetype !== "application/json") {
cb(new Error("Invalid file type"));
} else {
cb(null, true);
}
},
});
router.get("/create-user", (req, res) => {
const recentUsers = userStore
.getUsers()
.sort(sortBy(["createdAt"], false))
.slice(0, 5);
res.render("admin_create-user", {
recentUsers,
newToken: !!req.query.created,
});
});
router.post("/create-user", (req, res) => {
const body = req.body;
const base = z.object({ type: UserSchema.shape.type.default("normal") });
const tempUser = base
.extend({
temporaryUserDuration: z.coerce
.number()
.int()
.min(1)
.max(10080 * 4),
})
.merge(
MODEL_FAMILIES.reduce((schema, model) => {
return schema.extend({
[`temporaryUserQuota_${model}`]: z.coerce.number().int().min(0),
});
}, z.object({}))
)
.transform((data: any) => {
const expiresAt = Date.now() + data.temporaryUserDuration * 60 * 1000;
const tokenLimits = MODEL_FAMILIES.reduce((limits, model) => {
limits[model] = data[`temporaryUserQuota_${model}`];
return limits;
}, {} as UserTokenCounts);
return { ...data, expiresAt, tokenLimits };
});
const createSchema = body.type === "temporary" ? tempUser : base;
const result = createSchema.safeParse(body);
if (!result.success) {
throw new HttpError(
400,
result.error.issues.flatMap((issue) => issue.message).join(", ")
);
}
userStore.createUser({ ...result.data });
return res.redirect(`/admin/manage/create-user?created=true`);
});
router.get("/view-user/:token", (req, res) => {
const user = userStore.getUser(req.params.token);
if (!user) throw new HttpError(404, "User not found");
res.render("admin_view-user", { user });
});
router.get("/list-users", (req, res) => {
const sort = parseSort(req.query.sort) || ["sumTokens", "createdAt"];
const requestedPageSize =
Number(req.query.perPage) || Number(req.cookies.perPage) || 20;
const perPage = Math.max(1, Math.min(1000, requestedPageSize));
const users = userStore
.getUsers()
.map((user) => {
const sums = getSumsForUser(user);
return { ...user, ...sums };
})
.sort(sortBy(sort, false));
const page = Number(req.query.page) || 1;
const { items, ...pagination } = paginate(users, page, perPage);
return res.render("admin_list-users", {
sort: sort.join(","),
users: items,
...pagination,
});
});
router.get("/import-users", (_req, res) => {
res.render("admin_import-users");
});
router.post("/import-users", upload.single("users"), (req, res) => {
if (!req.file) throw new HttpError(400, "No file uploaded");
const data = JSON.parse(req.file.buffer.toString());
const result = z.array(UserPartialSchema).safeParse(data.users);
if (!result.success) throw new HttpError(400, result.error.toString());
const upserts = result.data.map((user) => userStore.upsertUser(user));
req.session.flash = {
type: "success",
message: `${upserts.length} users imported`,
};
res.redirect("/admin/manage/import-users");
});
router.get("/export-users", (_req, res) => {
res.render("admin_export-users");
});
router.get("/export-users.json", (_req, res) => {
const users = userStore.getUsers();
res.setHeader("Content-Disposition", "attachment; filename=users.json");
res.setHeader("Content-Type", "application/json");
res.send(JSON.stringify({ users }, null, 2));
});
router.get("/", (_req, res) => {
res.render("admin_index");
});
router.post("/edit-user/:token", (req, res) => {
const result = UserPartialSchema.safeParse({
...req.body,
token: req.params.token,
});
if (!result.success) {
throw new HttpError(
400,
result.error.issues.flatMap((issue) => issue.message).join(", ")
);
}
userStore.upsertUser(result.data);
return res.status(200).json({ success: true });
});
router.post("/reactivate-user/:token", (req, res) => {
const user = userStore.getUser(req.params.token);
if (!user) throw new HttpError(404, "User not found");
userStore.upsertUser({
token: user.token,
disabledAt: null,
disabledReason: null,
});
return res.sendStatus(204);
});
router.post("/disable-user/:token", (req, res) => {
const user = userStore.getUser(req.params.token);
if (!user) throw new HttpError(404, "User not found");
userStore.disableUser(req.params.token, req.body.reason);
return res.sendStatus(204);
});
router.post("/refresh-user-quota", (req, res) => {
const user = userStore.getUser(req.body.token);
if (!user) throw new HttpError(404, "User not found");
userStore.refreshQuota(user.token);
req.session.flash = {
type: "success",
message: "User's quota was refreshed",
};
return res.redirect(`/admin/manage/view-user/${user.token}`);
});
router.post("/maintenance", (req, res) => {
const action = req.body.action;
let flash = { type: "", message: "" };
switch (action) {
case "recheck": {
const checkable: LLMService[] = ["openai", "anthropic", "aws", "azure"];
checkable.forEach((s) => keyPool.recheck(s));
const keyCount = keyPool
.list()
.filter((k) => checkable.includes(k.service)).length;
flash.type = "success";
flash.message = `Scheduled recheck of ${keyCount} keys.`;
break;
}
case "resetQuotas": {
const users = userStore.getUsers();
users.forEach((user) => userStore.refreshQuota(user.token));
const { claude, gpt4, turbo } = config.tokenQuota;
flash.type = "success";
flash.message = `All users' token quotas reset to ${turbo} (Turbo), ${gpt4} (GPT-4), ${claude} (Claude).`;
break;
}
case "resetCounts": {
const users = userStore.getUsers();
users.forEach((user) => userStore.resetUsage(user.token));
flash.type = "success";
flash.message = `All users' token usage records reset.`;
break;
}
case "downloadImageMetadata": {
const data = JSON.stringify({
exportedAt: new Date().toISOString(),
generations: getLastNImages()
}, null, 2);
res.setHeader(
"Content-Disposition",
`attachment; filename=image-metadata-${new Date().toISOString()}.json`
);
res.setHeader("Content-Type", "application/json");
return res.send(data);
}
default: {
throw new HttpError(400, "Invalid action");
}
}
req.session.flash = flash;
return res.redirect(`/admin/manage`);
});
router.get("/download-stats", (_req, res) => {
return res.render("admin_download-stats");
});
router.post("/generate-stats", (req, res) => {
const body = req.body;
const valid = z
.object({
anon: z.coerce.boolean().optional().default(false),
sort: z.string().optional().default("prompts"),
maxUsers: z.coerce
.number()
.int()
.min(5)
.max(1000)
.optional()
.default(1000),
tableType: z.enum(["code", "markdown"]).optional().default("markdown"),
format: z
.string()
.optional()
.default("# Stats\n{{header}}\n{{stats}}\n{{time}}"),
})
.strict()
.safeParse(body);
if (!valid.success) {
throw new HttpError(
400,
valid.error.issues.flatMap((issue) => issue.message).join(", ")
);
}
const { anon, sort, format, maxUsers, tableType } = valid.data;
const users = userStore.getUsers();
let totalTokens = 0;
let totalCost = 0;
let totalPrompts = 0;
let totalIps = 0;
const lines = users
.map((u) => {
const sums = getSumsForUser(u);
totalTokens += sums.sumTokens;
totalCost += sums.sumCost;
totalPrompts += u.promptCount;
totalIps += u.ip.length;
const getName = (u: User) => {
const id = `...${u.token.slice(-5)}`;
const banned = !!u.disabledAt;
let nick = anon || !u.nickname ? "Anonymous" : u.nickname;
if (tableType === "markdown") {
nick = banned ? `~~${nick}~~` : nick;
return `${nick.slice(0, 18)} | ${id}`;
} else {
// Strikethrough doesn't work within code blocks
const dead = !!u.disabledAt ? "[dead] " : "";
nick = `${dead}${nick}`;
return `${nick.slice(0, 18).padEnd(18)} ${id}`.padEnd(27);
}
};
const user = getName(u);
const prompts = `${u.promptCount} proompts`.padEnd(14);
const ips = `${u.ip.length} IPs`.padEnd(8);
const tokens = `${sums.prettyUsage} tokens`.padEnd(30);
const sortField = sort === "prompts" ? u.promptCount : sums.sumTokens;
return { user, prompts, ips, tokens, sortField };
})
.sort((a, b) => b.sortField - a.sortField)
.map(({ user, prompts, ips, tokens }, i) => {
const pos = tableType === "markdown" ? (i + 1 + ".").padEnd(4) : "";
return `${pos}${user} | ${prompts} | ${ips} | ${tokens}`;
})
.slice(0, maxUsers);
const strTotalPrompts = `${totalPrompts} proompts`;
const strTotalIps = `${totalIps} IPs`;
const strTotalTokens = `${prettyTokens(totalTokens)} tokens`;
const strTotalCost = `US$${totalCost.toFixed(2)} cost`;
const header = `!!!Note ${users.length} users | ${strTotalPrompts} | ${strTotalIps} | ${strTotalTokens} | ${strTotalCost}`;
const time = `\n-> *(as of ${new Date().toISOString()})* <-`;
let table = [];
table.push(lines.join("\n"));
if (valid.data.tableType === "markdown") {
table = ["User||Prompts|IPs|Usage", "---|---|---|---|---", ...table];
} else {
table = ["```text", ...table, "```"];
}
const result = format
.replace("{{header}}", header)
.replace("{{stats}}", table.join("\n"))
.replace("{{time}}", time);
res.setHeader(
"Content-Disposition",
`attachment; filename=proxy-stats-${new Date().toISOString()}.md`
);
res.setHeader("Content-Type", "text/markdown");
res.send(result);
});
function getSumsForUser(user: User) {
const sums = MODEL_FAMILIES.reduce(
(s, model) => {
const tokens = user.tokenCounts[model] ?? 0;
s.sumTokens += tokens;
s.sumCost += getTokenCostUsd(model, tokens);
return s;
},
{ sumTokens: 0, sumCost: 0, prettyUsage: "" }
);
sums.prettyUsage = `${prettyTokens(sums.sumTokens)} ($${sums.sumCost.toFixed(
2
)})`;
return sums;
}
export { router as usersWebRouter };
+133
@@ -0,0 +1,133 @@
<%- include("partials/shared_header", { title: "Create User - OAI Reverse Proxy Admin" }) %>
<style>
#temporaryUserOptions {
margin-top: 1em;
max-width: 30em;
}
#temporaryUserOptions h3 {
margin-bottom: -0.4em;
}
input[type="number"] {
max-width: 10em;
}
.temporary-user-fieldset {
display: grid;
grid-template-columns: repeat(4, 1fr); /* Four equal-width columns */
column-gap: 1em;
row-gap: 0.2em;
}
.full-width {
grid-column: 1 / -1;
}
.quota-label {
text-align: right;
}
</style>
<h1>Create User Token</h1>
<p>User token types:</p>
<ul>
<li><strong>Normal</strong> - Standard users.</li>
<li><strong>Special</strong> - Exempt from token quotas and <code>MAX_IPS_PER_USER</code> enforcement.</li>
<li><strong>Temporary</strong> - Disabled after a specified duration. Quotas never refresh.</li>
</ul>
<form action="/admin/manage/create-user" method="post">
<input type="hidden" name="_csrf" value="<%= csrfToken %>" />
<label for="type">Type</label>
<select name="type">
<option value="normal">Normal</option>
<option value="special">Special</option>
<option value="temporary">Temporary</option>
</select>
<input type="submit" value="Create" />
<fieldset id="temporaryUserOptions" style="display: none">
<legend>Temporary User Options</legend>
<div class="temporary-user-fieldset">
<p class="full-width">
Temporary users will be disabled after the specified duration, and their records will be deleted 72 hours after that.
These options apply only to new
temporary users; existing ones use whatever options were in effect when they were created.
</p>
<label for="temporaryUserDuration" class="full-width">Access duration (in minutes)</label>
<input type="number" name="temporaryUserDuration" id="temporaryUserDuration" value="60" class="full-width" />
<!-- convenience calculations -->
<span>6 hours:</span><code>360</code>
<span>12 hours:</span><code>720</code>
<span>1 day:</span><code>1440</code>
<span>1 week:</span><code>10080</code>
<h3 class="full-width">Token Quotas</h3>
<p class="full-width">Temporary users' quotas are never refreshed.</p>
<% Object.entries(quota).forEach(function([model, tokens]) { %>
<label class="quota-label" for="temporaryUserQuota_<%= model %>"><%= model %></label>
<input
type="number"
name="temporaryUserQuota_<%= model %>"
id="temporaryUserQuota_<%= model %>"
value="0"
data-fieldtype="tokenquota"
data-default="<%= tokens %>" />
<% }) %>
</div>
</fieldset>
</form>
<% if (newToken) { %>
<p>Just created <code><%= recentUsers[0].token %></code>.</p>
<% } %>
<h2>Recent Tokens</h2>
<ul>
<% recentUsers.forEach(function(user) { %>
<li><a href="/admin/manage/view-user/<%= user.token %>"><%= user.token %></a></li>
<% }) %>
</ul>
<script>
const typeInput = document.querySelector("select[name=type]");
const temporaryUserOptions = document.querySelector("#temporaryUserOptions");
typeInput.addEventListener("change", function () {
localStorage.setItem("admin__create-user__type", typeInput.value);
if (typeInput.value === "temporary") {
temporaryUserOptions.style.display = "block";
} else {
temporaryUserOptions.style.display = "none";
}
});
function loadDefaults() {
const defaultType = localStorage.getItem("admin__create-user__type");
if (defaultType) {
typeInput.value = defaultType;
typeInput.dispatchEvent(new Event("change"));
}
const durationInput = document.querySelector("input[name=temporaryUserDuration]");
const defaultDuration = localStorage.getItem("admin__create-user__duration");
durationInput.addEventListener("change", function () {
localStorage.setItem("admin__create-user__duration", durationInput.value);
});
if (defaultDuration) {
durationInput.value = defaultDuration;
}
const tokenQuotaInputs = document.querySelectorAll("input[data-fieldtype=tokenquota]");
tokenQuotaInputs.forEach(function (input) {
const defaultQuota = localStorage.getItem("admin__create-user__quota__" + input.id);
input.addEventListener("change", function () {
localStorage.setItem("admin__create-user__quota__" + input.id, input.value);
});
if (defaultQuota) {
input.value = defaultQuota;
}
});
}
loadDefaults();
</script>
<%- include("partials/admin-footer") %>
@@ -0,0 +1,147 @@
<%- include("partials/shared_header", { title: "Download Stats - OAI Reverse Proxy Admin" }) %>
<style>
#statsForm {
display: flex;
flex-direction: column;
}
#statsForm div {
display: flex;
flex-direction: row;
margin-bottom: 0.5em;
}
#statsForm div label {
width: 6em;
text-align: right;
margin-right: 1em;
}
#statsForm ul {
margin: 0;
padding-left: 2em;
font-size: 0.8em;
}
#statsForm li {
list-style: none;
}
#statsForm textarea {
font-family: monospace;
flex-grow: 1;
}
</style>
<h1>Download Stats</h1>
<p>
Download usage statistics to a Markdown document. You can paste this into a service like Rentry.org to share it.
</p>
<div>
<h3>Options</h3>
<form id="statsForm" action="/admin/manage/generate-stats" method="post"
style="display: flex; flex-direction: column;">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<div>
<label for="anon">Anonymize</label>
<input id="anon" type="checkbox" name="anon" value="true" />
</div>
<div>
<label for="sort">Sort</label>
<select id="sort" name="sort">
<option value="tokens" selected>By Token Count</option>
<option value="prompts">By Prompt Count</option>
</select>
</div>
<div>
<label for="maxUsers">Max Users</label>
<input id="maxUsers" type="number" name="maxUsers" value="1000" />
</div>
<div>
<label for="tableType">Table Type</label>
<select id="tableType" name="tableType">
<option value="markdown" selected>Markdown Table</option>
<option value="code">Code Block</option>
</select>
</div>
<div>
<label for="format">Custom Format <ul>
<li><code>{{header}}</code></li>
<li><code>{{stats}}</code></li>
<li><code>{{time}}</code></li>
</ul></label>
<textarea id="format" name="format" rows="10" cols="50" placeholder="{{stats}}">
# Stats
{{header}}
{{stats}}
{{time}}
</textarea>
</div>
<div>
<button type="submit">Download</button>
<button id="copyButton" type="button">Copy to Clipboard</button>
</div>
</form>
</div>
<script>
function loadDefaults() {
const getState = (key) => localStorage.getItem("admin__download-stats__" + key);
const setState = (key, value) => localStorage.setItem("admin__download-stats__" + key, value);
const checkboxes = ["anon"];
const values = ["sort", "format", "tableType", "maxUsers"];
checkboxes.forEach((key) => {
const value = getState(key);
if (value) {
document.getElementById(key).checked = value == "true";
}
document.getElementById(key).addEventListener("change", (e) => {
setState(key, e.target.checked);
});
});
values.forEach((key) => {
const value = getState(key);
if (value) {
document.getElementById(key).value = value;
}
document.getElementById(key).addEventListener("change", (e) => {
setState(key, e.target.value?.trim());
});
});
}
loadDefaults();
async function fetchAndCopy() {
const form = document.getElementById('statsForm');
const formData = new FormData(form);
const response = await fetch(form.action, {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
credentials: 'same-origin',
body: new URLSearchParams(formData),
});
if (response.ok) {
const content = await response.text();
copyToClipboard(content);
} else {
throw new Error('Failed to fetch generated stats. Try reloading the page.');
}
}
function copyToClipboard(text) {
navigator.clipboard.writeText(text).then(() => {
alert('Copied to clipboard');
}).catch(err => {
alert('Failed to copy to clipboard. Try downloading the file instead.');
});
}
document.getElementById('copyButton').addEventListener('click', fetchAndCopy);
</script>
<%- include("partials/admin-footer") %>
+8
@@ -0,0 +1,8 @@
<%- include("partials/shared_header", { title: "Error" }) %>
<div id="error-content" style="color: red; background-color: #eedddd; padding: 1em">
<p><strong>⚠️ Error <%= status %>:</strong> <%= message %></p>
<pre><%= stack %></pre>
<a href="#" onclick="window.history.back()">Go Back</a> | <a href="/admin">Go Home</a>
</div>
</body>
</html>
@@ -1,4 +1,4 @@
<%- include("../_partials/admin-header", { title: "Export Users - OAI Reverse Proxy Admin" }) %> <%- include("partials/shared_header", { title: "Export Users - OAI Reverse Proxy Admin" }) %>
<h1>Export Users</h1> <h1>Export Users</h1>
<p> <p>
Export users to JSON. The JSON will be an array of objects under the key Export users to JSON. The JSON will be an array of objects under the key
@@ -25,4 +25,4 @@
}
</script>
<button onclick="exportUsers()">Export</button>
<%- include("partials/admin-footer") %>
@@ -1,4 +1,4 @@
<%- include("../_partials/admin-header", { title: "Import Users - OAI Reverse Proxy Admin" }) %> <%- include("partials/shared_header", { title: "Import Users - OAI Reverse Proxy Admin" }) %>
<h1>Import Users</h1> <h1>Import Users</h1>
<p> <p>
Import users from JSON. The JSON should be an array of objects under the key Import users from JSON. The JSON should be an array of objects under the key
@@ -6,6 +6,7 @@
</p>
<ul>
<li><code>token</code> (required): a unique identifier for the user</li>
<li><code>nickname</code> (optional): a nickname for the user, max 80 chars</li>
<li><code>ip</code> (optional): IP addresses the user has connected from</li>
<li>
<code>type</code> (optional): either <code>normal</code> or
@@ -16,8 +17,14 @@
prompt
</li>
<li>
<code>tokenCounts</code> (optional): the number of tokens the user has
consumed. This should be an object with keys <code>turbo</code>,
<code>gpt4</code>, and <code>claude</code>.
</li>
<li>
<code>tokenLimits</code> (optional): the number of tokens the user can
consume. This should be an object with keys <code>turbo</code>,
<code>gpt4</code>, and <code>claude</code>.
</li>
<li>
<code>createdAt</code> (optional): the timestamp when the user was created
@@ -38,7 +45,4 @@
<input type="submit" value="Import" /> <input type="submit" value="Import" />
</form> </form>
</form> </form>
<% if (imported > 0) { %> <%- include("partials/admin-footer") %>
<p>Imported <code><%= imported %></code> users.</p>
<% } %>
<%- include("../_partials/admin-footer") %>
+78
@@ -0,0 +1,78 @@
<%- include("partials/shared_header", { title: "OAI Reverse Proxy Admin" }) %>
<h1>OAI Reverse Proxy Admin</h1>
<% if (!usersEnabled) { %>
<p style="color: red; background-color: #eedddd; padding: 1em">
<strong>🚨 <code>user_token</code> gatekeeper is not enabled.</strong><br />
<br />None of the user management features will do anything.
</p>
<% } %>
<% if (!persistenceEnabled) { %>
<p style="color: red; background-color: #eedddd; padding: 1em">
<strong>⚠️ Users will be lost when the server restarts because persistence is not configured.</strong><br />
<br />Be sure to export your users and import them again after restarting the server if you want to keep them.<br />
<br />
See the
<a target="_blank" href="https://gitgud.io/khanon/oai-reverse-proxy/-/blob/main/docs/user-management.md#firebase-realtime-database">
user management documentation</a
>
to learn how to set up persistence.
</p>
<% } %>
<h3>Users</h3>
<ul>
<li><a href="/admin/manage/list-users">List Users</a></li>
<li><a href="/admin/manage/create-user">Create User</a></li>
<li><a href="/admin/manage/import-users">Import Users</a></li>
<li><a href="/admin/manage/export-users">Export Users</a></li>
<li><a href="/admin/manage/download-stats">Download Rentry Stats</a>
<li><a href="/admin/service-info">Service Info</a></li>
</ul>
<h3>Maintenance</h3>
<form id="maintenanceForm" action="/admin/manage/maintenance" method="post">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<input id="hiddenAction" type="hidden" name="action" value="" />
<div display="flex" flex-direction="column">
<fieldset>
<legend>Key Recheck</legend>
<button id="recheck-keys" type="button" onclick="submitForm('recheck')">Force Key Recheck</button>
<label for="recheck-keys">Triggers a recheck of all keys without restarting the server.</label>
</fieldset>
<% if (quotasEnabled) { %>
<fieldset>
<legend>Bulk Quota Management</legend>
<p>
<button id="refresh-quotas" type="button" onclick="submitForm('resetQuotas')">Refresh All Quotas</button>
Resets all users' quotas to the values set in the <code>TOKEN_QUOTA_*</code> environment variables.
</p>
<p>
<button id="clear-token-counts" type="button" onclick="submitForm('resetCounts')">Clear All Token Counts</button>
Resets all users' token records to zero.
</p>
</fieldset>
<% } %>
<% if (imageGenerationEnabled) { %>
<fieldset>
<legend>Image Generation</legend>
<button id="download-image-metadata" type="button" onclick="submitForm('downloadImageMetadata')">Download Image Metadata</button>
<label for="download-image-metadata">Downloads a metadata file containing URL, prompt, and truncated user token for all cached images.</label>
</fieldset>
<% } %>
</div>
</form>
<script>
let confirmed = false;
function submitForm(action) {
if (action === "resetCounts" && !confirmed) {
document.getElementById("clear-token-counts").innerText = "💣 Confirm Clear All Token Counts";
alert("⚠️ This will permanently clear token records for all users. If you only want to refresh quotas, use the other button.");
confirmed = true;
return;
}
document.getElementById("hiddenAction").value = action;
document.getElementById("maintenanceForm").submit();
}
</script>
<%- include("partials/admin-footer") %>
@@ -1,17 +1,18 @@
<%- include("../_partials/admin-header", { title: "Users - OAI Reverse Proxy Admin" }) %> <%- include("partials/shared_header", { title: "Users - OAI Reverse Proxy Admin" }) %>
<h1>User Token List</h1> <h1>User Token List</h1>
<input type="hidden" name="_csrf" value="<%= csrfToken %>" />
<% if (users.length === 0) { %> <% if (users.length === 0) { %>
<p>No users found.</p> <p>No users found.</p>
<% } else { %> <% } else { %>
<table> <input type="checkbox" id="toggle-nicknames" onchange="toggleNicknames()" />
<label for="toggle-nicknames">Show Nicknames</label>
<table class="striped">
<thead> <thead>
<tr> <tr>
<th>Token</th> <th>User</th>
<th <% if (sort.includes("ip")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=ip">IPs</a></th> <th <% if (sort.includes("ip")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=ip">IPs</a></th>
<th <% if (sort.includes("promptCount")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=promptCount">Prompts</a></th> <th <% if (sort.includes("promptCount")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=promptCount">Prompts</a></th>
<th <% if (sort.includes("sumCost")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=sumCost">Usage</a></th>
<th>Type</th> <th>Type</th>
<th <% if (sort.includes("createdAt")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=createdAt">Created (UTC)</a></th> <th <% if (sort.includes("createdAt")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=createdAt">Created (UTC)</a></th>
<th <% if (sort.includes("lastUsedAt")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=lastUsedAt">Last Used (UTC)</a></th> <th <% if (sort.includes("lastUsedAt")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=lastUsedAt">Last Used (UTC)</a></th>
@@ -22,10 +23,18 @@
<% users.forEach(function(user){ %> <% users.forEach(function(user){ %>
<tr> <tr>
<td> <td>
<code><a href="/admin/manage/view-user/<%= user.token %>"><%= user.token %></a></code> <a href="/admin/manage/view-user/<%= user.token %>">
<code class="usertoken"><%= user.token %></code>
<% if (user.nickname) { %>
<span class="nickname" style="display: none"><%= user.nickname %></span>
<% } else { %>
<code class="nickname" style="display: none"><%= "..." + user.token.slice(-5) %></code>
<% } %>
</a>
</td> </td>
<td><%= user.ip.length %></td> <td><%= user.ip.length %></td>
<td><%= user.promptCount %></td> <td><%= user.promptCount %></td>
<td><%= user.prettyUsage %></td>
<td><%= user.type %></td> <td><%= user.type %></td>
<td><%= user.createdAt %></td> <td><%= user.createdAt %></td>
<td><%= user.lastUsedAt ?? "never" %></td> <td><%= user.lastUsedAt ?? "never" %></td>
@@ -40,7 +49,6 @@
</tr> </tr>
<% }); %> <% }); %>
</table> </table>
<ul class="pagination"> <ul class="pagination">
<% if (page > 1) { %> <% if (page > 1) { %>
<li><a href="/admin/manage/list-users?sort=<%= sort %>&page=<%= page - 1 %>">&laquo;</a></li> <li><a href="/admin/manage/list-users?sort=<%= sort %>&page=<%= page - 1 %>">&laquo;</a></li>
@@ -52,54 +60,28 @@
</ul> </ul>
<p>Showing <%= page * pageSize - pageSize + 1 %> to <%= users.length + page * pageSize - pageSize %> of <%= totalCount %> users.</p> <p>Showing <%= page * pageSize - pageSize + 1 %> to <%= users.length + page * pageSize - pageSize %> of <%= totalCount %> users.</p>
<%- include("../_partials/pagination") %> <%- include("partials/shared_pagination") %>
<% } %> <% } %>
<script> <script>
document.querySelectorAll("td.actions a.ban").forEach(function (a) { function toggleNicknames() {
a.addEventListener("click", function (e) { const checked = document.getElementById("toggle-nicknames").checked;
e.preventDefault(); const visibleSelector = checked ? ".nickname" : ".usertoken";
var token = a.getAttribute("data-token"); const hiddenSelector = checked ? ".usertoken" : ".nickname";
if (confirm("Are you sure you want to ban this user?")) { document.querySelectorAll(visibleSelector).forEach(function (el) {
let reason = prompt("Reason for ban:"); el.style.display = "inline";
fetch( });
"/admin/manage/disable-user/" + token, document.querySelectorAll(hiddenSelector).forEach(function (el) {
{ el.style.display = "none";
method: "POST", });
credentials: "same-origin", localStorage.setItem("showNicknames", checked);
body: JSON.stringify({ reason, _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") }),
headers: { "Content-Type": "application/json" }
}).then(() => window.location.reload());
} }
});
});
document.querySelectorAll("td.actions a.unban").forEach(function (a) { const state = localStorage.getItem("showNicknames") === "true";
a.addEventListener("click", function (e) { document.getElementById("toggle-nicknames").checked = state;
e.preventDefault(); toggleNicknames();
var token = a.getAttribute("data-token");
if (confirm("Are you sure you want to unban this user?")) {
fetch(
"/admin/manage/reactivate-user/" + token,
{
method: "POST",
credentials: "same-origin",
body: JSON.stringify({ _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") }),
headers: { "Content-Type": "application/json" }
}
).then(() => window.location.reload());
}
});
});
</script> </script>
<script> <%- include("partials/admin-ban-xhr-script") %>
document.querySelectorAll("td").forEach(function(td) {
if (td.innerText.match(/^\d{13}$/)) { <%- include("partials/admin-footer") %>
if (td.innerText == 0) return 'never';
var date = new Date(parseInt(td.innerText));
td.innerText = date.toISOString().replace("T", " ").replace(/\.\d+Z$/, "");
}
});
</script>
<%- include("../_partials/admin-footer") %>
@@ -1,8 +1,5 @@
<%- include("../_partials/admin-header", { title: "Login" }) %> <%- include("partials/shared_header", { title: "Login" }) %>
<h1>Login</h1> <h1>Login</h1>
<% if (failed) { %>
<p style="color: red;">Please try again.</p>
<% } %>
<form action="/admin/login" method="post"> <form action="/admin/login" method="post">
<input type="hidden" name="_csrf" value="<%= csrfToken %>" /> <input type="hidden" name="_csrf" value="<%= csrfToken %>" />
<label for="token">Admin Key</label> <label for="token">Admin Key</label>
+147
@@ -0,0 +1,147 @@
<%- include("partials/shared_header", { title: "View User - OAI Reverse Proxy Admin" }) %>
<h1>View User</h1>
<table class="striped">
<thead>
<tr>
<th scope="col">Key</th>
<th scope="col" colspan="2">Value</th>
</tr>
</thead>
<tbody>
<tr>
<th scope="row">Token</th>
<td colspan="2"><%- user.token %></td>
</tr>
<tr>
<th scope="row">Nickname</th>
<td><%- user.nickname ?? "none" %></td>
<td class="actions">
<a title="Edit" id="edit-nickname" href="#" data-field="nickname" data-token="<%= user.token %>">✏️</a>
</td>
</tr>
<tr>
<th scope="row">Type</th>
<td><%- user.type %></td>
<td class="actions">
<a title="Edit" id="edit-type" href="#" data-field="type" data-token="<%= user.token %>">✏️</a>
</td>
</tr>
<tr>
<th scope="row">Prompts</th>
<td colspan="2"><%- user.promptCount %></td>
</tr>
<tr>
<th scope="row">Created At</th>
<td colspan="2"><%- user.createdAt %></td>
</tr>
<tr>
<th scope="row">Last Used At</th>
<td colspan="2"><%- user.lastUsedAt || "never" %></td>
</tr>
<tr>
<th scope="row">Disabled At</th>
<td><%- user.disabledAt %></td>
<td class="actions">
<% if (user.disabledAt) { %>
<a title="Unban" href="#" class="unban" data-token="<%= user.token %>">🔄️</a>
<% } else { %>
<a title="Ban" href="#" class="ban" data-token="<%= user.token %>">🚫</a>
<% } %>
</td>
</tr>
<tr>
<th scope="row">Disabled Reason</th>
<td><%- user.disabledReason %></td>
<% if (user.disabledAt) { %>
<td class="actions">
<a title="Edit" id="edit-disabledReason" href="#" data-field="disabledReason"
data-token="<%= user.token %>">✏️</a>
</td>
<% } %>
</tr>
<tr>
<th scope="row">IP Address Limit</th>
<td><%- (user.maxIps ?? maxIps) || "Unlimited" %></td>
<td class="actions">
<a title="Edit" id="edit-maxIps" href="#" data-field="maxIps" data-token="<%= user.token %>">✏️</a>
</td>
</tr>
<tr>
<th scope="row">IPs</th>
<td colspan="2"><%- include("partials/shared_user_ip_list", { user, shouldRedact: false }) %></td>
</tr>
<tr>
<th scope="row">Admin Note <span title="Unlike nickname, this is not visible to or editable by the user">🔒</span>
</th>
<td><%- user.adminNote ?? "none" %></td>
<td class="actions">
<a title="Edit" id="edit-adminNote" href="#" data-field="adminNote" data-token="<%= user.token %>">✏️</a>
</td>
</tr>
<% if (user.type === "temporary") { %>
<tr>
<th scope="row">Expires At</th>
<td colspan="2"><%- user.expiresAt %></td>
</tr>
<% } %>
</tbody>
</table>
<form style="display:none" id="current-values">
<input type="hidden" name="token" value="<%- user.token %>" />
<% ["nickname", "type", "disabledAt", "disabledReason", "maxIps", "adminNote"].forEach(function (key) { %>
<input type="hidden" name="<%- key %>" value="<%- user[key] %>" />
<% }); %>
</form>
<h3>Quota Information</h3>
<% if (quotasEnabled) { %>
<form action="/admin/manage/refresh-user-quota" method="POST">
<input type="hidden" name="token" value="<%- user.token %>" />
<input type="hidden" name="_csrf" value="<%- csrfToken %>" />
<button type="submit" class="btn btn-primary">Refresh Quotas for User</button>
</form>
<% } %>
<%- include("partials/shared_quota-info", { quota, user }) %>
<p><a href="/admin/manage/list-users">Back to User List</a></p>
<script>
document.querySelectorAll("td.actions a[data-field]").forEach(function (a) {
a.addEventListener("click", function (e) {
e.preventDefault();
const token = a.dataset.token;
const field = a.dataset.field;
const existingValue = document.querySelector(`#current-values input[name=${field}]`).value;
let value = prompt(`Enter new value for '${field}':`, existingValue);
if (value !== null) {
if (value === "") {
value = null;
}
fetch(`/admin/manage/edit-user/${token}`, {
method: "POST",
credentials: "same-origin",
body: JSON.stringify({
[field]: value,
_csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content"),
}),
headers: { "Content-Type": "application/json", Accept: "application/json" },
})
.then((res) => Promise.all([res.ok, res.json()]))
.then(([ok, json]) => {
const url = new URL(window.location.href);
const params = new URLSearchParams();
if (!ok) {
params.set("flash", `error: ${json.error.message}`);
} else {
params.set("flash", `success: User's ${field} updated.`);
}
url.search = params.toString();
window.location.assign(url);
});
}
});
});
</script>
<%- include("partials/admin-ban-xhr-script") %> <%- include("partials/admin-footer") %>
@@ -0,0 +1,32 @@
<script>
document.querySelectorAll("td.actions a.ban").forEach(function (a) {
a.addEventListener("click", function (e) {
e.preventDefault();
var token = a.getAttribute("data-token");
if (confirm("Are you sure you want to ban this user?")) {
let reason = prompt("Reason for ban:");
fetch("/admin/manage/disable-user/" + token, {
method: "POST",
credentials: "same-origin",
body: JSON.stringify({ reason, _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") }),
headers: { "Content-Type": "application/json" },
}).then(() => window.location.reload());
}
});
});
document.querySelectorAll("td.actions a.unban").forEach(function (a) {
a.addEventListener("click", function (e) {
e.preventDefault();
var token = a.getAttribute("data-token");
if (confirm("Are you sure you want to unban this user?")) {
fetch("/admin/manage/reactivate-user/" + token, {
method: "POST",
credentials: "same-origin",
body: JSON.stringify({ _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") }),
headers: { "Content-Type": "application/json" },
}).then(() => window.location.reload());
}
});
});
</script>
@@ -0,0 +1,15 @@
<hr />
<footer>
<a href="/admin">Index</a> | <a href="/admin/logout">Logout</a>
</footer>
<script>
document.querySelectorAll("td,time").forEach(function(td) {
if (td.innerText.match(/^\d{13}$/)) {
if (td.innerText == 0) return 'never';
var date = new Date(parseInt(td.innerText));
td.innerText = date.toISOString().replace("T", " ").replace(/\.\d+Z$/, "Z");
}
});
</script>
</body>
</html>
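For illustration, this is the transformation the footer script applies to a bare 13-digit value (the timestamp itself is arbitrary):

const ts = "1709942400000"; // epoch milliseconds, as rendered in a <td>
const formatted = new Date(parseInt(ts))
  .toISOString()            // "2024-03-09T00:00:00.000Z"
  .replace("T", " ")
  .replace(/\.\d+Z$/, "Z"); // "2024-03-09 00:00:00Z"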
+338 -112
@@ -1,165 +1,323 @@
import dotenv from "dotenv"; import dotenv from "dotenv";
import type firebase from "firebase-admin"; import type firebase from "firebase-admin";
import path from "path";
import pino from "pino"; import pino from "pino";
import type { ModelFamily } from "./shared/models";
import { MODEL_FAMILIES } from "./shared/models";
dotenv.config(); dotenv.config();
// Can't import the usual logger here because it itself needs the config.
const startupLogger = pino({ level: "debug" }).child({ module: "startup" }); const startupLogger = pino({ level: "debug" }).child({ module: "startup" });
const isDev = process.env.NODE_ENV !== "production"; const isDev = process.env.NODE_ENV !== "production";
type PromptLoggingBackend = "google_sheets"; export const DATA_DIR = path.join(__dirname, "..", "data");
export const USER_ASSETS_DIR = path.join(DATA_DIR, "user-files");
export type Config = { type Config = {
/** The port the proxy server will listen on. */ /** The port the proxy server will listen on. */
port: number; port: number;
/** The network interface the proxy server will listen on. */
bindAddress: string;
/** Comma-delimited list of OpenAI API keys. */ /** Comma-delimited list of OpenAI API keys. */
openaiKey?: string; openaiKey?: string;
/** Comma-delimited list of Anthropic API keys. */ /** Comma-delimited list of Anthropic API keys. */
anthropicKey?: string; anthropicKey?: string;
/**
* Comma-delimited list of Google AI API keys. Note that these are not the
* same as the GCP keys/credentials used for Vertex AI; the models are the
* same but the APIs are different. Vertex is the GCP product for enterprise.
**/
googleAIKey?: string;
/**
* Comma-delimited list of Mistral AI API keys.
*/
mistralAIKey?: string;
/**
* Comma-delimited list of AWS credentials. Each credential item should be a
* colon-delimited list of access key, secret key, and AWS region.
*
* The credentials must have access to the actions `bedrock:InvokeModel` and
* `bedrock:InvokeModelWithResponseStream`. You must also have already
* provisioned the necessary models in your AWS account, on the specific
* regions specified for each credential. Models are region-specific.
*
* @example `AWS_CREDENTIALS=access_key_1:secret_key_1:us-east-1,access_key_2:secret_key_2:us-west-2`
*/
awsCredentials?: string;
/**
* Comma-delimited list of Azure OpenAI credentials. Each credential item
* should be a colon-delimited list of Azure resource name, deployment ID, and
* API key.
*
* The resource name is the subdomain in your Azure OpenAI deployment's URL,
* e.g. `https://resource-name.openai.azure.com
*
* @example `AZURE_CREDENTIALS=resource_name_1:deployment_id_1:api_key_1,resource_name_2:deployment_id_2:api_key_2`
*/
azureCredentials?: string;
/** /**
* The proxy key to require for requests. Only applicable if the user * The proxy key to require for requests. Only applicable if the user
* management mode is set to 'proxy_key', and required if so. * management mode is set to 'proxy_key', and required if so.
**/ */
proxyKey?: string; proxyKey?: string;
/** /**
* The admin key used to access the /admin API. Required if the user * The admin key used to access the /admin API or UI. Required if the user
* management mode is set to 'user_token'. * management mode is set to 'user_token'.
**/ */
adminKey?: string; adminKey?: string;
/**
* The password required to view the service info/status page. If not set, the
* info page will be publicly accessible.
*/
serviceInfoPassword?: string;
/** /**
* Which user management mode to use. * Which user management mode to use.
* * - `none`: No user management. Proxy is open to all requests with basic
* `none`: No user management. Proxy is open to all requests with basic
* abuse protection. * abuse protection.
* * - `proxy_key`: A specific proxy key must be provided in the Authorization
* `proxy_key`: A specific proxy key must be provided in the Authorization
* header to use the proxy. * header to use the proxy.
* * - `user_token`: Users must be created via by admins and provide their
* `user_token`: Users must be created via the /admin REST API and provide * personal access token in the Authorization header to use the proxy.
* their personal access token in the Authorization header to use the proxy. * Configure this function and add users via the admin API or UI.
* Configure this function and add users via the /admin API.
*/ */
gatekeeper: "none" | "proxy_key" | "user_token"; gatekeeper: "none" | "proxy_key" | "user_token";
/** /**
* Persistence layer to use for user management. * Persistence layer to use for user management.
* * - `memory`: Users are stored in memory and are lost on restart (default)
* `memory`: Users are stored in memory and are lost on restart (default) * - `firebase_rtdb`: Users are stored in a Firebase Realtime Database;
* * requires `firebaseKey` and `firebaseRtdbUrl` to be set.
* `firebase_rtdb`: Users are stored in a Firebase Realtime Database; requires */
* `firebaseKey` and `firebaseRtdbUrl` to be set. (deprecated) gatekeeperStore: "memory" | "firebase_rtdb";
*
* `huggingface_datasets`: Users are stored in a Huggingface Datasets git
* repository; requires `hfDatasetRepoUrl` and `hfPrivateSshKey` to be set.
**/
gatekeeperStore: "memory" | "firebase_rtdb" | "huggingface_datasets";
/** URL of the Firebase Realtime Database if using the Firebase RTDB store. */ /** URL of the Firebase Realtime Database if using the Firebase RTDB store. */
firebaseRtdbUrl?: string; firebaseRtdbUrl?: string;
/** Base64-encoded Firebase service account key if using the Firebase RTDB store. */
firebaseKey?: string;
/** URL of the Huggingface Datasets git repository if using the Huggingface
* Datasets store. */
hfDatasetRepoUrl?: string;
/** Private SSH key used to push to the Huggingface Dataset repository. */
hfPrivateSshKey?: string;
/** /**
* Maximum number of IPs per user, after which their token is disabled. * Base64-encoded Firebase service account key if using the Firebase RTDB
* store. Note that you should encode the *entire* JSON key file, not just the
* `private_key` field inside it.
*/
firebaseKey?: string;
/**
* Maximum number of IPs allowed per user token.
* Users with the manually-assigned `special` role are exempt from this limit. * Users with the manually-assigned `special` role are exempt from this limit.
* By default, this is 0, meaning that users are not IP-limited. * - Defaults to 0, which means that users are not IP-limited.
*/ */
maxIpsPerUser: number; maxIpsPerUser: number;
/** Per-IP limit for requests per minute to OpenAI's completions endpoint. */ /**
modelRateLimit: number; * Whether a user token should be automatically disabled if it exceeds the
* `maxIpsPerUser` limit, or if only connections from new IPs are be rejected.
*/
maxIpsAutoBan: boolean;
/** Per-IP limit for requests per minute to text and chat models. */
textModelRateLimit: number;
/** Per-IP limit for requests per minute to image generation models. */
imageModelRateLimit: number;
/** /**
* For OpenAI, the maximum number of context tokens (prompt + max output) a * For OpenAI, the maximum number of context tokens (prompt + max output) a
* user can request before their request is rejected. * user can request before their request is rejected.
* Context limits can help prevent excessive spend. * Context limits can help prevent excessive spend.
* Defaults to 0, which means no limit beyond OpenAI's stated maximums. * - Defaults to 0, which means no limit beyond OpenAI's stated maximums.
*/ */
maxContextTokensOpenAI: number; maxContextTokensOpenAI: number;
/** /**
* For Anthropic, the maximum number of context tokens a user can request. * For Anthropic, the maximum number of context tokens a user can request.
* Claude context limits can prevent requests from tying up concurrency slots * Claude context limits can prevent requests from tying up concurrency slots
* for too long, which can lengthen queue times for other users. * for too long, which can lengthen queue times for other users.
* Defaults to 0, which means no limit beyond Anthropic's stated maximums. * - Defaults to 0, which means no limit beyond Anthropic's stated maximums.
*/ */
maxContextTokensAnthropic: number; maxContextTokensAnthropic: number;
/** For OpenAI, the maximum number of sampled tokens a user can request. */ /** For OpenAI, the maximum number of sampled tokens a user can request. */
maxOutputTokensOpenAI: number; maxOutputTokensOpenAI: number;
/** For Anthropic, the maximum number of sampled tokens a user can request. */ /** For Anthropic, the maximum number of sampled tokens a user can request. */
maxOutputTokensAnthropic: number; maxOutputTokensAnthropic: number;
/** Whether requests containing disallowed characters should be rejected. */ /** Whether requests containing the following phrases should be rejected. */
rejectDisallowed?: boolean; rejectPhrases: string[];
/** Message to return when rejecting requests. */ /** Message to return when rejecting requests. */
rejectMessage?: string; rejectMessage: string;
/** Pino log level. */ /** Verbosity level of diagnostic logging. */
logLevel?: "debug" | "info" | "warn" | "error"; logLevel: "trace" | "debug" | "info" | "warn" | "error";
/**
* Whether to allow the usage of AWS credentials which could be logging users'
* model invocations. By default, such keys are treated as if they were
* disabled because users may not be aware that their usage is being logged.
*
* Some credentials do not have the policy attached that allows the proxy to
* confirm logging status, in which case the proxy assumes that logging could
* be enabled and will refuse to use the key. If you still want to use such a
* key and can't attach the policy, you can set this to true.
*/
allowAwsLogging?: boolean;
/** Whether prompts and responses should be logged to persistent storage. */ /** Whether prompts and responses should be logged to persistent storage. */
promptLogging?: boolean; promptLogging?: boolean;
/** Which prompt logging backend to use. */ /** Which prompt logging backend to use. */
promptLoggingBackend?: PromptLoggingBackend; promptLoggingBackend?: "google_sheets";
/** Base64-encoded Google Sheets API key. */ /** Base64-encoded Google Sheets API key. */
googleSheetsKey?: string; googleSheetsKey?: string;
/** Google Sheets spreadsheet ID. */ /** Google Sheets spreadsheet ID. */
googleSheetsSpreadsheetId?: string; googleSheetsSpreadsheetId?: string;
/** Whether to periodically check keys for usage and validity. */ /** Whether to periodically check keys for usage and validity. */
checkKeys?: boolean; checkKeys: boolean;
/** Whether to publicly show total token costs on the info page. */
showTokenCosts: boolean;
/** /**
* Comma-separated list of origins to block. Requests matching any of these * Comma-separated list of origins to block. Requests matching any of these
* origins or referers will be rejected. * origins or referers will be rejected.
* Partial matches are allowed, so `reddit` will match `www.reddit.com`. * - Partial matches are allowed, so `reddit` will match `www.reddit.com`.
* Include only the hostname, not the protocol or path, e.g: * - Include only the hostname, not the protocol or path, e.g:
* `reddit.com,9gag.com,gaiaonline.com` * `reddit.com,9gag.com,gaiaonline.com`
*/ */
blockedOrigins?: string; blockedOrigins?: string;
/** /** Message to return when rejecting requests from blocked origins. */
* Message to return when rejecting requests from blocked origins.
*/
blockMessage?: string; blockMessage?: string;
/** /** Destination URL to redirect blocked requests to, for non-JSON requests. */
* Desination URL to redirect blocked requests to, for non-JSON requests.
*/
blockRedirect?: string; blockRedirect?: string;
/** Which model families to allow requests for. Applies only to OpenAI. */
allowedModelFamilies: ModelFamily[];
/** /**
* Whether the proxy should disallow requests for GPT-4 models in order to * The number of (LLM) tokens a user can consume before requests are rejected.
* prevent excessive spend. Applies only to OpenAI. * Limits include both prompt and response tokens. `special` users are exempt.
* - Defaults to 0, which means no limit.
* - Changes are not automatically applied to existing users. Use the
* admin API or UI to update existing users, or use the QUOTA_REFRESH_PERIOD
* setting to periodically set all users' quotas to these values.
*/ */
turboOnly?: boolean; tokenQuota: { [key in ModelFamily]: number };
/**
* The period over which to enforce token quotas. Quotas will be fully reset
* at the start of each period, server time. Unused quota does not roll over.
* You can also provide a cron expression for a custom schedule. If not set,
* quotas will never automatically refresh.
* - Defaults to unset, which means quotas will never automatically refresh.
*/
quotaRefreshPeriod?: "hourly" | "daily" | string;
/** Whether to allow users to change their own nicknames via the UI. */
allowNicknameChanges: boolean;
/** Whether to show recent DALL-E image generations on the homepage. */
showRecentImages: boolean;
/**
* If true, cookies will be set without the `Secure` attribute, allowing
* the admin UI to used over HTTP.
*/
useInsecureCookies: boolean;
/**
* Whether to use a more minimal public Service Info page with static content.
* Disables all stats pertaining to traffic, prompt/token usage, and queues.
* The full info page will appear if you have signed in as an admin using the
* configured ADMIN_KEY and go to /admin/service-info.
**/
staticServiceInfo?: boolean;
/**
* Trusted proxy hops. If you are deploying the server behind a reverse proxy
* (Nginx, Cloudflare Tunnel, AWS WAF, etc.) the IP address of incoming
* requests will be the IP address of the proxy, not the actual user.
*
* Depending on your hosting configuration, there may be multiple proxies/load
* balancers between your server and the user. Each one will append the
* incoming IP address to the `X-Forwarded-For` header. The user's real IP
* address will be the first one in the list, assuming the header has not been
* tampered with. Setting this value correctly ensures that the server doesn't
* trust values in `X-Forwarded-For` not added by trusted proxies.
*
* In order for the server to determine the user's real IP address, you need
* to tell it how many proxies are between the user and the server so it can
* select the correct IP address from the `X-Forwarded-For` header.
*
* *WARNING:* If you set it incorrectly, the proxy will either record the
* wrong IP address, or it will be possible for users to spoof their IP
* addresses and bypass rate limiting. Check the request logs to see what
* incoming X-Forwarded-For values look like.
*
* Examples:
* - X-Forwarded-For: "34.1.1.1, 172.1.1.1, 10.1.1.1" => trustedProxies: 3
* - X-Forwarded-For: "34.1.1.1" => trustedProxies: 1
* - no X-Forwarded-For header => trustedProxies: 0 (the actual IP of the incoming request will be used)
*
* As of 2024/01/08:
* For HuggingFace or Cloudflare Tunnel, use 1.
* For Render, use 3.
* For deployments not behind a load balancer, use 0.
*
* You should double check against your actual request logs to be sure.
*
* Defaults to 1, as most deployments are on HuggingFace or Cloudflare Tunnel.
*/
trustedProxies?: number;
/**
* Whether to allow OpenAI tool usage. The proxy doesn't impelment any
* support for tools/function calling but can pass requests and responses as
* is. Note that the proxy also cannot accurately track quota usage for
* requests involving tools, so you must opt in to this feature at your own
* risk.
*/
allowOpenAIToolUsage?: boolean;
/**
* Allows overriding the default proxy endpoint route. Defaults to /proxy.
* A leading slash is required.
*/
proxyEndpointRoute: string;
}; };
// To change configs, create a file called .env in the root directory. // To change configs, create a file called .env in the root directory.
// See .env.example for an example. // See .env.example for an example.
export const config: Config = { export const config: Config = {
port: getEnvWithDefault("PORT", 7860), port: getEnvWithDefault("PORT", 7860),
bindAddress: getEnvWithDefault("BIND_ADDRESS", "0.0.0.0"),
openaiKey: getEnvWithDefault("OPENAI_KEY", ""), openaiKey: getEnvWithDefault("OPENAI_KEY", ""),
anthropicKey: getEnvWithDefault("ANTHROPIC_KEY", ""), anthropicKey: getEnvWithDefault("ANTHROPIC_KEY", ""),
googleAIKey: getEnvWithDefault("GOOGLE_AI_KEY", ""),
mistralAIKey: getEnvWithDefault("MISTRAL_AI_KEY", ""),
awsCredentials: getEnvWithDefault("AWS_CREDENTIALS", ""),
azureCredentials: getEnvWithDefault("AZURE_CREDENTIALS", ""),
proxyKey: getEnvWithDefault("PROXY_KEY", ""), proxyKey: getEnvWithDefault("PROXY_KEY", ""),
adminKey: getEnvWithDefault("ADMIN_KEY", ""), adminKey: getEnvWithDefault("ADMIN_KEY", ""),
serviceInfoPassword: getEnvWithDefault("SERVICE_INFO_PASSWORD", ""),
gatekeeper: getEnvWithDefault("GATEKEEPER", "none"), gatekeeper: getEnvWithDefault("GATEKEEPER", "none"),
gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory"), gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory"),
maxIpsPerUser: getEnvWithDefault("MAX_IPS_PER_USER", 0), maxIpsPerUser: getEnvWithDefault("MAX_IPS_PER_USER", 0),
maxIpsAutoBan: getEnvWithDefault("MAX_IPS_AUTO_BAN", true),
firebaseRtdbUrl: getEnvWithDefault("FIREBASE_RTDB_URL", undefined), firebaseRtdbUrl: getEnvWithDefault("FIREBASE_RTDB_URL", undefined),
firebaseKey: getEnvWithDefault("FIREBASE_KEY", undefined), firebaseKey: getEnvWithDefault("FIREBASE_KEY", undefined),
hfDatasetRepoUrl: getEnvWithDefault("HF_DATASET_REPO_URL", undefined), textModelRateLimit: getEnvWithDefault("TEXT_MODEL_RATE_LIMIT", 4),
hfPrivateSshKey: getEnvWithDefault("HF_PRIVATE_SSH_KEY", undefined), imageModelRateLimit: getEnvWithDefault("IMAGE_MODEL_RATE_LIMIT", 4),
modelRateLimit: getEnvWithDefault("MODEL_RATE_LIMIT", 4), maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 16384),
maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 0),
maxContextTokensAnthropic: getEnvWithDefault( maxContextTokensAnthropic: getEnvWithDefault(
"MAX_CONTEXT_TOKENS_ANTHROPIC", "MAX_CONTEXT_TOKENS_ANTHROPIC",
0 0
), ),
maxOutputTokensOpenAI: getEnvWithDefault("MAX_OUTPUT_TOKENS_OPENAI", 300), maxOutputTokensOpenAI: getEnvWithDefault(
maxOutputTokensAnthropic: getEnvWithDefault( ["MAX_OUTPUT_TOKENS_OPENAI", "MAX_OUTPUT_TOKENS"],
"MAX_OUTPUT_TOKENS_ANTHROPIC",
400 400
), ),
rejectDisallowed: getEnvWithDefault("REJECT_DISALLOWED", false), maxOutputTokensAnthropic: getEnvWithDefault(
["MAX_OUTPUT_TOKENS_ANTHROPIC", "MAX_OUTPUT_TOKENS"],
400
),
allowedModelFamilies: getEnvWithDefault("ALLOWED_MODEL_FAMILIES", [
"turbo",
"gpt4",
"gpt4-32k",
"gpt4-turbo",
"claude",
"claude-opus",
"gemini-pro",
"mistral-tiny",
"mistral-small",
"mistral-medium",
"mistral-large",
"aws-claude",
"azure-turbo",
"azure-gpt4",
"azure-gpt4-turbo",
"azure-gpt4-32k",
]),
rejectPhrases: parseCsv(getEnvWithDefault("REJECT_PHRASES", "")),
rejectMessage: getEnvWithDefault( rejectMessage: getEnvWithDefault(
"REJECT_MESSAGE", "REJECT_MESSAGE",
"This content violates /aicg/'s acceptable use policy." "This content violates /aicg/'s acceptable use policy."
), ),
logLevel: getEnvWithDefault("LOG_LEVEL", "info"), logLevel: getEnvWithDefault("LOG_LEVEL", "info"),
checkKeys: getEnvWithDefault("CHECK_KEYS", !isDev), checkKeys: getEnvWithDefault("CHECK_KEYS", !isDev),
showTokenCosts: getEnvWithDefault("SHOW_TOKEN_COSTS", false),
allowAwsLogging: getEnvWithDefault("ALLOW_AWS_LOGGING", false),
promptLogging: getEnvWithDefault("PROMPT_LOGGING", false), promptLogging: getEnvWithDefault("PROMPT_LOGGING", false),
promptLoggingBackend: getEnvWithDefault("PROMPT_LOGGING_BACKEND", undefined), promptLoggingBackend: getEnvWithDefault("PROMPT_LOGGING_BACKEND", undefined),
googleSheetsKey: getEnvWithDefault("GOOGLE_SHEETS_KEY", undefined), googleSheetsKey: getEnvWithDefault("GOOGLE_SHEETS_KEY", undefined),
@@ -173,67 +331,76 @@ export const config: Config = {
"You must be over the age of majority in your country to use this service." "You must be over the age of majority in your country to use this service."
), ),
blockRedirect: getEnvWithDefault("BLOCK_REDIRECT", "https://www.9gag.com"), blockRedirect: getEnvWithDefault("BLOCK_REDIRECT", "https://www.9gag.com"),
turboOnly: getEnvWithDefault("TURBO_ONLY", false), tokenQuota: MODEL_FAMILIES.reduce(
(acc, family: ModelFamily) => {
acc[family] = getEnvWithDefault(
`TOKEN_QUOTA_${family.toUpperCase().replace(/-/g, "_")}`,
0
) as number;
return acc;
},
{} as { [key in ModelFamily]: number }
),
quotaRefreshPeriod: getEnvWithDefault("QUOTA_REFRESH_PERIOD", undefined),
allowNicknameChanges: getEnvWithDefault("ALLOW_NICKNAME_CHANGES", true),
showRecentImages: getEnvWithDefault("SHOW_RECENT_IMAGES", true),
useInsecureCookies: getEnvWithDefault("USE_INSECURE_COOKIES", isDev),
staticServiceInfo: getEnvWithDefault("STATIC_SERVICE_INFO", false),
trustedProxies: getEnvWithDefault("TRUSTED_PROXIES", 1),
allowOpenAIToolUsage: getEnvWithDefault("ALLOW_OPENAI_TOOL_USAGE", false),
proxyEndpointRoute: getEnvWithDefault("PROXY_ENDPOINT_ROUTE", "/proxy"),
} as const; } as const;
function migrateConfigs() { function generateCookieSecret() {
let migrated = false; if (process.env.COOKIE_SECRET !== undefined) {
const deprecatedMax = process.env.MAX_OUTPUT_TOKENS; return process.env.COOKIE_SECRET;
if (!process.env.MAX_OUTPUT_TOKENS_OPENAI && deprecatedMax) {
migrated = true;
config.maxOutputTokensOpenAI = parseInt(deprecatedMax);
}
if (!process.env.MAX_OUTPUT_TOKENS_ANTHROPIC && deprecatedMax) {
migrated = true;
config.maxOutputTokensAnthropic = parseInt(deprecatedMax);
} }
if (migrated) { const seed = "" + config.adminKey + config.openaiKey + config.anthropicKey;
startupLogger.warn( const crypto = require("crypto");
{ return crypto.createHash("sha256").update(seed).digest("hex");
MAX_OUTPUT_TOKENS: deprecatedMax,
MAX_OUTPUT_TOKENS_OPENAI: config.maxOutputTokensOpenAI,
MAX_OUTPUT_TOKENS_ANTHROPIC: config.maxOutputTokensAnthropic,
},
"`MAX_OUTPUT_TOKENS` has been replaced with separate `MAX_OUTPUT_TOKENS_OPENAI` and `MAX_OUTPUT_TOKENS_ANTHROPIC` configs. You should update your .env file to remove `MAX_OUTPUT_TOKENS` and set the new configs."
);
}
} }
/** Prevents the server from starting if config state is invalid. */ export const COOKIE_SECRET = generateCookieSecret();
export async function assertConfigIsValid() {
migrateConfigs(); export async function assertConfigIsValid() {
if (process.env.MODEL_RATE_LIMIT !== undefined) {
const limit =
parseInt(process.env.MODEL_RATE_LIMIT, 10) || config.textModelRateLimit;
config.textModelRateLimit = limit;
config.imageModelRateLimit = Math.max(Math.floor(limit / 2), 1);
startupLogger.warn(
{ textLimit: limit, imageLimit: config.imageModelRateLimit },
"MODEL_RATE_LIMIT is deprecated. Use TEXT_MODEL_RATE_LIMIT and IMAGE_MODEL_RATE_LIMIT instead."
);
}
// Ensure gatekeeper mode is valid.
if (!["none", "proxy_key", "user_token"].includes(config.gatekeeper)) { if (!["none", "proxy_key", "user_token"].includes(config.gatekeeper)) {
throw new Error( throw new Error(
`Invalid gatekeeper mode: ${config.gatekeeper}. Must be one of: none, proxy_key, user_token.` `Invalid gatekeeper mode: ${config.gatekeeper}. Must be one of: none, proxy_key, user_token.`
); );
} }
// Don't allow `user_token` mode without `ADMIN_KEY`.
if (config.gatekeeper === "user_token" && !config.adminKey) { if (config.gatekeeper === "user_token" && !config.adminKey) {
throw new Error( throw new Error(
"`user_token` gatekeeper mode requires an `ADMIN_KEY` to be set." "`user_token` gatekeeper mode requires an `ADMIN_KEY` to be set."
); );
} }
// Don't allow `proxy_key` mode without `PROXY_KEY`.
if (config.gatekeeper === "proxy_key" && !config.proxyKey) { if (config.gatekeeper === "proxy_key" && !config.proxyKey) {
throw new Error( throw new Error(
"`proxy_key` gatekeeper mode requires a `PROXY_KEY` to be set." "`proxy_key` gatekeeper mode requires a `PROXY_KEY` to be set."
); );
} }
// Don't allow `PROXY_KEY` to be set for other modes.
if (config.gatekeeper !== "proxy_key" && config.proxyKey) { if (config.gatekeeper !== "proxy_key" && config.proxyKey) {
throw new Error( throw new Error(
"`PROXY_KEY` is set, but gatekeeper mode is not `proxy_key`. Make sure to set `GATEKEEPER=proxy_key`." "`PROXY_KEY` is set, but gatekeeper mode is not `proxy_key`. Make sure to set `GATEKEEPER=proxy_key`."
); );
} }
// Require appropriate firebase config if using firebase store.
if ( if (
config.gatekeeperStore === "firebase_rtdb" && config.gatekeeperStore === "firebase_rtdb" &&
(!config.firebaseKey || !config.firebaseRtdbUrl) (!config.firebaseKey || !config.firebaseRtdbUrl)
@@ -247,7 +414,8 @@ export async function assertConfigIsValid() {
// them to users. // them to users.
for (const key of getKeys(config)) { for (const key of getKeys(config)) {
const maybeSensitive = ["key", "credentials", "secret", "password"].some( const maybeSensitive = ["key", "credentials", "secret", "password"].some(
(sensitive) => key.toLowerCase().includes(sensitive) (sensitive) =>
key.toLowerCase().includes(sensitive) && !["checkKeys"].includes(key)
); );
const secured = new Set([...SENSITIVE_KEYS, ...OMITTED_KEYS]); const secured = new Set([...SENSITIVE_KEYS, ...OMITTED_KEYS]);
if (maybeSensitive && !secured.has(key)) if (maybeSensitive && !secured.has(key))
@@ -269,59 +437,109 @@ export const SENSITIVE_KEYS: (keyof Config)[] = ["googleSheetsSpreadsheetId"];
* Config keys that are not displayed on the info page at all, generally because * Config keys that are not displayed on the info page at all, generally because
* they are not relevant to the user or can be inferred from other config. * they are not relevant to the user or can be inferred from other config.
*/ */
export const OMITTED_KEYS: (keyof Config)[] = [ export const OMITTED_KEYS = [
"port", "port",
"bindAddress",
"logLevel", "logLevel",
"openaiKey", "openaiKey",
"anthropicKey", "anthropicKey",
"googleAIKey",
"mistralAIKey",
"awsCredentials",
"azureCredentials",
"proxyKey", "proxyKey",
"adminKey", "adminKey",
"checkKeys", "serviceInfoPassword",
"rejectPhrases",
"rejectMessage",
"showTokenCosts",
"googleSheetsKey", "googleSheetsKey",
"firebaseKey", "firebaseKey",
"firebaseRtdbUrl", "firebaseRtdbUrl",
"hfDatasetRepoUrl",
"hfPrivateSshKey",
"gatekeeperStore", "gatekeeperStore",
"maxIpsPerUser", "maxIpsPerUser",
"blockedOrigins", "blockedOrigins",
"blockMessage", "blockMessage",
"blockRedirect", "blockRedirect",
]; "allowNicknameChanges",
"showRecentImages",
"useInsecureCookies",
"staticServiceInfo",
"checkKeys",
"allowedModelFamilies",
"trustedProxies",
"proxyEndpointRoute",
] satisfies (keyof Config)[];
type OmitKeys = (typeof OMITTED_KEYS)[number];
type Printable<T> = {
[P in keyof T as Exclude<P, OmitKeys>]: T[P] extends object
? Printable<T[P]>
: string;
};
type PublicConfig = Printable<Config>;
const getKeys = Object.keys as <T extends object>(obj: T) => Array<keyof T>; const getKeys = Object.keys as <T extends object>(obj: T) => Array<keyof T>;
export function listConfig(): Record<string, string> { export function listConfig(obj: Config = config) {
const result: Record<string, string> = {}; const result: Record<string, unknown> = {};
for (const key of getKeys(config)) { for (const key of getKeys(obj)) {
const value = config[key]?.toString() || ""; const value = obj[key]?.toString() || "";
const shouldOmit =
OMITTED_KEYS.includes(key) || value === "" || value === "undefined";
const shouldMask = SENSITIVE_KEYS.includes(key); const shouldMask = SENSITIVE_KEYS.includes(key);
const shouldOmit =
OMITTED_KEYS.includes(key as OmitKeys) ||
value === "" ||
value === "undefined";
if (shouldOmit) { if (shouldOmit) {
continue; continue;
} }
const validKey = key as keyof Printable<Config>;
if (value && shouldMask) { if (value && shouldMask) {
result[key] = "********"; result[validKey] = "********";
} else { } else {
result[key] = value; result[validKey] = value;
}
if (typeof obj[key] === "object" && !Array.isArray(obj[key])) {
result[key] = listConfig(obj[key] as unknown as Config);
} }
} }
return result; return result as PublicConfig;
} }
function getEnvWithDefault<T>(name: string, defaultValue: T): T { /**
const value = process.env[name]; * Tries to get a config value from one or more environment variables (in
* order), falling back to a default value if none are set.
*/
function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
const value = Array.isArray(env)
? env.map((e) => process.env[e]).find((v) => v !== undefined)
: process.env[env];
if (value === undefined) { if (value === undefined) {
return defaultValue; return defaultValue;
} }
try { try {
if (name === "OPENAI_KEY" || name === "ANTHROPIC_KEY") { if (
[
"OPENAI_KEY",
"ANTHROPIC_KEY",
"GOOGLE_AI_KEY",
"AWS_CREDENTIALS",
"AZURE_CREDENTIALS",
].includes(String(env))
) {
return value as unknown as T; return value as unknown as T;
} }
// Intended to be used for comma-delimited lists
if (Array.isArray(defaultValue)) {
return value.split(",").map((v) => v.trim()) as T;
}
return JSON.parse(value) as T; return JSON.parse(value) as T;
} catch (err) { } catch (err) {
return value as unknown as T; return value as unknown as T;
@@ -353,3 +571,11 @@ export function getFirebaseApp(): firebase.app.App {
} }
return firebaseApp; return firebaseApp;
} }
function parseCsv(val: string): string[] {
if (!val) return [];
const regex = /(".*?"|[^",]+)(?=\s*,|\s*$)/g;
const matches = val.match(regex) || [];
return matches.map((item) => item.replace(/^"|"$/g, "").trim());
}
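A quick illustration of how the tokenQuota reducer above derives environment variable names from model family names (this mirrors the shown code exactly; the family values come from the ALLOWED_MODEL_FAMILIES default list):

// "turbo"       -> TOKEN_QUOTA_TURBO
// "gpt4-32k"    -> TOKEN_QUOTA_GPT4_32K
// "claude-opus" -> TOKEN_QUOTA_CLAUDE_OPUS
const family = "gpt4-32k";
const envName = `TOKEN_QUOTA_${family.toUpperCase().replace(/-/g, "_")}`;
console.log(envName); // "TOKEN_QUOTA_GPT4_32K"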
+177 -164
@@ -1,219 +1,149 @@
/** This whole module kinda sucks */
import fs from "fs"; import fs from "fs";
import { Request, Response } from "express"; import express, { Router, Request, Response } from "express";
import showdown from "showdown"; import showdown from "showdown";
import { config, listConfig } from "./config"; import { config } from "./config";
import { OpenAIKey, keyPool } from "./key-management"; import { buildInfo, ServiceInfo } from "./service-info";
import { getUniqueIps } from "./proxy/rate-limit"; import { getLastNImages } from "./shared/file-storage/image-history";
import { import { keyPool } from "./shared/key-management";
QueuePartition, import { MODEL_FAMILY_SERVICE, ModelFamily } from "./shared/models";
getEstimatedWaitTime, import { withSession } from "./shared/with-session";
getQueueLength, import { checkCsrfToken, injectCsrfToken } from "./shared/inject-csrf";
} from "./proxy/queue";
const INFO_PAGE_TTL = 5000; const INFO_PAGE_TTL = 2000;
const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
turbo: "GPT-3.5 Turbo",
gpt4: "GPT-4",
"gpt4-32k": "GPT-4 32k",
"gpt4-turbo": "GPT-4 Turbo",
"dall-e": "DALL-E",
claude: "Claude (Sonnet)",
"claude-opus": "Claude (Opus)",
"gemini-pro": "Gemini Pro",
"mistral-tiny": "Mistral 7B",
"mistral-small": "Mixtral Small", // Originally 8x7B, but that now refers to the older open-weight version. Mixtral Small is a newer closed-weight update to the 8x7B model.
"mistral-medium": "Mistral Medium",
"mistral-large": "Mistral Large",
"aws-claude": "AWS Claude (Sonnet)",
"azure-turbo": "Azure GPT-3.5 Turbo",
"azure-gpt4": "Azure GPT-4",
"azure-gpt4-32k": "Azure GPT-4 32k",
"azure-gpt4-turbo": "Azure GPT-4 Turbo",
"azure-dall-e": "Azure DALL-E",
};
const converter = new showdown.Converter();
const customGreeting = fs.existsSync("greeting.md")
? `\n## Server Greeting\n${fs.readFileSync("greeting.md", "utf8")}`
: "";
let infoPageHtml: string | undefined; let infoPageHtml: string | undefined;
let infoPageLastUpdated = 0; let infoPageLastUpdated = 0;
export const handleInfoPage = (req: Request, res: Response) => { export const handleInfoPage = (req: Request, res: Response) => {
if (infoPageLastUpdated + INFO_PAGE_TTL > Date.now()) { if (infoPageLastUpdated + INFO_PAGE_TTL > Date.now()) {
res.send(infoPageHtml); return res.send(infoPageHtml);
return;
} }
// Sometimes huggingface doesn't send the host header and makes us guess.
const baseUrl = const baseUrl =
process.env.SPACE_ID && !req.get("host")?.includes("hf.space") process.env.SPACE_ID && !req.get("host")?.includes("hf.space")
? getExternalUrlForHuggingfaceSpaceId(process.env.SPACE_ID) ? getExternalUrlForHuggingfaceSpaceId(process.env.SPACE_ID)
: req.protocol + "://" + req.get("host"); : req.protocol + "://" + req.get("host");
res.send(cacheInfoPageHtml(baseUrl)); const info = buildInfo(baseUrl + config.proxyEndpointRoute);
infoPageHtml = renderPage(info);
infoPageLastUpdated = Date.now();
res.send(infoPageHtml);
}; };
function cacheInfoPageHtml(baseUrl: string) { export function renderPage(info: ServiceInfo) {
const keys = keyPool.list();
const openaiKeys = keys.filter((k) => k.service === "openai").length;
const anthropicKeys = keys.filter((k) => k.service === "anthropic").length;
const info = {
uptime: process.uptime(),
endpoints: {
...(openaiKeys ? { openai: baseUrl + "/proxy/openai" } : {}),
...(anthropicKeys ? { anthropic: baseUrl + "/proxy/anthropic" } : {}),
},
proompts: keys.reduce((acc, k) => acc + k.promptCount, 0),
...(config.modelRateLimit ? { proomptersNow: getUniqueIps() } : {}),
openaiKeys,
anthropicKeys,
...(openaiKeys ? getOpenAIInfo() : {}),
...(anthropicKeys ? getAnthropicInfo() : {}),
config: listConfig(),
build: process.env.BUILD_INFO || "dev",
};
const title = getServerTitle(); const title = getServerTitle();
const headerHtml = buildInfoPageHeader(new showdown.Converter(), title); const headerHtml = buildInfoPageHeader(info);
const pageBody = `<!DOCTYPE html> return `<!DOCTYPE html>
<html lang="en"> <html lang="en">
<head> <head>
<meta charset="utf-8" /> <meta charset="utf-8" />
<meta name="robots" content="noindex" /> <meta name="robots" content="noindex" />
<title>${title}</title> <title>${title}</title>
<style>
body {
font-family: sans-serif;
background-color: #f0f0f0;
padding: 1em;
}
@media (prefers-color-scheme: dark) {
body {
background-color: #222;
color: #eee;
}
a:link, a:visited {
color: #bbe;
}
}
</style>
</head> </head>
<body style="font-family: sans-serif; background-color: #f0f0f0; padding: 1em;"> <body>
${headerHtml} ${headerHtml}
<hr /> <hr />
<h2>Service Info</h2> <h2>Service Info</h2>
<pre>${JSON.stringify(info, null, 2)}</pre> <pre>${JSON.stringify(info, null, 2)}</pre>
${getSelfServiceLinks()}
</body> </body>
</html>`; </html>`;
infoPageHtml = pageBody;
infoPageLastUpdated = Date.now();
return pageBody;
}
type ServiceInfo = {
activeKeys: number;
trialKeys?: number;
// activeLimit: string;
revokedKeys?: number;
overQuotaKeys?: number;
proomptersInQueue: number;
estimatedQueueTime: string;
};
// this has long since outgrown this awful "dump everything in a <pre> tag" approach
// but I really don't want to spend time on a proper UI for this right now
function getOpenAIInfo() {
const info: { [model: string]: Partial<ServiceInfo> } = {};
const keys = keyPool
.list()
.filter((k) => k.service === "openai") as OpenAIKey[];
const hasGpt4 = keys.some((k) => k.isGpt4) && !config.turboOnly;
if (keyPool.anyUnchecked()) {
const uncheckedKeys = keys.filter((k) => !k.lastChecked);
info.status =
`Performing startup key checks (${uncheckedKeys.length} left).` as any;
} else {
delete info.status;
}
if (config.checkKeys) {
const turboKeys = keys.filter((k) => !k.isGpt4);
const gpt4Keys = keys.filter((k) => k.isGpt4);
info.turbo = {
activeKeys: turboKeys.filter((k) => !k.isDisabled).length,
trialKeys: turboKeys.filter((k) => k.isTrial).length,
revokedKeys: turboKeys.filter((k) => k.isRevoked).length,
overQuotaKeys: turboKeys.filter((k) => k.isOverQuota).length,
};
if (hasGpt4) {
info.gpt4 = {
activeKeys: gpt4Keys.filter((k) => !k.isDisabled).length,
trialKeys: gpt4Keys.filter((k) => k.isTrial).length,
revokedKeys: gpt4Keys.filter((k) => k.isRevoked).length,
overQuotaKeys: gpt4Keys.filter((k) => k.isOverQuota).length,
};
}
} else {
info.status = "Key checking is disabled." as any;
info.turbo = { activeKeys: keys.filter((k) => !k.isDisabled).length };
info.gpt4 = {
activeKeys: keys.filter((k) => !k.isDisabled && k.isGpt4).length,
};
}
const turboQueue = getQueueInformation("turbo");
info.turbo.proomptersInQueue = turboQueue.proomptersInQueue;
info.turbo.estimatedQueueTime = turboQueue.estimatedQueueTime;
if (hasGpt4) {
const gpt4Queue = getQueueInformation("gpt-4");
info.gpt4.proomptersInQueue = gpt4Queue.proomptersInQueue;
info.gpt4.estimatedQueueTime = gpt4Queue.estimatedQueueTime;
}
return info;
}
function getAnthropicInfo() {
const claudeInfo: Partial<ServiceInfo> = {};
const keys = keyPool.list().filter((k) => k.service === "anthropic");
claudeInfo.activeKeys = keys.filter((k) => !k.isDisabled).length;
const queue = getQueueInformation("claude");
claudeInfo.proomptersInQueue = queue.proomptersInQueue;
claudeInfo.estimatedQueueTime = queue.estimatedQueueTime;
return { claude: claudeInfo };
} }
/** /**
* If the server operator provides a `greeting.md` file, it will be included in * If the server operator provides a `greeting.md` file, it will be included in
* the rendered info page. * the rendered info page.
**/ **/
function buildInfoPageHeader(converter: showdown.Converter, title: string) { function buildInfoPageHeader(info: ServiceInfo) {
const customGreeting = fs.existsSync("greeting.md") const title = getServerTitle();
? fs.readFileSync("greeting.md", "utf8")
: null;
// TODO: use some templating engine instead of this mess // TODO: use some templating engine instead of this mess
let infoBody = `# ${title}`;
let infoBody = `<!-- Header for Showdown's parser, don't remove this line -->
# ${title}`;
if (config.promptLogging) { if (config.promptLogging) {
infoBody += `\n## Prompt logging is enabled! infoBody += `\n## Prompt Logging Enabled
The server operator has enabled prompt logging. The prompts you send to this proxy and the AI responses you receive may be saved. This proxy keeps full logs of all prompts and AI responses. Prompt logs are anonymous and do not contain IP addresses or timestamps.
Logs are anonymous and do not contain IP addresses or timestamps. [You can see the type of data logged here, along with the rest of the code.](https://gitgud.io/khanon/oai-reverse-proxy/-/blob/main/src/prompt-logging/index.ts). [You can see the type of data logged here, along with the rest of the code.](https://gitgud.io/khanon/oai-reverse-proxy/-/blob/main/src/shared/prompt-logging/index.ts).
**If you are uncomfortable with this, don't send prompts to this proxy!**`; **If you are uncomfortable with this, don't send prompts to this proxy!**`;
} }
if (config.staticServiceInfo) {
return converter.makeHtml(infoBody + customGreeting);
}
const waits: string[] = []; const waits: string[] = [];
infoBody += `\n## Estimated Wait Times\nIf the AI is busy, your prompt will processed when a slot frees up.`;
if (config.openaiKey) { for (const modelFamily of config.allowedModelFamilies) {
const turboWait = getQueueInformation("turbo").estimatedQueueTime; const service = MODEL_FAMILY_SERVICE[modelFamily];
const gpt4Wait = getQueueInformation("gpt-4").estimatedQueueTime;
waits.push(`**Turbo:** ${turboWait}`); const hasKeys = keyPool.list().some((k) => {
if (keyPool.list().some((k) => k.isGpt4) && !config.turboOnly) { return k.service === service && k.modelFamilies.includes(modelFamily);
waits.push(`**GPT-4:** ${gpt4Wait}`); });
const wait = info[modelFamily]?.estimatedQueueTime;
if (hasKeys && wait) {
waits.push(
`**${MODEL_FAMILY_FRIENDLY_NAME[modelFamily] || modelFamily}**: ${wait}`
);
} }
} }
if (config.anthropicKey) {
const claudeWait = getQueueInformation("claude").estimatedQueueTime;
waits.push(`**Claude:** ${claudeWait}`);
}
infoBody += "\n\n" + waits.join(" / "); infoBody += "\n\n" + waits.join(" / ");
if (customGreeting) { infoBody += customGreeting;
infoBody += `\n## Server Greeting\n
${customGreeting}`; infoBody += buildRecentImageSection();
}
return converter.makeHtml(infoBody); return converter.makeHtml(infoBody);
} }
/** Returns queue time in seconds, or minutes + seconds if over 60 seconds. */ function getSelfServiceLinks() {
function getQueueInformation(partition: QueuePartition) { if (config.gatekeeper !== "user_token") return "";
const waitMs = getEstimatedWaitTime(partition); return `<footer style="font-size: 0.8em;"><hr /><a target="_blank" href="/user/lookup">Check your user token info</a></footer>`;
const waitTime =
waitMs < 60000
? `${Math.round(waitMs / 1000)}sec`
: `${Math.round(waitMs / 60000)}min, ${Math.round(
(waitMs % 60000) / 1000
)}sec`;
return {
proomptersInQueue: getQueueLength(partition),
estimatedQueueTime: waitMs > 2000 ? waitTime : "no wait",
};
} }
function getServerTitle() { function getServerTitle() {
@@ -235,9 +165,46 @@ function getServerTitle() {
return "OAI Reverse Proxy"; return "OAI Reverse Proxy";
} }
function buildRecentImageSection() {
const dalleModels: ModelFamily[] = ["azure-dall-e", "dall-e"];
if (
!config.showRecentImages ||
dalleModels.every((f) => !config.allowedModelFamilies.includes(f))
) {
return "";
}
let html = `<h2>Recent DALL-E Generations</h2>`;
const recentImages = getLastNImages(12).reverse();
if (recentImages.length === 0) {
html += `<p>No images yet.</p>`;
return html;
}
html += `<div style="display: flex; flex-wrap: wrap;" id="recent-images">`;
for (const { url, prompt } of recentImages) {
const thumbUrl = url.replace(/\.png$/, "_t.jpg");
const escapedPrompt = escapeHtml(prompt);
html += `<div style="margin: 0.5em;" class="recent-image">
<a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}" alt="${escapedPrompt}" style="max-width: 150px; max-height: 150px;" /></a>
</div>`;
}
html += `</div>`;
html += `<p style="clear: both; text-align: center;"><a href="/user/image-history">View all recent images</a></p>`
return html;
}
function escapeHtml(unsafe: string) {
return unsafe
.replace(/&/g, "&amp;")
.replace(/</g, "&lt;")
.replace(/>/g, "&gt;")
.replace(/"/g, "&quot;")
.replace(/'/g, "&#39;");
}
function getExternalUrlForHuggingfaceSpaceId(spaceId: string) { function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
// Huggingface broke their amazon elb config and no longer sends the
// x-forwarded-host header. This is a workaround.
try { try {
const [username, spacename] = spaceId.split("/"); const [username, spacename] = spaceId.split("/");
return `https://${username}-${spacename.replace(/_/g, "-")}.hf.space`; return `https://${username}-${spacename.replace(/_/g, "-")}.hf.space`;
@@ -245,3 +212,49 @@ function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
return ""; return "";
} }
} }
function checkIfUnlocked(
req: Request,
res: Response,
next: express.NextFunction
) {
if (config.serviceInfoPassword?.length && !req.session?.unlocked) {
return res.redirect("/unlock-info");
}
next();
}
const infoPageRouter = Router();
if (config.serviceInfoPassword?.length) {
infoPageRouter.use(
express.json({ limit: "1mb" }),
express.urlencoded({ extended: true, limit: "1mb" })
);
infoPageRouter.use(withSession);
infoPageRouter.use(injectCsrfToken, checkCsrfToken);
infoPageRouter.post("/unlock-info", (req, res) => {
if (req.body.password !== config.serviceInfoPassword) {
return res.status(403).send("Incorrect password");
}
req.session!.unlocked = true;
res.redirect("/");
});
infoPageRouter.get("/unlock-info", (_req, res) => {
if (_req.session?.unlocked) return res.redirect("/");
res.send(`
<form method="post" action="/unlock-info">
<h1>Unlock Service Info</h1>
<input type="hidden" name="_csrf" value="${res.locals.csrfToken}" />
<input type="password" name="password" placeholder="Password" />
<button type="submit">Unlock</button>
</form>
`);
});
infoPageRouter.use(checkIfUnlocked);
}
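// Sketch of the unlock flow defined above (paths from this router):
// GET /unlock-info  -> renders the password form with a CSRF token
// POST /unlock-info -> body { password, _csrf }; on success sets
//                      req.session.unlocked = true and redirects to /
// GET /             -> now passes checkIfUnlocked and renders the info page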
infoPageRouter.get("/", handleInfoPage);
infoPageRouter.get("/status", (req, res) => {
res.json(buildInfo(req.protocol + "://" + req.get("host"), false));
});
export { infoPageRouter };
-105
@@ -1,105 +0,0 @@
import type * as http from "http";
import { AnthropicKeyProvider, AnthropicKeyUpdate } from "./anthropic/provider";
import { Key, Model, KeyProvider, AIService } from "./index";
import { OpenAIKeyProvider, OpenAIKeyUpdate } from "./openai/provider";
type AllowedPartial = OpenAIKeyUpdate | AnthropicKeyUpdate;
export class KeyPool {
private keyProviders: KeyProvider[] = [];
constructor() {
this.keyProviders.push(new OpenAIKeyProvider());
this.keyProviders.push(new AnthropicKeyProvider());
}
public init() {
this.keyProviders.forEach((provider) => provider.init());
const availableKeys = this.available("all");
if (availableKeys === 0) {
throw new Error(
"No keys loaded. Ensure either OPENAI_KEY or ANTHROPIC_KEY is set."
);
}
}
public get(model: Model): Key {
const service = this.getService(model);
return this.getKeyProvider(service).get(model);
}
public list(): Omit<Key, "key">[] {
return this.keyProviders.flatMap((provider) => provider.list());
}
public disable(key: Key, reason: "quota" | "revoked"): void {
const service = this.getKeyProvider(key.service);
service.disable(key);
if (service instanceof OpenAIKeyProvider) {
service.update(key.hash, {
isRevoked: reason === "revoked",
isOverQuota: reason === "quota",
});
}
}
public update(key: Key, props: AllowedPartial): void {
const service = this.getKeyProvider(key.service);
service.update(key.hash, props);
}
public available(service: AIService | "all" = "all"): number {
return this.keyProviders.reduce((sum, provider) => {
const includeProvider = service === "all" || service === provider.service;
return sum + (includeProvider ? provider.available() : 0);
}, 0);
}
public anyUnchecked(): boolean {
return this.keyProviders.some((provider) => provider.anyUnchecked());
}
public incrementPrompt(key: Key): void {
const provider = this.getKeyProvider(key.service);
provider.incrementPrompt(key.hash);
}
public getLockoutPeriod(model: Model): number {
const service = this.getService(model);
return this.getKeyProvider(service).getLockoutPeriod(model);
}
public markRateLimited(key: Key): void {
const provider = this.getKeyProvider(key.service);
provider.markRateLimited(key.hash);
}
public updateRateLimits(key: Key, headers: http.IncomingHttpHeaders): void {
const provider = this.getKeyProvider(key.service);
if (provider instanceof OpenAIKeyProvider) {
provider.updateRateLimits(key.hash, headers);
}
}
public activeLimitInUsd(
service: AIService,
options?: Record<string, unknown>
): string {
return this.getKeyProvider(service).activeLimitInUsd(options);
}
private getService(model: Model): AIService {
if (model.startsWith("gpt")) {
// https://platform.openai.com/docs/models/model-endpoint-compatibility
return "openai";
} else if (model.startsWith("claude-")) {
// https://console.anthropic.com/docs/api/reference#parameters
return "anthropic";
}
throw new Error(`Unknown service for model '${model}'`);
}
private getKeyProvider(service: AIService): KeyProvider {
return this.keyProviders.find((provider) => provider.service === service)!;
}
}
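// A minimal usage sketch for KeyPool (instantiation point assumed; the methods
// shown are all defined above):
// const keyPool = new KeyPool();
// keyPool.init(); // throws if no keys are loaded
// const key = keyPool.get("gpt-4"); // throws if no key can serve the model
// keyPool.incrementPrompt(key);
// keyPool.markRateLimited(key);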
-347
@@ -1,347 +0,0 @@
import axios, { AxiosError } from "axios";
import { logger } from "../../logger";
import type { OpenAIKey, OpenAIKeyProvider } from "./provider";
/** Minimum time in between any two key checks. */
const MIN_CHECK_INTERVAL = 3 * 1000; // 3 seconds
/**
* Minimum time in between checks for a given key. Because we can no longer
* read quota usage, there is little reason to check a single key more often
* than this.
**/
const KEY_CHECK_PERIOD = 60 * 60 * 1000; // 1 hour
const POST_CHAT_COMPLETIONS_URL = "https://api.openai.com/v1/chat/completions";
const GET_MODELS_URL = "https://api.openai.com/v1/models";
const GET_SUBSCRIPTION_URL =
"https://api.openai.com/dashboard/billing/subscription";
type GetModelsResponse = {
data: [{ id: string }];
};
type GetSubscriptionResponse = {
plan: { title: string };
has_payment_method: boolean;
soft_limit_usd: number;
hard_limit_usd: number;
system_hard_limit_usd: number;
};
type OpenAIError = {
error: { type: string; code: string; param: unknown; message: string };
};
type UpdateFn = typeof OpenAIKeyProvider.prototype.update;
export class OpenAIKeyChecker {
private readonly keys: OpenAIKey[];
private log = logger.child({ module: "key-checker", service: "openai" });
private timeout?: NodeJS.Timeout;
private updateKey: UpdateFn;
private lastCheck = 0;
constructor(keys: OpenAIKey[], updateKey: UpdateFn) {
this.keys = keys;
this.updateKey = updateKey;
}
public start() {
this.log.info("Starting key checker...");
this.scheduleNextCheck();
}
public stop() {
if (this.timeout) {
clearTimeout(this.timeout);
}
}
/**
* Schedules the next check. If there are still keys yet to be checked, it
* will schedule a check immediately for the next unchecked key. Otherwise,
* it will schedule a check for the least recently checked key, respecting
* the minimum check interval.
**/
private scheduleNextCheck() {
const enabledKeys = this.keys.filter((key) => !key.isDisabled);
if (enabledKeys.length === 0) {
this.log.warn("All keys are disabled. Key checker stopping.");
return;
}
// Perform startup checks for any keys that haven't been checked yet.
const uncheckedKeys = enabledKeys.filter((key) => !key.lastChecked);
if (uncheckedKeys.length > 0) {
// Check up to 12 keys at once to speed up startup.
const keysToCheck = uncheckedKeys.slice(0, 12);
this.log.info(
{
key: keysToCheck.map((key) => key.hash),
remaining: uncheckedKeys.length - keysToCheck.length,
},
"Scheduling initial checks for key batch."
);
this.timeout = setTimeout(async () => {
const promises = keysToCheck.map((key) => this.checkKey(key));
try {
await Promise.all(promises);
} catch (error) {
this.log.error({ error }, "Error checking one or more keys.");
}
this.scheduleNextCheck();
}, 250);
return;
}
// Schedule the next check for the oldest key.
const oldestKey = enabledKeys.reduce((oldest, key) =>
key.lastChecked < oldest.lastChecked ? key : oldest
);
// Don't check any individual key too often.
// Don't check anything at all at a rate faster than once per 3 seconds.
const nextCheck = Math.max(
oldestKey.lastChecked + KEY_CHECK_PERIOD,
this.lastCheck + MIN_CHECK_INTERVAL
);
this.log.debug(
{ key: oldestKey.hash, nextCheck: new Date(nextCheck) },
"Scheduling next check."
);
const delay = nextCheck - Date.now();
this.timeout = setTimeout(() => this.checkKey(oldestKey), delay);
}
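// Worked example of the scheduling math above (illustrative timestamps): if
// the oldest enabled key was checked two hours ago and *any* key was checked
// one second ago, nextCheck = max(now - 2h + 1h, now - 1s + 3s) = now + 2s,
// so the next check fires in roughly two seconds.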
private async checkKey(key: OpenAIKey) {
// It's possible this key might have been disabled while we were waiting
// for the next check.
if (key.isDisabled) {
this.log.warn({ key: key.hash }, "Skipping check for disabled key.");
this.scheduleNextCheck();
return;
}
this.log.debug({ key: key.hash }, "Checking key...");
let isInitialCheck = !key.lastChecked;
try {
// We only need to check for provisioned models on the initial check.
if (isInitialCheck) {
const [/* subscription,*/ provisionedModels, livenessTest] =
await Promise.all([
// this.getSubscription(key),
this.getProvisionedModels(key),
this.testLiveness(key),
]);
const updates = {
isGpt4: provisionedModels.gpt4,
// softLimit: subscription.soft_limit_usd,
// hardLimit: subscription.hard_limit_usd,
// systemHardLimit: subscription.system_hard_limit_usd,
isTrial: livenessTest.rateLimit <= 250,
softLimit: 0,
hardLimit: 0,
systemHardLimit: 0,
};
this.updateKey(key.hash, updates);
} else {
// Provisioned models don't change, so we don't need to check them again
const [/* subscription, */ _livenessTest] = await Promise.all([
// this.getSubscription(key),
this.testLiveness(key),
]);
const updates = {
// softLimit: subscription.soft_limit_usd,
// hardLimit: subscription.hard_limit_usd,
// systemHardLimit: subscription.system_hard_limit_usd,
softLimit: 0,
hardLimit: 0,
systemHardLimit: 0,
};
this.updateKey(key.hash, updates);
}
this.log.info(
{ key: key.hash, hardLimit: key.hardLimit },
"Key check complete."
);
} catch (error) {
// touch the key so we don't check it again for a while
this.updateKey(key.hash, {});
this.handleAxiosError(key, error as AxiosError);
}
this.lastCheck = Date.now();
// Only enqueue the next check if this wasn't a startup check, since those
// are batched together elsewhere.
if (!isInitialCheck) {
// this.scheduleNextCheck();
}
}
private async getProvisionedModels(
key: OpenAIKey
): Promise<{ turbo: boolean; gpt4: boolean }> {
const opts = { headers: { Authorization: `Bearer ${key.key}` } };
const { data } = await axios.get<GetModelsResponse>(GET_MODELS_URL, opts);
const models = data.data;
const turbo = models.some(({ id }) => id.startsWith("gpt-3.5"));
const gpt4 = models.some(({ id }) => id.startsWith("gpt-4"));
// We want to update the key's `isGpt4` flag here, but we don't want to
// update its `lastChecked` timestamp because we need to let the liveness
// check run before we can consider the key checked.
// Need to use `find` here because keys are cloned from the pool.
const keyFromPool = this.keys.find((k) => k.hash === key.hash)!;
this.updateKey(key.hash, {
isGpt4: gpt4,
lastChecked: keyFromPool.lastChecked,
});
return { turbo, gpt4 };
}
private async getSubscription(key: OpenAIKey) {
const { data } = await axios.get<GetSubscriptionResponse>(
GET_SUBSCRIPTION_URL,
{ headers: { Authorization: `Bearer ${key.key}` } }
);
// See note above about updating the key's `lastChecked` timestamp.
const keyFromPool = this.keys.find((k) => k.hash === key.hash)!;
this.updateKey(key.hash, {
isTrial: !data.has_payment_method,
lastChecked: keyFromPool.lastChecked,
});
return data;
}
private handleAxiosError(key: OpenAIKey, error: AxiosError) {
if (error.response && OpenAIKeyChecker.errorIsOpenAIError(error)) {
const { status, data } = error.response;
if (status === 401) {
this.log.warn(
{ key: key.hash, error: data },
"Key is invalid or revoked. Disabling key."
);
this.updateKey(key.hash, {
isDisabled: true,
isRevoked: true,
isGpt4: false,
});
} else if (status === 429) {
switch (data.error.type) {
case "insufficient_quota":
case "access_terminated":
case "billing_not_active":
const isOverQuota = data.error.type === "insufficient_quota";
const isRevoked = !isOverQuota;
const isGpt4 = isRevoked ? false : key.isGpt4;
this.log.warn(
{ key: key.hash, rateLimitType: data.error.type, error: data },
"Key returned a non-transient 429 error. Disabling key."
);
this.updateKey(key.hash, {
isDisabled: true,
isRevoked,
isOverQuota,
isGpt4,
});
break;
case "requests":
// Trial keys have extremely low requests-per-minute limits and we
// can often hit them just while checking the key, so we need to
// retry the check later to know if the key has quota remaining.
this.log.warn(
{ key: key.hash, error: data },
"Key is currently rate limited, so its liveness cannot be checked. Retrying in fifteen seconds."
);
// To trigger a shorter than usual delay before the next check, we
// will set its `lastChecked` to (NOW - (KEY_CHECK_PERIOD - 15s)).
// This will cause the usual key check scheduling logic to schedule
// the next check in 15 seconds. This also prevents the key from
// holding up startup checks for other keys.
const fifteenSeconds = 15 * 1000;
const next = Date.now() - (KEY_CHECK_PERIOD - fifteenSeconds);
this.updateKey(key.hash, { lastChecked: next });
break;
case "tokens":
// Hitting a token rate limit, even on a trial key, actually implies
// that the key is valid and can generate completions, so we will
// treat this as effectively a successful `testLiveness` call.
this.log.info(
{ key: key.hash },
"Key is currently `tokens` rate limited; assuming it is operational."
);
this.updateKey(key.hash, { lastChecked: Date.now() });
break;
default:
this.log.error(
{ key: key.hash, rateLimitType: data.error.type, error: data },
"Encountered unexpected rate limit error class while checking key. This may indicate a change in the API; please report this."
);
// We don't know what this error means, so we just let the key
// through and maybe it will fail when someone tries to use it.
this.updateKey(key.hash, { lastChecked: Date.now() });
}
} else {
this.log.error(
{ key: key.hash, status, error: data },
"Encountered unexpected error status while checking key. This may indicate a change in the API; please report this."
);
this.updateKey(key.hash, { lastChecked: Date.now() });
}
return;
}
this.log.error(
{ key: key.hash, error: error.message },
"Network error while checking key; trying this key again in a minute."
);
const oneMinute = 60 * 1000;
const next = Date.now() - (KEY_CHECK_PERIOD - oneMinute);
this.updateKey(key.hash, { lastChecked: next });
}
/**
* Tests whether the key is valid and has quota remaining. The request we send
* is actually not valid, but keys which are revoked or out of quota will fail
* with a 401 or 429 error instead of the expected 400 Bad Request error.
* This lets us avoid test keys without spending any quota.
*
* We use the rate limit header to determine whether it's a trial key.
*/
private async testLiveness(key: OpenAIKey): Promise<{ rateLimit: number }> {
const payload = {
model: "gpt-3.5-turbo",
max_tokens: -1,
messages: [{ role: "user", content: "" }],
};
const { headers, data } = await axios.post<OpenAIError>(
POST_CHAT_COMPLETIONS_URL,
payload,
{
headers: { Authorization: `Bearer ${key.key}` },
validateStatus: (status) => status === 400,
}
);
const rateLimitHeader = headers["x-ratelimit-limit-requests"];
const rateLimit = parseInt(rateLimitHeader) || 3500; // trials have 200
// invalid_request_error is the expected error
if (data.error.type !== "invalid_request_error") {
this.log.warn(
{ key: key.hash, error: data },
"Unexpected 400 error class while checking key; assuming key is valid, but this may indicate a change in the API."
);
}
return { rateLimit };
}
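// For reference, the expected 400 body follows the OpenAIError shape defined
// above (field values here are illustrative, not from the source):
// { error: { type: "invalid_request_error", code: null,
//   param: "max_tokens", message: "..." } }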
static errorIsOpenAIError(
error: AxiosError
): error is AxiosError<OpenAIError> {
const data = error.response?.data as any;
return data?.error?.type;
}
}
-348
@@ -1,348 +0,0 @@
/* Manages OpenAI API keys. Tracks usage, disables expired keys, and provides
round-robin access to keys. Keys are stored in the OPENAI_KEY environment
variable as a comma-separated list of keys. */
import crypto from "crypto";
import fs from "fs";
import http from "http";
import path from "path";
import { KeyProvider, Key, Model } from "../index";
import { config } from "../../config";
import { logger } from "../../logger";
import { OpenAIKeyChecker } from "./checker";
export type OpenAIModel = "gpt-3.5-turbo" | "gpt-4";
export const OPENAI_SUPPORTED_MODELS: readonly OpenAIModel[] = [
"gpt-3.5-turbo",
"gpt-4",
] as const;
export interface OpenAIKey extends Key {
readonly service: "openai";
/** Set when key check returns a 401. */
isRevoked: boolean;
/** Set when key check returns a non-transient 429. */
isOverQuota: boolean;
/** Threshold at which a warning email will be sent by OpenAI. */
softLimit: number;
/** Threshold at which the key will be disabled because it has reached the user-defined limit. */
hardLimit: number;
/** The maximum quota allocated to this key by OpenAI. */
systemHardLimit: number;
/** The time at which this key was last rate limited. */
rateLimitedAt: number;
/**
* Last known X-RateLimit-Requests-Reset header from OpenAI, converted to a
* number.
* Formatted as a `\d+(m|s)` string denoting the time until the limit resets.
* Specifically, it seems to indicate the time until the key's quota will be
* fully restored; the key may be usable before this time as the limit is a
* rolling window.
*
* Requests which return a 429 do not count against the quota.
*
* Requests which fail for other reasons (e.g. 401) count against the quota.
*/
rateLimitRequestsReset: number;
/**
* Last known X-RateLimit-Tokens-Reset header from OpenAI, converted to a
* number.
* Appears to follow the same format as `rateLimitRequestsReset`.
*
* Requests which fail do not count against the quota as they do not consume
* tokens.
*/
rateLimitTokensReset: number;
}
export type OpenAIKeyUpdate = Omit<
Partial<OpenAIKey>,
"key" | "hash" | "promptCount"
>;
export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> {
readonly service = "openai" as const;
private keys: OpenAIKey[] = [];
private checker?: OpenAIKeyChecker;
private log = logger.child({ module: "key-provider", service: this.service });
constructor() {
const keyString = config.openaiKey?.trim();
if (!keyString) {
this.log.warn("OPENAI_KEY is not set. OpenAI API will not be available.");
return;
}
let bareKeys: string[];
bareKeys = keyString.split(",").map((k) => k.trim());
bareKeys = [...new Set(bareKeys)];
for (const k of bareKeys) {
const newKey = {
key: k,
service: "openai" as const,
isGpt4: true,
isTrial: false,
isDisabled: false,
isRevoked: false,
isOverQuota: false,
softLimit: 0,
hardLimit: 0,
systemHardLimit: 0,
usage: 0,
lastUsed: 0,
lastChecked: 0,
promptCount: 0,
hash: `oai-${crypto
.createHash("sha256")
.update(k)
.digest("hex")
.slice(0, 8)}`,
rateLimitedAt: 0,
rateLimitRequestsReset: 0,
rateLimitTokensReset: 0,
};
this.keys.push(newKey);
}
this.log.info({ keyCount: this.keys.length }, "Loaded OpenAI keys.");
}
public init() {
if (config.checkKeys) {
this.checker = new OpenAIKeyChecker(this.keys, this.update.bind(this));
this.checker.start();
}
}
/**
* Returns a list of all keys, with the key field removed.
* Don't mutate returned keys, use a KeyPool method instead.
**/
public list() {
return this.keys.map((key) => {
return Object.freeze({
...key,
key: undefined,
});
});
}
public get(model: Model) {
const needGpt4 = model.startsWith("gpt-4");
const availableKeys = this.keys.filter(
(key) => !key.isDisabled && (!needGpt4 || key.isGpt4)
);
if (availableKeys.length === 0) {
let message = needGpt4
? "No GPT-4 keys available. Try selecting a Turbo model."
: "No active OpenAI keys available.";
throw new Error(message);
}
if (needGpt4 && config.turboOnly) {
throw new Error(
"Proxy operator has disabled GPT-4 to reduce quota usage. Try selecting a Turbo model."
);
}
// Select a key, from highest priority to lowest priority:
// 1. Keys which are not rate limited
// a. We ignore rate limits from over a minute ago
// b. If all keys were rate limited in the last minute, select the
// least recently rate limited key
// 2. Keys which are trials
// 3. Keys which have not been used in the longest time
const now = Date.now();
const rateLimitThreshold = 60 * 1000;
const keysByPriority = availableKeys.sort((a, b) => {
const aRateLimited = now - a.rateLimitedAt < rateLimitThreshold;
const bRateLimited = now - b.rateLimitedAt < rateLimitThreshold;
if (aRateLimited && !bRateLimited) return 1;
if (!aRateLimited && bRateLimited) return -1;
if (aRateLimited && bRateLimited) {
return a.rateLimitedAt - b.rateLimitedAt;
}
if (a.isTrial && !b.isTrial) return -1;
if (!a.isTrial && b.isTrial) return 1;
return a.lastUsed - b.lastUsed;
});
const selectedKey = keysByPriority[0];
selectedKey.lastUsed = now;
// When a key is selected, we rate-limit it for a brief period of time to
// prevent the queue processor from immediately flooding it with requests
// while the initial request is still being processed (which is when we will
// get new rate limit headers).
// Instead, we will let a request through every second until the key
// becomes fully saturated and locked out again.
selectedKey.rateLimitedAt = now;
selectedKey.rateLimitRequestsReset = 1000;
return { ...selectedKey };
}
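// Example of the priority order above (keys assumed): with key A rate limited
// 10s ago, trial key B never used, and paid key C last used 5s ago, the sort
// yields [B, C, A]: non-rate-limited keys first, trials ahead of paid keys,
// then least recently used.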
/** Called by the key checker to update key information. */
public update(keyHash: string, update: OpenAIKeyUpdate) {
const keyFromPool = this.keys.find((k) => k.hash === keyHash)!;
Object.assign(keyFromPool, { lastChecked: Date.now(), ...update });
// this.writeKeyStatus();
}
/** Disables a key, or does nothing if the key isn't in this pool. */
public disable(key: Key) {
const keyFromPool = this.keys.find((k) => k.key === key.key);
if (!keyFromPool || keyFromPool.isDisabled) return;
keyFromPool.isDisabled = true;
this.log.warn({ key: key.hash }, "Key disabled");
}
public available() {
return this.keys.filter((k) => !k.isDisabled).length;
}
public anyUnchecked() {
return !!config.checkKeys && this.keys.some((key) => !key.lastChecked);
}
/**
* Given a model, returns the period until a key will be available to service
* the request, or returns 0 if a key is ready immediately.
*/
public getLockoutPeriod(model: Model = "gpt-4"): number {
const needGpt4 = model.startsWith("gpt-4");
const activeKeys = this.keys.filter(
(key) => !key.isDisabled && (!needGpt4 || key.isGpt4)
);
if (activeKeys.length === 0) {
// If there are no active keys for this model we can't fulfill requests.
// We'll return 0 to let the request through and return an error,
// otherwise the request will be stuck in the queue forever.
return 0;
}
// A key is rate-limited if its `rateLimitedAt` plus the greater of its
// `rateLimitRequestsReset` and `rateLimitTokensReset` is after the
// current time.
// If there are any keys that are not rate-limited, we can fulfill requests.
const now = Date.now();
const rateLimitedKeys = activeKeys.filter((key) => {
const resetTime = Math.max(
key.rateLimitRequestsReset,
key.rateLimitTokensReset
);
return now < key.rateLimitedAt + resetTime;
}).length;
const anyNotRateLimited = rateLimitedKeys < activeKeys.length;
if (anyNotRateLimited) {
return 0;
}
// If all keys are rate-limited, return the time until the first key is
// ready.
const timeUntilFirstReady = Math.min(
...activeKeys.map((key) => {
const resetTime = Math.max(
key.rateLimitRequestsReset,
key.rateLimitTokensReset
);
return key.rateLimitedAt + resetTime - now;
})
);
return timeUntilFirstReady;
}
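// Illustrative example: if every active key was rate limited at t0 with
// max(rateLimitRequestsReset, rateLimitTokensReset) = 4000ms, a call at
// t0 + 1500ms returns 2500, the time until the first key is usable again.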
public markRateLimited(keyHash: string) {
this.log.warn({ key: keyHash }, "Key rate limited");
const key = this.keys.find((k) => k.hash === keyHash)!;
key.rateLimitedAt = Date.now();
}
public incrementPrompt(keyHash?: string) {
const key = this.keys.find((k) => k.hash === keyHash);
if (!key) return;
key.promptCount++;
}
public updateRateLimits(keyHash: string, headers: http.IncomingHttpHeaders) {
const key = this.keys.find((k) => k.hash === keyHash)!;
const requestsReset = headers["x-ratelimit-reset-requests"];
const tokensReset = headers["x-ratelimit-reset-tokens"];
// Sometimes OpenAI only sends one of the two rate limit headers, it's
// unclear why.
if (requestsReset && typeof requestsReset === "string") {
this.log.info(
{ key: key.hash, requestsReset },
`Updating rate limit requests reset time`
);
key.rateLimitRequestsReset = getResetDurationMillis(requestsReset);
}
if (tokensReset && typeof tokensReset === "string") {
this.log.info(
{ key: key.hash, tokensReset },
`Updating rate limit tokens reset time`
);
key.rateLimitTokensReset = getResetDurationMillis(tokensReset);
}
if (!requestsReset && !tokensReset) {
this.log.warn(
{ key: key.hash },
`No rate limit headers in OpenAI response; skipping update`
);
return;
}
}
/**
* Returns the total quota limit of all keys in USD. Keys which are disabled
* are not included in the total.
*/
public activeLimitInUsd(
{ gpt4 }: { gpt4: boolean } = { gpt4: false }
): string {
const keys = this.keys.filter((k) => !k.isDisabled && k.isGpt4 === gpt4);
const totalLimit = keys.reduce((acc, { hardLimit }) => acc + hardLimit, 0);
return `$${totalLimit.toFixed(2)}`;
}
/** Writes key status to disk. */
// public writeKeyStatus() {
// const keys = this.keys.map((key) => ({
// key: key.key,
// isGpt4: key.isGpt4,
// usage: key.usage,
// hardLimit: key.hardLimit,
// isDisabled: key.isDisabled,
// }));
// fs.writeFileSync(
// path.join(__dirname, "..", "keys.json"),
// JSON.stringify(keys, null, 2)
// );
// }
}
/**
* Converts reset string ("21.0032s" or "21ms") to a number of milliseconds.
* Result is clamped to 10s even though the API returns up to 60s, because the
* API returns the time until the entire quota is reset, even if a key may be
* able to fulfill requests before then due to partial resets.
**/
function getResetDurationMillis(resetDuration?: string): number {
const match = resetDuration?.match(/(\d+(\.\d+)?)(s|ms)/);
if (match) {
const [, time, , unit] = match;
const value = parseFloat(time);
const result = unit === "s" ? value * 1000 : value;
return Math.min(result, 10000);
}
return 0;
}
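// Examples of the conversion above:
//   getResetDurationMillis("21.0032s") -> 10000 (21003ms, clamped to 10s)
//   getResetDurationMillis("250ms")    -> 250
//   getResetDurationMillis(undefined)  -> 0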
+14
@@ -1,6 +1,20 @@
import pino from "pino"; import pino from "pino";
import { config } from "./config"; import { config } from "./config";
const transport =
process.env.NODE_ENV === "production"
? undefined
: {
target: "pino-pretty",
options: {
singleLine: true,
messageFormat: "{if module}\x1b[90m[{module}] \x1b[39m{end}{msg}",
ignore: "module",
},
};
export const logger = pino({
level: config.logLevel,
base: { pid: process.pid, module: "server" },
transport,
});
-167
@@ -1,167 +0,0 @@
/**
* Very scuffed persistence system using a Huggingface's Datasets git repo as a
* file system. We use this because it's free and everyone is already deploying
* to Huggingface's Spaces feature anyway, so they can easily create a Dataset
* repository too rather than having to find some other place to host files.
*
* We periodically commit to the repo, and then pull from it when we need to
* read data. This is a bit slow, but it's fine for our purposes.
*/
import fs from "fs";
import os from "os";
import path from "path";
import { spawn } from "child_process";
import { config, Config } from "./config";
import { logger } from "./logger";
const log = logger.child({ module: "dataset-persistence" });
let singleton: DatasetPersistence | null = null;
class DatasetPersistence {
private initialized: boolean = false;
private keyPath = `${os.tmpdir()}/id_rsa`;
private repoPath = `${os.tmpdir()}/oai-proxy-dataset`;
private repoUrl!: string;
private sshKey!: string;
constructor() {
if (singleton) return singleton;
if (config.gatekeeperStore !== "huggingface_datasets") return;
DatasetPersistence.assertConfigured(config);
this.repoUrl = config.hfDatasetRepoUrl;
this.sshKey = config.hfPrivateSshKey.trim();
singleton = this;
}
async init() {
if (this.initialized) return;
log.info(
{ repoUrl: this.repoUrl, keyPath: this.keyPath, repoPath: this.repoPath },
"Initializing Huggingface Datasets persistence."
);
try {
await this.setupSshKey();
await this.runGit(
"config user.email 'oai-proxy-persistence@example.com'"
);
await this.runGit("config user.name 'Proxy Persistence'");
log.info("Cloning repo...");
const cloneOutput = await this.runGit(
`clone --depth 1 ${this.repoUrl} ${this.repoPath}`
);
log.info({ output: cloneOutput.toString() }, "Cloned repo.");
// Test write access
const pushOutput = await this.runGit("push");
if (pushOutput !== "Everything up-to-date") {
log.error({ output: pushOutput }, "Unexpected output from git push.");
throw new Error("Unable to push to repo.");
}
log.info("Datasets configuration looks good.");
} catch (e) {
log.error(
{ error: e },
"Failed to initialize Huggingface Datasets persistence."
);
throw e;
}
this.initialized = true;
}
async get(key: string): Promise<Buffer | null> {
try {
await this.init();
await this.runGit(`checkout HEAD -- ${key}`);
const filePath = path.join(this.repoPath, key);
return fs.promises.readFile(filePath);
} catch (e) {
log.error({ error: e }, "Failed to get key from Dataset repo.");
return null;
}
}
async set(key: string, value: Buffer) {
try {
await this.init();
await fs.promises.writeFile(`${this.repoPath}/${key}`, value);
// TODO: Need to set up LFS for >10MB files
if (fs.statSync(`${this.repoPath}/${key}`).size > 10 * 1024 * 1024) {
throw new Error("File too large for non-LFS storage.");
}
await this.runGit(`add ${key}`);
await this.runGit(`commit -m "Update ${key}"`);
await this.runGit("push");
} catch (e) {
log.error({ error: e }, "Failed to set key in Dataset repo.");
}
}
protected async cleanup() {
try {
await this.init();
await this.runGit("fetch --depth 1");
await this.runGit("reset --hard FETCH_HEAD");
} catch (e) {
log.error({ error: e }, "Failed to cleanup Dataset repo.");
}
}
protected async setupSshKey() {
fs.writeFileSync(this.keyPath, this.sshKey);
fs.chmodSync(this.keyPath, 0o600);
await this.runGit(`config core.sshCommand 'ssh -i ${this.keyPath}'`);
}
protected async runGit(command: string) {
const cmd = `git -C ${this.repoPath} ${command}`;
log.debug({ command: cmd }, "Running git command.");
return new Promise<string>((resolve, reject) => {
const proc = spawn(cmd, { shell: true });
const stdout: string[] = [];
const stderr: string[] = [];
proc.stdout.on("data", (data) => stdout.push(data.toString()));
proc.stderr.on("data", (data) => stderr.push(data.toString()));
proc.on("close", (code) => {
if (code !== 0) {
const errorOutput = stderr.join("");
log.error({ code, errorOutput }, "Git command failed.");
reject(
new Error(
`Git command failed with exit code ${code}: ${errorOutput}`
)
);
} else {
resolve(stdout.join(""));
}
});
});
}
static assertConfigured(input: Config): asserts input is ConfigWithDatasets {
if (!input.hfDatasetRepoUrl) {
throw new Error("HF_DATASET_REPO_URL is required when using Datasets.");
}
if (!input.hfPrivateSshKey) {
throw new Error("HF_PRIVATE_SSH_KEY is required when using Datasets.");
}
}
}
type ConfigWithDatasets = Config & {
hfDatasetRepoUrl: string;
hfPrivateSshKey: string;
};
export { DatasetPersistence };
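// Hypothetical usage sketch of this class (requires gatekeeperStore to be
// "huggingface_datasets" plus the two env vars asserted above; file name is
// made up for illustration):
// const store = new DatasetPersistence();
// await store.init();
// await store.set("users.json", Buffer.from(JSON.stringify(data)));
// const bytes = await store.get("users.json");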
+228 -77
@@ -1,5 +1,4 @@
import { Request, RequestHandler, Router } from "express"; import { Request, Response, RequestHandler, Router } from "express";
import * as http from "http";
import { createProxyMiddleware } from "http-proxy-middleware"; import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config"; import { config } from "../config";
import { logger } from "../logger"; import { logger } from "../logger";
@@ -9,16 +8,15 @@ import { handleProxyError } from "./middleware/common";
import { import {
addKey, addKey,
addAnthropicPreamble, addAnthropicPreamble,
blockZoomerOrigins,
createPreprocessorMiddleware, createPreprocessorMiddleware,
finalizeBody, finalizeBody,
languageFilter, createOnProxyReqHandler,
removeOriginHeaders,
} from "./middleware/request"; } from "./middleware/request";
import { import {
ProxyResHandlerWithBody, ProxyResHandlerWithBody,
createOnProxyResHandler, createOnProxyResHandler,
} from "./middleware/response"; } from "./middleware/response";
import { sendErrorToClient } from "./middleware/response/error-generator";
let modelsCache: any = null; let modelsCache: any = null;
let modelsCacheTime = 0; let modelsCacheTime = 0;
@@ -42,8 +40,12 @@ const getModelsResponse = () => {
"claude-instant-v1.1",
"claude-instant-v1.1-100k",
"claude-instant-v1.0",
"claude-2",
"claude-2.0",
"claude-2.1",
"claude-3-haiku-20240307",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
];
const models = claudeVariants.map((id) => ({
@@ -66,30 +68,6 @@ const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
const rewriteAnthropicRequest = (
proxyReq: http.ClientRequest,
req: Request,
res: http.ServerResponse
) => {
const rewriterPipeline = [
addKey,
addAnthropicPreamble,
languageFilter,
blockZoomerOrigins,
removeOriginHeaders,
finalizeBody,
];
try {
for (const rewriter of rewriterPipeline) {
rewriter(proxyReq, req, res, {});
}
} catch (error) {
req.log.error(error, "Error while executing proxy rewriter");
proxyReq.destroy(error as Error);
}
};
/** Only used for non-streaming requests. */
const anthropicResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
@@ -101,42 +79,69 @@ const anthropicResponseHandler: ProxyResHandlerWithBody = async (
throw new Error("Expected body to be an object");
}
let newBody = body;
switch (`${req.inboundApi}<-${req.outboundApi}`) {
case "openai<-anthropic-text":
req.log.info("Transforming Anthropic Text back to OpenAI format");
newBody = transformAnthropicTextResponseToOpenAI(body, req);
break;
case "openai<-anthropic-chat":
req.log.info("Transforming Anthropic Chat back to OpenAI format");
newBody = transformAnthropicChatResponseToOpenAI(body);
break;
case "anthropic-text<-anthropic-chat":
req.log.info("Transforming Anthropic Chat back to Anthropic Text format");
newBody = transformAnthropicChatResponseToAnthropicText(body);
break;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
function flattenChatResponse(
content: { type: string; text: string }[]
): string {
return content
.map((part: { type: string; text: string }) =>
part.type === "text" ? part.text : ""
)
.join("\n");
}
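// e.g. flattenChatResponse([{ type: "text", text: "Hello" },
//   { type: "text", text: "world" }]) === "Hello\nworld"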
export function transformAnthropicChatResponseToAnthropicText(
anthropicBody: Record<string, any>
): Record<string, any> {
return {
type: "completion",
id: "ant-" + anthropicBody.id,
completion: flattenChatResponse(anthropicBody.content),
stop_reason: anthropicBody.stop_reason,
stop: anthropicBody.stop_sequence,
model: anthropicBody.model,
usage: anthropicBody.usage,
};
}
/**
* Transforms a model response from the Anthropic API to match those from the
* OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
* is only used for non-streaming requests as streaming requests are handled
* on-the-fly.
*/
function transformAnthropicTextResponseToOpenAI(
anthropicBody: Record<string, any>,
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
return {
id: "ant-" + anthropicBody.log_id,
object: "chat.completion",
created: Date.now(),
model: anthropicBody.model,
usage: {
prompt_tokens: req.promptTokens,
completion_tokens: req.outputTokens,
total_tokens: totalTokens,
},
choices: [
{
@@ -151,54 +156,200 @@
};
}
function transformAnthropicChatResponseToOpenAI(
anthropicBody: Record<string, any>
): Record<string, any> {
return {
id: "ant-" + anthropicBody.id,
object: "chat.completion",
created: Date.now(),
model: anthropicBody.model,
usage: anthropicBody.usage,
choices: [
{
message: {
role: "assistant",
content: flattenChatResponse(anthropicBody.content),
},
finish_reason: anthropicBody.stop_reason,
index: 0,
},
],
};
}
const anthropicProxy = createQueueMiddleware({
proxyMiddleware: createProxyMiddleware({
target: "https://api.anthropic.com",
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({
pipeline: [addKey, addAnthropicPreamble, finalizeBody],
}),
proxyRes: createOnProxyResHandler([anthropicResponseHandler]),
error: handleProxyError,
},
// Abusing pathFilter to rewrite the paths dynamically.
pathFilter: (pathname, req) => {
const isText = req.outboundApi === "anthropic-text";
const isChat = req.outboundApi === "anthropic-chat";
if (isChat && pathname === "/v1/complete") {
req.url = "/v1/messages";
}
if (isText && pathname === "/v1/chat/completions") {
req.url = "/v1/complete";
}
if (isChat && pathname === "/v1/chat/completions") {
req.url = "/v1/messages";
}
if (isChat && ["sonnet", "opus"].includes(req.params.type)) {
req.url = "/v1/messages";
}
return true;
},
}),
});
const nativeTextPreprocessor = createPreprocessorMiddleware({
inApi: "anthropic-text",
outApi: "anthropic-text",
service: "anthropic",
});
const textToChatPreprocessor = createPreprocessorMiddleware({
inApi: "anthropic-text",
outApi: "anthropic-chat",
service: "anthropic",
});
/**
* Routes text completion prompts to anthropic-chat if they need translation
* (claude-3 based models do not support the old text completion endpoint).
*/
const preprocessAnthropicTextRequest: RequestHandler = (req, res, next) => {
if (req.body.model?.startsWith("claude-3")) {
textToChatPreprocessor(req, res, next);
} else {
nativeTextPreprocessor(req, res, next);
}
};
const oaiToTextPreprocessor = createPreprocessorMiddleware({
inApi: "openai",
outApi: "anthropic-text",
service: "anthropic",
});
const oaiToChatPreprocessor = createPreprocessorMiddleware({
inApi: "openai",
outApi: "anthropic-chat",
service: "anthropic",
});
/**
* Routes an OpenAI prompt to either the legacy Claude text completion endpoint
* or the new Claude chat completion endpoint, based on the requested model.
*/
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
maybeReassignModel(req);
if (req.body.model?.includes("claude-3")) {
oaiToChatPreprocessor(req, res, next);
} else {
oaiToTextPreprocessor(req, res, next);
}
};
const anthropicRouter = Router();
// Fix paths because clients don't consistently use the /v1 prefix.
anthropicRouter.use((req, _res, next) => {
if (!req.path.startsWith("/v1/")) {
req.url = `/v1${req.url}`;
}
next();
});
anthropicRouter.get("/v1/models", handleModelRequest); anthropicRouter.get("/v1/models", handleModelRequest);
// Native Anthropic chat completion endpoint.
anthropicRouter.post(
"/v1/messages",
ipLimiter,
createPreprocessorMiddleware({
inApi: "anthropic-chat",
outApi: "anthropic-chat",
service: "anthropic",
}),
anthropicProxy
);
// Anthropic text completion endpoint. Translates to Anthropic chat completion
// if the requested model is a Claude 3 model.
anthropicRouter.post(
"/v1/complete",
ipLimiter,
preprocessAnthropicTextRequest,
anthropicProxy
);
// OpenAI-to-Anthropic compatibility endpoint. Accepts an OpenAI chat completion
// request and transforms/routes it to the appropriate Anthropic format and
// endpoint based on the requested model.
anthropicRouter.post(
"/v1/chat/completions",
ipLimiter,
preprocessOpenAICompatRequest,
anthropicProxy
);
// Temporarily force Anthropic Text to Anthropic Chat for frontends which do not
// yet support the new model. Forces claude-3. Will be removed once common
// frontends have been updated.
anthropicRouter.post(
"/v1/:type(sonnet|opus)/:action(complete|messages)",
ipLimiter,
handleAnthropicTextCompatRequest,
createPreprocessorMiddleware({
inApi: "anthropic-text",
outApi: "anthropic-chat",
service: "anthropic",
}),
anthropicProxy
);
function handleAnthropicTextCompatRequest(
req: Request,
res: Response,
next: any
) {
const type = req.params.type;
const action = req.params.action;
const alreadyInChatFormat = Boolean(req.body.messages);
const compatModel = `claude-3-${type}-20240229`;
req.log.info(
{ type, inputModel: req.body.model, compatModel, alreadyInChatFormat },
"Handling Anthropic compatibility request"
);
if (action === "messages" || alreadyInChatFormat) {
return sendErrorToClient({
req,
res,
options: {
title: "Unnecessary usage of compatibility endpoint",
message: `Your client seems to already support the new Claude API format. This endpoint is intended for clients that do not yet support the new format.\nUse the normal \`/anthropic\` proxy endpoint instead.`,
format: "unknown",
statusCode: 400,
reqId: req.id,
obj: {
requested_endpoint: "/anthropic/" + type,
correct_endpoint: "/anthropic",
},
},
});
}
req.body.model = compatModel;
next();
}
/**
* If a client using the OpenAI compatibility endpoint requests an actual OpenAI
* model, reassigns it to Claude 3 Sonnet.
*/
function maybeReassignModel(req: Request) {
const model = req.body.model;
if (!model.startsWith("gpt-")) return;
req.body.model = "claude-3-sonnet-20240229";
}
export const anthropic = anthropicRouter;
-64
@@ -1,64 +0,0 @@
/**
* Authenticates RisuAI.xyz users using a special x-risu-tk header provided by
* RisuAI.xyz. This lets us rate limit and limit queue concurrency properly,
* since otherwise RisuAI.xyz users share the same IP address and can't be
* distinguished.
* Contributors: @kwaroran
*/
import axios from "axios";
import { Request, Response, NextFunction } from "express";
const RISUAI_TOKEN_CHECKER_URL = "https://sv.risuai.xyz/public/api/checktoken";
const validRisuTokens = new Set<string>();
let lastFailedRisuTokenCheck = 0;
export async function checkRisuToken(
req: Request,
_res: Response,
next: NextFunction
) {
let header = req.header("x-risu-tk") || null;
if (!header) {
return next();
}
const timeSinceLastFailedCheck = Date.now() - lastFailedRisuTokenCheck;
if (timeSinceLastFailedCheck < 60 * 1000) {
req.log.warn(
{ timeSinceLastFailedCheck },
"Skipping RisuAI token check due to recent failed check"
);
return next();
}
try {
if (!validRisuTokens.has(header)) {
req.log.info("Authenticating new RisuAI token");
const validCheck = await axios.post<{ vaild: boolean }>(
RISUAI_TOKEN_CHECKER_URL,
{ token: header },
{ headers: { "Content-Type": "application/json" } }
);
if (!validCheck.data.vaild) {
req.log.warn("Invalid RisuAI token; using IP instead");
} else {
req.log.info("RisuAI token authenticated");
validRisuTokens.add(header);
req.risuToken = header;
}
} else {
req.log.debug("RisuAI token already known");
req.risuToken = header;
}
} catch (err) {
lastFailedRisuTokenCheck = Date.now();
req.log.warn(
{ error: err.message },
"Error authenticating RisuAI token; using IP instead"
);
}
next();
}
-211
@@ -1,211 +0,0 @@
/**
* Basic user management. Handles creation and tracking of proxy users, personal
* access tokens, and quota management. Supports in-memory and Firebase Realtime
* Database persistence stores.
*
* Users are identified solely by their personal access token. The token is
* used to authenticate the user for all proxied requests.
*/
import admin from "firebase-admin";
import { v4 as uuid } from "uuid";
import { config, getFirebaseApp } from "../../config";
import { logger } from "../../logger";
export interface User {
/** The user's personal access token. */
token: string;
/** The IP addresses the user has connected from. */
ip: string[];
/** The user's privilege level. */
type: UserType;
/** The number of prompts the user has made. */
promptCount: number;
/** The number of tokens the user has consumed. Not yet implemented. */
tokenCount: number;
/** The time at which the user was created. */
createdAt: number;
/** The time at which the user last connected. */
lastUsedAt?: number;
/** The time at which the user was disabled, if applicable. */
disabledAt?: number;
/** The reason for which the user was disabled, if applicable. */
disabledReason?: string;
}
/**
* Possible privilege levels for a user.
* - `normal`: Default role. Subject to usual rate limits and quotas.
* - `special`: Special role. Higher quotas and exempt from auto-ban/lockout.
* TODO: implement auto-ban/lockout for normal users when they do naughty shit
*/
export type UserType = "normal" | "special";
type UserUpdate = Partial<User> & Pick<User, "token">;
const MAX_IPS_PER_USER = config.maxIpsPerUser;
const users: Map<string, User> = new Map();
const usersToFlush = new Set<string>();
export async function init() {
logger.info({ store: config.gatekeeperStore }, "Initializing user store...");
if (config.gatekeeperStore === "firebase_rtdb") {
await initFirebase();
}
logger.info("User store initialized.");
}
/** Creates a new user and returns their token. */
export function createUser() {
const token = uuid();
users.set(token, {
token,
ip: [],
type: "normal",
promptCount: 0,
tokenCount: 0,
createdAt: Date.now(),
});
usersToFlush.add(token);
return token;
}
/** Returns the user with the given token if they exist. */
export function getUser(token: string) {
return users.get(token);
}
/** Returns a list of all users. */
export function getUsers() {
return Array.from(users.values()).map((user) => ({ ...user }));
}
/**
* Upserts the given user. Intended for use with the /admin API for updating
* user information via JSON. Use other functions for more specific operations.
*/
export function upsertUser(user: UserUpdate) {
const existing: User = users.get(user.token) ?? {
token: user.token,
ip: [],
type: "normal",
promptCount: 0,
tokenCount: 0,
createdAt: Date.now(),
};
users.set(user.token, {
...existing,
...user,
});
usersToFlush.add(user.token);
// Immediately schedule a flush to the database if we're using Firebase.
if (config.gatekeeperStore === "firebase_rtdb") {
setImmediate(flushUsers);
}
return users.get(user.token);
}
/** Increments the prompt count for the given user. */
export function incrementPromptCount(token: string) {
const user = users.get(token);
if (!user) return;
user.promptCount++;
usersToFlush.add(token);
}
/** Increments the token count for the given user by the given amount. */
export function incrementTokenCount(token: string, amount = 1) {
const user = users.get(token);
if (!user) return;
user.tokenCount += amount;
usersToFlush.add(token);
}
/**
* Given a user's token and IP address, authenticates the user and adds the IP
* to the user's list of IPs. Returns the user if they exist and are not
* disabled, otherwise returns undefined.
*/
export function authenticate(token: string, ip: string) {
const user = users.get(token);
if (!user || user.disabledAt) return;
if (!user.ip.includes(ip)) user.ip.push(ip);
// If too many IPs are associated with the user, disable the account.
const ipLimit =
user.type === "special" || !MAX_IPS_PER_USER ? Infinity : MAX_IPS_PER_USER;
if (user.ip.length > ipLimit) {
disableUser(token, "Too many IP addresses associated with this token.");
return;
}
user.lastUsedAt = Date.now();
usersToFlush.add(token);
return user;
}
/** Disables the given user, optionally providing a reason. */
export function disableUser(token: string, reason?: string) {
const user = users.get(token);
if (!user) return;
user.disabledAt = Date.now();
user.disabledReason = reason;
usersToFlush.add(token);
}
// TODO: Firebase persistence is pretend right now and just polls the in-memory
// store to sync it with Firebase when it changes. Will refactor to abstract
// persistence layer later so we can support multiple stores.
let firebaseTimeout: NodeJS.Timeout | undefined;
async function initFirebase() {
logger.info("Connecting to Firebase...");
const app = getFirebaseApp();
const db = admin.database(app);
const usersRef = db.ref("users");
const snapshot = await usersRef.once("value");
const users: Record<string, User> | null = snapshot.val();
firebaseTimeout = setInterval(flushUsers, 20 * 1000);
if (!users) {
logger.info("No users found in Firebase.");
return;
}
for (const token in users) {
upsertUser(users[token]);
}
usersToFlush.clear();
const numUsers = Object.keys(users).length;
logger.info({ users: numUsers }, "Loaded users from Firebase");
}
async function flushUsers() {
const app = getFirebaseApp();
const db = admin.database(app);
const usersRef = db.ref("users");
const updates: Record<string, User> = {};
for (const token of usersToFlush) {
const user = users.get(token);
if (!user) {
continue;
}
updates[token] = user;
}
usersToFlush.clear();
const numUpdates = Object.keys(updates).length;
if (numUpdates === 0) {
return;
}
await usersRef.update(updates);
logger.info(
{ users: Object.keys(updates).length },
"Flushed users to Firebase"
);
}
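// Typical flow through this module's API (all functions defined above; the IP
// is an illustrative documentation address):
// await init();
// const token = createUser();
// const user = authenticate(token, "203.0.113.7"); // records the caller's IP
// if (user) incrementPromptCount(token);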
+307
@@ -0,0 +1,307 @@
import { Request, RequestHandler, Response, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
createPreprocessorMiddleware,
signAwsRequest,
finalizeSignedRequest,
createOnProxyReqHandler,
} from "./middleware/request";
import {
ProxyResHandlerWithBody,
createOnProxyResHandler,
} from "./middleware/response";
import { transformAnthropicChatResponseToAnthropicText } from "./anthropic";
import { sendErrorToClient } from "./middleware/response/error-generator";
const LATEST_AWS_V2_MINOR_VERSION = "1";
let modelsCache: any = null;
let modelsCacheTime = 0;
const getModelsResponse = () => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
if (!config.awsCredentials) return { object: "list", data: [] };
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
const variants = [
"anthropic.claude-v2",
"anthropic.claude-v2:1",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-sonnet-20240229-v1:0",
];
const models = variants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "anthropic",
permission: [],
root: "claude",
parent: null,
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
/** Only used for non-streaming requests. */
const awsResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
switch (`${req.inboundApi}<-${req.outboundApi}`) {
case "openai<-anthropic-text":
req.log.info("Transforming Anthropic Text back to OpenAI format");
newBody = transformAwsTextResponseToOpenAI(body, req);
break;
// case "openai<-anthropic-chat":
// todo: implement this
case "anthropic-text<-anthropic-chat":
req.log.info("Transforming AWS Anthropic Chat back to Text format");
newBody = transformAnthropicChatResponseToAnthropicText(body);
break;
}
// AWS does not always confirm the model in the response, so we have to add it
if (!newBody.model && req.body.model) {
newBody.model = req.body.model;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
/**
* Transforms a model response from the Anthropic API to match those from the
* OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
* is only used for non-streaming requests as streaming requests are handled
* on-the-fly.
*/
function transformAwsTextResponseToOpenAI(
awsBody: Record<string, any>,
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
return {
id: "aws-" + v4(),
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage: {
prompt_tokens: req.promptTokens,
completion_tokens: req.outputTokens,
total_tokens: totalTokens,
},
choices: [
{
message: {
role: "assistant",
content: awsBody.completion?.trim(),
},
finish_reason: awsBody.stop_reason,
index: 0,
},
],
};
}
const awsProxy = createQueueMiddleware({
beforeProxy: signAwsRequest,
proxyMiddleware: createProxyMiddleware({
target: "bad-target-will-be-rewritten",
router: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([awsResponseHandler]),
error: handleProxyError,
},
}),
});
const nativeTextPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
const textToChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes text completion prompts to aws anthropic-chat if they need translation
* (claude-3 based models do not support the old text completion endpoint).
*/
const awsTextCompletionRouter: RequestHandler = (req, res, next) => {
if (req.body.model?.includes("claude-3")) {
textToChatPreprocessor(req, res, next);
} else {
nativeTextPreprocessor(req, res, next);
}
};
const awsRouter = Router();
awsRouter.get("/v1/models", handleModelRequest);
// Native(ish) Anthropic text completion endpoint.
awsRouter.post("/v1/complete", ipLimiter, awsTextCompletionRouter, awsProxy);
// Native Anthropic chat completion endpoint.
awsRouter.post(
"/v1/messages",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
),
awsProxy
);
// Temporary force-Claude3 endpoint
awsRouter.post(
"/v1/sonnet/:action(complete|messages)",
ipLimiter,
handleCompatibilityRequest,
createPreprocessorMiddleware({
inApi: "anthropic-text",
outApi: "anthropic-chat",
service: "aws",
}),
awsProxy
);
// OpenAI-to-AWS Anthropic compatibility endpoint.
awsRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
),
awsProxy
);
/**
* Tries to deal with:
* - frontends sending AWS model names even when they want to use the OpenAI-
* compatible endpoint
* - frontends sending Anthropic model names that AWS doesn't recognize
* - frontends sending OpenAI model names because they expect the proxy to
* translate them
*/
function maybeReassignModel(req: Request) {
const model = req.body.model;
// If client already specified an AWS Claude model ID, use it
if (model.includes("anthropic.claude")) {
return;
}
const pattern =
/^(claude-)?(instant-)?(v)?(\d+)(\.(\d+))?(-\d+k)?(-sonnet-?|-opus-?)?(\d*)/i;
const match = model.match(pattern);
// If there's no match, return the latest v2 model
if (!match) {
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
}
const instant = match[2];
const major = match[4];
const minor = match[6];
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
}
// There's only one v1 model
if (major === "1") {
req.body.model = "anthropic.claude-v1";
return;
}
// Try to map Anthropic API v2 models to AWS v2 models
if (major === "2") {
if (minor === "0") {
req.body.model = "anthropic.claude-v2";
return;
}
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
}
// AWS currently only supports one v3 model.
const variant = match[8]; // sonnet or opus
const variantVersion = match[9];
if (major === "3") {
req.body.model = "anthropic.claude-3-sonnet-20240229-v1:0";
return;
}
// Fallback to latest v2 model
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
}
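// Illustrative mappings produced by the logic above:
//   "claude-instant-v1.1"    -> "anthropic.claude-instant-v1"
//   "claude-v1"              -> "anthropic.claude-v1"
//   "claude-2.0"             -> "anthropic.claude-v2"
//   "claude-2.1"             -> "anthropic.claude-v2:1"
//   "claude-3-opus-20240229" -> "anthropic.claude-3-sonnet-20240229-v1:0"
//   "gpt-4" (no match)       -> "anthropic.claude-v2:1"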
export function handleCompatibilityRequest(
req: Request,
res: Response,
next: any
) {
const action = req.params.action;
const alreadyInChatFormat = Boolean(req.body.messages);
const compatModel = "anthropic.claude-3-sonnet-20240229-v1:0";
req.log.info(
{ inputModel: req.body.model, compatModel, alreadyInChatFormat },
"Handling AWS compatibility request"
);
if (action === "messages" || alreadyInChatFormat) {
return sendErrorToClient({
req,
res,
options: {
title: "Unnecessary usage of compatibility endpoint",
message: `Your client seems to already support the new Claude API format. This endpoint is intended for clients that do not yet support the new format.\nUse the normal \`/aws/claude\` proxy endpoint instead.`,
format: "unknown",
statusCode: 400,
reqId: req.id,
obj: {
requested_endpoint: "/aws/claude/sonnet",
correct_endpoint: "/aws/claude",
},
},
});
}
req.body.model = compatModel;
next();
}
export const aws = awsRouter;
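// Usage sketch (the mount path here is hypothetical, not from this file): the
// exported router is intended to be mounted on the main Express app, e.g.
//
//   import express from "express";
//   import { aws } from "./aws";
//
//   const app = express();
//   app.use("/proxy/aws/claude", aws);
//
// so that clients reach the endpoints above under /proxy/aws/claude/v1/....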
@@ -0,0 +1,129 @@
import { RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { keyPool } from "../shared/key-management";
import {
AzureOpenAIModelFamily,
getAzureOpenAIModelFamily,
ModelFamily,
} from "../shared/models";
import { logger } from "../logger";
import { KNOWN_OPENAI_MODELS } from "./openai";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addAzureKey,
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeSignedRequest,
} from "./middleware/request";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
let modelsCache: any = null;
let modelsCacheTime = 0;
function getModelsResponse() {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
let available = new Set<AzureOpenAIModelFamily>();
for (const key of keyPool.list()) {
if (key.isDisabled || key.service !== "azure") continue;
key.modelFamilies.forEach((family) =>
available.add(family as AzureOpenAIModelFamily)
);
}
const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
available = new Set([...available].filter((x) => allowed.has(x)));
const models = KNOWN_OPENAI_MODELS.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "azure",
permission: [
{
id: "modelperm-" + id,
object: "model_permission",
created: new Date().getTime(),
organization: "*",
group: null,
is_blocking: false,
},
],
root: id,
parent: null,
})).filter((model) => available.has(getAzureOpenAIModelFamily(model.id)));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
}
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
res.status(200).json({ ...body, proxy: body.proxy });
};
const azureOpenAIProxy = createQueueMiddleware({
beforeProxy: addAzureKey,
proxyMiddleware: createProxyMiddleware({
target: "will be set by router",
router: (req) => {
if (!req.signedRequest) throw new Error("signedRequest not set");
const { hostname, path } = req.signedRequest;
return `https://${hostname}${path}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([azureOpenaiResponseHandler]),
error: handleProxyError,
},
}),
});
const azureOpenAIRouter = Router();
azureOpenAIRouter.get("/v1/models", handleModelRequest);
azureOpenAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware({
inApi: "openai",
outApi: "openai",
service: "azure",
}),
azureOpenAIProxy
);
azureOpenAIRouter.post(
"/v1/images/generations",
ipLimiter,
createPreprocessorMiddleware({
inApi: "openai-image",
outApi: "openai-image",
service: "azure",
}),
azureOpenAIProxy
);
export const azure = azureOpenAIRouter;
@@ -0,0 +1,106 @@
/**
* Authenticates RisuAI.xyz users using a special x-risu-tk header provided by
* RisuAI.xyz. This lets us rate limit and limit queue concurrency properly,
* since otherwise RisuAI.xyz users share the same IP address and can't be
* distinguished.
* Contributors: @kwaroran
*/
import crypto from "crypto";
import { Request, Response, NextFunction } from "express";
import { logger } from "../logger";
const log = logger.child({ module: "check-risu-token" });
const RISUAI_PUBLIC_KEY = `
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArEXBmHQfy/YdNIu9lfNC
xHbVwb2aYx07pBEmqQJtvVEOISj80fASxg+cMJH+/0a/Z4gQgzUJl0HszRpMXAfu
wmRoetedyC/6CLraHke0Qad/AEHAKwG9A+NwsHRv/cDfP8euAr20cnOyVa79bZsl
1wlHYQQGo+ve+P/FXtjLGJ/KZYr479F5jkIRKZxPE8mRmkhAVS/u+18QM94BzfoI
0LlbwvvCHe18QSX6viDK+HsqhhyYDh+0FgGNJw6xKYLdExbQt77FSukH7NaJmVAs
kYuIJbnAGw5Oq0L6dXFW2DFwlcLz51kPVOmDc159FsQjyuPnta7NiZAANS8KM1CJ
pwIDAQAB`;
let IMPORTED_RISU_KEY: CryptoKey | null = null;
type RisuToken = { id: string; expiresIn: number };
type SignedToken = { data: RisuToken; sig: string };
(async () => {
try {
log.debug("Importing Risu public key");
IMPORTED_RISU_KEY = await crypto.subtle.importKey(
"spki",
Buffer.from(RISUAI_PUBLIC_KEY.replace(/\s/g, ""), "base64"),
{ name: "RSASSA-PKCS1-v1_5", hash: "SHA-256" },
true,
["verify"]
);
log.debug("Imported Risu public key");
} catch (err) {
log.warn({ error: err.message }, "Error importing Risu public key");
IMPORTED_RISU_KEY = null;
}
})();
export async function checkRisuToken(
req: Request,
_res: Response,
next: NextFunction
) {
let header = req.header("x-risu-tk") || null;
if (!header || !IMPORTED_RISU_KEY) {
return next();
}
try {
const { valid, data } = await validCheck(header);
if (!valid || !data) {
req.log.warn(
{ token: header, data },
"Invalid RisuAI token; using IP instead"
);
} else {
req.log.info("RisuAI token validated");
req.risuToken = String(data.id);
}
} catch (err) {
req.log.warn(
{ error: err.message },
"Error validating RisuAI token; using IP instead"
);
}
next();
}
async function validCheck(header: string) {
let tk: SignedToken;
try {
tk = JSON.parse(
Buffer.from(decodeURIComponent(header), "base64").toString("utf-8")
);
} catch (err) {
log.warn({ error: err.message }, "Provided unparseable RisuAI token");
return { valid: false };
}
const data: RisuToken = tk.data;
const sig = Buffer.from(tk.sig, "base64");
if (data.expiresIn < Math.floor(Date.now() / 1000)) {
log.warn({ token: header }, "Provided expired RisuAI token");
return { valid: false };
}
const valid = await crypto.subtle.verify(
{ name: "RSASSA-PKCS1-v1_5" },
IMPORTED_RISU_KEY!,
sig,
Buffer.from(JSON.stringify(data))
);
if (!valid) {
log.warn({ token: header }, "RisuAI token failed signature check");
}
return { valid, data };
}
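// Shape of the x-risu-tk header that validCheck expects (an illustrative
// sketch; a real signature can only be produced with RisuAI's private key):
//
//   const token: SignedToken = {
//     data: {
//       id: "anonymous-user-id",
//       expiresIn: Math.floor(Date.now() / 1000) + 3600, // unix timestamp
//     },
//     sig: "<base64 RSASSA-PKCS1-v1_5 signature over JSON.stringify(data)>",
//   };
//   const headerValue = encodeURIComponent(
//     Buffer.from(JSON.stringify(token)).toString("base64")
//   );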
@@ -1,6 +1,6 @@
import type { Request, RequestHandler } from "express";
import { config } from "../config";
import { authenticate, getUser } from "../shared/users/user-store";

const GATEKEEPER = config.gatekeeper;
const PROXY_KEY = config.proxyKey;
@@ -46,18 +46,29 @@ export const gatekeeper: RequestHandler = (req, res, next) => {
  }

  if (GATEKEEPER === "user_token" && token) {
    // RisuAI users all come from a handful of aws lambda IPs so we cannot use
    // IP alone to distinguish between them and prevent usertoken sharing.
    // Risu sends a signed token in the request headers with an anonymous user
    // ID that we can instead use to associate requests with an individual.
    const ip = req.risuToken?.length
      ? `risu${req.risuToken}-${req.ip}`
      : req.ip;
    const { user, result } = authenticate(token, ip);
    switch (result) {
      case "success":
        req.user = user;
        return next();
      case "limited":
        return res.status(403).json({
          error: `Forbidden: no more IPs can authenticate with this token`,
        });
      case "disabled":
        const bannedUser = getUser(token);
        if (bannedUser?.disabledAt) {
          const reason = bannedUser.disabledReason || "Token disabled";
          return res.status(403).json({ error: `Forbidden: ${reason}` });
        }
    }
  }
}
@@ -0,0 +1,135 @@
import { Request, RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeSignedRequest,
} from "./middleware/request";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
import { addGoogleAIKey } from "./middleware/request/preprocessors/add-google-ai-key";
let modelsCache: any = null;
let modelsCacheTime = 0;
// https://ai.google.dev/models/gemini
// TODO: list models https://ai.google.dev/tutorials/rest_quickstart#list_models
const getModelsResponse = () => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
if (!config.googleAIKey) return { object: "list", data: [] };
const googleAIVariants = ["gemini-pro", "gemini-1.0-pro", "gemini-1.5-pro"];
const models = googleAIVariants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "google",
permission: [],
root: "google",
parent: null,
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
/** Only used for non-streaming requests. */
const googleAIResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
if (req.inboundApi === "openai") {
req.log.info("Transforming Google AI response to OpenAI format");
newBody = transformGoogleAIResponse(body, req);
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
function transformGoogleAIResponse(
resBody: Record<string, any>,
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
const parts = resBody.candidates[0].content?.parts ?? [{ text: "" }];
// Strip a short leading "Name: " speaker prefix if the model echoed one back.
const content = parts[0].text.replace(/^(.{0,50}?): /, () => "");
return {
id: "goo-" + v4(),
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage: {
prompt_tokens: req.promptTokens,
completion_tokens: req.outputTokens,
total_tokens: totalTokens,
},
choices: [
{
message: { role: "assistant", content },
finish_reason: resBody.candidates[0].finishReason,
index: 0,
},
],
};
}
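// Illustrative example of the transformation above (hypothetical values): a
// Google AI response like
//
//   { candidates: [{ content: { parts: [{ text: "Alice: Hello there!" }] },
//       finishReason: "STOP" }] }
//
// becomes, roughly,
//
//   { id: "goo-<uuid>", object: "chat.completion", model: req.body.model,
//     usage: { ... }, choices: [{ message: { role: "assistant",
//       content: "Hello there!" }, finish_reason: "STOP", index: 0 }] }
//
// with the short leading "Alice: " speaker prefix stripped by the replace().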
const googleAIProxy = createQueueMiddleware({
beforeProxy: addGoogleAIKey,
proxyMiddleware: createProxyMiddleware({
target: "bad-target-will-be-rewritten",
router: ({ signedRequest }) => {
const { protocol, hostname, path } = signedRequest;
return `${protocol}//${hostname}${path}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([googleAIResponseHandler]),
error: handleProxyError,
},
}),
});
const googleAIRouter = Router();
googleAIRouter.get("/v1/models", handleModelRequest);
// OpenAI-to-Google AI compatibility endpoint.
googleAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware({
inApi: "openai",
outApi: "google-ai",
service: "google-ai",
}),
googleAIProxy
);
export const googleAI = googleAIRouter;
@@ -1,106 +0,0 @@
/* Pretends to be a KoboldAI API endpoint and translates incoming Kobold
requests to OpenAI API equivalents. */
import { Request, Response, Router } from "express";
import http from "http";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { logger } from "../logger";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addKey,
createPreprocessorMiddleware,
finalizeBody,
languageFilter,
transformKoboldPayload,
} from "./middleware/request";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
export const handleModelRequest = (_req: Request, res: Response) => {
res.status(200).json({ result: "Connected to OpenAI reverse proxy" });
};
export const handleSoftPromptsRequest = (_req: Request, res: Response) => {
res.status(200).json({ soft_prompts_list: [] });
};
const rewriteRequest = (
proxyReq: http.ClientRequest,
req: Request,
res: Response
) => {
req.body.stream = false;
const rewriterPipeline = [
addKey,
transformKoboldPayload,
languageFilter,
finalizeBody,
];
try {
for (const rewriter of rewriterPipeline) {
rewriter(proxyReq, req, res, {});
}
} catch (error) {
logger.error(error, "Error while executing proxy rewriter");
proxyReq.destroy(error as Error);
}
};
const koboldResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
const koboldResponse = {
results: [{ text: body.choices[0].message.content }],
model: body.model,
...(config.promptLogging && {
proxy_note: `Prompt logging is enabled on this proxy instance. See ${req.get(
"host"
)} for more information.`,
}),
};
res.send(JSON.stringify(koboldResponse));
};
const koboldOaiProxy = createProxyMiddleware({
target: "https://api.openai.com",
changeOrigin: true,
pathRewrite: {
"^/api/v1/generate": "/v1/chat/completions",
},
on: {
proxyReq: rewriteRequest,
proxyRes: createOnProxyResHandler([koboldResponseHandler]),
error: handleProxyError,
},
selfHandleResponse: true,
logger,
});
const koboldRouter = Router();
koboldRouter.get("/api/v1/model", handleModelRequest);
koboldRouter.get("/api/v1/config/soft_prompts_list", handleSoftPromptsRequest);
koboldRouter.post(
"/api/v1/generate",
ipLimiter,
createPreprocessorMiddleware({ inApi: "kobold", outApi: "openai" }),
koboldOaiProxy
);
koboldRouter.use((req, res) => {
logger.warn(`Unhandled kobold request: ${req.method} ${req.path}`);
res.status(404).json({ error: "Not found" });
});
export const kobold = koboldRouter;
@@ -1,143 +1,282 @@
import { Request, Response } from "express";
import http from "http";
import httpProxy from "http-proxy";
import { ZodError } from "zod";
import { generateErrorMessage } from "zod-error";
import { assertNever } from "../../shared/utils";
import { QuotaExceededError } from "./request/preprocessors/apply-quota-limits";
import { sendErrorToClient } from "./response/error-generator";
import { HttpError } from "../../shared/errors";

const OPENAI_CHAT_COMPLETION_ENDPOINT = "/v1/chat/completions";
const OPENAI_TEXT_COMPLETION_ENDPOINT = "/v1/completions";
const OPENAI_EMBEDDINGS_ENDPOINT = "/v1/embeddings";
const OPENAI_IMAGE_COMPLETION_ENDPOINT = "/v1/images/generations";
const ANTHROPIC_COMPLETION_ENDPOINT = "/v1/complete";
const ANTHROPIC_MESSAGES_ENDPOINT = "/v1/messages";
const ANTHROPIC_SONNET_COMPAT_ENDPOINT = "/v1/sonnet";
const ANTHROPIC_OPUS_COMPAT_ENDPOINT = "/v1/opus";

export function isTextGenerationRequest(req: Request) {
  return (
    req.method === "POST" &&
    [
      OPENAI_CHAT_COMPLETION_ENDPOINT,
      OPENAI_TEXT_COMPLETION_ENDPOINT,
      ANTHROPIC_COMPLETION_ENDPOINT,
      ANTHROPIC_MESSAGES_ENDPOINT,
      ANTHROPIC_SONNET_COMPAT_ENDPOINT,
      ANTHROPIC_OPUS_COMPAT_ENDPOINT,
    ].some((endpoint) => req.path.startsWith(endpoint))
  );
}

export function isImageGenerationRequest(req: Request) {
  return (
    req.method === "POST" &&
    req.path.startsWith(OPENAI_IMAGE_COMPLETION_ENDPOINT)
  );
}

export function isEmbeddingsRequest(req: Request) {
  return (
    req.method === "POST" && req.path.startsWith(OPENAI_EMBEDDINGS_ENDPOINT)
  );
}

export function sendProxyError(
  req: Request,
  res: Response,
  statusCode: number,
  statusMessage: string,
  errorPayload: Record<string, any>
) {
  const msg =
    statusCode === 500
      ? `The proxy encountered an error while trying to process your prompt.`
      : `The proxy encountered an error while trying to send your prompt to the upstream service.`;

  sendErrorToClient({
    options: {
      format: req.inboundApi,
      title: `Proxy error (HTTP ${statusCode} ${statusMessage})`,
      message: `${msg} Further technical details are provided below.`,
      obj: errorPayload,
      reqId: req.id,
      model: req.body?.model,
    },
    req,
    res,
  });
}

export const handleProxyError: httpProxy.ErrorCallback = (err, req, res) => {
  req.log.error(err, `Error during http-proxy-middleware request`);
  classifyErrorAndSend(err, req as Request, res as Response);
};

export const classifyErrorAndSend = (
  err: Error,
  req: Request,
  res: Response
) => {
  try {
    const { statusCode, statusMessage, userMessage, ...errorDetails } =
      classifyError(err);
    sendProxyError(req, res, statusCode, statusMessage, {
      error: { message: userMessage, ...errorDetails },
    });
  } catch (error) {
    req.log.error(error, `Error writing error response headers, giving up.`);
    res.end();
  }
};

function classifyError(err: Error): {
  /** HTTP status code returned to the client. */
  statusCode: number;
  /** HTTP status message returned to the client. */
  statusMessage: string;
  /** Message displayed to the user. */
  userMessage: string;
  /** Short error type, e.g. "proxy_validation_error". */
  type: string;
} & Record<string, any> {
  const defaultError = {
    statusCode: 500,
    statusMessage: "Internal Server Error",
    userMessage: `Reverse proxy error: ${err.message}`,
    type: "proxy_internal_error",
    stack: err.stack,
  };

  switch (err.constructor.name) {
    case "HttpError":
      const statusCode = (err as HttpError).status;
      return {
        statusCode,
        statusMessage: `HTTP ${statusCode} ${http.STATUS_CODES[statusCode]}`,
        userMessage: `Reverse proxy error: ${err.message}`,
        type: "proxy_http_error",
      };
    case "BadRequestError":
      return {
        statusCode: 400,
        statusMessage: "Bad Request",
        userMessage: `Request is not valid. (${err.message})`,
        type: "proxy_bad_request",
      };
    case "NotFoundError":
      return {
        statusCode: 404,
        statusMessage: "Not Found",
        userMessage: `Requested resource not found. (${err.message})`,
        type: "proxy_not_found",
      };
    case "PaymentRequiredError":
      return {
        statusCode: 402,
        statusMessage: "No Keys Available",
        userMessage: err.message,
        type: "proxy_no_keys_available",
      };
    case "ZodError":
      const userMessage = generateErrorMessage((err as ZodError).issues, {
        prefix: "Request validation failed. ",
        path: { enabled: true, label: null, type: "breadcrumbs" },
        code: { enabled: false },
        maxErrors: 3,
        transform: ({ issue, ...rest }) => {
          return `At '${rest.pathComponent}': ${issue.message}`;
        },
      });
      return {
        statusCode: 400,
        statusMessage: "Bad Request",
        userMessage,
        type: "proxy_validation_error",
      };
    case "ZoomerForbiddenError":
      // Mimics a ban notice from OpenAI, thrown when blockZoomerOrigins blocks
      // a request.
      return {
        statusCode: 403,
        statusMessage: "Forbidden",
        userMessage: `Your account has been disabled for violating our terms of service.`,
        type: "organization_account_disabled",
        code: "policy_violation",
      };
    case "ForbiddenError":
      return {
        statusCode: 403,
        statusMessage: "Forbidden",
        userMessage: `Request is not allowed. (${err.message})`,
        type: "proxy_forbidden",
      };
    case "QuotaExceededError":
      return {
        statusCode: 429,
        statusMessage: "Too Many Requests",
        userMessage: `You've exceeded your token quota for this model type.`,
        type: "proxy_quota_exceeded",
        info: (err as QuotaExceededError).quotaInfo,
      };
    case "Error":
      if ("code" in err) {
        switch (err.code) {
          case "ENOTFOUND":
            return {
              statusCode: 502,
              statusMessage: "Bad Gateway",
              userMessage: `Reverse proxy encountered a DNS error while trying to connect to the upstream service.`,
              type: "proxy_network_error",
              code: err.code,
            };
          case "ECONNREFUSED":
            return {
              statusCode: 502,
              statusMessage: "Bad Gateway",
              userMessage: `Reverse proxy couldn't connect to the upstream service.`,
              type: "proxy_network_error",
              code: err.code,
            };
          case "ECONNRESET":
            return {
              statusCode: 504,
              statusMessage: "Gateway Timeout",
              userMessage: `Reverse proxy timed out while waiting for the upstream service to respond.`,
              type: "proxy_network_error",
              code: err.code,
            };
        }
      }
      return defaultError;
    default:
      return defaultError;
  }
}

export function getCompletionFromBody(req: Request, body: Record<string, any>) {
  const format = req.outboundApi;
  switch (format) {
    case "openai":
    case "mistral-ai":
      // Can be null if the model wants to invoke tools rather than return a
      // completion.
      return body.choices[0].message.content || "";
    case "openai-text":
      return body.choices[0].text;
    case "anthropic-chat":
      if (!body.content) {
        req.log.error(
          { body: JSON.stringify(body) },
          "Received empty Anthropic chat completion"
        );
        return "";
      }
      return body.content
        .map(({ text, type }: { type: string; text: string }) =>
          type === "text" ? text : `[Unsupported content type: ${type}]`
        )
        .join("\n");
    case "anthropic-text":
      if (!body.completion) {
        req.log.error(
          { body: JSON.stringify(body) },
          "Received empty Anthropic text completion"
        );
        return "";
      }
      return body.completion.trim();
    case "google-ai":
      if ("choices" in body) {
        return body.choices[0].message.content;
      }
      return body.candidates[0].content.parts[0].text;
    case "openai-image":
      return body.data?.map((item: any) => item.url).join("\n");
    default:
      assertNever(format);
  }
}

export function getModelFromBody(req: Request, body: Record<string, any>) {
  const format = req.outboundApi;
  switch (format) {
    case "openai":
    case "openai-text":
    case "mistral-ai":
      return body.model;
    case "openai-image":
      return req.body.model;
    case "anthropic-chat":
    case "anthropic-text":
      // Anthropic confirms the model in the response, but AWS Claude doesn't.
      return body.model || req.body.model;
    case "google-ai":
      // Google doesn't confirm the model in the response.
      return req.body.model;
    default:
      assertNever(format);
  }
}
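// Illustrative example (not part of the original file): an upstream
// connection failure surfaces to the client roughly as
//
//   classifyError(Object.assign(new Error("connect ECONNREFUSED"), { code: "ECONNREFUSED" }))
//     -> { statusCode: 502, statusMessage: "Bad Gateway",
//          userMessage: "Reverse proxy couldn't connect to the upstream service.",
//          type: "proxy_network_error", code: "ECONNREFUSED" }
//
// which classifyErrorAndSend then hands to sendProxyError for formatting in
// the client's inbound API format.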
@@ -1,65 +0,0 @@
import { Key, keyPool } from "../../../key-management";
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";
/** Add a key that can service this request to the request object. */
export const addKey: ProxyRequestMiddleware = (proxyReq, req) => {
let assignedKey: Key;
if (!isCompletionRequest(req)) {
// Horrible, horrible hack to stop the proxy from complaining about clients
// not sending a model when they are requesting the list of models (which
// requires a key, but obviously not a model).
// TODO: shouldn't even proxy /models to the upstream API, just fake it
// using the models our key pool has available.
req.body.model = "gpt-3.5-turbo";
}
if (!req.inboundApi || !req.outboundApi) {
const err = new Error(
"Request API format missing. Did you forget to add the request preprocessor to your router?"
);
req.log.error(
{ in: req.inboundApi, out: req.outboundApi, path: req.path },
err.message
);
throw err;
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
// This should happen somewhere else but addKey is guaranteed to run first.
req.isStreaming = req.body.stream === true || req.body.stream === "true";
req.body.stream = req.isStreaming;
// Anthropic support has a special endpoint that accepts OpenAI-formatted
// requests and translates them into Anthropic requests. On this endpoint,
// the requested model is an OpenAI one even though we're actually sending
// an Anthropic request.
// For such cases, ignore the requested model entirely.
if (req.inboundApi === "openai" && req.outboundApi === "anthropic") {
req.log.debug("Using an Anthropic key for an OpenAI-compatible request");
assignedKey = keyPool.get("claude-v1");
} else {
assignedKey = keyPool.get(req.body.model);
}
req.key = assignedKey;
req.log.info(
{
key: assignedKey.hash,
model: req.body?.model,
fromApi: req.inboundApi,
toApi: req.outboundApi,
},
"Assigned key to request"
);
if (assignedKey.service === "anthropic") {
proxyReq.setHeader("X-API-Key", assignedKey.key);
} else {
proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
}
};
@@ -1,129 +0,0 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../config";
import { countTokens } from "../../../tokenization";
import { RequestPreprocessor } from ".";
const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
/**
* Assigns `req.promptTokens` and `req.outputTokens` based on the request body
* and outbound API format, which combined determine the size of the context.
* If the context is too large, an error is thrown.
* This preprocessor should run after any preprocessor that transforms the
* request body.
*/
export const checkContextSize: RequestPreprocessor = async (req) => {
let prompt;
switch (req.outboundApi) {
case "openai":
req.outputTokens = req.body.max_tokens;
prompt = req.body.messages;
break;
case "anthropic":
req.outputTokens = req.body.max_tokens_to_sample;
prompt = req.body.prompt;
break;
default:
throw new Error(`Unknown outbound API: ${req.outboundApi}`);
}
const result = await countTokens({ req, prompt, service: req.outboundApi });
req.promptTokens = result.token_count;
// TODO: Remove once token counting is stable
req.log.debug({ result: result }, "Counted prompt tokens.");
req.debug = req.debug ?? {};
req.debug = { ...req.debug, ...result };
maybeReassignModel(req);
validateContextSize(req);
};
function validateContextSize(req: Request) {
assertRequestHasTokenCounts(req);
const promptTokens = req.promptTokens;
const outputTokens = req.outputTokens;
const contextTokens = promptTokens + outputTokens;
const model = req.body.model;
const proxyMax =
(req.outboundApi === "openai" ? OPENAI_MAX_CONTEXT : CLAUDE_MAX_CONTEXT) ||
Number.MAX_SAFE_INTEGER;
let modelMax = 0;
if (model.match(/gpt-3.5-turbo-16k/)) {
modelMax = 16384;
} else if (model.match(/gpt-3.5-turbo/)) {
modelMax = 4096;
} else if (model.match(/gpt-4-32k/)) {
modelMax = 32768;
} else if (model.match(/gpt-4/)) {
modelMax = 8192;
} else if (model.match(/claude-(?:instant-)?v1(?:\.\d)?(?:-100k)/)) {
modelMax = 100000;
} else if (model.match(/claude-(?:instant-)?v1(?:\.\d)?$/)) {
modelMax = 9000;
} else if (model.match(/claude-2/)) {
modelMax = 100000;
} else {
// Don't really want to throw here because I don't want to have to update
// this ASAP every time a new model is released.
req.log.warn({ model }, "Unknown model, using 100k token limit.");
modelMax = 100000;
}
const finalMax = Math.min(proxyMax, modelMax);
z.number()
.int()
.max(finalMax, {
message: `Your request exceeds the context size limit for this model or proxy. (max: ${finalMax} tokens, requested: ${promptTokens} prompt + ${outputTokens} output = ${contextTokens} context tokens)`,
})
.parse(contextTokens);
req.log.debug(
{ promptTokens, outputTokens, contextTokens, modelMax, proxyMax },
"Prompt size validated"
);
req.debug.prompt_tokens = promptTokens;
req.debug.max_model_tokens = modelMax;
req.debug.max_proxy_tokens = proxyMax;
}
function assertRequestHasTokenCounts(
req: Request
): asserts req is Request & { promptTokens: number; outputTokens: number } {
z.object({
promptTokens: z.number().int().min(1),
outputTokens: z.number().int().min(1),
})
.nonstrict()
.parse(req);
}
/**
* For OpenAI-to-Anthropic requests, users can't specify the model, so we need
* to pick one based on the final context size. Ideally this would happen in
* the `transformOutboundPayload` preprocessor, but we don't have the context
* size at that point (and need a transformed body to calculate it).
*/
function maybeReassignModel(req: Request) {
if (req.inboundApi !== "openai" || req.outboundApi !== "anthropic") {
return;
}
const bigModel = process.env.CLAUDE_BIG_MODEL || "claude-v1-100k";
const contextSize = req.promptTokens! + req.outputTokens!;
if (contextSize > 8500) {
req.log.debug(
{ model: bigModel, contextSize },
"Using Claude 100k model for OpenAI-to-Anthropic request"
);
req.body.model = bigModel;
}
// Small model is the default already set in `transformOutboundPayload`
}
@@ -2,21 +2,30 @@
import type { Request } from "express";
import type { ClientRequest } from "http";
import type { ProxyReqCallback } from "http-proxy";

export { createOnProxyReqHandler } from "./onproxyreq-factory";
export {
  createPreprocessorMiddleware,
  createEmbeddingsPreprocessorMiddleware,
} from "./preprocessor-factory";

// Express middleware (runs before http-proxy-middleware, can be async)
export { addAzureKey } from "./preprocessors/add-azure-key";
export { applyQuotaLimits } from "./preprocessors/apply-quota-limits";
export { validateContextSize } from "./preprocessors/validate-context-size";
export { countPromptTokens } from "./preprocessors/count-prompt-tokens";
export { languageFilter } from "./preprocessors/language-filter";
export { setApiFormat } from "./preprocessors/set-api-format";
export { signAwsRequest } from "./preprocessors/sign-aws-request";
export { transformOutboundPayload } from "./preprocessors/transform-outbound-payload";

// http-proxy-middleware callbacks (runs on onProxyReq, cannot be async)
export { addKey, addKeyForEmbeddingsRequest } from "./onproxyreq/add-key";
export { addAnthropicPreamble } from "./onproxyreq/add-anthropic-preamble";
export { blockZoomerOrigins } from "./onproxyreq/block-zoomer-origins";
export { checkModelFamily } from "./onproxyreq/check-model-family";
export { finalizeBody } from "./onproxyreq/finalize-body";
export { finalizeSignedRequest } from "./onproxyreq/finalize-signed-request";
export { stripHeaders } from "./onproxyreq/strip-headers";

/**
 * Middleware that runs prior to the request being handled by http-proxy-
@@ -35,7 +44,7 @@
export type RequestPreprocessor = (req: Request) => void | Promise<void>;

/**
 * Callbacks that run immediately before the request is sent to the API in
 * response to http-proxy-middleware's `proxyReq` event.
 *
 * Async functions cannot be used here as HPM's event emitter is not async and
@@ -45,4 +54,7 @@
 * first attempt is rate limited and the request is automatically retried by the
 * request queue middleware.
 */
export type HPMRequestCallback = ProxyReqCallback<ClientRequest, Request>;

export const forceModel = (model: string) => (req: Request) =>
  void (req.body.model = model);
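// Usage sketch for forceModel (hypothetical values, not from the source):
// since it returns a RequestPreprocessor, it can be slotted into the
// preprocessor pipeline to pin every request on an endpoint to one model:
//
//   createPreprocessorMiddleware(
//     { inApi: "openai", outApi: "openai", service: "openai" },
//     { beforeTransform: [forceModel("gpt-4")] }
//   );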
@@ -1,51 +0,0 @@
import { Request } from "express";
import { config } from "../../../config";
import { logger } from "../../../logger";
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";
const DISALLOWED_REGEX =
/[\u2E80-\u2E99\u2E9B-\u2EF3\u2F00-\u2FD5\u3005\u3007\u3021-\u3029\u3038-\u303B\u3400-\u4DB5\u4E00-\u9FD5\uF900-\uFA6D\uFA70-\uFAD9]/;
// Our shitty free-tier VMs will fall over if we test every single character in
// each 15k character request ten times a second. So we'll just sample 20% of
// the characters and hope that's enough.
const containsDisallowedCharacters = (text: string) => {
const sampleSize = Math.ceil(text.length * 0.2);
const sample = text
.split("")
.sort(() => 0.5 - Math.random())
.slice(0, sampleSize)
.join("");
return DISALLOWED_REGEX.test(sample);
};
/** Block requests containing too many disallowed characters. */
export const languageFilter: ProxyRequestMiddleware = (_proxyReq, req) => {
if (!config.rejectDisallowed) {
return;
}
if (isCompletionRequest(req)) {
const combinedText = getPromptFromRequest(req);
if (containsDisallowedCharacters(combinedText)) {
logger.warn(`Blocked request containing bad characters`);
_proxyReq.destroy(new Error(config.rejectMessage));
}
}
};
function getPromptFromRequest(req: Request) {
const service = req.outboundApi;
const body = req.body;
switch (service) {
case "anthropic":
return body.prompt;
case "openai":
return body.messages
.map((m: { content: string }) => m.content)
.join("\n");
default:
throw new Error(`Unknown service: ${service}`);
}
}
@@ -1,16 +0,0 @@
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";
/**
* Don't allow multiple completions to be requested to prevent abuse.
* OpenAI-only, Anthropic provides no such parameter.
**/
export const limitCompletions: ProxyRequestMiddleware = (_proxyReq, req) => {
if (isCompletionRequest(req) && req.outboundApi === "openai") {
const originalN = req.body?.n || 1;
req.body.n = 1;
if (originalN !== req.body.n) {
req.log.warn(`Limiting completion choices from ${originalN} to 1`);
}
}
};
@@ -0,0 +1,45 @@
import {
applyQuotaLimits,
blockZoomerOrigins,
checkModelFamily,
HPMRequestCallback,
stripHeaders,
} from "./index";
type ProxyReqHandlerFactoryOptions = { pipeline: HPMRequestCallback[] };
/**
* Returns an http-proxy-middleware request handler that runs the given set of
* onProxyReq callback functions in sequence.
*
* These will run each time a request is proxied, including on automatic retries
* by the queue after encountering a rate limit.
*/
export const createOnProxyReqHandler = ({
pipeline,
}: ProxyReqHandlerFactoryOptions): HPMRequestCallback => {
const callbackPipeline = [
checkModelFamily,
applyQuotaLimits,
blockZoomerOrigins,
stripHeaders,
...pipeline,
];
return (proxyReq, req, res, options) => {
// The streaming flag must be set before any other onProxyReq handler runs,
// as it may influence the behavior of subsequent handlers.
// Image generation requests can't be streamed.
// TODO: this flag is set in too many places
req.isStreaming =
req.isStreaming || req.body.stream === true || req.body.stream === "true";
req.body.stream = req.isStreaming;
try {
for (const fn of callbackPipeline) {
fn(proxyReq, req, res, options);
}
} catch (error) {
proxyReq.destroy(error);
}
};
};
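// For example (illustrative): the AWS router above builds its handler with
// createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }), so each
// proxied attempt runs the callbacks in this order:
//
//   checkModelFamily -> applyQuotaLimits -> blockZoomerOrigins
//     -> stripHeaders -> finalizeSignedRequest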
@@ -1,24 +1,25 @@
import { AnthropicKey, Key } from "../../../../shared/key-management";
import { isTextGenerationRequest } from "../../common";
import { HPMRequestCallback } from "../index";

/**
 * Some keys require the prompt to start with `\n\nHuman:`. There is no way to
 * know this without trying to send the request and seeing if it fails. If a
 * key is marked as requiring a preamble, it will be added here.
 */
export const addAnthropicPreamble: HPMRequestCallback = (_proxyReq, req) => {
  if (
    !isTextGenerationRequest(req) ||
    req.key?.service !== "anthropic" ||
    req.outboundApi !== "anthropic-text"
  ) {
    return;
  }

  let preamble = "";
  let prompt = req.body.prompt;
  assertAnthropicKey(req.key);
  if (req.key.requiresPreamble && prompt) {
    preamble = prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
    req.log.debug({ key: req.key.hash, preamble }, "Adding preamble to prompt");
  }
@@ -0,0 +1,116 @@
import { Key, OpenAIKey, keyPool } from "../../../../shared/key-management";
import { isEmbeddingsRequest } from "../../common";
import { HPMRequestCallback } from "../index";
import { assertNever } from "../../../../shared/utils";
export const addKey: HPMRequestCallback = (proxyReq, req) => {
let assignedKey: Key;
const { service, inboundApi, outboundApi, body } = req;
if (!inboundApi || !outboundApi) {
const err = new Error(
"Request API format missing. Did you forget to add the request preprocessor to your router?"
);
req.log.error({ inboundApi, outboundApi, path: req.path }, err.message);
throw err;
}
if (!body?.model) {
throw new Error("You must specify a model with your request.");
}
if (inboundApi === outboundApi) {
assignedKey = keyPool.get(body.model, service);
} else {
switch (outboundApi) {
// If we are translating between API formats we may need to select a model
// for the user, because the provided model is for the inbound API.
// TODO: This whole else condition is probably no longer needed since API
// translation now reassigns the model earlier in the request pipeline.
case "anthropic-chat":
case "anthropic-text":
assignedKey = keyPool.get("claude-v1", service);
break;
case "openai-text":
assignedKey = keyPool.get("gpt-3.5-turbo-instruct", service);
break;
case "openai-image":
assignedKey = keyPool.get("dall-e-3", service);
break;
case "openai":
case "google-ai":
case "mistral-ai":
throw new Error(
`add-key should not be called for outbound API ${outboundApi}`
);
default:
assertNever(outboundApi);
}
}
req.key = assignedKey;
req.log.info(
{ key: assignedKey.hash, model: body.model, inboundApi, outboundApi },
"Assigned key to request"
);
// TODO: KeyProvider should assemble all necessary headers
switch (assignedKey.service) {
case "anthropic":
proxyReq.setHeader("X-API-Key", assignedKey.key);
break;
case "openai":
const key: OpenAIKey = assignedKey as OpenAIKey;
if (key.organizationId) {
proxyReq.setHeader("OpenAI-Organization", key.organizationId);
}
proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "mistral-ai":
proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "azure":
const azureKey = assignedKey.key;
proxyReq.setHeader("api-key", azureKey);
break;
case "aws":
case "google-ai":
throw new Error("add-key should not be used for this service.");
default:
assertNever(assignedKey.service);
}
};
/**
* Special case for embeddings requests which don't go through the normal
* request pipeline.
*/
export const addKeyForEmbeddingsRequest: HPMRequestCallback = (
proxyReq,
req
) => {
if (!isEmbeddingsRequest(req)) {
throw new Error(
"addKeyForEmbeddingsRequest called on non-embeddings request"
);
}
if (req.inboundApi !== "openai") {
throw new Error("Embeddings requests must be from OpenAI");
}
req.body = { input: req.body.input, model: "text-embedding-ada-002" };
const key = keyPool.get("text-embedding-ada-002", "openai") as OpenAIKey;
req.key = key;
req.log.info(
{ key: key.hash, toApi: req.outboundApi },
"Assigned Turbo key to embeddings request"
);
proxyReq.setHeader("Authorization", `Bearer ${key.key}`);
if (key.organizationId) {
proxyReq.setHeader("OpenAI-Organization", key.organizationId);
}
};
@@ -1,12 +1,11 @@
import { HPMRequestCallback } from "../index";

const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai".split(",");

class ZoomerForbiddenError extends Error {
  constructor(message: string) {
    super(message);
    this.name = "ZoomerForbiddenError";
  }
}
@@ -14,11 +13,7 @@
/**
 * Blocks requests from Janitor AI users with a fake, scary error message so I
 * stop getting emails asking for tech support.
 */
export const blockZoomerOrigins: HPMRequestCallback = (_proxyReq, req) => {
  const origin = req.headers.origin || req.headers.referer;
  if (origin && DISALLOWED_ORIGIN_SUBSTRINGS.some((s) => origin.includes(s))) {
    // Venus-derivatives send a test prompt to check if the proxy is working.
@@ -27,7 +22,7 @@
      return;
    }
    throw new ZoomerForbiddenError(
      `Your access was terminated due to violation of our policies, please check your email for more information. If you believe this is in error and would like to appeal, please contact us through our help center at help.openai.com.`
    );
  }
@@ -0,0 +1,14 @@
import { HPMRequestCallback } from "../index";
import { config } from "../../../../config";
import { ForbiddenError } from "../../../../shared/errors";
import { getModelFamilyForRequest } from "../../../../shared/models";
/**
* Ensures the selected model family is enabled by the proxy configuration.
**/
export const checkModelFamily: HPMRequestCallback = (_proxyReq, req, res) => {
const family = getModelFamilyForRequest(req);
if (!config.allowedModelFamilies.includes(family)) {
throw new ForbiddenError(`Model family '${family}' is not enabled on this proxy`);
}
};
@@ -1,9 +1,18 @@
import { fixRequestBody } from "http-proxy-middleware";
import type { HPMRequestCallback } from "../index";

/** Finalize the rewritten request body. Must be the last rewriter. */
export const finalizeBody: HPMRequestCallback = (proxyReq, req) => {
  if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
    // For image generation requests, remove stream flag.
    if (req.outboundApi === "openai-image") {
      delete req.body.stream;
    }
    // For anthropic text to chat requests, remove undefined prompt.
    if (req.outboundApi === "anthropic-chat") {
      delete req.body.prompt;
    }
    const updatedBody = JSON.stringify(req.body);
    proxyReq.setHeader("Content-Length", Buffer.byteLength(updatedBody));
    (req as any).rawBody = Buffer.from(updatedBody);
@@ -0,0 +1,26 @@
import type { HPMRequestCallback } from "../index";
/**
* For AWS/Azure/Google requests, the body is signed earlier in the request
* pipeline, before the proxy middleware. This function just assigns the path
* and headers to the proxy request.
*/
export const finalizeSignedRequest: HPMRequestCallback = (proxyReq, req) => {
if (!req.signedRequest) {
throw new Error("Expected req.signedRequest to be set");
}
// The path depends on the selected model and the assigned key's region.
proxyReq.path = req.signedRequest.path;
// Amazon doesn't want extra headers, so we need to remove all of them and
// reassign only the ones specified in the signed request.
proxyReq.getRawHeaderNames().forEach(proxyReq.removeHeader.bind(proxyReq));
Object.entries(req.signedRequest.headers).forEach(([key, value]) => {
proxyReq.setHeader(key, value);
});
// Don't use fixRequestBody here because it adds a content-length header.
// Amazon doesn't want that and it breaks the signature.
proxyReq.write(req.signedRequest.body);
};
@@ -0,0 +1,16 @@
import { HPMRequestCallback } from "../index";
/**
* Removes origin and referer headers before sending the request to the API for
* privacy reasons.
**/
export const stripHeaders: HPMRequestCallback = (proxyReq) => {
proxyReq.setHeader("origin", "");
proxyReq.setHeader("referer", "");
proxyReq.removeHeader("cf-connecting-ip");
proxyReq.removeHeader("forwarded");
proxyReq.removeHeader("true-client-ip");
proxyReq.removeHeader("x-forwarded-for");
proxyReq.removeHeader("x-real-ip");
};
@@ -1,36 +0,0 @@
import { RequestHandler } from "express";
import { handleInternalError } from "../common";
import {
RequestPreprocessor,
checkContextSize,
setApiFormat,
transformOutboundPayload,
} from ".";
/**
* Returns a middleware function that processes the request body into the given
* API format, and then sequentially runs the given additional preprocessors.
*/
export const createPreprocessorMiddleware = (
apiFormat: Parameters<typeof setApiFormat>[0],
additionalPreprocessors?: RequestPreprocessor[]
): RequestHandler => {
const preprocessors: RequestPreprocessor[] = [
setApiFormat(apiFormat),
transformOutboundPayload,
checkContextSize,
...(additionalPreprocessors ?? []),
];
return async function executePreprocessors(req, res, next) {
try {
for (const preprocessor of preprocessors) {
await preprocessor(req);
}
next();
} catch (error) {
req.log.error(error, "Error while executing request preprocessor");
handleInternalError(error as Error, req, res);
}
};
};
@@ -0,0 +1,158 @@
import { RequestHandler } from "express";
import { ZodIssue } from "zod";
import { initializeSseStream } from "../../../shared/streaming";
import { classifyErrorAndSend } from "../common";
import {
RequestPreprocessor,
validateContextSize,
countPromptTokens,
setApiFormat,
transformOutboundPayload,
languageFilter,
} from ".";
type RequestPreprocessorOptions = {
/**
* Functions to run before the request body is transformed between API
* formats. Use this to change the behavior of the transformation, such as for
* endpoints which can accept multiple API formats.
*/
beforeTransform?: RequestPreprocessor[];
/**
* Functions to run after the request body is transformed and token counts are
* assigned. Use this to perform validation or other actions that depend on
* the request body being in the final API format.
*/
afterTransform?: RequestPreprocessor[];
};
/**
* Returns a middleware function that processes the request body into the given
* API format, and then sequentially runs the given additional preprocessors.
*
* These run first in the request lifecycle, a single time per request before it
* is added to the request queue. They aren't run again if the request is
* re-attempted after a rate limit.
*
* To run a preprocessor on every re-attempt, pass it to createQueueMiddleware.
* It will run after these preprocessors, but before the request is sent to
* http-proxy-middleware.
*/
export const createPreprocessorMiddleware = (
apiFormat: Parameters<typeof setApiFormat>[0],
{ beforeTransform, afterTransform }: RequestPreprocessorOptions = {}
): RequestHandler => {
const preprocessors: RequestPreprocessor[] = [
setApiFormat(apiFormat),
...(beforeTransform ?? []),
transformOutboundPayload,
countPromptTokens,
languageFilter,
...(afterTransform ?? []),
validateContextSize,
];
return async (...args) => executePreprocessors(preprocessors, args);
};
/**
* Returns a middleware function that specifically prepares requests for
* OpenAI's embeddings API. Tokens are not counted because embeddings requests
* are basically free.
*/
export const createEmbeddingsPreprocessorMiddleware = (): RequestHandler => {
const preprocessors: RequestPreprocessor[] = [
setApiFormat({ inApi: "openai", outApi: "openai", service: "openai" }),
(req) => void (req.promptTokens = req.outputTokens = 0),
];
return async (...args) => executePreprocessors(preprocessors, args);
};
async function executePreprocessors(
preprocessors: RequestPreprocessor[],
[req, res, next]: Parameters<RequestHandler>
) {
handleTestMessage(req, res, next);
if (res.headersSent) return;
try {
for (const preprocessor of preprocessors) {
await preprocessor(req);
}
next();
} catch (error) {
if (error.constructor.name === "ZodError") {
const msg = error?.issues
?.map((issue: ZodIssue) => issue.message)
.join("; ");
req.log.info({ issues: msg }, "Prompt validation failed.");
} else {
req.log.error(error, "Error while executing request preprocessor");
}
// If the requester has opted into streaming, the client probably won't
// handle a non-eventstream response, but we haven't initialized the SSE
// stream yet as that is typically done later by the request queue. We'll
// do that here and then call classifyErrorAndSend to use the streaming
// error handler.
const { stream } = req.body;
const isStreaming = stream === "true" || stream === true;
if (isStreaming && !res.headersSent) {
initializeSseStream(res);
}
classifyErrorAndSend(error as Error, req, res);
}
}
/**
* Bypasses the API call and returns a test message response if the request body
* is a known test message from SillyTavern. Otherwise these messages just waste
* API request quota and confuse users when the proxy is busy, because ST always
* makes them with `stream: false` (which is not allowed when the proxy is busy)
*/
const handleTestMessage: RequestHandler = (req, res) => {
const { method, body } = req;
if (method !== "POST") {
return;
}
if (isTestMessage(body)) {
req.log.info({ body }, "Received test message. Skipping API call.");
res.json({
id: "test-message",
object: "chat.completion",
created: Date.now(),
model: body.model,
// openai chat
choices: [
{
message: { role: "assistant", content: "Hello!" },
finish_reason: "stop",
index: 0,
},
],
// anthropic text
completion: "Hello!",
// anthropic chat
content: [{ type: "text", text: "Hello!" }],
proxy_note:
"This response was generated by the proxy's test message handler and did not go to the API.",
});
}
};
function isTestMessage(body: any) {
const { messages, prompt } = body;
if (messages) {
return (
messages.length === 1 &&
messages[0].role === "user" &&
messages[0].content === "Hi"
);
} else {
return (
prompt?.trim() === "Human: Hi\n\nAssistant:" ||
prompt?.startsWith("Hi\n\n")
);
}
}
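// Examples of bodies treated as test messages (derived from the checks above):
//
//   { model: "gpt-4", messages: [{ role: "user", content: "Hi" }] }
//   { model: "claude-v2", prompt: "Human: Hi\n\nAssistant:" }
//
// Anything else proceeds through the normal preprocessor pipeline.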
@@ -0,0 +1,78 @@
import {
APIFormat,
AzureOpenAIKey,
keyPool,
} from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
export const addAzureKey: RequestPreprocessor = (req) => {
const validAPIs: APIFormat[] = ["openai", "openai-image"];
const apisValid = [req.outboundApi, req.inboundApi].every((api) =>
validAPIs.includes(api)
);
const serviceValid = req.service === "azure";
if (!apisValid || !serviceValid) {
throw new Error("addAzureKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const model = req.body.model.startsWith("azure-")
? req.body.model
: `azure-${req.body.model}`;
req.key = keyPool.get(model, "azure");
req.body.model = model;
// Handles the sole Azure API deviation from the OpenAI spec (that I know of)
const notNullOrUndefined = (x: any) => x !== null && x !== undefined;
if ([req.body.logprobs, req.body.top_logprobs].some(notNullOrUndefined)) {
// OpenAI wants logprobs: true/false and top_logprobs: number
// Azure seems to just want to combine them into logprobs: number
// if (typeof req.body.logprobs === "boolean") {
// req.body.logprobs = req.body.top_logprobs || undefined;
// delete req.body.top_logprobs
// }
// Temporarily just disabling logprobs for Azure because their model support
// is random: `This model does not support the 'logprobs' parameter.`
delete req.body.logprobs;
delete req.body.top_logprobs;
}
req.log.info(
{ key: req.key.hash, model },
"Assigned Azure OpenAI key to request"
);
const cred = req.key as AzureOpenAIKey;
const { resourceName, deploymentId, apiKey } = getCredentialsFromKey(cred);
const operation =
req.outboundApi === "openai" ? "/chat/completions" : "/images/generations";
const apiVersion =
req.outboundApi === "openai" ? "2023-09-01-preview" : "2024-02-15-preview";
req.signedRequest = {
method: "POST",
protocol: "https:",
hostname: `${resourceName}.openai.azure.com`,
path: `/openai/deployments/${deploymentId}${operation}?api-version=${apiVersion}`,
headers: {
["host"]: `${resourceName}.openai.azure.com`,
["content-type"]: "application/json",
["api-key"]: apiKey,
},
body: JSON.stringify(req.body),
};
};
function getCredentialsFromKey(key: AzureOpenAIKey) {
const [resourceName, deploymentId, apiKey] = key.key.split(":");
if (!resourceName || !deploymentId || !apiKey) {
throw new Error("Assigned Azure OpenAI key is not in the correct format.");
}
return { resourceName, deploymentId, apiKey };
}
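// Illustrative only: Azure keys are expected as a colon-delimited triple. A
// hypothetical key "my-resource:gpt4-deployment:abc123" would yield:
//   resourceName = "my-resource"     -> host my-resource.openai.azure.com
//   deploymentId = "gpt4-deployment" -> path /openai/deployments/gpt4-deployment/...
//   apiKey       = "abc123"          -> sent in the `api-key` header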
@@ -0,0 +1,40 @@
import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
export const addGoogleAIKey: RequestPreprocessor = (req) => {
const apisValid = req.inboundApi === "openai" && req.outboundApi === "google-ai";
const serviceValid = req.service === "google-ai";
if (!apisValid || !serviceValid) {
throw new Error("addGoogleAIKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const model = req.body.model;
req.key = keyPool.get(model, "google-ai");
req.log.info(
{ key: req.key.hash, model },
"Assigned Google AI API key to request"
);
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:generateContent?key=$API_KEY
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}
req.isStreaming = req.isStreaming || req.body.stream;
delete req.body.stream;
req.signedRequest = {
method: "POST",
protocol: "https:",
hostname: "generativelanguage.googleapis.com",
path: `/v1beta/models/${model}:${req.isStreaming ? "streamGenerateContent" : "generateContent"}?key=${req.key.key}`,
headers: {
["host"]: `generativelanguage.googleapis.com`,
["content-type"]: "application/json",
},
body: JSON.stringify(req.body),
};
};
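// Illustrative only: given a hypothetical body { model: "gemini-pro", stream: true },
// the preprocessor above signs a request to
//   /v1beta/models/gemini-pro:streamGenerateContent?key=<API_KEY>
// while stream: false (or absent) selects
//   /v1beta/models/gemini-pro:generateContent?key=<API_KEY>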
@@ -0,0 +1,37 @@
import { hasAvailableQuota } from "../../../../shared/users/user-store";
import { isImageGenerationRequest, isTextGenerationRequest } from "../../common";
import { HPMRequestCallback } from "../index";
export class QuotaExceededError extends Error {
public quotaInfo: any;
constructor(message: string, quotaInfo: any) {
super(message);
this.name = "QuotaExceededError";
this.quotaInfo = quotaInfo;
}
}
export const applyQuotaLimits: HPMRequestCallback = (_proxyReq, req) => {
const subjectToQuota =
isTextGenerationRequest(req) || isImageGenerationRequest(req);
if (!subjectToQuota || !req.user) return;
const requestedTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
if (
!hasAvailableQuota({
userToken: req.user.token,
model: req.body.model,
api: req.outboundApi,
requested: requestedTokens,
})
) {
throw new QuotaExceededError(
"You have exceeded your proxy token quota for this model.",
{
quota: req.user.tokenLimits,
used: req.user.tokenCounts,
requested: requestedTokens,
}
);
}
};
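// Worked example with hypothetical numbers: a request counted at 1800 prompt
// tokens with max_tokens of 400 asks hasAvailableQuota for 2200 tokens. If the
// user's remaining quota for that model is smaller, QuotaExceededError is
// thrown before the request ever reaches the upstream API.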
@@ -0,0 +1,70 @@
import { RequestPreprocessor } from "../index";
import { countTokens } from "../../../../shared/tokenization";
import { assertNever } from "../../../../shared/utils";
import {
AnthropicChatMessage,
GoogleAIChatMessage,
MistralAIChatMessage,
OpenAIChatMessage,
} from "../../../../shared/api-support";
/**
* Given a request with an already-transformed body, counts the number of
* tokens and assigns the count to the request.
*/
export const countPromptTokens: RequestPreprocessor = async (req) => {
const service = req.outboundApi;
let result;
switch (service) {
case "openai": {
req.outputTokens = req.body.max_tokens;
const prompt: OpenAIChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
case "openai-text": {
req.outputTokens = req.body.max_tokens;
const prompt: string = req.body.prompt;
result = await countTokens({ req, prompt, service });
break;
}
case "anthropic-chat": {
req.outputTokens = req.body.max_tokens;
const prompt: AnthropicChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
case "anthropic-text": {
req.outputTokens = req.body.max_tokens_to_sample;
const prompt: string = req.body.prompt;
result = await countTokens({ req, prompt, service });
break;
}
case "google-ai": {
req.outputTokens = req.body.generationConfig.maxOutputTokens;
const prompt: GoogleAIChatMessage[] = req.body.contents;
result = await countTokens({ req, prompt, service });
break;
}
case "mistral-ai": {
req.outputTokens = req.body.max_tokens;
const prompt: MistralAIChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
case "openai-image": {
req.outputTokens = 1;
result = await countTokens({ req, service });
break;
}
default:
assertNever(service);
}
req.promptTokens = result.token_count;
req.log.debug({ result: result }, "Counted prompt tokens.");
req.tokenizerInfo = req.tokenizerInfo ?? {};
req.tokenizerInfo = { ...req.tokenizerInfo, ...result };
};
@@ -0,0 +1,83 @@
import { Request } from "express";
import { config } from "../../../../config";
import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
import { BadRequestError } from "../../../../shared/errors";
import {
MistralAIChatMessage,
OpenAIChatMessage,
flattenAnthropicMessages,
} from "../../../../shared/api-support";
const rejectedClients = new Map<string, number>();
setInterval(() => {
rejectedClients.forEach((count, ip) => {
if (count > 0) {
rejectedClients.set(ip, Math.floor(count / 2));
} else {
rejectedClients.delete(ip);
}
});
}, 30000);
/**
* Block requests containing blacklisted phrases. Repeated rejections from the
* same IP address will be throttled.
*/
export const languageFilter: RequestPreprocessor = async (req) => {
if (!config.rejectPhrases.length) return;
const prompt = getPromptFromRequest(req);
const match = config.rejectPhrases.find((phrase) =>
prompt.match(new RegExp(phrase, "i"))
);
if (match) {
const ip = req.ip;
const rejections = (rejectedClients.get(ip) || 0) + 1;
const delay = Math.min(60000, Math.pow(2, rejections - 1) * 1000);
rejectedClients.set(ip, rejections);
req.log.warn(
{ match, ip, rejections, delay },
"Prompt contains rejected phrase"
);
await new Promise((resolve) => {
req.res!.once("close", resolve);
setTimeout(resolve, delay);
});
throw new BadRequestError(config.rejectMessage);
}
};
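// Worked example of the throttle above: delay = min(60000, 2^(rejections-1) * 1000),
// so consecutive rejections from one IP wait 1s, 2s, 4s, 8s, ... capped at 60s.
// The 30-second sweeper halves each counter, so a client that stops sending
// rejected prompts decays back to zero and is eventually forgotten.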
function getPromptFromRequest(req: Request) {
const service = req.outboundApi;
const body = req.body;
switch (service) {
case "anthropic-chat":
return flattenAnthropicMessages(body.messages);
case "anthropic-text":
return body.prompt;
case "openai":
case "mistral-ai":
return body.messages
.map((msg: OpenAIChatMessage | MistralAIChatMessage) => {
const text = Array.isArray(msg.content)
? msg.content
.map((c) => {
if ("text" in c) return c.text;
})
.join()
: msg.content;
return `${msg.role}: ${text}`;
})
.join("\n\n");
case "openai-text":
case "openai-image":
return body.prompt;
case "google-ai":
return body.prompt.text;
default:
assertNever(service);
}
}
@@ -0,0 +1,16 @@
import { Request } from "express";
import { APIFormat } from "../../../../shared/key-management";
import { LLMService } from "../../../../shared/models";
import { RequestPreprocessor } from "../index";
export const setApiFormat = (api: {
inApi: Request["inboundApi"];
outApi: APIFormat;
service: LLMService;
}): RequestPreprocessor => {
return function configureRequestApiFormat(req) {
req.inboundApi = api.inApi;
req.outboundApi = api.outApi;
req.service = api.service;
};
};
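// Hypothetical usage sketch (route wiring assumed, not from the original): an
// Anthropic-compatible router might register
//   setApiFormat({ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "anthropic" })
// as its first preprocessor so that later steps can branch on req.inboundApi,
// req.outboundApi, and req.service.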
@@ -0,0 +1,129 @@
import express from "express";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
import {
AnthropicV1TextSchema,
AnthropicV1MessagesSchema,
} from "../../../../shared/api-support";
import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
const AMZ_HOST =
process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";
/**
* Signs an outgoing AWS request with the appropriate headers and modifies the
* request object in place to fix the path.
* This happens AFTER request transformation.
*/
export const signAwsRequest: RequestPreprocessor = async (req) => {
const { model, stream } = req.body;
req.key = keyPool.get(model, "aws");
req.isStreaming = stream === true || stream === "true";
// same as addAnthropicPreamble for non-AWS requests, but has to happen here
if (req.outboundApi === "anthropic-text") {
let preamble = req.body.prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
req.body.prompt = preamble + req.body.prompt;
}
// AWS uses mostly the same parameters as Anthropic, with a few removed params
// and much stricter validation on unused parameters. Rather than treating it
// as a separate schema we will use the anthropic ones and strip the unused
// parameters.
// TODO: This should happen in transform-outbound-payload.ts
let strippedParams: Record<string, unknown>;
if (req.outboundApi === "anthropic-chat") {
strippedParams = AnthropicV1MessagesSchema.pick({
messages: true,
max_tokens: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
})
.strip()
.parse(req.body);
strippedParams.anthropic_version = "bedrock-2023-05-31";
} else {
strippedParams = AnthropicV1TextSchema.pick({
prompt: true,
max_tokens_to_sample: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
})
.strip()
.parse(req.body);
}
const credential = getCredentialParts(req);
const host = AMZ_HOST.replace("%REGION%", credential.region);
// AWS only uses 2023-06-01 and does not actually check this header, but we
// set it so that the stream adapter always selects the correct transformer.
req.headers["anthropic-version"] = "2023-06-01";
// Uses the AWS SDK to sign a request, then modifies our HPM proxy request
// with the headers generated by the SDK.
const newRequest = new HttpRequest({
method: "POST",
protocol: "https:",
hostname: host,
path: `/model/${model}/invoke${stream ? "-with-response-stream" : ""}`,
headers: {
["Host"]: host,
["content-type"]: "application/json",
},
body: JSON.stringify(strippedParams),
});
if (stream) {
newRequest.headers["x-amzn-bedrock-accept"] = "application/json";
} else {
newRequest.headers["accept"] = "*/*";
}
const { key, body, inboundApi, outboundApi } = req;
req.log.info(
{ key: key.hash, model: body.model, inboundApi, outboundApi },
"Assigned AWS credentials to request"
);
req.signedRequest = await sign(newRequest, getCredentialParts(req));
};
type Credential = {
accessKeyId: string;
secretAccessKey: string;
region: string;
};
function getCredentialParts(req: express.Request): Credential {
const [accessKeyId, secretAccessKey, region] = req.key!.key.split(":");
if (!accessKeyId || !secretAccessKey || !region) {
req.log.error(
{ key: req.key!.hash },
"AWS_CREDENTIALS isn't correctly formatted; refer to the docs"
);
throw new Error("The key assigned to this request is invalid.");
}
return { accessKeyId, secretAccessKey, region };
}
async function sign(request: HttpRequest, credential: Credential) {
const { accessKeyId, secretAccessKey, region } = credential;
const signer = new SignatureV4({
sha256: Sha256,
credentials: { accessKeyId, secretAccessKey },
region,
service: "bedrock",
});
return signer.sign(request);
}
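// Minimal sketch of the signing flow above, with dummy credentials. SignatureV4
// resolves to a signed copy of the HttpRequest (Authorization, X-Amz-Date, etc.
// populated); nothing is sent over the network.
//   const signed = await sign(
//     new HttpRequest({
//       method: "POST",
//       protocol: "https:",
//       hostname: "bedrock-runtime.us-east-1.amazonaws.com",
//       path: "/model/anthropic.claude-v2/invoke",
//       headers: { Host: "bedrock-runtime.us-east-1.amazonaws.com" },
//       body: "{}",
//     }),
//     { accessKeyId: "AKIA...", secretAccessKey: "...", region: "us-east-1" }
//   );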
@@ -0,0 +1,57 @@
import {
API_REQUEST_VALIDATORS,
API_REQUEST_TRANSFORMERS,
} from "../../../../shared/api-support";
import { BadRequestError } from "../../../../shared/errors";
import {
isImageGenerationRequest,
isTextGenerationRequest,
} from "../../common";
import { RequestPreprocessor } from "../index";
import { fixMistralPrompt } from "../../../../shared/api-support/kits/mistral-ai/request-transformers";
/** Transforms an incoming request body to one that matches the target API. */
export const transformOutboundPayload: RequestPreprocessor = async (req) => {
const sameService = req.inboundApi === req.outboundApi;
const alreadyTransformed = req.retryCount > 0;
const notTransformable =
!isTextGenerationRequest(req) && !isImageGenerationRequest(req);
if (alreadyTransformed || notTransformable) return;
// TODO: this should be an APIFormatTransformer
if (req.inboundApi === "mistral-ai") {
const messages = req.body.messages;
req.body.messages = fixMistralPrompt(messages);
req.log.info(
{ old: messages.length, new: req.body.messages.length },
"Fixed Mistral prompt"
);
}
if (sameService) {
const result = API_REQUEST_VALIDATORS[req.inboundApi].safeParse(req.body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body: req.body },
"Request validation failed"
);
throw result.error;
}
req.body = result.data;
return;
}
const transformation = `${req.inboundApi}->${req.outboundApi}` as const;
const transFn = API_REQUEST_TRANSFORMERS[transformation];
if (transFn) {
req.log.info({ transformation }, "Transforming request");
req.body = await transFn(req);
return;
}
throw new BadRequestError(
`${transformation} proxying is not supported. Make sure your client is configured to send requests in the correct format and to the correct endpoint.`
);
};
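// Illustrative only: the transformer registry is keyed by "<inbound>-><outbound>",
// so an OpenAI-formatted request bound for Anthropic's chat API resolves to
// API_REQUEST_TRANSFORMERS["openai->anthropic-chat"], while an unsupported pair
// falls through to the BadRequestError above.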
@@ -0,0 +1,120 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../../config";
import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
const GOOGLE_AI_MAX_CONTEXT = 32000;
const MISTRAL_AI_MAX_CONTENT = 32768;
/**
* Assigns `req.promptTokens` and `req.outputTokens` based on the request body
* and outbound API format, which combined determine the size of the context.
* If the context is too large, an error is thrown.
* This preprocessor should run after any preprocessor that transforms the
* request body.
*/
export const validateContextSize: RequestPreprocessor = async (req) => {
assertRequestHasTokenCounts(req);
const promptTokens = req.promptTokens;
const outputTokens = req.outputTokens;
const contextTokens = promptTokens + outputTokens;
const model = req.body.model;
let proxyMax: number;
switch (req.outboundApi) {
case "openai":
case "openai-text":
proxyMax = OPENAI_MAX_CONTEXT;
break;
case "anthropic-chat":
case "anthropic-text":
proxyMax = CLAUDE_MAX_CONTEXT;
break;
case "google-ai":
proxyMax = GOOGLE_AI_MAX_CONTEXT;
break;
case "mistral-ai":
proxyMax = MISTRAL_AI_MAX_CONTENT;
break;
case "openai-image":
return;
default:
assertNever(req.outboundApi);
}
proxyMax ||= Number.MAX_SAFE_INTEGER;
let modelMax: number;
if (model.match(/gpt-3.5-turbo-16k/)) {
modelMax = 16384;
} else if (model.match(/gpt-4-turbo(-preview)?$/)) {
modelMax = 131072;
} else if (model.match(/gpt-4-(0125|1106)(-preview)?$/)) {
modelMax = 131072;
} else if (model.match(/^gpt-4(-\d{4})?-vision(-preview)?$/)) {
modelMax = 131072;
} else if (model.match(/gpt-3.5-turbo/)) {
modelMax = 4096;
} else if (model.match(/gpt-4-32k/)) {
modelMax = 32768;
} else if (model.match(/gpt-4/)) {
modelMax = 8192;
} else if (model.match(/^claude-(?:instant-)?v1(?:\.\d)?-100k/)) {
modelMax = 100000;
} else if (model.match(/^claude-(?:instant-)?v1(?:\.\d)?$/)) {
modelMax = 9000;
} else if (model.match(/^claude-2\.0/)) {
modelMax = 100000;
} else if (model.match(/^claude-2/)) {
modelMax = 200000;
} else if (model.match(/^claude-3/)) {
modelMax = 200000;
} else if (model.match(/^gemini-\d{3}$/)) {
modelMax = GOOGLE_AI_MAX_CONTEXT;
} else if (model.match(/^mistral-(tiny|small|medium)$/)) {
modelMax = MISTRAL_AI_MAX_CONTENT;
} else if (model.match(/^anthropic\.claude-3-sonnet/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude-v2:\d/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude/)) {
// Not sure if AWS Claude has the same context limit as Anthropic Claude.
modelMax = 100000;
} else {
req.log.warn({ model }, "Unknown model, using 200k token limit.");
modelMax = 200000;
}
const finalMax = Math.min(proxyMax, modelMax);
z.object({
tokens: z
.number()
.int()
.max(finalMax, {
message: `Your request exceeds the context size limit. (max: ${finalMax} tokens, requested: ${promptTokens} prompt + ${outputTokens} output = ${contextTokens} context tokens)`,
}),
}).parse({ tokens: contextTokens });
req.log.debug(
{ promptTokens, outputTokens, contextTokens, modelMax, proxyMax },
"Prompt size validated"
);
req.tokenizerInfo.prompt_tokens = promptTokens;
req.tokenizerInfo.completion_tokens = outputTokens;
req.tokenizerInfo.max_model_tokens = modelMax;
req.tokenizerInfo.max_proxy_tokens = proxyMax;
};
function assertRequestHasTokenCounts(
req: Request
): asserts req is Request & { promptTokens: number; outputTokens: number } {
z.object({
promptTokens: z.number().int().min(1),
outputTokens: z.number().int().min(1),
})
.nonstrict()
.parse({ promptTokens: req.promptTokens, outputTokens: req.outputTokens });
}
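// Worked example with hypothetical numbers: gpt-4-32k with 30000 prompt tokens
// and max_tokens = 4000 gives contextTokens = 34000. modelMax matches the
// gpt-4-32k branch (32768), so finalMax = min(proxyMax, 32768) and the schema
// above rejects the request with the context-size error message.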
@@ -1,10 +0,0 @@
import { ProxyRequestMiddleware } from ".";
/**
* Removes origin and referer headers before sending the request to the API for
* privacy reasons.
**/
export const removeOriginHeaders: ProxyRequestMiddleware = (proxyReq) => {
proxyReq.setHeader("origin", "");
proxyReq.setHeader("referer", "");
};
@@ -1,13 +0,0 @@
import { Request } from "express";
import { AIService } from "../../../key-management";
import { RequestPreprocessor } from ".";
export const setApiFormat = (api: {
inApi: Request["inboundApi"];
outApi: AIService;
}): RequestPreprocessor => {
return (req) => {
req.inboundApi = api.inApi;
req.outboundApi = api.outApi;
};
};
@@ -1,112 +0,0 @@
/**
* Transforms a KoboldAI payload into an OpenAI payload.
* @deprecated Kobold input format isn't supported anymore as all popular
* frontends support reverse proxies or changing their base URL. It adds too
* many edge cases to be worth maintaining and doesn't work with newer features.
*/
import { logger } from "../../../logger";
import type { ProxyRequestMiddleware } from ".";
// Kobold requests look like this:
// body:
// {
// prompt: "Aqua is character from Konosuba anime. Aqua is a goddess, before life in the Fantasy World, she was a goddess of water who guided humans to the afterlife. Aqua looks like young woman with beauty no human could match. Aqua has light blue hair, blue eyes, slim figure, long legs, wide hips, blue waist-long hair that is partially tied into a loop with a spherical clip. Aqua's measurements are 83-56-83 cm. Aqua's height 157cm. Aqua wears sleeveless dark-blue dress with white trimmings, extremely short dark blue miniskirt, green bow around her chest with a blue gem in the middle, detached white sleeves with blue and golden trimmings, thigh-high blue heeled boots over white stockings with blue trimmings. Aqua is very strong in water magic, but a little stupid, so she does not always use it to the place. Aqua is high-spirited, cheerful, carefree. Aqua rarely thinks about the consequences of her actions and always acts or speaks on her whims. Because very easy to taunt Aqua with jeers or lure her with praises.\n" +
// "Aqua's personality: high-spirited, likes to party, carefree, cheerful.\n" +
// 'Circumstances and context of the dialogue: Aqua is standing in the city square and is looking for new followers\n' +
// 'This is how Aqua should talk\n' +
// 'You: Hi Aqua, I heard you like to spend time in the pub.\n' +
// "Aqua: *excitedly* Oh my goodness, yes! I just love spending time at the pub! It's so much fun to talk to all the adventurers and hear about their exciting adventures! And you are?\n" +
// "You: I'm a new here and I wanted to ask for your advice.\n" +
// 'Aqua: *giggles* Oh, advice! I love giving advice! And in gratitude for that, treat me to a drink! *gives signals to the bartender*\n' +
// 'This is how Aqua should talk\n' +
// 'You: Hello\n' +
// "Aqua: *excitedly* Hello there, dear! Are you new to Axel? Don't worry, I, Aqua the goddess of water, am here to help you! Do you need any assistance? And may I say, I look simply radiant today! *strikes a pose and looks at you with puppy eyes*\n" +
// '\n' +
// 'Then the roleplay chat between You and Aqua begins.\n' +
// "Aqua: *She is in the town square of a city named Axel. It's morning on a Saturday and she suddenly notices a person who looks like they don't know what they're doing. She approaches him and speaks* \n" +
// '\n' +
// `"Are you new here? Do you need help? Don't worry! I, Aqua the Goddess of Water, shall help you! Do I look beautiful?" \n` +
// '\n' +
// '*She strikes a pose and looks at him with puppy eyes.*\n' +
// 'You: test\n' +
// 'You: test\n' +
// 'You: t\n' +
// 'You: test\n',
// use_story: false,
// use_memory: false,
// use_authors_note: false,
// use_world_info: false,
// max_context_length: 2048,
// max_length: 180,
// rep_pen: 1.1,
// rep_pen_range: 1024,
// rep_pen_slope: 0.9,
// temperature: 0.65,
// tfs: 0.9,
// top_a: 0,
// top_k: 0,
// top_p: 0.9,
// typical: 1,
// sampler_order: [
// 6, 0, 1, 2,
// 3, 4, 5
// ],
// singleline: false
// }
// OpenAI expects this body:
// { model: 'gpt-3.5-turbo', temperature: 0.65, top_p: 0.9, max_tokens: 180, messages }
// there's also a frequency_penalty but it's not clear how that maps to kobold's
// rep_pen.
// messages is an array of { role: "system" | "assistant" | "user", content: ""}
// kobold only sends us the entire prompt. we can try to split the last two
// lines into user and assistant messages, but that's not always correct. For
// now it will have to do.
/**
* Transforms a KoboldAI payload into an OpenAI payload.
* @deprecated Probably doesn't work anymore, idk.
**/
export const transformKoboldPayload: ProxyRequestMiddleware = (
_proxyReq,
req
) => {
if (req.inboundApi !== "kobold") {
throw new Error("transformKoboldPayload called for non-kobold request.");
}
const { body } = req;
const { prompt, max_length, rep_pen, top_p, temperature } = body;
if (!max_length) {
logger.error("KoboldAI request missing max_length.");
throw new Error("You must specify a max_length parameter.");
}
const promptLines = prompt.split("\n");
// The very last line is the contentless "Assistant: " hint to the AI.
// Tavern just leaves an empty line, Agnai includes the AI's name.
const assistantHint = promptLines.pop();
// The second-to-last line is the user's prompt, generally.
const userPrompt = promptLines.pop();
const messages = [
{ role: "system", content: promptLines.join("\n") },
{ role: "user", content: userPrompt },
{ role: "assistant", content: assistantHint },
];
// Kobold doesn't select a model. If the addKey rewriter assigned us a GPT-4
// key, use that. Otherwise, use GPT-3.5-turbo.
const model = req.key!.isGpt4 ? "gpt-4" : "gpt-3.5-turbo";
const newBody = {
model,
temperature,
top_p,
frequency_penalty: rep_pen, // remove this if model turns schizo
max_tokens: max_length,
messages,
};
req.body = newBody;
};
@@ -1,172 +0,0 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../config";
import { OpenAIPromptMessage } from "../../../tokenization";
import { isCompletionRequest } from "../common";
import { RequestPreprocessor } from ".";
const CLAUDE_OUTPUT_MAX = config.maxOutputTokensAnthropic;
const OPENAI_OUTPUT_MAX = config.maxOutputTokensOpenAI;
// https://console.anthropic.com/docs/api/reference#-v1-complete
const AnthropicV1CompleteSchema = z.object({
model: z.string().regex(/^claude-/, "Model must start with 'claude-'"),
prompt: z.string({
required_error:
"No prompt found. Are you sending an OpenAI-formatted request to the Claude endpoint?",
}),
max_tokens_to_sample: z.coerce
.number()
.int()
.transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
stop_sequences: z.array(z.string()).optional(),
stream: z.boolean().optional().default(false),
temperature: z.coerce.number().optional().default(1),
top_k: z.coerce.number().optional().default(-1),
top_p: z.coerce.number().optional().default(-1),
metadata: z.any().optional(),
});
// https://platform.openai.com/docs/api-reference/chat/create
const OpenAIV1ChatCompletionSchema = z.object({
model: z.string().regex(/^gpt/, "Model must start with 'gpt-'"),
messages: z.array(
z.object({
role: z.enum(["system", "user", "assistant"]),
content: z.string(),
name: z.string().optional(),
}),
{
required_error:
"No prompt found. Are you sending an Anthropic-formatted request to the OpenAI endpoint?",
invalid_type_error:
"Messages were not formatted correctly. Refer to the OpenAI Chat API documentation for more information.",
}
),
temperature: z.number().optional().default(1),
top_p: z.number().optional().default(1),
n: z
.literal(1, {
errorMap: () => ({
message: "You may only request a single completion at a time.",
}),
})
.optional(),
stream: z.boolean().optional().default(false),
stop: z.union([z.string(), z.array(z.string())]).optional(),
max_tokens: z.coerce
.number()
.int()
.optional()
.default(16)
.transform((v) => Math.min(v, OPENAI_OUTPUT_MAX)),
frequency_penalty: z.number().optional().default(0),
presence_penalty: z.number().optional().default(0),
logit_bias: z.any().optional(),
user: z.string().optional(),
});
/** Transforms an incoming request body to one that matches the target API. */
export const transformOutboundPayload: RequestPreprocessor = async (req) => {
const sameService = req.inboundApi === req.outboundApi;
const alreadyTransformed = req.retryCount > 0;
const notTransformable = !isCompletionRequest(req);
if (alreadyTransformed || notTransformable) {
return;
}
if (sameService) {
const validator =
req.outboundApi === "openai"
? OpenAIV1ChatCompletionSchema
: AnthropicV1CompleteSchema;
const result = validator.safeParse(req.body);
if (!result.success) {
req.log.error(
{ issues: result.error.issues, body: req.body },
"Request validation failed"
);
throw result.error;
}
req.body = result.data;
return;
}
if (req.inboundApi === "openai" && req.outboundApi === "anthropic") {
req.body = await openaiToAnthropic(req.body, req);
return;
}
throw new Error(
`'${req.inboundApi}' -> '${req.outboundApi}' request proxying is not supported. Make sure your client is configured to use the correct API.`
);
};
async function openaiToAnthropic(body: any, req: Request) {
const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) {
req.log.error(
{ issues: result.error.issues, body: req.body },
"Invalid OpenAI-to-Anthropic request"
);
throw result.error;
}
// Anthropic has started versioning their API, indicated by an HTTP header
// `anthropic-version`. The new June 2023 version is not backwards compatible
// with our OpenAI-to-Anthropic transformations so we need to explicitly
// request the older version for now. 2023-01-01 will be removed in September.
// https://docs.anthropic.com/claude/reference/versioning
req.headers["anthropic-version"] = "2023-01-01";
const { messages, ...rest } = result.data;
const prompt = openAIMessagesToClaudePrompt(messages);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
// Recommended by Anthropic
stops.push("\n\nHuman:");
// Helps with jailbreak prompts that send fake system messages and multi-bot
// chats that prefix bot messages with "System: Respond as <bot name>".
stops.push("\n\nSystem:");
// Remove duplicates
stops = [...new Set(stops)];
return {
...rest,
// Model may be overridden in `calculate-context-size.ts` to avoid having
// a circular dependency (`calculate-context-size.ts` needs an already-
// transformed request body to count tokens, but this function would like
// to know the count to select a model).
model: process.env.CLAUDE_SMALL_MODEL || "claude-v1",
prompt: prompt,
max_tokens_to_sample: rest.max_tokens,
stop_sequences: stops,
};
}
export function openAIMessagesToClaudePrompt(messages: OpenAIPromptMessage[]) {
return (
messages
.map((m) => {
let role: string = m.role;
if (role === "assistant") {
role = "Assistant";
} else if (role === "system") {
role = "System";
} else if (role === "user") {
role = "Human";
}
// https://console.anthropic.com/docs/prompt-design
// `name` isn't supported by Anthropic but we can still try to use it.
return `\n\n${role}: ${m.name?.trim() ? `(as ${m.name}) ` : ""}${
m.content
}`;
})
.join("") + "\n\nAssistant:"
);
}
@@ -0,0 +1,339 @@
import express from "express";
import { APIFormat } from "../../../shared/key-management";
import { assertNever } from "../../../shared/utils";
import { initializeSseStream } from "../../../shared/streaming";
function getMessageContent({
title,
message,
obj,
}: {
title: string;
message: string;
obj?: Record<string, any>;
}) {
/*
Constructs a Markdown-formatted message that renders semi-nicely in most chat
frontends. For example:
**Proxy error (HTTP 404 Not Found)**
The proxy encountered an error while trying to send your prompt to the upstream service. Further technical details are provided below.
***
*The requested Claude model might not exist, or the key might not be provisioned for it.*
```
{
"type": "error",
"error": {
"type": "not_found_error",
"message": "model: some-invalid-model-id",
},
"proxy_note": "The requested Claude model might not exist, or the key might not be provisioned for it."
}
```
*/
const note = obj?.proxy_note || obj?.error?.message || "";
const friendlyMessage = note ? `${message}\n\n***\n\n*${note}*` : message;
const details = JSON.parse(JSON.stringify(obj ?? {}));
let stack = "";
if (details.stack) {
stack = `\n\nInclude this trace when reporting an issue.\n\`\`\`\n${details.stack}\n\`\`\``;
delete details.stack;
}
return `\n\n**${title}**\n${friendlyMessage}${
obj ? `\n\`\`\`\n${JSON.stringify(details, null, 2)}\n\`\`\`\n${stack}` : ""
}`;
}
type ErrorGeneratorOptions = {
format: APIFormat | "unknown";
title: string;
message: string;
obj?: object;
reqId: string | number | object;
model?: string;
statusCode?: number;
};
export function tryInferFormat(body: any): APIFormat | "unknown" {
if (!body || typeof body !== "object" || !body.model) {
return "unknown";
}
if (body.model.includes("gpt")) {
return "openai";
}
if (body.model.includes("mistral")) {
return "mistral-ai";
}
if (body.model.includes("claude")) {
return body.messages?.length ? "anthropic-chat" : "anthropic-text";
}
if (body.model.includes("gemini")) {
return "google-ai";
}
return "unknown";
}
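// Illustrative only: note that Claude inference hinges on messages being a
// non-empty array. tryInferFormat({ model: "claude-3-opus", messages: [{}] })
// returns "anthropic-chat", while { model: "claude-3-opus", prompt: "..." }
// (no messages) returns "anthropic-text".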
export function sendErrorToClient({
options,
req,
res,
}: {
options: ErrorGeneratorOptions;
req: express.Request;
res: express.Response;
}) {
const { format: inputFormat } = options;
// This is an error thrown before we know the format of the request, so we
// can't send a response in the format the client expects.
const format =
inputFormat === "unknown" ? tryInferFormat(req.body) : inputFormat;
if (format === "unknown") {
return res.status(options.statusCode || 400).json({
error: options.message,
details: options.obj,
});
}
const completion = buildSpoofedCompletion({ ...options, format });
const event = buildSpoofedSSE({ ...options, format });
const isStreaming =
req.isStreaming || req.body.stream === true || req.body.stream === "true";
if (isStreaming) {
if (!res.headersSent) {
initializeSseStream(res);
}
res.write(event);
res.write(`data: [DONE]\n\n`);
res.end();
} else {
res.status(200).json(completion);
}
}
/**
* Returns a non-streaming completion object that looks like it came from the
* service that the request is being proxied to. Used to send error messages to
* the client and have them look like normal responses, for clients with poor
* error handling.
*/
export function buildSpoofedCompletion({
format,
title,
message,
obj,
reqId,
model = "unknown",
}: ErrorGeneratorOptions & { format: Exclude<APIFormat, "unknown"> }) {
const id = String(reqId);
const content = getMessageContent({ title, message, obj });
switch (format) {
case "openai":
case "mistral-ai":
return {
id: "error-" + id,
object: "chat.completion",
created: Date.now(),
model,
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
choices: [
{
message: { role: "assistant", content },
finish_reason: title,
index: 0,
},
],
};
case "openai-text":
return {
id: "error-" + id,
object: "text_completion",
created: Date.now(),
model,
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
choices: [
{ text: content, index: 0, logprobs: null, finish_reason: title },
],
};
case "anthropic-text":
return {
id: "error-" + id,
type: "completion",
completion: content,
stop_reason: title,
stop: null,
model,
};
case "anthropic-chat":
return {
id: "error-" + id,
type: "message",
role: "assistant",
content: [{ type: "text", text: content }],
model,
stop_reason: title,
stop_sequence: null,
};
case "google-ai":
// TODO: Native Google AI non-streaming responses are not supported, this
// is an untested guess at what the response should look like.
return {
id: "error-" + id,
object: "chat.completion",
created: Date.now(),
model,
candidates: [
{
content: { parts: [{ text: content }], role: "model" },
finishReason: title,
index: 0,
tokenCount: null,
safetyRatings: [],
},
],
};
case "openai-image":
return obj;
default:
assertNever(format);
}
}
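// Hypothetical usage: surfacing an upstream failure as a normal-looking chat
// completion (all values are placeholders).
//   const fake = buildSpoofedCompletion({
//     format: "openai",
//     title: "Proxy error (HTTP 429 Too Many Requests)",
//     message: "You have exceeded your quota.",
//     reqId: "abc123",
//     model: "gpt-4",
//   });
// fake.choices[0].message.content then carries the Markdown error text and
// finish_reason is set to the title so clients stop waiting for more tokens.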
/**
* Returns an SSE message that looks like a completion event for the service
* that the request is being proxied to. Used to send error messages to the
* client in the middle of a streaming request.
*/
export function buildSpoofedSSE({
format,
title,
message,
obj,
reqId,
model = "unknown",
}: ErrorGeneratorOptions & { format: Exclude<APIFormat, "unknown"> }) {
const id = String(reqId);
const content = getMessageContent({ title, message, obj });
let event;
switch (format) {
case "openai":
case "mistral-ai":
event = {
id: "chatcmpl-" + id,
object: "chat.completion.chunk",
created: Date.now(),
model,
choices: [{ delta: { content }, index: 0, finish_reason: title }],
};
break;
case "openai-text":
event = {
id: "cmpl-" + id,
object: "text_completion",
created: Date.now(),
choices: [
{ text: content, index: 0, logprobs: null, finish_reason: title },
],
model,
};
break;
case "anthropic-text":
event = {
completion: content,
stop_reason: title,
truncated: false,
stop: null,
model,
log_id: "proxy-req-" + id,
};
break;
case "anthropic-chat":
event = {
type: "content_block_delta",
index: 0,
delta: { type: "text_delta", text: content },
};
break;
case "google-ai":
return JSON.stringify({
candidates: [
{
content: { parts: [{ text: content }], role: "model" },
finishReason: title,
index: 0,
tokenCount: null,
safetyRatings: [],
},
],
});
case "openai-image":
return JSON.stringify(obj);
default:
assertNever(format);
}
if (format === "anthropic-text") {
return (
["event: completion", `data: ${JSON.stringify(event)}`].join("\n") +
"\n\n"
);
}
// ugh.
if (format === "anthropic-chat") {
return (
[
[
"event: message_start",
`data: ${JSON.stringify({
type: "message_start",
message: {
id: "error-" + id,
type: "message",
role: "assistant",
content: [],
model,
},
})}`,
].join("\n"),
[
"event: content_block_start",
`data: ${JSON.stringify({
type: "content_block_start",
index: 0,
content_block: { type: "text", text: "" },
})}`,
].join("\n"),
["event: content_block_delta", `data: ${JSON.stringify(event)}`].join(
"\n"
),
[
"event: content_block_stop",
`data: ${JSON.stringify({ type: "content_block_stop", index: 0 })}`,
].join("\n"),
[
"event: message_delta",
`data: ${JSON.stringify({
type: "message_delta",
delta: { stop_reason: title, stop_sequence: null, usage: null },
})}`,
].join("\n"),
[
"event: message_stop",
`data: ${JSON.stringify({ type: "message_stop" })}`,
].join("\n"),
].join("\n\n") + "\n\n"
);
}
return `data: ${JSON.stringify(event)}\n\n`;
}
@@ -1,293 +1,181 @@
-import { Request, Response } from "express";
-import * as http from "http";
-import { buildFakeSseMessage } from "../common";
-import { RawResponseBodyHandler, decodeResponseBody } from ".";
+import express from "express";
+import { pipeline, Readable, Transform } from "stream";
+import StreamArray from "stream-json/streamers/StreamArray";
+import { StringDecoder } from "string_decoder";
+import { promisify } from "util";
+import { APIFormat, keyPool } from "../../../shared/key-management";
+import {
+  copySseResponseHeaders,
+  initializeSseStream,
+} from "../../../shared/streaming";
+import type { logger } from "../../../logger";
+import { enqueue } from "../../queue";
+import { decodeResponseBody, RawResponseBodyHandler, RetryableError } from ".";
+import { getAwsEventStreamDecoder } from "./streaming/aws-event-stream-decoder";
+import { EventAggregator } from "./streaming/event-aggregator";
+import { SSEMessageTransformer } from "./streaming/sse-message-transformer";
+import { SSEStreamAdapter } from "./streaming/sse-stream-adapter";
+import { buildSpoofedSSE, sendErrorToClient } from "./error-generator";
+import { BadRequestError } from "../../../shared/errors";

-type OpenAiChatCompletionResponse = {
-  id: string;
-  object: string;
-  created: number;
-  model: string;
-  choices: {
-    message: { role: string; content: string };
-    finish_reason: string | null;
-    index: number;
-  }[];
-};
-
-type AnthropicCompletionResponse = {
-  completion: string;
-  stop_reason: string;
-  truncated: boolean;
-  stop: any;
-  model: string;
-  log_id: string;
-  exception: null;
-};
+const pipelineAsync = promisify(pipeline);

 /**
- * Consume the SSE stream and forward events to the client. Once the stream is
- * closed, resolve with the full response body so that subsequent middleware
- * can work with it.
+ * `handleStreamedResponse` consumes and transforms a streamed response from the
+ * upstream service, forwarding events to the client in their requested format.
+ * After the entire stream has been consumed, it resolves with the full response
+ * body so that subsequent middleware in the chain can process it as if it were
+ * a non-streaming response.
  *
- * Typically we would only need one of the raw response handlers to execute, but
- * in the event a streamed request results in a non-200 response, we need to
- * fall back to the non-streaming response handler so that the error handler
- * can inspect the error response.
+ * In the event of an error, the request's streaming flag is unset and the
+ * non-streaming response handler is called instead.
  *
- * Currently most frontends don't support Anthropic streaming, so users can opt
- * to send requests for Claude models via an endpoint that accepts OpenAI-
- * compatible requests and translates the received Anthropic SSE events into
- * OpenAI ones, essentially pretending to be an OpenAI streaming API.
+ * If the error is retryable, that handler will re-enqueue the request and also
+ * reset the streaming flag. Unfortunately the streaming flag is set and unset
+ * in multiple places, so it's hard to keep track of.
  */
 export const handleStreamedResponse: RawResponseBodyHandler = async (
   proxyRes,
   req,
   res
 ) => {
-  // If these differ, the user is using the OpenAI-compatible endpoint, so
-  // we need to translate the SSE events into OpenAI completion events for
-  // their frontend.
+  const { hash } = req.key!;
   if (!req.isStreaming) {
-    const err = new Error(
-      "handleStreamedResponse called for non-streaming request."
-    );
-    req.log.error({ stack: err.stack, api: req.inboundApi }, err.message);
-    throw err;
+    throw new Error("handleStreamedResponse called for non-streaming request.");
   }

-  const key = req.key!;
-  if (proxyRes.statusCode !== 200) {
-    // Ensure we use the non-streaming middleware stack since we won't be
-    // getting any events.
+  if (proxyRes.statusCode! > 201) {
     req.isStreaming = false;
     req.log.warn(
-      { statusCode: proxyRes.statusCode, key: key.hash },
+      { statusCode: proxyRes.statusCode, key: hash },
       `Streaming request returned error status code. Falling back to non-streaming response handler.`
     );
     return decodeResponseBody(proxyRes, req, res);
   }

-  return new Promise((resolve, reject) => {
-    req.log.info({ key: key.hash }, `Starting to proxy SSE stream.`);
+  req.log.debug({ headers: proxyRes.headers }, `Starting to proxy SSE stream.`);

-    // Queued streaming requests will already have a connection open and headers
-    // sent due to the heartbeat handler. In that case we can just start
-    // streaming the response without sending headers.
-    if (!res.headersSent) {
-      res.setHeader("Content-Type", "text/event-stream");
-      res.setHeader("Cache-Control", "no-cache");
-      res.setHeader("Connection", "keep-alive");
-      res.setHeader("X-Accel-Buffering", "no");
-      copyHeaders(proxyRes, res);
-      res.flushHeaders();
-    }
+  // Typically, streaming will have already been initialized by the request
+  // queue to send heartbeat pings.
+  if (!res.headersSent) {
+    copySseResponseHeaders(proxyRes, res);
+    initializeSseStream(res);
+  }

-    const originalEvents: string[] = [];
-    let partialMessage = "";
-    let lastPosition = 0;
-
-    type ProxyResHandler<T extends unknown> = (...args: T[]) => void;
-    function withErrorHandling<T extends unknown>(fn: ProxyResHandler<T>) {
-      return (...args: T[]) => {
-        try {
-          fn(...args);
-        } catch (error) {
-          proxyRes.emit("error", error);
-        }
-      };
-    }
-
-    proxyRes.on(
-      "data",
-      withErrorHandling((chunk: Buffer) => {
-        // We may receive multiple (or partial) SSE messages in a single chunk,
-        // so we need to buffer and emit separate stream events for full
-        // messages so we can parse/transform them properly.
-        const str = chunk.toString();
-
-        // Anthropic uses CRLF line endings (out-of-spec btw)
-        const fullMessages = (partialMessage + str).split(/\r?\n\r?\n/);
-        partialMessage = fullMessages.pop() || "";
-
-        for (const message of fullMessages) {
-          proxyRes.emit("full-sse-event", message);
-        }
-      })
-    );
-
-    proxyRes.on(
-      "full-sse-event",
-      withErrorHandling((data) => {
-        originalEvents.push(data);
-        const { event, position } = transformEvent({
-          data,
-          requestApi: req.inboundApi,
-          responseApi: req.outboundApi,
-          lastPosition,
-        });
-        lastPosition = position;
-        res.write(event + "\n\n");
-      })
-    );
-
-    proxyRes.on(
-      "end",
-      withErrorHandling(() => {
-        let finalBody = convertEventsToFinalResponse(originalEvents, req);
-        req.log.info({ key: key.hash }, `Finished proxying SSE stream.`);
-        res.end();
-        resolve(finalBody);
-      })
-    );
-
-    proxyRes.on("error", (err) => {
-      req.log.error({ error: err, key: key.hash }, `Mid-stream error.`);
-      const fakeErrorEvent = buildFakeSseMessage(
-        "mid-stream-error",
-        err.message,
-        req
-      );
-      res.write(`data: ${JSON.stringify(fakeErrorEvent)}\n\n`);
-      res.write("data: [DONE]\n\n");
-      res.end();
-      reject(err);
-    });
-  });
+  const prefersNativeEvents = req.inboundApi === req.outboundApi;
+  const streamOptions = {
+    contentType: proxyRes.headers["content-type"],
+    api: req.outboundApi,
+    logger: req.log,
+  };
+
+  // Decoder turns the raw response stream into a stream of events in some
+  // format (text/event-stream, vnd.amazon.eventstream, streaming JSON, etc).
+  const decoder = getDecoder({ ...streamOptions, input: proxyRes });
+  // Adapter transforms the decoded events into server-sent events.
+  const adapter = new SSEStreamAdapter(streamOptions);
+  // Aggregator compiles all events into a single response object.
+  const aggregator = new EventAggregator({ format: req.outboundApi });
+  // Transformer converts server-sent events from one vendor's API message
+  // format to another.
+  const transformer = new SSEMessageTransformer({
+    inputFormat: req.outboundApi, // The format of the upstream service's events
+    outputFormat: req.inboundApi, // The format the client requested
+    inputApiVersion: String(req.headers["anthropic-version"]),
+    logger: req.log,
+    requestId: String(req.id),
+    requestedModel: req.body.model,
+  })
+    .on("originalMessage", (msg: string) => {
+      if (prefersNativeEvents) res.write(msg);
+    })
+    .on("data", (msg) => {
+      if (!prefersNativeEvents) res.write(`data: ${JSON.stringify(msg)}\n\n`);
+      aggregator.addEvent(msg);
+    });
+
+  try {
+    await Promise.race([
+      handleAbortedStream(req, res),
+      pipelineAsync(proxyRes, decoder, adapter, transformer),
+    ]);
+    req.log.debug(`Finished proxying SSE stream.`);
+    res.end();
+    return aggregator.getFinalResponse();
+  } catch (err) {
+    if (err instanceof RetryableError) {
+      keyPool.markRateLimited(req.key!);
+      req.log.warn(
+        { key: req.key!.hash, retryCount: req.retryCount },
+        `Re-enqueueing request due to retryable error during streaming response.`
+      );
+      req.retryCount++;
+      await enqueue(req);
+    } else if (err instanceof BadRequestError) {
+      sendErrorToClient({
+        req,
+        res,
+        options: {
+          format: req.inboundApi,
+          title: "Proxy streaming error (Bad Request)",
+          message: `The API returned an error while streaming your request. Your prompt might not be formatted correctly.\n\n*${err.message}*`,
+          reqId: req.id,
+          model: req.body?.model,
+        },
+      });
+    } else {
+      const { message, stack, lastEvent } = err;
+      const eventText = JSON.stringify(lastEvent, null, 2) ?? "undefined";
+      const errorEvent = buildSpoofedSSE({
+        format: req.inboundApi,
+        title: "Proxy stream error",
+        message: "An unexpected error occurred while streaming the response.",
+        obj: { message, stack, lastEvent: eventText },
+        reqId: req.id,
+        model: req.body?.model,
+      });
+      res.write(errorEvent);
+      res.write(`data: [DONE]\n\n`);
+      res.end();
+    }
+    throw err;
+  }
 };

-/**
- * Transforms SSE events from the given response API into events compatible with
- * the API requested by the client.
- */
-function transformEvent({
-  data,
-  requestApi,
-  responseApi,
-  lastPosition,
-}: {
-  data: string;
-  requestApi: string;
-  responseApi: string;
-  lastPosition: number;
-}) {
-  if (requestApi === responseApi) {
-    return { position: -1, event: data };
-  }
-
-  if (requestApi === "anthropic" && responseApi === "openai") {
-    throw new Error(`Anthropic -> OpenAI streaming not implemented.`);
-  }
-
-  // Anthropic sends the full completion so far with each event whereas OpenAI
-  // only sends the delta. To make the SSE events compatible, we remove
-  // everything before `lastPosition` from the completion.
-  if (!data.startsWith("data:")) {
-    return { position: lastPosition, event: data };
-  }
-
-  if (data.startsWith("data: [DONE]")) {
-    return { position: lastPosition, event: data };
-  }
-
-  const event = JSON.parse(data.slice("data: ".length));
-  const newEvent = {
-    id: "ant-" + event.log_id,
-    object: "chat.completion.chunk",
-    created: Date.now(),
-    model: event.model,
-    choices: [
-      {
-        index: 0,
-        delta: { content: event.completion?.slice(lastPosition) },
-        finish_reason: event.stop_reason,
-      },
-    ],
-  };
-  return {
-    position: event.completion.length,
-    event: `data: ${JSON.stringify(newEvent)}`,
-  };
-}
-
-/** Copy headers, excluding ones we're already setting for the SSE response. */
-function copyHeaders(proxyRes: http.IncomingMessage, res: Response) {
-  const toOmit = [
-    "content-length",
-    "content-encoding",
-    "transfer-encoding",
-    "content-type",
-    "connection",
-    "cache-control",
-  ];
-  for (const [key, value] of Object.entries(proxyRes.headers)) {
-    if (!toOmit.includes(key) && value) {
-      res.setHeader(key, value);
-    }
-  }
-}
-
-/**
- * Converts the list of incremental SSE events into an object that resembles a
- * full, non-streamed response from the API so that subsequent middleware can
- * operate on it as if it were a normal response.
- * Events are expected to be in the format they were received from the API.
- */
-function convertEventsToFinalResponse(events: string[], req: Request) {
-  if (req.outboundApi === "openai") {
-    let response: OpenAiChatCompletionResponse = {
-      id: "",
-      object: "",
-      created: 0,
-      model: "",
-      choices: [],
-    };
-    response = events.reduce((acc, event, i) => {
-      if (!event.startsWith("data: ")) {
-        return acc;
-      }
-      if (event === "data: [DONE]") {
-        return acc;
-      }
-      const data = JSON.parse(event.slice("data: ".length));
-      if (i === 0) {
-        return {
-          id: data.id,
-          object: data.object,
-          created: data.created,
-          model: data.model,
-          choices: [
-            {
-              message: { role: data.choices[0].delta.role, content: "" },
-              index: 0,
-              finish_reason: null,
-            },
-          ],
-        };
-      }
-      if (data.choices[0].delta.content) {
-        acc.choices[0].message.content += data.choices[0].delta.content;
-      }
-      acc.choices[0].finish_reason = data.choices[0].finish_reason;
-      return acc;
-    }, response);
-    return response;
-  }
-  if (req.outboundApi === "anthropic") {
-    /*
-     * Full complete responses from Anthropic are conveniently just the same as
-     * the final SSE event before the "DONE" event, so we can reuse that.
-     */
-    const lastEvent = events[events.length - 2].toString();
-    const data = JSON.parse(
-      lastEvent.slice(lastEvent.indexOf("data: ") + "data: ".length)
-    );
-    const response: AnthropicCompletionResponse = {
-      ...data,
-      log_id: req.id,
-    };
-    return response;
-  }
-  throw new Error("If you get this, something is fucked");
-}
+function handleAbortedStream(req: express.Request, res: express.Response) {
+  return new Promise<void>((resolve) =>
+    res.on("close", () => {
+      if (!res.writableEnded) {
+        req.log.info("Client prematurely closed connection during stream.");
+      }
+      resolve();
+    })
+  );
+}
+
+function getDecoder(options: {
+  input: Readable;
+  api: APIFormat;
+  logger: typeof logger;
+  contentType?: string;
+}) {
+  const { api, contentType, input, logger } = options;
+  if (contentType?.includes("application/vnd.amazon.eventstream")) {
+    return getAwsEventStreamDecoder({ input, logger });
+  } else if (api === "google-ai") {
+    return StreamArray.withParser();
+  } else {
+    // Passthrough stream, but ensures split chunks across multi-byte
+    // characters are handled correctly.
+    const stringDecoder = new StringDecoder("utf8");
+    return new Transform({
+      readableObjectMode: true,
+      writableObjectMode: false,
+      transform(chunk, _encoding, callback) {
+        const text = stringDecoder.write(chunk);
+        if (text) this.push(text);
+        callback();
+      },
+    });
+  }
+}
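// Conceptual sketch (annotation, not from the original): the streaming path
// above is a four-stage pipeline,
//   proxyRes -> decoder -> SSEStreamAdapter -> SSEMessageTransformer
// with EventAggregator collecting transformed events on the side so that the
// resolved value mimics a non-streaming response body for later middleware.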
@@ -3,14 +3,27 @@ import { Request, Response } from "express";
import * as http from "http"; import * as http from "http";
import util from "util"; import util from "util";
import zlib from "zlib"; import zlib from "zlib";
import { config } from "../../../config";
import { logger } from "../../../logger";
import { keyPool } from "../../../key-management";
import { enqueue, trackWaitTime } from "../../queue"; import { enqueue, trackWaitTime } from "../../queue";
import { incrementPromptCount } from "../../auth/user-store"; import { HttpError } from "../../../shared/errors";
import { isCompletionRequest, writeErrorResponse } from "../common"; import { keyPool } from "../../../shared/key-management";
import { getOpenAIModelFamily } from "../../../shared/models";
import { countTokens } from "../../../shared/tokenization";
import {
incrementPromptCount,
incrementTokenCount,
} from "../../../shared/users/user-store";
import { assertNever } from "../../../shared/utils";
import { refundLastAttempt } from "../../rate-limit";
import {
getCompletionFromBody,
isImageGenerationRequest,
isTextGenerationRequest,
sendProxyError,
} from "../common";
import { handleStreamedResponse } from "./handle-streamed-response"; import { handleStreamedResponse } from "./handle-streamed-response";
import { logPrompt } from "./log-prompt"; import { logPrompt } from "./log-prompt";
import { saveImage } from "./save-image";
import { config } from "../../../config";
const DECODER_MAP = { const DECODER_MAP = {
gzip: util.promisify(zlib.gunzip), gzip: util.promisify(zlib.gunzip),
@@ -24,7 +37,7 @@ const isSupportedContentEncoding = (
return contentEncoding in DECODER_MAP; return contentEncoding in DECODER_MAP;
}; };
class RetryableError extends Error { export class RetryableError extends Error {
constructor(message: string) { constructor(message: string) {
super(message); super(message);
this.name = "RetryableError"; this.name = "RetryableError";
@@ -74,7 +87,7 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
? handleStreamedResponse ? handleStreamedResponse
: decodeResponseBody; : decodeResponseBody;
let lastMiddlewareName = initialHandler.name; let lastMiddleware = initialHandler.name;
try { try {
const body = await initialHandler(proxyRes, req, res); const body = await initialHandler(proxyRes, req, res);
@@ -84,61 +97,70 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
if (req.isStreaming) { if (req.isStreaming) {
// `handleStreamedResponse` writes to the response and ends it, so // `handleStreamedResponse` writes to the response and ends it, so
// we can only execute middleware that doesn't write to the response. // we can only execute middleware that doesn't write to the response.
middlewareStack.push(trackRateLimit, incrementKeyUsage, logPrompt); middlewareStack.push(
trackRateLimit,
countResponseTokens,
incrementUsage,
logPrompt
);
} else { } else {
middlewareStack.push( middlewareStack.push(
trackRateLimit, trackRateLimit,
addProxyInfo,
handleUpstreamErrors, handleUpstreamErrors,
incrementKeyUsage, countResponseTokens,
incrementUsage,
copyHttpHeaders, copyHttpHeaders,
saveImage,
logPrompt, logPrompt,
...apiMiddleware ...apiMiddleware
); );
} }
for (const middleware of middlewareStack) { for (const middleware of middlewareStack) {
lastMiddlewareName = middleware.name; lastMiddleware = middleware.name;
await middleware(proxyRes, req, res, body); await middleware(proxyRes, req, res, body);
} }
trackWaitTime(req); trackWaitTime(req);
} catch (error: any) { } catch (error) {
// Hack: if the error is a retryable rate-limit error, the request has // Hack: if the error is a retryable rate-limit error, the request has
// been re-enqueued and we can just return without doing anything else. // been re-enqueued and we can just return without doing anything else.
if (error instanceof RetryableError) { if (error instanceof RetryableError) {
return; return;
} }
const errorData = { // Already logged and responded to the client by handleUpstreamErrors
error: error.stack, if (error instanceof HttpError) {
thrownBy: lastMiddlewareName, if (!res.writableEnded) res.end();
key: req.key?.hash,
};
const message = `Error while executing proxy response middleware: ${lastMiddlewareName} (${error.message})`;
if (res.headersSent) {
req.log.error(errorData, message);
// This should have already been handled by the error handler, but
// just in case...
if (!res.writableEnded) {
res.end();
}
return; return;
} }
logger.error(errorData, message);
const { stack, message } = error;
const info = { stack, lastMiddleware, key: req.key?.hash };
const description = `Error while executing proxy response middleware: ${lastMiddleware} (${message})`;
if (res.headersSent) {
req.log.error(info, description);
if (!res.writableEnded) res.end();
return;
} else {
req.log.error(info, description);
res res
.status(500) .status(500)
.json({ error: "Internal server error", proxy_note: message }); .json({ error: "Internal server error", proxy_note: description });
}
} }
}; };
}; };
function reenqueueRequest(req: Request) { async function reenqueueRequest(req: Request) {
req.log.info( req.log.info(
{ key: req.key?.hash, retryCount: req.retryCount }, { key: req.key?.hash, retryCount: req.retryCount },
`Re-enqueueing request due to retryable error` `Re-enqueueing request due to retryable error`
); );
req.retryCount++; req.retryCount++;
enqueue(req); await enqueue(req);
} }
/** /**
@@ -158,7 +180,7 @@ export const decodeResponseBody: RawResponseBodyHandler = async (
throw err; throw err;
} }
const promise = new Promise<string>((resolve, reject) => { return new Promise<string>((resolve, reject) => {
let chunks: Buffer[] = []; let chunks: Buffer[] = [];
proxyRes.on("data", (chunk) => chunks.push(chunk)); proxyRes.on("data", (chunk) => chunks.push(chunk));
proxyRes.on("end", async () => { proxyRes.on("end", async () => {
@@ -168,15 +190,17 @@ export const decodeResponseBody: RawResponseBodyHandler = async (
if (contentEncoding) { if (contentEncoding) {
if (isSupportedContentEncoding(contentEncoding)) { if (isSupportedContentEncoding(contentEncoding)) {
const decoder = DECODER_MAP[contentEncoding]; const decoder = DECODER_MAP[contentEncoding];
// @ts-ignore - started failing after upgrading TypeScript, don't care
// as it was never a problem.
body = await decoder(body); body = await decoder(body);
} else { } else {
const errorMessage = `Proxy received response with unsupported content-encoding: ${contentEncoding}`; const error = `Proxy received response with unsupported content-encoding: ${contentEncoding}`;
logger.warn({ contentEncoding, key: req.key?.hash }, errorMessage); req.log.warn({ contentEncoding, key: req.key?.hash }, error);
writeErrorResponse(req, res, 500, { sendProxyError(req, res, 500, "Internal Server Error", {
error: errorMessage, error,
contentEncoding, contentEncoding,
}); });
return reject(errorMessage); return reject(error);
} }
} }
@@ -186,25 +210,29 @@ export const decodeResponseBody: RawResponseBodyHandler = async (
return resolve(json); return resolve(json);
} }
return resolve(body.toString()); return resolve(body.toString());
} catch (error: any) { } catch (e) {
const errorMessage = `Proxy received response with invalid JSON: ${error.message}`; const msg = `Proxy received response with invalid JSON: ${e.message}`;
logger.warn({ error, key: req.key?.hash }, errorMessage); req.log.warn({ error: e.stack, key: req.key?.hash }, msg);
writeErrorResponse(req, res, 500, { error: errorMessage }); sendProxyError(req, res, 500, "Internal Server Error", { error: msg });
return reject(errorMessage); return reject(msg);
} }
}); });
}); });
return promise;
}; };
// TODO: This is too specific to OpenAI's error responses. type ProxiedErrorPayload = {
error?: Record<string, any>;
message?: string;
proxy_note?: string;
};
/**
 * Handles non-2xx responses from the upstream service. If the proxied response
 * is an error, this will respond to the client with an error payload and throw
 * an error to stop the middleware stack.
 * On 429 errors, if request queueing is enabled, the request will be silently
 * re-enqueued. Otherwise, the request will be rejected with an error payload.
 * @throws {HttpError} On HTTP error status code from upstream service
 */
const handleUpstreamErrors: ProxyResHandlerWithBody = async (
  proxyRes,
@@ -213,85 +241,163 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
  body
) => {
  const statusCode = proxyRes.statusCode || 500;
  const statusMessage = proxyRes.statusMessage || "Internal Server Error";

  if (statusCode < 400) {
    return;
  }

  let errorPayload: ProxiedErrorPayload;
  const tryAgainMessage = keyPool.available(req.body?.model)
    ? `There may be more keys available for this model; try again in a few seconds.`
    : "There are no more keys available for this model.";
  try {
    assertJsonResponse(body);
    errorPayload = body;
  } catch (parseError) {
    // Likely Bad Gateway or Gateway Timeout from upstream's reverse proxy
    const hash = req.key?.hash;
    req.log.warn({ statusCode, statusMessage, key: hash }, parseError.message);

    const errorObject = {
      error: parseError.message,
      status: statusCode,
      statusMessage,
      proxy_note: `Proxy got back an error, but it was not in JSON format. This is likely a temporary problem with the upstream service.`,
    };

    sendProxyError(req, res, statusCode, statusMessage, errorObject);
    throw new HttpError(statusCode, parseError.message);
  }

  const errorType =
    errorPayload.error?.code ||
    errorPayload.error?.type ||
    getAwsErrorType(proxyRes.headers["x-amzn-errortype"]);

  req.log.warn(
    { statusCode, type: errorType, errorPayload, key: req.key?.hash },
    `Received error response from upstream. (${proxyRes.statusMessage})`
  );

  const service = req.key!.service;
  if (service === "aws") {
    // Try to standardize the error format for AWS
    errorPayload.error = { message: errorPayload.message, type: errorType };
    delete errorPayload.message;
  }
  if (statusCode === 400) {
    // Bad request. For OpenAI, this is usually due to prompt length.
    // For Anthropic, this is usually due to missing preamble.
    switch (service) {
      case "openai":
      case "google-ai":
      case "mistral-ai":
      case "azure":
        const filteredCodes = ["content_policy_violation", "content_filter"];
        if (filteredCodes.includes(errorPayload.error?.code)) {
          errorPayload.proxy_note = `Request was filtered by the upstream API's content moderation system. Modify your prompt and try again.`;
          refundLastAttempt(req);
        } else if (errorPayload.error?.code === "billing_hard_limit_reached") {
          // For some reason, some models return this 400 error instead of the
          // same 429 billing error that other models return.
          await handleOpenAIRateLimitError(req, tryAgainMessage, errorPayload);
        } else {
          errorPayload.proxy_note = `The upstream API rejected the request. Your prompt may be too long for ${req.body?.model}.`;
        }
        break;
      case "anthropic":
      case "aws":
        await handleAnthropicBadRequestError(req, errorPayload);
        break;
      default:
        assertNever(service);
    }
  } else if (statusCode === 401) {
    // Key is invalid or was revoked
    keyPool.disable(req.key!, "revoked");
    errorPayload.proxy_note = `API key is invalid or revoked. ${tryAgainMessage}`;
  } else if (statusCode === 403) {
    if (service === "anthropic") {
      keyPool.disable(req.key!, "revoked");
      errorPayload.proxy_note = `API key is invalid or revoked. ${tryAgainMessage}`;
      return;
    }
    switch (errorType) {
      case "UnrecognizedClientException":
        // Key is invalid.
        keyPool.disable(req.key!, "revoked");
        errorPayload.proxy_note = `API key is invalid or revoked. ${tryAgainMessage}`;
        break;
      case "AccessDeniedException":
        const isModelAccessError =
          errorPayload.error?.message?.includes(`specified model ID`);
        if (!isModelAccessError) {
          req.log.error(
            { key: req.key?.hash, model: req.body?.model },
            "Disabling key due to AccessDeniedException when invoking model. If credentials are valid, check IAM permissions."
          );
          keyPool.disable(req.key!, "revoked");
        }
        errorPayload.proxy_note = `API key doesn't have access to the requested resource. Model ID: ${req.body?.model}`;
        break;
      default:
        errorPayload.proxy_note = `Received 403 error. Key may be invalid.`;
    }
  } else if (statusCode === 429) {
    switch (service) {
      case "openai":
        await handleOpenAIRateLimitError(req, tryAgainMessage, errorPayload);
        break;
      case "anthropic":
        await handleAnthropicRateLimitError(req, errorPayload);
        break;
      case "aws":
        await handleAwsRateLimitError(req, errorPayload);
        break;
      case "azure":
      case "mistral-ai":
        await handleAzureRateLimitError(req, errorPayload);
        break;
      case "google-ai":
        await handleGoogleAIRateLimitError(req, errorPayload);
        break;
      default:
        assertNever(service);
    }
  } else if (statusCode === 404) {
    // Most likely model not found
    switch (service) {
      case "openai":
        if (errorPayload.error?.code === "model_not_found") {
          const requestedModel = req.body.model;
          const modelFamily = getOpenAIModelFamily(requestedModel);
          errorPayload.proxy_note = `The key assigned to your prompt does not support the requested model (${requestedModel}, family: ${modelFamily}).`;
          req.log.error(
            { key: req.key?.hash, model: requestedModel, modelFamily },
            "Prompt was routed to a key that does not support the requested model."
          );
        }
        break;
      case "anthropic":
        errorPayload.proxy_note = `The requested Claude model might not exist, or the key might not be provisioned for it.`;
        break;
      case "google-ai":
        errorPayload.proxy_note = `The requested Google AI model might not exist, or the key might not be provisioned for it.`;
        break;
      case "mistral-ai":
        errorPayload.proxy_note = `The requested Mistral AI model might not exist, or the key might not be provisioned for it.`;
        break;
      case "aws":
        errorPayload.proxy_note = `The requested AWS resource might not exist, or the key might not have access to it.`;
        break;
      case "azure":
        errorPayload.proxy_note = `The assigned Azure deployment does not support the requested model.`;
        break;
      default:
        assertNever(service);
    }
  } else {
    errorPayload.proxy_note = `Unrecognized error from upstream service.`;
@@ -305,104 +411,279 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
    );
  }

  sendProxyError(req, res, statusCode, statusMessage, errorPayload);
  // This is bubbled up to onProxyRes's handler for logging but will not trigger
  // a write to the response as `sendProxyError` has just done that.
  throw new HttpError(statusCode, errorPayload.error?.message);
};
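// `HttpError` and `RetryableError` are defined elsewhere in the proxy and are
// not part of this diff. A minimal sketch of what they plausibly look like --
// the names come from the code above, but the bodies here are guesses:
class HttpError extends Error {
  constructor(public readonly status: number, message?: string) {
    super(message);
    this.name = "HttpError";
  }
}

class RetryableError extends Error {
  constructor(message: string) {
    super(message);
    this.name = "RetryableError";
  }
}
// Throwing RetryableError signals the response handler to return without
// terminating the request, so that it can be placed back in the queue.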
async function handleAnthropicBadRequestError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  const { error } = errorPayload;
  const isMissingPreamble = error?.message.startsWith(
    `prompt must start with "\n\nHuman:" turn`
  );

  // Some keys mandate a \n\nHuman: preamble, which we can add and retry
  if (isMissingPreamble) {
    req.log.warn(
      { key: req.key?.hash },
      "Request failed due to missing preamble. Key will be marked as such for subsequent requests."
    );
    keyPool.update(req.key!, { requiresPreamble: true });
    await reenqueueRequest(req);
    throw new RetryableError("Claude request re-enqueued to add preamble.");
  }

  // {"type":"error","error":{"type":"invalid_request_error","message":"Usage blocked until 2024-03-01T00:00:00+00:00 due to user specified spend limits."}}
  // {"type":"error","error":{"type":"invalid_request_error","message":"Your credit balance is too low to access the Claude API. Please go to Plans & Billing to upgrade or purchase credits."}}
  const isOverQuota =
    error?.message?.match(/usage blocked until/i) ||
    error?.message?.match(/credit balance is too low/i);
  if (isOverQuota) {
    req.log.warn(
      { key: req.key?.hash, message: error?.message },
      "Anthropic key has hit spending limit and will be disabled."
    );
    keyPool.disable(req.key!, "quota");
    errorPayload.proxy_note = `Assigned key has hit its spending limit. ${error?.message}`;
    return;
  }

  const isDisabled = error?.message?.match(/organization has been disabled/i);
  if (isDisabled) {
    req.log.warn(
      { key: req.key?.hash, message: error?.message },
      "Anthropic key has been disabled."
    );
    keyPool.disable(req.key!, "revoked");
    errorPayload.proxy_note = `Assigned key has been disabled. ${error?.message}`;
    return;
  }

  errorPayload.proxy_note = `Unrecognized error from the API. (${error?.message})`;
}
async function handleAnthropicRateLimitError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  if (errorPayload.error?.type === "rate_limit_error") {
    keyPool.markRateLimited(req.key!);
    await reenqueueRequest(req);
    throw new RetryableError("Claude rate-limited request re-enqueued.");
  } else {
    errorPayload.proxy_note = `Unrecognized 429 Too Many Requests error from the API.`;
  }
}
async function handleAwsRateLimitError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  const errorType = errorPayload.error?.type;
  switch (errorType) {
    case "ThrottlingException":
      keyPool.markRateLimited(req.key!);
      await reenqueueRequest(req);
      throw new RetryableError("AWS rate-limited request re-enqueued.");
    case "ModelNotReadyException":
      errorPayload.proxy_note = `The requested model is overloaded. Try again in a few seconds.`;
      break;
    default:
      errorPayload.proxy_note = `Unrecognized rate limit error from AWS. (${errorType})`;
  }
}
async function handleOpenAIRateLimitError(
  req: Request,
  tryAgainMessage: string,
  errorPayload: ProxiedErrorPayload
): Promise<Record<string, any>> {
  const type = errorPayload.error?.type;
  switch (type) {
    case "insufficient_quota":
    case "invalid_request_error": // this is the billing_hard_limit_reached error seen in some cases
      // Billing quota exceeded (key is dead, disable it)
      keyPool.disable(req.key!, "quota");
      errorPayload.proxy_note = `Assigned key's quota has been exceeded. ${tryAgainMessage}`;
      break;
    case "access_terminated":
      // Account banned (key is dead, disable it)
      keyPool.disable(req.key!, "revoked");
      errorPayload.proxy_note = `Assigned key has been banned by OpenAI for policy violations. ${tryAgainMessage}`;
      break;
    case "billing_not_active":
      // Key valid but account billing is delinquent
      keyPool.disable(req.key!, "quota");
      errorPayload.proxy_note = `Assigned key has been disabled due to delinquent billing. ${tryAgainMessage}`;
      break;
    case "requests":
    case "tokens":
      keyPool.markRateLimited(req.key!);
      if (errorPayload.error?.message?.match(/on requests per day/)) {
        // This key has a very low rate limit, so we can't re-enqueue it.
        errorPayload.proxy_note = `Assigned key has reached its per-day request limit for this model. Try another model.`;
        break;
      }
      // Per-minute request or token rate limit is exceeded, which we can retry
      await reenqueueRequest(req);
      throw new RetryableError("Rate-limited request re-enqueued.");
    // WIP/nonfunctional
    // case "tokens_usage_based":
    //   // Weird new rate limit type that seems limited to preview models.
    //   // Distinct from `tokens` type. Can be per-minute or per-day.
    //
    //   // I've seen reports of this error for 500k tokens/day and 10k tokens/min.
    //   // 10k tokens per minute is problematic, because this is much less than
    //   // GPT4-Turbo's max context size for a single prompt and is effectively a
    //   // cap on the max context size for just that key+model, which the app is
    //   // not able to deal with.
    //
    //   // Similarly if there is a 500k tokens per day limit and 450k tokens have
    //   // been used today, the max context for that key becomes 50k tokens until
    //   // the next day and becomes progressively smaller as more tokens are used.
    //
    //   // To work around these keys we will first retry the request a few times.
    //   // After that we will reject the request, and if it's a per-day limit we
    //   // will also disable the key.
    //
    //   // "Rate limit reached for gpt-4-1106-preview in organization org-xxxxxxxxxxxxxxxxxxx on tokens_usage_based per day: Limit 500000, Used 460000, Requested 50000"
    //   // "Rate limit reached for gpt-4-1106-preview in organization org-xxxxxxxxxxxxxxxxxxx on tokens_usage_based per min: Limit 10000, Requested 40000"
    //
    //   const regex =
    //     /Rate limit reached for .+ in organization .+ on \w+ per (day|min): Limit (\d+)(?:, Used (\d+))?, Requested (\d+)/;
    //   const [, period, limit, used, requested] =
    //     errorPayload.error?.message?.match(regex) || [];
    //
    //   req.log.warn(
    //     { key: req.key?.hash, period, limit, used, requested },
    //     "Received `tokens_usage_based` rate limit error from OpenAI."
    //   );
    //
    //   if (!period || !limit || !requested) {
    //     errorPayload.proxy_note = `Unrecognized rate limit error from OpenAI. (${errorPayload.error?.message})`;
    //     break;
    //   }
    //
    //   if (req.retryCount < 2) {
    //     await reenqueueRequest(req);
    //     throw new RetryableError("Rate-limited request re-enqueued.");
    //   }
    //
    //   if (period === "min") {
    //     errorPayload.proxy_note = `Assigned key can't be used for prompts longer than ${limit} tokens, and no other keys are available right now. Reduce the length of your prompt or try again in a few minutes.`;
    //   } else {
    //     errorPayload.proxy_note = `Assigned key has reached its per-day request limit for this model. Try another model.`;
    //   }
    //
    //   keyPool.markRateLimited(req.key!);
    //   break;
    default:
      errorPayload.proxy_note = `This is likely a temporary error with OpenAI. Try again in a few seconds.`;
      break;
  }
  return errorPayload;
}
async function handleAzureRateLimitError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  const code = errorPayload.error?.code;
  switch (code) {
    case "429":
      keyPool.markRateLimited(req.key!);
      await reenqueueRequest(req);
      throw new RetryableError("Rate-limited request re-enqueued.");
    default:
      errorPayload.proxy_note = `Unrecognized rate limit error from Azure (${code}). Please report this.`;
      break;
  }
}

// {"error":{"code":429,"message":"Resource has been exhausted (e.g. check quota).","status":"RESOURCE_EXHAUSTED"}}
async function handleGoogleAIRateLimitError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  const status = errorPayload.error?.status;
  switch (status) {
    case "RESOURCE_EXHAUSTED":
      keyPool.markRateLimited(req.key!);
      await reenqueueRequest(req);
      throw new RetryableError("Rate-limited request re-enqueued.");
    default:
      errorPayload.proxy_note = `Unrecognized rate limit error from Google AI (${status}). Please report this.`;
      break;
  }
}
const incrementUsage: ProxyResHandlerWithBody = async (_proxyRes, req) => {
  if (isTextGenerationRequest(req) || isImageGenerationRequest(req)) {
    const model = req.body.model;
    const tokensUsed = req.promptTokens! + req.outputTokens!;
    req.log.debug(
      {
        model,
        tokensUsed,
        promptTokens: req.promptTokens,
        outputTokens: req.outputTokens,
      },
      `Incrementing usage for model`
    );
    keyPool.incrementUsage(req.key!, model, tokensUsed);
    if (req.user) {
      incrementPromptCount(req.user.token);
      incrementTokenCount(req.user.token, model, req.outboundApi, tokensUsed);
    }
  }
};
const countResponseTokens: ProxyResHandlerWithBody = async (
_proxyRes,
req,
_res,
body
) => {
if (req.outboundApi === "openai-image") {
req.outputTokens = req.promptTokens;
req.promptTokens = 0;
return;
}
// This function is prone to breaking if the upstream API makes even minor
// changes to the response format, especially for SSE responses. If you're
// seeing errors in this function, check the reassembled response body from
// handleStreamedResponse to see if the upstream API has changed.
try {
assertJsonResponse(body);
const service = req.outboundApi;
const completion = getCompletionFromBody(req, body);
const tokens = await countTokens({ req, completion, service });
req.log.debug(
{ service, tokens, prevOutputTokens: req.outputTokens },
`Counted tokens for completion`
);
if (req.tokenizerInfo) {
req.tokenizerInfo.completion_tokens = tokens;
}
req.outputTokens = tokens.token_count;
} catch (error) {
req.log.warn(
error,
"Error while counting completion tokens; assuming `max_output_tokens`"
);
// req.outputTokens will already be set to `max_output_tokens` from the
// prompt counting middleware, so we don't need to do anything here.
}
};
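// `getCompletionFromBody` is imported from ../common and not shown in this
// diff. A rough sketch of the per-format extraction it implies -- the response
// shapes below are assumptions based on each upstream API's documented
// format, not code from this repository:
function getCompletionFromBodySketch(
  format: string,
  body: Record<string, any>
): string {
  switch (format) {
    case "openai":
    case "mistral-ai":
      return body.choices?.[0]?.message?.content ?? "";
    case "openai-text":
      return body.choices?.[0]?.text ?? "";
    case "anthropic-chat":
      return body.content?.[0]?.text ?? "";
    case "anthropic-text":
      return body.completion ?? "";
    case "google-ai":
      return body.candidates?.[0]?.content?.parts?.[0]?.text ?? "";
    default:
      return "";
  }
}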
const trackRateLimit: ProxyResHandlerWithBody = async (proxyRes, req) => {
  keyPool.updateRateLimits(req.key!, proxyRes.headers);
};
@@ -426,3 +707,46 @@ const copyHttpHeaders: ProxyResHandlerWithBody = async (
    res.setHeader(key, proxyRes.headers[key] as string);
  });
};
/**
* Injects metadata into the response, such as the tokenizer used, logging
* status, upstream API endpoint used, and whether the input prompt was modified
* or transformed.
* Only used for non-streaming requests.
*/
const addProxyInfo: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
const { service, inboundApi, outboundApi, tokenizerInfo } = req;
const native = inboundApi === outboundApi;
const info: any = {
logged: config.promptLogging,
tokens: tokenizerInfo,
service,
in_api: inboundApi,
out_api: outboundApi,
prompt_transformed: !native,
};
if (req.query?.debug?.length) {
info.final_request_body = req.signedRequest?.body || req.body;
}
if (typeof body === "object") {
body.proxy = info;
}
};
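// The `proxy` block is attached to every non-streaming response;
// `final_request_body` is included only when `?debug=true` is set. A response
// would gain a block along these lines (values are illustrative):
//
// "proxy": {
//   "logged": false,
//   "tokens": { ...tokenizer info, e.g. completion_tokens... },
//   "service": "openai",
//   "in_api": "openai",
//   "out_api": "openai",
//   "prompt_transformed": false,
//   "final_request_body": { ...the prompt as sent upstream... }
// }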
function getAwsErrorType(header: string | string[] | undefined) {
const val = String(header).match(/^(\w+):?/)?.[1];
return val || String(header);
}
function assertJsonResponse(body: any): asserts body is Record<string, any> {
if (typeof body !== "object") {
throw new Error("Expected response to be an object");
}
}
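// AWS populates `x-amzn-errortype` with values like
// "ThrottlingException:http://internal.amazon.com/coral/..." -- the regex in
// getAwsErrorType keeps only the leading exception name. Hypothetical examples:
//   getAwsErrorType("ThrottlingException:http://...") // "ThrottlingException"
//   getAwsErrorType(undefined)                        // "undefined" (String() coercion)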
@@ -1,10 +1,21 @@
import { Request } from "express";
import { config } from "../../../config";
import { logQueue } from "../../../shared/prompt-logging";
import {
  getCompletionFromBody,
  getModelFromBody,
  isImageGenerationRequest,
  isTextGenerationRequest,
} from "../common";
import { ProxyResHandlerWithBody } from ".";
import { assertNever } from "../../../shared/utils";
import {
  AnthropicChatMessage,
  flattenAnthropicMessages,
  MistralAIChatMessage,
  OpenAIChatMessage,
} from "../../../shared/api-support";
import { APIFormat } from "../../../shared/key-management";
/** If prompt logging is enabled, enqueues the prompt for logging. */
export const logPrompt: ProxyResHandlerWithBody = async (
@@ -20,59 +31,99 @@ export const logPrompt: ProxyResHandlerWithBody = async (
    throw new Error("Expected body to be an object");
  }

  const loggable =
    isTextGenerationRequest(req) || isImageGenerationRequest(req);
  if (!loggable) return;

  const promptPayload = getPromptForRequest(req, responseBody);
  const promptFlattened = flattenMessages(promptPayload, req.outboundApi);
  const response = getCompletionFromBody(req, responseBody);
  const model = getModelFromBody(req, responseBody);

  logQueue.enqueue({
    endpoint: req.inboundApi,
    promptRaw: JSON.stringify(promptPayload),
    promptFlattened,
    model,
    response,
  });
};
type OaiImageResult = {
  prompt: string;
  size: string;
  style: string;
  quality: string;
  revisedPrompt?: string;
};

const getPromptForRequest = (
  req: Request,
  responseBody: Record<string, any>
):
  | string
  | OpenAIChatMessage[]
  | AnthropicChatMessage[]
  | MistralAIChatMessage[]
  | OaiImageResult => {
  // Since the prompt logger only runs after the request has been proxied, we
  // can assume the body has already been transformed to the target API's
  // format.
  switch (req.outboundApi) {
    case "openai":
    case "mistral-ai":
    case "anthropic-chat":
      return req.body.messages;
    case "openai-text":
      return req.body.prompt;
    case "openai-image":
      return {
        prompt: req.body.prompt,
        size: req.body.size,
        style: req.body.style,
        quality: req.body.quality,
        revisedPrompt: responseBody.data[0].revised_prompt,
      };
    case "anthropic-text":
      return req.body.prompt;
    case "google-ai":
      return req.body.prompt.text;
    default:
      assertNever(req.outboundApi);
  }
};
const flattenMessages = (
  val:
    | string
    | OaiImageResult
    | OpenAIChatMessage[]
    | AnthropicChatMessage[]
    | MistralAIChatMessage[],
  format: APIFormat
): string => {
  if (typeof val === "string") {
    return val.trim();
  }
  if (format === "anthropic-chat") {
    return flattenAnthropicMessages(val as AnthropicChatMessage[]);
  }
  if (Array.isArray(val)) {
    return val
      .map(({ content, role }) => {
        const text = Array.isArray(content)
          ? content
              .map((c) => {
                if ("text" in c) return c.text;
                if ("image_url" in c) return "(( Attached Image ))";
                if ("source" in c) return "(( Attached Image ))";
                return "(( Unsupported Content ))";
              })
              .join("\n")
          : content;
        return `${role}: ${text}`;
      })
      .join("\n");
  }
  return val.prompt.trim();
};
@@ -0,0 +1,33 @@
import { ProxyResHandlerWithBody } from "./index";
import {
mirrorGeneratedImage,
OpenAIImageGenerationResult,
} from "../../../shared/file-storage/mirror-generated-image";
export const saveImage: ProxyResHandlerWithBody = async (
_proxyRes,
req,
_res,
body
) => {
if (req.outboundApi !== "openai-image") {
return;
}
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
if (body.data) {
const prompt = body.data[0].revised_prompt ?? req.body.prompt;
const res = await mirrorGeneratedImage(
req,
prompt,
body as OpenAIImageGenerationResult
);
req.log.info(
{ urls: res.data.map((item) => item.url) },
"Saved generated image to user_content"
);
}
};
@@ -0,0 +1,49 @@
import { OpenAIChatCompletionStreamEvent } from "../index";
export type AnthropicChatCompletionResponse = {
id: string;
type: "message";
role: "assistant";
content: { type: "text"; text: string }[];
model: string;
stop_reason: string | null;
stop_sequence: string | null;
usage: { input_tokens: number; output_tokens: number };
};
/**
* Given a list of OpenAI chat completion events, compiles them into a single
* finalized Anthropic chat completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForAnthropicChat(
events: OpenAIChatCompletionStreamEvent[]
): AnthropicChatCompletionResponse {
let merged: AnthropicChatCompletionResponse = {
id: "",
type: "message",
role: "assistant",
content: [],
model: "",
stop_reason: null,
stop_sequence: null,
usage: { input_tokens: 0, output_tokens: 0 },
};
merged = events.reduce((acc, event, i) => {
// The first event will only contain role assignment and response metadata
if (i === 0) {
acc.id = event.id;
acc.model = event.model;
acc.content = [{ type: "text", text: "" }];
return acc;
}
acc.stop_reason = event.choices[0].finish_reason ?? "";
if (event.choices[0].delta.content) {
acc.content[0].text += event.choices[0].delta.content;
}
return acc;
}, merged);
return merged;
}
@@ -0,0 +1,48 @@
import { OpenAIChatCompletionStreamEvent } from "../index";
export type AnthropicTextCompletionResponse = {
completion: string;
stop_reason: string;
truncated: boolean;
stop: any;
model: string;
log_id: string;
exception: null;
};
/**
* Given a list of OpenAI chat completion events, compiles them into a single
* finalized Anthropic completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForAnthropicText(
events: OpenAIChatCompletionStreamEvent[]
): AnthropicTextCompletionResponse {
let merged: AnthropicTextCompletionResponse = {
log_id: "",
exception: null,
model: "",
completion: "",
stop_reason: "",
truncated: false,
stop: null,
};
merged = events.reduce((acc, event, i) => {
// The first event will only contain role assignment and response metadata
if (i === 0) {
acc.log_id = event.id;
acc.model = event.model;
acc.completion = "";
acc.stop_reason = "";
return acc;
}
acc.stop_reason = event.choices[0].finish_reason ?? "";
if (event.choices[0].delta.content) {
acc.completion += event.choices[0].delta.content;
}
return acc;
}, merged);
return merged;
}
@@ -0,0 +1,58 @@
import { OpenAIChatCompletionStreamEvent } from "../index";
export type OpenAiChatCompletionResponse = {
id: string;
object: string;
created: number;
model: string;
choices: {
message: { role: string; content: string };
finish_reason: string | null;
index: number;
}[];
};
/**
* Given a list of OpenAI chat completion events, compiles them into a single
* finalized OpenAI chat completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForOpenAIChat(
events: OpenAIChatCompletionStreamEvent[]
): OpenAiChatCompletionResponse {
let merged: OpenAiChatCompletionResponse = {
id: "",
object: "",
created: 0,
model: "",
choices: [],
};
merged = events.reduce((acc, event, i) => {
// The first event will only contain role assignment and response metadata
if (i === 0) {
acc.id = event.id;
acc.object = event.object;
acc.created = event.created;
acc.model = event.model;
acc.choices = [
{
index: 0,
message: {
role: event.choices[0].delta.role ?? "assistant",
content: "",
},
finish_reason: null,
},
];
return acc;
}
acc.choices[0].finish_reason = event.choices[0].finish_reason;
if (event.choices[0].delta.content) {
acc.choices[0].message.content += event.choices[0].delta.content;
}
return acc;
}, merged);
return merged;
}
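// Example: two synthetic SSE events collapse into one blocking-style response.
// (Event shapes are abbreviated to the fields the reducer actually reads.)
//
//   const events = [
//     { id: "cmpl-1", object: "chat.completion.chunk", created: 1710000000,
//       model: "gpt-4", choices: [{ index: 0, delta: { role: "assistant" }, finish_reason: null }] },
//     { id: "cmpl-1", object: "chat.completion.chunk", created: 1710000000,
//       model: "gpt-4", choices: [{ index: 0, delta: { content: "Hello!" }, finish_reason: "stop" }] },
//   ];
//   mergeEventsForOpenAIChat(events).choices[0].message.content; // "Hello!"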
