164 Commits

Author SHA1 Message Date
nai-degen 84acc429d7 wip 2024-03-16 00:04:27 -05:00
nai-degen d9117bf08e fixes AWS debug log 2024-03-14 21:34:07 -05:00
nai-degen 57d9791270 fixes uncounted tokens when Response stream is prematurely closed 2024-03-14 21:32:20 -05:00
nai-degen 367ac3d075 adds ?debug=true query param to have proxy respond with transformed prompt 2024-03-14 08:16:38 -05:00
nai-degen 276a1a1d44 small fix for recurring AWS logging check 2024-03-13 20:53:21 -05:00
nai-degen 6cf029112e adds Anthropic's SOTA Haiku model; misc code cleanup 2024-03-13 20:48:05 -05:00
nai-degen 4b86802eb2 adds separate model detection for gpt-4-32k-0314 2024-03-10 19:16:11 -05:00
nai-degen 7f431de98e sets cache-control on static user images 2024-03-10 15:50:40 -05:00
nai-degen e0bf10626e removes .reverse() from image history to avoid thumbnails shifting as users browse 2024-03-10 15:12:20 -05:00
nai-degen eb55f30414 adds input prompt to imagehistory 2024-03-10 15:08:44 -05:00
nai-degen e1fb53b461 pretty-prints dall-e image metadata JSON download 2024-03-10 15:04:44 -05:00
nai-degen 7610369c6d adds dall-e full history page and metadata downloader 2024-03-10 14:53:11 -05:00
nai-degen 37f17ded60 removes OpenAI max_tokens default as that isn't aligned with the real API 2024-03-10 12:32:15 -05:00
nai-degen 96b6ea9568 adds azure-image endpoint to service info; hides unavailable endpoints 2024-03-09 13:25:50 -06:00
nai-degen cec39328a2 adds azure dall-e support 2024-03-09 13:03:50 -06:00
nai-degen cab346787c fixes regression in anthropic text > anthropic chat api translation 2024-03-08 21:16:25 -06:00
nai-degen fab404b232 refactors api transformers and adds oai->anthropic chat api translation 2024-03-08 20:59:19 -06:00
nai-degen 8d84f289b2 fixes issue with mistral-large model family not being detected 2024-03-08 17:07:25 -06:00
nai-degen 9ce10b4f6a shows more helpful errors when users' prefills are invalid during AWS streaming 2024-03-07 13:28:23 -06:00
nai-degen 96756d32f3 fixes handling of DALL-E content_policy_violation errors 2024-03-07 12:56:35 -06:00
nai-degen 1fb3eac154 maybe shows clearer AWS ValidationExceptions when users have bad prefills 2024-03-06 05:12:47 -06:00
nai-degen 8f46bd4397 handles 'this organization is disabled' error from anthropic 2024-03-06 00:42:10 -06:00
nai-degen ddf34685df adds Claude 3 Vision support 2024-03-05 18:34:10 -06:00
nai-degen ea3aae5da6 allows selecting compat model via endpoint name and makes errors less confusing 2024-03-05 05:13:22 -06:00
nai-degen 055d650c5d fixes legacy compat endpoint 2024-03-05 01:38:39 -06:00
nai-degen 2643dfea61 improves aws sonnet key detection and no keys available error messaging 2024-03-05 01:04:08 -06:00
nai-degen 434445797a fixes bad handleCompatibilityRequest middleware fallthrough 2024-03-04 23:53:13 -06:00
nai-degen 03c5c473e1 improves error handling for sillytavern 2024-03-04 22:59:32 -06:00
nai-degen 068e7a834f fixes AWS legacy models for non-streaming requests 2024-03-04 21:22:43 -06:00
nai-degen 736803ad92 enables opus by default 2024-03-04 21:11:32 -06:00
nai-degen 6b22d17c50 fixes claude-opus token usage being attributed to regular claude 2024-03-04 17:03:02 -06:00
nai-degen 51ffca480a adds AWS Claude Chat Completions and Claude 3 Sonnet support 2024-03-04 16:25:06 -06:00
nai-degen 802d847cc6 enables Claude opus by default 2024-03-04 16:21:40 -06:00
nai-degen 90ddcac55b makes claude3 compat model customizable via environment variable 2024-03-04 14:21:55 -06:00
nai-degen 36923686f6 shows claude-opus key count on service info page 2024-03-04 14:12:38 -06:00
nai-degen 1edc93dc72 adds claude-opus model family 2024-03-04 14:08:59 -06:00
nai-degen f6c124c1d3 fixes issue with preamble-required claude keys and anthropic chat 2024-03-04 14:00:25 -06:00
nai-degen 90a053d0e0 detects and removes over-quota claude keys from keypool 2024-03-04 13:42:29 -06:00
khanon db318ec237 Implement Anthropic Chat Completions endpoint and Claude 3 (khanon/oai-reverse-proxy!64) 2024-03-04 19:06:46 +00:00
nai-degen b90abbda88 spoofs response for SillyTavern test messages 2024-02-28 15:57:18 -06:00
nai-degen 93cee1db9b removes claude v1 from AWS keychecker as it has been retired 2024-02-27 15:52:09 -06:00
nai-degen bd15728743 uses explicitly set keyprovider rather than inferring via requested model 2024-02-27 10:56:50 -06:00
nai-degen 627559b729 updates mistral modelids 2024-02-26 23:55:03 -06:00
nai-degen 428e103323 allows customizing the /proxy endpoint prefix 2024-02-26 18:20:34 -06:00
nai-degen fd742fc0cb Merge remote-tracking branch 'origin/main' 2024-02-26 18:12:23 -06:00
nai-degen 5e19e2756a adds mistral-large model family, untested 2024-02-26 18:12:08 -06:00
devvnull d3f7c675e3 add pricing for Azure GPT counterparts and update Claude pricing (khanon/oai-reverse-proxy!65) 2024-02-20 03:53:26 +00:00
nai-degen 59bda40bbc handles google streaming json response format variation 2024-02-19 00:12:09 -06:00
nai-degen 68d829bceb adds Claude over-quota detection 2024-02-17 15:56:22 -06:00
nai-degen 9c03290a3d detects anthropic copyright prefill pozzing 2024-02-16 10:22:45 -06:00
nai-degen 3498584a1f removes forceModel on Google AI endpoint 2024-02-15 11:41:34 -06:00
nai-degen 21d61da62b increases max image payload size for gpt4v 2024-02-12 21:59:48 -06:00
nai-degen 35dc0f4826 fixes 'Premature close' caused by fucked up AWS unmarshaller errors 2024-02-10 14:47:14 -06:00
nai-degen a2ae9f32db handles OpenAI organization check failures due to missing API scopes 2024-02-09 10:10:22 -06:00
devvnull 0ce4582f3b Improve "\n\nHuman" prefix requirement detection for Anthropic (khanon/oai-reverse-proxy!63) 2024-02-08 16:28:11 +00:00
nai-degen bbee056114 fixes Force Key Recheck admin function for azure/aws 2024-02-07 19:54:40 -06:00
nai-degen ecc804887b uses EventStreamMarshaller from AWS SDK to hopefully handle split messages 2024-02-05 19:56:41 -06:00
nai-degen a8fd3c7240 fixes AWS Claude throttlingException handling 2024-02-04 20:48:20 -06:00
nai-degen 40240601f5 refactors SSEStreamAdapter to fix leaking decoder streams 2024-02-04 18:38:06 -06:00
nai-degen 98cea2da02 replaces eventstream lib to (hopefully) fix interrupted AWS streams 2024-02-04 17:18:28 -06:00
nai-degen c88f47d0ed fixes middleware order breaking /proxy endpoint 2024-02-04 16:21:44 -06:00
nai-degen 43106d9c7f tracks Risu userid rather than IP address on usertokens 2024-02-04 14:14:36 -06:00
nai-degen fe429a7610 adds SERVICE_INFO_PASSWORD to gate infopage behind a password 2024-02-04 14:04:46 -06:00
nai-degen 235510e588 fixes incorrect AWS Claude 2.1 max context limit 2024-02-01 20:40:15 -06:00
nai-degen 7eb6eb90ad moves api schema validators from transform-outbound-payload into shared 2024-01-29 19:38:22 -06:00
nai-degen 924db33f7e attempts to auto-convert Mistral prompts for its more strict rules 2024-01-28 17:42:23 -06:00
nai-degen 3f2f30e605 updates gpt4-v tokenizer for previous Risu change 2024-01-27 13:35:46 -06:00
nai-degen c9791acd85 makes gpt4-v input validation less strict to accommodate Risu 2024-01-27 13:24:11 -06:00
nai-degen e871b8ecf1 removes logprobs default value since it breaks gpt-4-vision 2024-01-27 12:19:24 -06:00
nai-degen 37ca98ad30 adds dark mode (infopage only currently) 2024-01-25 16:24:11 -06:00
nai-degen e6dc4475e6 fixes max context size for nu-gpt4-turbo 2024-01-25 14:07:42 -06:00
nai-degen 5e646b1c86 adds gpt-4-0125-preview and gpt-4-turbo-preview alias 2024-01-25 13:27:03 -06:00
nai-degen 6f626e623e fixes OAI trial keys bricking the dall-e queue 2024-01-25 01:47:51 -06:00
nai-degen 02a54bf4e3 fixes azure openai logprobs (actually tested this time) 2024-01-25 01:17:18 -06:00
nai-degen 79b2e5b6fd adds very basic support for OpenAI function calling 2024-01-24 16:42:26 -06:00
nai-degen 935a633325 fixes typo in Azure logprob adjustment 2024-01-24 16:03:47 -06:00
nai-degen 4a4b60ebcd handles Azure deviation from OpenAI spec on logprobs param 2024-01-24 16:01:19 -06:00
nai-degen ad465be363 fixes logprobs schema validation for turbo instruct endpoint 2024-01-24 14:31:10 -06:00
nai-degen c7a351baa8 adds support for requesting logprobs from OpenAI Chat Completions API 2024-01-24 11:46:09 -06:00
nai-degen ba8b052b17 adds bindAddress to omitted config keys 2024-01-18 04:14:15 -06:00
nai-degen e813cd9d22 default claude 2.1 instead of 1.3 in openai compat endpoint since 1.3 is not accessible on all keys 2024-01-18 04:14:15 -06:00
nai-degen 4c2a2c1e6c improves handle-streamed-response comments/docs [skip-ci] 2024-01-18 04:14:15 -06:00
nai-degen f1d927fa62 updates README with building/forking info [skip-ci] 2024-01-15 11:46:09 -06:00
nai-degen ad6e5224e3 allows binding to loopback interface via app config instead of only docker 2024-01-15 11:32:26 -06:00
nai-degen 85d89bdb9f fixes CI image tagging on main branch 2024-01-15 01:37:50 -06:00
khanon f5e7195cc9 Add Gitlab CI and self-hosting instructions (khanon/oai-reverse-proxy!61) 2024-01-15 06:51:12 +00:00
nai-degen 81f1e2bc37 fixes broken GET models endpoint for openai/mistral 2024-01-14 05:33:24 -06:00
nai-degen c2a686f229 Revert "reduces max request body size for now" (reverts commit 4ffa7fb12b) 2024-01-13 18:12:16 -06:00
twinkletoes 96a0f94041 Fix Mistral safe_prompt schema property (khanon/oai-reverse-proxy!60) 2024-01-14 00:11:39 +00:00
nai-degen d56043616e adds keychecker workaround for OpenAI API bug falsely returning gpt4-32k 2024-01-12 10:33:48 -06:00
nai-degen e3e06b065d fixes sourcemap dependency in package.json 2024-01-09 00:32:34 -06:00
nai-degen 1bbb515200 updates static service info 2024-01-08 23:32:25 -06:00
nai-degen a57cc4e8d4 updates dotenv 2024-01-08 23:25:02 -06:00
nai-degen 2239bead2c updates README.md 2024-01-08 19:36:35 -06:00
nai-degen 1a585ddd32 adds TRUSTED_PROXIES to .env.example 2024-01-08 16:41:30 -06:00
nai-degen be731691a1 allows configurable trust proxy setting for Render deployments 2024-01-08 16:39:28 -06:00
nai-degen c2e442e030 long overdue removal of tired in-joke 2024-01-08 11:01:44 -06:00
nai-degen d3ac3b362b trusts only one proxy hop (AWS WAF in huggingface's case) 2024-01-07 19:18:01 -06:00
nai-degen 7b0892ddae fixes unawaited call to async enqueue 2024-01-07 16:23:53 -06:00
nai-degen 7f92565739 SSE queueing adjustments, untested 2024-01-07 16:19:22 -06:00
nai-degen 936d3c0721 corrects nodejs max heap memory config 2024-01-07 16:16:27 -06:00
nai-degen 4ffa7fb12b reduces max request body size for now 2024-01-07 13:03:24 -06:00
nai-degen 8dc7464381 strips extraneous properties on zod schemas 2024-01-07 13:00:48 -06:00
nai-degen d2cd24bfd2 suggest larger nodejs max heap 2024-01-07 12:58:50 -06:00
twinkletoes e33f778192 Change mistral-medium friendly name (khanon/oai-reverse-proxy!59) 2023-12-26 00:27:17 +00:00
twinkletoes 4a823b216f Mistral AI support (khanon/oai-reverse-proxy!58) 2023-12-25 18:33:16 +00:00
nai-degen 01e76cbb1c restores accidentally deleted line breaking infopage stats 2023-12-17 00:25:58 -06:00
nai-degen 655703e680 refactors infopage 2023-12-16 20:30:20 -06:00
nai-degen 3be2687793 tries to detect Azure GPT4-Turbo deployments more reliably 2023-12-15 12:14:23 -06:00
nai-degen 5599a83ae4 improves streaming error handling 2023-12-14 05:01:10 -06:00
nai-degen de34d41918 fixes gemini name prefixing when 'Add character names' is disabled in ST 2023-12-13 23:21:30 -06:00
nai-degen c5cd90dcef adjusts prompt transform to discourage Gemini from speaking for user 2023-12-13 23:03:57 -06:00
nai-degen 8a135a960d fixes gemini prompt reformatting for jbs; adds stop sequences 2023-12-13 21:45:53 -06:00
nai-degen 707cbbce16 fixes gemini throwing an error on JB prompts 2023-12-13 19:14:31 -06:00
khanon fad16cc268 Add Google AI API (khanon/oai-reverse-proxy!57) 2023-12-13 21:56:07 +00:00
nai-degen 0d3682197c treats 403 from anthropic as key dead 2023-12-11 09:13:53 -06:00
valadaptive e0624e30fd Fix some corner cases in SSE parsing (khanon/oai-reverse-proxy!56) 2023-12-09 06:18:01 +00:00
nai-degen 94d4efe9bb properly enforce allowedModelFamilies; refactor HPM proxyReq handlers 2023-12-05 22:07:56 -06:00
random-username-423 12276a1f59 Fix AWS Claude Model Reassigning (khanon/oai-reverse-proxy!55) 2023-12-06 03:21:27 +00:00
nai-degen fdd824f0e4 adds azure rate limit auto-retry 2023-12-04 01:24:33 -06:00
khanon fbdea30264 Azure OpenAI support (khanon/oai-reverse-proxy!48) 2023-12-04 04:21:18 +00:00
nai-degen cd1b9d0e0c don't print google api key to container logs on error 2023-12-01 11:23:56 -06:00
nai-degen 9e61d9029f adds claude-2.1 (untested) 2023-11-21 11:32:43 -06:00
nai-degen f95e24afbb fixes incorrect max model size for gpt4-v 2023-11-19 02:23:41 -06:00
khanon f29049f993 Support for GPT-4-Vision (khanon/oai-reverse-proxy!54) 2023-11-19 05:06:21 +00:00
nai-degen 7f2f324e26 fixes render dockerfile and dalle3 model detection 2023-11-18 12:27:14 -06:00
nai-degen dc61291933 adds temporary keychecker var to treat dall-e-2 the same as dall-e-3 2023-11-17 20:24:36 -06:00
nai-degen 6c02e9b265 don't enqueue requests which fail stream check 2023-11-17 14:36:47 -06:00
nai-degen e018672968 re-adds keychecker info to STATIC_INFO_PAGE 2023-11-16 02:16:24 -06:00
nai-degen bfd7e23124 encodes queue payload 2023-11-16 01:19:01 -06:00
khanon 6aa6bebf08 Scale SSE heartbeat size with traffic (khanon/oai-reverse-proxy!53) 2023-11-16 05:45:35 +00:00
nai-degen 6acdf35914 removes length from stalled request error message 2023-11-15 17:18:51 -06:00
nai-degen 3de79873e9 adds STATIC_SERVICE_INFO config 2023-11-15 17:12:07 -06:00
nai-degen 3aca9e90f0 fixes rate limiter always using IMAGE_MODEL_RATE_LIMIT 2023-11-15 13:07:58 -06:00
nai-degen 5fabe1d1f8 uses exponential moving average for wait time calculation 2023-11-14 01:36:11 -06:00
nai-degen 4a68c14477 further increases OpenAI rate limit backoff 2023-11-14 01:28:28 -06:00
khanon 20c064394a OpenAI DALL-E Image Generation (khanon/oai-reverse-proxy!52) 2023-11-14 05:41:19 +00:00
nai-degen 3ea23760c3 adjusts prompt logging to truncate huge prompts from the end 2023-11-11 20:14:32 -06:00
nai-degen 5db07404f2 fixes infopage crash when check_keys is disabled 2023-11-10 22:41:57 -06:00
nai-degen c453a5f2ad logs usertoken lookup attempts 2023-11-10 22:41:36 -06:00
nai-degen c7a095d345 removes debug log 2023-11-09 16:25:57 -06:00
nai-degen e9110611fa adds REJECT_PHRASES configuration setting 2023-11-09 16:24:49 -06:00
nai-degen 79e1fe09e4 fixes multiple enumeration on infopage 2023-11-08 12:02:23 -06:00
dllt98 08b2196bfb Update .env.example to include MAX_CONTEXT_TOKENS_OPENAI (khanon/oai-reverse-proxy!50) 2023-11-08 02:50:19 +00:00
nai-degen 350d6542cf fixes stats for non-openai models 2023-11-06 22:41:48 -06:00
nai-degen c9c24f86bb improvements to infopage key categorization 2023-11-06 22:13:34 -06:00
nai-degen b6f8f15a1f tries to prevent per-day rate limited keys from bricking the queue 2023-11-06 21:16:36 -06:00
nai-degen 5467136c1a adds gpt4-turbo to userschema; updates docs 2023-11-06 16:35:35 -06:00
nai-degen 0d5dfeccf8 adds gpt4-turbo model family and support for gpt-4-1106-preview model 2023-11-06 15:29:43 -06:00
nai-degen b615ffa433 fixes issue with local development cookies 2023-11-06 10:28:27 -06:00
nai-degen a27163a629 adds option to not disable keys when reaching IP limit 2023-11-06 10:15:57 -06:00
nai-degen 5a8fb3aff6 adds USE_INSECURE_COOKIES for hosts without SSL support 2023-11-03 15:25:06 -05:00
nai-degen 51dd0c71ba removes unused import in openai proxy 2023-10-24 13:17:46 -05:00
nai-degen 89e1ed46d5 re-signs AWS requests on every attempt to fix fucked up queueing 2023-10-24 13:10:50 -05:00
nai-degen 26dc79c8f1 fixes broken AWS rate limit backoff 2023-10-24 09:19:46 -05:00
nai-degen 89e9b67f3f fixes AWS mid-stream rate limits not actually marking key as rate-limited 2023-10-23 22:47:29 -05:00
nai-degen 52ec2ec265 fixes blank AWS responses due to reqs sometimes using wrong handler 2023-10-23 22:23:06 -05:00
nai-degen 8bd2f749c1 reduces logging severity of prompt validation errors 2023-10-23 20:30:27 -05:00
khanon ff27ca3780 Update info-page.ts 2023-10-20 00:33:57 +00:00
nai-degen 41a463d2c8 possibly fix issue with AWS keychecker due to amazon API change 2023-10-16 12:17:02 -05:00
nai-degen 3f7e50f87e follow-up 'fixes empty AWS streaming responses when under heavy load' 2023-10-15 00:06:38 -05:00
nai-degen f6cfc6e882 fixes empty AWS streaming responses when under heavy load 2023-10-15 00:05:36 -05:00
nai-degen af4d8dae40 changes default AMZ_HOST to bedrock-runtime.region.amazonaws.com 2023-10-12 15:39:06 -05:00
nai-degen 725fd6e6f1 deprioritizes queued Agnai.chat requests and limits concurrency to five across all shared IPs 2023-10-09 12:36:54 -05:00
155 changed files with 10000 additions and 3079 deletions
+50 -9
@@ -5,17 +5,29 @@
 # All values have reasonable defaults, so you only need to change the ones you
 # want to override.
+# Use production mode unless you are developing locally.
+NODE_ENV=production
 # ------------------------------------------------------------------------------
 # General settings:
 # The title displayed on the info page.
 # SERVER_TITLE=Coom Tunnel
-# Model requests allowed per minute per user.
-# MODEL_RATE_LIMIT=4
+# The route name used to proxy requests to APIs, relative to the Web site root.
+# PROXY_ENDPOINT_ROUTE=/proxy
+# Text model requests allowed per minute per user.
+# TEXT_MODEL_RATE_LIMIT=4
+# Image model requests allowed per minute per user.
+# IMAGE_MODEL_RATE_LIMIT=2
+# Max number of context tokens a user can request at once.
+# Increase this if your proxy allows GPT 32k or 128k context.
+# MAX_CONTEXT_TOKENS_OPENAI=16384
 # Max number of output tokens a user can request at once.
-# MAX_OUTPUT_TOKENS_OPENAI=300
+# MAX_OUTPUT_TOKENS_OPENAI=400
 # MAX_OUTPUT_TOKENS_ANTHROPIC=400
 # Whether to show the estimated cost of consumed tokens on the info page.
@@ -27,7 +39,12 @@
 # CHECK_KEYS=true
 # Which model types users are allowed to access.
-# ALLOWED_MODEL_FAMILIES=claude,turbo,gpt4,gpt4-32k
+# The following model families are recognized:
+# turbo | gpt4 | gpt4-32k | gpt4-turbo | dall-e | claude | claude-opus | gemini-pro | mistral-tiny | mistral-small | mistral-medium | mistral-large | aws-claude | azure-turbo | azure-gpt4 | azure-gpt4-32k | azure-gpt4-turbo | azure-dall-e
+# By default, all models are allowed except for 'dall-e' / 'azure-dall-e'.
+# To allow DALL-E image generation, uncomment the line below and add 'dall-e' or
+# 'azure-dall-e' to the list of allowed model families.
+# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,claude,claude-opus,gemini-pro,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo
 # URLs from which requests will be blocked.
 # BLOCKED_ORIGINS=reddit.com,9gag.com
@@ -36,8 +53,10 @@
 # Destination to redirect blocked requests to.
 # BLOCK_REDIRECT="https://roblox.com/"
-# Whether to reject requests containing disallowed content.
-# REJECT_DISALLOWED=false
+# Comma-separated list of phrases that will be rejected. Only whole words are matched.
+# Surround phrases with quotes if they contain commas.
+# Avoid short or common phrases as this tests the entire prompt.
+# REJECT_PHRASES="phrase one,phrase two,"phrase three, which has a comma",phrase four"
 # Message to show when requests are rejected.
 # REJECT_MESSAGE="This content violates /aicg/'s acceptable use policy."
@@ -45,8 +64,12 @@
 # Requires additional setup. See `docs/google-sheets.md` for more information.
 # PROMPT_LOGGING=false
-# The port to listen on.
+# The port and network interface to listen on.
 # PORT=7860
+# BIND_ADDRESS=0.0.0.0
+# Whether cookies should be set without the Secure flag, for hosts that don't support SSL.
+# USE_INSECURE_COOKIES=false
 # Detail level of logging. (trace | debug | info | warn | error)
 # LOG_LEVEL=info
@@ -63,24 +86,40 @@
 # Maximum number of unique IPs a user can connect from. (0 for unlimited)
 # MAX_IPS_PER_USER=0
+# Whether user_tokens should be automatically disabled when reaching the IP limit.
+# MAX_IPS_AUTO_BAN=true
 # With user_token gatekeeper, whether to allow users to change their nickname.
 # ALLOW_NICKNAME_CHANGES=true
 # Default token quotas for each model family. (0 for unlimited)
+# DALL-E "tokens" are counted at a rate of 100000 tokens per US$1.00 generated,
+# which is similar to the cost of GPT-4 Turbo.
+# DALL-E 3 costs around US$0.10 per image (10000 tokens).
+# See `docs/dall-e-configuration.md` for more information.
 # TOKEN_QUOTA_TURBO=0
 # TOKEN_QUOTA_GPT4=0
 # TOKEN_QUOTA_GPT4_32K=0
+# TOKEN_QUOTA_GPT4_TURBO=0
+# TOKEN_QUOTA_DALL_E=0
 # TOKEN_QUOTA_CLAUDE=0
+# TOKEN_QUOTA_GEMINI_PRO=0
+# TOKEN_QUOTA_AWS_CLAUDE=0
 # How often to refresh token quotas. (hourly | daily)
 # Leave unset to never automatically refresh quotas.
 # QUOTA_REFRESH_PERIOD=daily
+# Specifies the number of proxies or load balancers in front of the server.
+# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
+# For any other deployments, please see config.ts as the correct configuration
+# depends on your setup. Misconfiguring this value can result in problems
+# accurately tracking IP addresses and enforcing rate limits.
+# TRUSTED_PROXIES=1
 # ------------------------------------------------------------------------------
 # Secrets and keys:
-# Do not put any passwords or API keys directly in this file.
-# For Huggingface, set them via the Secrets section in your Space's config UI.
+# For Huggingface, set them via the Secrets section in your Space's config UI. Do not set them in .env.
 # For Render, create a "secret file" called .env using the Environment tab.
 # You can add multiple API keys by separating them with a comma.
@@ -89,6 +128,8 @@ OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 ANTHROPIC_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 # See `docs/aws-configuration.md` for more information, there may be additional steps required to set up AWS.
 AWS_CREDENTIALS=myaccesskeyid:mysecretkey:us-east-1,anotheraccesskeyid:anothersecretkey:us-west-2
+# See `docs/azure-configuration.md` for more information, there may be additional steps required to set up Azure.
+AZURE_CREDENTIALS=azure-resource-name:deployment-id:api-key,another-azure-resource-name:another-deployment-id:another-api-key
 # With proxy_key gatekeeper, the password users must provide to access the API.
 # PROXY_KEY=your-secret-key
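The `TRUSTED_PROXIES` setting introduced above feeds Express's standard `trust proxy` mechanism (the comments in the diff defer to `config.ts` for the exact wiring). A minimal sketch of the idea, assuming a numeric `TRUSTED_PROXIES` variable as in the example file; this is an illustration, not the proxy's actual code:

```typescript
import express from "express";

const app = express();

// Hops (e.g. Cloudflare, Hugging Face's AWS WAF) sitting in front of the server.
// Parsing shown here is hypothetical; the real logic lives in src/config.ts.
const trustedProxies = parseInt(process.env.TRUSTED_PROXIES ?? "1", 10);

// With "trust proxy" set to N, Express resolves req.ip from the Nth-from-last
// X-Forwarded-For entry, so rate limits and IP tracking see the real client
// rather than the load balancer.
app.set("trust proxy", trustedProxies);
```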
+5 -1
@@ -1,7 +1,11 @@
-.env
+.aider*
+.env*
+!.env.vault
 .venv
 .vscode
 .idea
 build
 greeting.md
 node_modules
+http-client.private.env.json
+43 -15
@@ -1,34 +1,53 @@
 # OAI Reverse Proxy
-Reverse proxy server for the OpenAI and Anthropic APIs. Forwards text generation requests while rejecting administrative/billing requests. Includes optional rate limiting and prompt filtering to prevent abuse.
+Reverse proxy server for various LLM APIs.
 ### Table of Contents
 - [What is this?](#what-is-this)
-- [Why?](#why)
-- [Usage Instructions](#setup-instructions)
-- [Deploy to Huggingface (Recommended)](#deploy-to-huggingface-recommended)
-- [Deploy to Repl.it (WIP)](#deploy-to-replit-wip)
+- [Features](#features)
+- [Usage Instructions](#usage-instructions)
+- [Self-hosting](#self-hosting)
+- [Alternatives](#alternatives)
+- [Huggingface (outdated, not advised)](#huggingface-outdated-not-advised)
+- [Render (outdated, not advised)](#render-outdated-not-advised)
 - [Local Development](#local-development)
 ## What is this?
-If you would like to provide a friend access to an API via keys you own, you can use this to keep your keys safe while still allowing them to generate text with the API. You can also use this if you'd like to build a client-side application which uses the OpenAI or Anthropic APIs, but don't want to build your own backend. You should never embed your real API keys in a client-side application. Instead, you can have your frontend connect to this reverse proxy and forward requests to the downstream service.
-This keeps your keys safe and allows you to use the rate limiting and prompt filtering features of the proxy to prevent abuse.
-## Why?
-OpenAI keys have full account permissions. They can revoke themselves, generate new keys, modify spend quotas, etc. **You absolutely should not share them, post them publicly, nor embed them in client-side applications as they can be easily stolen.**
-This proxy only forwards text generation requests to the downstream service and rejects requests which would otherwise modify your account.
+This project allows you to run a reverse proxy server for various LLM APIs.
+## Features
+- [x] Support for multiple APIs
+  - [x] [OpenAI](https://openai.com/)
+  - [x] [Anthropic](https://www.anthropic.com/)
+  - [x] [AWS Bedrock](https://aws.amazon.com/bedrock/)
+  - [x] [Google MakerSuite/Gemini API](https://ai.google.dev/)
+  - [x] [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
+- [x] Translation from OpenAI-formatted prompts to any other API, including streaming responses
+- [x] Multiple API keys with rotation and rate limit handling
+- [x] Basic user management
+  - [x] Simple role-based permissions
+  - [x] Per-model token quotas
+  - [x] Temporary user accounts
+- [x] Prompt and completion logging
+- [x] Abuse detection and prevention
 ---
 ## Usage Instructions
-If you'd like to run your own instance of this proxy, you'll need to deploy it somewhere and configure it with your API keys. A few easy options are provided below, though you can also deploy it to any other service you'd like.
+If you'd like to run your own instance of this server, you'll need to deploy it somewhere and configure it with your API keys. A few easy options are provided below, though you can also deploy it to any other service you'd like if you know what you're doing and the service supports Node.js.
-### Deploy to Huggingface (Recommended)
+### Self-hosting
+[See here for instructions on how to self-host the application on your own VPS or local machine.](./docs/self-hosting.md)
+**Ensure you set the `TRUSTED_PROXIES` environment variable according to your deployment.** Refer to [.env.example](./.env.example) and [config.ts](./src/config.ts) for more information.
+### Alternatives
+Fiz and Sekrit are working on some alternative ways to deploy this conveniently. While I'm not involved in this effort beyond providing technical advice regarding my code, I'll link to their work here for convenience: [Sekrit's rentry](https://rentry.org/sekrit)
+### Huggingface (outdated, not advised)
 [See here for instructions on how to deploy to a Huggingface Space.](./docs/deploy-huggingface.md)
-### Deploy to Render
+### Render (outdated, not advised)
 [See here for instructions on how to deploy to Render.com.](./docs/deploy-render.md)
 ## Local Development
@@ -40,3 +59,12 @@ To run the proxy locally for development or testing, install Node.js >= 18.0.0 a
 4. Start the server in development mode with `npm run start:dev`.
 You can also use `npm run start:dev:tsc` to enable project-wide type checking at the cost of slower startup times. `npm run type-check` can be used to run type checking without starting the server.
+## Building
+To build the project, run `npm run build`. This will compile the TypeScript code to JavaScript and output it to the `build` directory.
+Note that if you are trying to build the server on a very memory-constrained (<= 1GB) VPS, you may need to run the build with `NODE_OPTIONS=--max_old_space_size=2048 npm run build` to avoid running out of memory during the build process, assuming you have swap enabled. The application itself should run fine on a 512MB VPS for most reasonable traffic levels.
+## Forking
+If you are forking the repository on GitGud, you may wish to disable GitLab CI/CD or you will be spammed with emails about failed builds due to not having any CI runners. You can do this by going to *Settings > General > Visibility, project features, permissions* and then disabling the "CI/CD" feature.
+2
@@ -0,0 +1,2 @@
*
!.gitkeep
+21
@@ -0,0 +1,21 @@
stages:
  - build

build_image:
  stage: build
  image:
    name: gcr.io/kaniko-project/executor:debug
    entrypoint: [""]
  script:
    - |
      if [ "$CI_COMMIT_REF_NAME" = "main" ]; then
        TAG="latest"
      else
        TAG=$CI_COMMIT_REF_NAME
      fi
    - echo "Building image with tag $TAG"
    - BASE64_AUTH=$(echo -n "$DOCKER_HUB_USERNAME:$DOCKER_HUB_ACCESS_TOKEN" | base64)
    - echo "{\"auths\":{\"https://index.docker.io/v1/\":{\"auth\":\"$BASE64_AUTH\"}}}" > /kaniko/.docker/config.json
    - /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/docker/ci/Dockerfile --destination docker.io/khanonci/oai-reverse-proxy:$TAG --build-arg CI_COMMIT_REF_NAME=$CI_COMMIT_REF_NAME --build-arg CI_COMMIT_SHA=$CI_COMMIT_SHA --build-arg CI_PROJECT_PATH=$CI_PROJECT_PATH
  only:
    - main
+22
@@ -0,0 +1,22 @@
FROM node:18-bullseye-slim
WORKDIR /app
COPY . .
RUN npm ci
RUN npm run build
RUN npm prune --production
EXPOSE 7860
ENV PORT=7860
ENV NODE_ENV=production
ARG CI_COMMIT_REF_NAME
ARG CI_COMMIT_SHA
ARG CI_PROJECT_PATH
ENV GITGUD_BRANCH=$CI_COMMIT_REF_NAME
ENV GITGUD_COMMIT=$CI_COMMIT_SHA
ENV GITGUD_PROJECT=$CI_PROJECT_PATH
CMD [ "npm", "start" ]
+17
@@ -0,0 +1,17 @@
# Before running this, create a .env and greeting.md file.
# Refer to .env.example for the required environment variables.
# User-generated content is stored in the data directory.
# When self-hosting, it's recommended to run this behind a reverse proxy like
# nginx or Caddy to handle SSL/TLS and rate limiting. Refer to
# docs/self-hosting.md for more information and an example nginx config.
version: '3.8'
services:
  oai-reverse-proxy:
    image: khanonci/oai-reverse-proxy:latest
    ports:
      - "127.0.0.1:7860:7860"
    env_file:
      - ./.env
    volumes:
      - ./greeting.md:/app/greeting.md
      - ./data:/app/data
+4
@@ -3,9 +3,13 @@ RUN apt-get update && \
 apt-get install -y git
 RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
 WORKDIR /app
+RUN chown -R 1000:1000 /app
+USER 1000
 RUN npm install
 COPY Dockerfile greeting.md* .env* ./
 RUN npm run build
 EXPOSE 7860
 ENV NODE_ENV=production
+# Huggingface free VMs have 16GB of RAM so we can be greedy
+ENV NODE_OPTIONS="--max-old-space-size=12882"
 CMD [ "npm", "start" ]
+4 -3
@@ -45,10 +45,11 @@ You can also request Claude Instant, but support for this isn't fully implemented
 ### Supported model IDs
 Users can send these model IDs to the proxy to invoke the corresponding models.
 - **Claude**
-  - `anthropic.claude-v1` (~18k context)
-  - `anthropic.claude-v2` (~100k context)
+  - `anthropic.claude-v1` (~18k context, claude 1.3 -- EOL 2024-02-28)
+  - `anthropic.claude-v2` (~100k context, claude 2.0)
+  - `anthropic.claude-v2:1` (~200k context, claude 2.1)
 - **Claude Instant**
-  - `anthropic.claude-instant-v1`
+  - `anthropic.claude-instant-v1` (~100k context, claude instant 1.2)
+30
@@ -0,0 +1,30 @@
# Configuring the proxy for Azure
The proxy supports Azure OpenAI Service via the `/proxy/azure/openai` endpoint. The process of setting it up is slightly different from regular OpenAI.
- [Setting keys](#setting-keys)
- [Model assignment](#model-assignment)
## Setting keys
Use the `AZURE_CREDENTIALS` environment variable to set the Azure API keys.
Like other APIs, you can provide multiple keys separated by commas. Each Azure key, however, is a set of values including the Resource Name, Deployment ID, and API key. These are separated by a colon (`:`).
For example:
```
AZURE_CREDENTIALS=contoso-ml:gpt4-8k:0123456789abcdef0123456789abcdef,northwind-corp:testdeployment:0123456789abcdef0123456789abcdef
```
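Each comma-separated credential is a `resource-name:deployment-id:api-key` triple. As a rough sketch of how such a string splits into structured credentials (the interface and function names here are illustrative assumptions, not the proxy's actual types):

```typescript
interface AzureCredential {
  resourceName: string;
  deploymentId: string;
  apiKey: string;
}

// Hypothetical parser for the AZURE_CREDENTIALS format shown above.
function parseAzureCredentials(raw: string): AzureCredential[] {
  return raw.split(",").map((entry) => {
    const [resourceName, deploymentId, apiKey] = entry.trim().split(":");
    if (!resourceName || !deploymentId || !apiKey) {
      throw new Error(`Malformed Azure credential: "${entry}"`);
    }
    return { resourceName, deploymentId, apiKey };
  });
}
```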
## Model assignment
Note that each Azure deployment is assigned a model when you create it in the Azure OpenAI Service portal. If you want to use a different model, you'll need to create a new deployment, and therefore a new key to be added to the AZURE_CREDENTIALS environment variable. Each credential only grants access to one model.
### Supported model IDs
Users can send normal OpenAI model IDs to the proxy to invoke the corresponding models. For the most part they work the same with Azure. GPT-3.5 Turbo has an ID of "gpt-35-turbo" because Azure doesn't allow periods in model names, but the proxy should automatically convert this to the correct ID.
As noted above, you can only use model IDs for which a deployment has been created and added to the proxy.
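Since Azure deployment names cannot contain periods, the ID rewrite described above amounts to a small string substitution; a sketch under that assumption:

```typescript
// Map an OpenAI model ID to its Azure-safe equivalent, e.g.
// "gpt-3.5-turbo" -> "gpt-35-turbo". Illustrative only; the proxy's
// real mapping may handle more cases.
function toAzureModelId(openaiModelId: string): string {
  return openaiModelId.replace("gpt-3.5", "gpt-35");
}
```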
## On content filtering
Be aware that all Azure OpenAI Service deployments have content filtering enabled by default at a Medium level. Prompts or responses which are deemed to be inappropriate will be rejected by the API. This is a feature of the Azure OpenAI Service and not the proxy.
You can disable this from the deployment's settings within Azure, but you would first need to request an exemption from Microsoft for your organization. See [this page](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/content-filters) for more information.
+71
@@ -0,0 +1,71 @@
# Configuring the proxy for DALL-E
The proxy supports DALL-E 2 and DALL-E 3 image generation via the `/proxy/openai-images` endpoint. By default it is disabled as it is somewhat expensive and potentially more open to abuse than text generation.
- [Updating your Dockerfile](#updating-your-dockerfile)
- [Enabling DALL-E](#enabling-dall-e)
- [Setting quotas](#setting-quotas)
- [Rate limiting](#rate-limiting)
## Updating your Dockerfile
If you are using a previous version of the Dockerfile supplied with the proxy, it doesn't have the necessary permissions to let the proxy save temporary files.
You can replace the entire thing with the new Dockerfile at [./docker/huggingface/Dockerfile](../docker/huggingface/Dockerfile) (or the equivalent for Render deployments).
You can also modify your existing Dockerfile; just add the following lines after the `WORKDIR` line:
```Dockerfile
# Existing
RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
WORKDIR /app
# Take ownership of the app directory and switch to the non-root user
RUN chown -R 1000:1000 /app
USER 1000
# Existing
RUN npm install
```
## Enabling DALL-E
Add `dall-e` to the `ALLOWED_MODEL_FAMILIES` environment variable to enable DALL-E. For example:
```
# GPT3.5 Turbo, GPT-4, GPT-4 Turbo, and DALL-E
ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-turbo,dall-e
# All models as of this writing
ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,claude,gemini-pro,aws-claude,dall-e
```
Refer to [.env.example](../.env.example) for the full list of supported model families. Add `dall-e` to that full list to enable every model, including image generation.
## Setting quotas
DALL-E doesn't bill by token like text generation models. Instead there is a fixed cost per image generated, depending on the model, image size, and selected quality.
The proxy still uses tokens to set quotas for users. The cost for each generated image will be converted to "tokens" at a rate of 100000 tokens per US$1.00. This works out to a similar cost-per-token as GPT-4 Turbo, so you can use similar token quotas for both.
Use `TOKEN_QUOTA_DALL_E` to set the default quota for image generation. Otherwise it works the same as token quotas for other models.
```
# ~50 standard DALL-E images per refresh period, or US$2.00
TOKEN_QUOTA_DALL_E=200000
```
Refer to [https://openai.com/pricing](https://openai.com/pricing) for the latest pricing information. As of this writing, the cheapest DALL-E 3 image costs $0.04 per generation, which works out to 4000 tokens. Higher resolution and quality settings can cost up to $0.12 per image, or 12000 tokens.
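At the stated rate of 100000 tokens per US$1.00, converting an image's dollar cost into quota "tokens" is a single multiplication; a sketch using the prices quoted above (the function name is illustrative):

```typescript
const TOKENS_PER_USD = 100_000; // rate stated above: 100000 tokens per US$1.00

// Convert a generated image's dollar cost into quota "tokens".
function imageCostToTokens(usd: number): number {
  return Math.round(usd * TOKENS_PER_USD);
}

imageCostToTokens(0.04); // => 4000  (cheapest DALL-E 3 image)
imageCostToTokens(0.12); // => 12000 (highest resolution/quality settings)
```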
## Rate limiting
The old `MODEL_RATE_LIMIT` setting has been split into `TEXT_MODEL_RATE_LIMIT` and `IMAGE_MODEL_RATE_LIMIT`. Whatever value you previously set for `MODEL_RATE_LIMIT` will be used for text models.
If you don't specify an `IMAGE_MODEL_RATE_LIMIT`, it defaults to half of the `TEXT_MODEL_RATE_LIMIT`, with a minimum of 1 image per minute (see the sketch after the example below).
```
# 4 text generations per minute, 2 images per minute
TEXT_MODEL_RATE_LIMIT=4
IMAGE_MODEL_RATE_LIMIT=2
```
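The fallback rule described above (half the text limit, never below one image per minute) can be expressed in one line; a sketch of the assumed behavior, not the proxy's literal implementation:

```typescript
// Default IMAGE_MODEL_RATE_LIMIT when unset: half of TEXT_MODEL_RATE_LIMIT,
// floored, with a minimum of 1 image per minute.
function defaultImageRateLimit(textModelRateLimit: number): number {
  return Math.max(1, Math.floor(textModelRateLimit / 2));
}

defaultImageRateLimit(4); // => 2
defaultImageRateLimit(1); // => 1
```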
If a prompt is filtered by OpenAI's content filter, it won't count towards the rate limit.
## Hiding recent images
By default, the proxy shows the last 12 recently generated images by users. You can hide this section by setting `SHOW_RECENT_IMAGES` to `false`.
+5
@@ -1,5 +1,7 @@
 # Deploy to Huggingface Space
+**⚠️ This method is no longer recommended. Please use the [self-hosting instructions](./self-hosting.md) instead.**
 This repository can be deployed to a [Huggingface Space](https://huggingface.co/spaces). This is a free service that allows you to run a simple server in the cloud. You can use it to safely share your OpenAI API key with a friend.
 ### 1. Get an API key
@@ -25,11 +27,14 @@ RUN apt-get update && \
 apt-get install -y git
 RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
 WORKDIR /app
+RUN chown -R 1000:1000 /app
+USER 1000
 RUN npm install
 COPY Dockerfile greeting.md* .env* ./
 RUN npm run build
 EXPOSE 7860
 ENV NODE_ENV=production
+ENV NODE_OPTIONS="--max-old-space-size=12882"
 CMD [ "npm", "start" ]
 ```
 - Click "Commit new file to `main`" to save the Dockerfile.
+5
@@ -1,4 +1,7 @@
 # Deploy to Render.com
+**⚠️ This method is no longer recommended. Please use the [self-hosting instructions](./self-hosting.md) instead.**
 Render.com offers a free tier that includes 750 hours of compute time per month. This is enough to run a single proxy instance 24/7. Instances shut down after 15 minutes without traffic but start up again automatically when a request is received. You can use something like https://app.checklyhq.com/ to ping your proxy every 15 minutes to keep it alive.
 ### 1. Create account
@@ -28,6 +31,8 @@ The service will be created according to the instructions in the `render.yaml` file
 - For example, `OPENAI_KEY=sk-abc123`.
 - Click **Save Changes**.
+**IMPORTANT:** Set `TRUSTED_PROXIES=3`, otherwise users' IP addresses will not be recorded correctly (the server will see the IP address of Render's load balancer instead of the user's real IP address).
 The service will automatically rebuild and deploy with the new environment variables. This will take a few minutes. The link to your deployed proxy will appear at the top of the page.
 If you want to change the URL, go to the **Settings** tab of your Web Service and click the **Edit** button next to **Name**. You can also set a custom domain, though I haven't tried this yet.
+150
@@ -0,0 +1,150 @@
# Quick self-hosting guide
Temporary guide for self-hosting. This will be improved in the future to provide more robust instructions and options. Provided commands are for Ubuntu.
This uses prebuilt Docker images for convenience. If you want to make adjustments to the code you can instead clone the repo and follow the Local Development guide in the [README](../README.md).
## Table of Contents
- [Requirements](#requirements)
- [Running the application](#running-the-application)
- [Setting up a reverse proxy](#setting-up-a-reverse-proxy)
- [trycloudflare](#trycloudflare)
- [nginx](#nginx)
- [Example basic nginx configuration (no SSL)](#example-basic-nginx-configuration-no-ssl)
- [Example with Cloudflare SSL](#example-with-cloudflare-ssl)
- [Updating/Restarting the application](#updatingrestarting-the-application)
## Requirements
- Docker
- Docker Compose
- A VPS with at least 512MB of RAM (1GB recommended)
- A domain name
If you don't have a VPS and domain name you can use TryCloudflare to set up a temporary URL that you can share with others. See [trycloudflare](#trycloudflare) for more information.
## Running the application
- Install Docker and Docker Compose
- Create a new directory for the application
- This will contain your .env file, greeting file, and any user-generated files
- Execute the following commands:
- ```
touch .env
touch greeting.md
echo "OPENAI_KEY=your-openai-key" >> .env
curl https://gitgud.io/khanon/oai-reverse-proxy/-/raw/main/docker/docker-compose-selfhost.yml -o docker-compose.yml
```
- You can set further environment variables and keys in the `.env` file. See [.env.example](../.env.example) for a list of available options.
- You can set a custom greeting in `greeting.md`. This will be displayed on the homepage.
- Run `docker compose up -d`
You can check logs with `docker compose logs -n 100 -f`.
The provided docker-compose file listens on port 7860 but binds to localhost only. You should use a reverse proxy to expose the application to the internet as described in the next section.
## Setting up a reverse proxy
Rather than exposing the application directly to the internet, it is recommended to set up a reverse proxy. This will allow you to use HTTPS and add additional security measures.
### trycloudflare
This will give you a temporary (72 hours) URL that you can use to let others connect to your instance securely, without having to set up a reverse proxy. If you are running the server on your home network, this is probably the best option.
- Install `cloudflared` following the instructions at [try.cloudflare.com](https://try.cloudflare.com/).
- Run `cloudflared tunnel --url http://localhost:7860`
- You will be given a temporary URL that you can share with others.
If you have a VPS, you should use a proper reverse proxy like nginx instead for a more permanent solution which will allow you to use your own domain name, handle SSL, and add additional security/anti-abuse measures.
### nginx
First, install nginx.
- `sudo apt update && sudo apt install nginx`
#### Example basic nginx configuration (no SSL)
- `sudo nano /etc/nginx/sites-available/oai.conf`
- ```
server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://localhost:7860;
    }
}
```
- Replace `example.com` with your domain name.
- Ctrl+X to exit, Y to save, Enter to confirm.
- `sudo ln -s /etc/nginx/sites-available/oai.conf /etc/nginx/sites-enabled`
- `sudo nginx -t`
- This will check the configuration file for errors.
- `sudo systemctl restart nginx`
- This will restart nginx and apply the new configuration.
#### Example with Cloudflare SSL
This allows you to use a self-signed certificate on the server, and have Cloudflare handle client SSL. You need to have a Cloudflare account and have your domain set up with Cloudflare already, pointing to your server's IP address.
- Set Cloudflare to use Full SSL mode. Since we are using a self-signed certificate, don't use Full (strict) mode.
- Create a self-signed certificate:
- `openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/ssl/private/nginx-selfsigned.key -out /etc/ssl/certs/nginx-selfsigned.crt`
- `sudo nano /etc/nginx/sites-available/oai.conf`
- ```
server {
    listen 443 ssl;
    server_name yourdomain.com www.yourdomain.com;
    ssl_certificate /etc/ssl/certs/nginx-selfsigned.crt;
    ssl_certificate_key /etc/ssl/private/nginx-selfsigned.key;

    # Only allow inbound traffic from Cloudflare
    allow 173.245.48.0/20;
    allow 103.21.244.0/22;
    allow 103.22.200.0/22;
    allow 103.31.4.0/22;
    allow 141.101.64.0/18;
    allow 108.162.192.0/18;
    allow 190.93.240.0/20;
    allow 188.114.96.0/20;
    allow 197.234.240.0/22;
    allow 198.41.128.0/17;
    allow 162.158.0.0/15;
    allow 104.16.0.0/13;
    allow 104.24.0.0/14;
    allow 172.64.0.0/13;
    allow 131.0.72.0/22;
    deny all;

    location / {
        proxy_pass http://localhost:7860;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }

    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256';
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
}
```
- Replace `yourdomain.com` with your domain name.
- Ctrl+X to exit, Y to save, Enter to confirm.
- `sudo ln -s /etc/nginx/sites-available/oai.conf /etc/nginx/sites-enabled`
## Updating/Restarting the application
After making an .env change, you need to restart the application for it to take effect.
- `docker compose down`
- `docker compose up -d`
To update the application to the latest version:
- `docker compose pull`
- `docker compose down`
- `docker compose up -d`
- `docker image prune -f`
+9
@@ -0,0 +1,9 @@
{
  "dev": {
    "proxy-host": "http://localhost:7860",
    "oai-key-1": "override in http-client.private.env.json",
    "proxy-key": "override in http-client.private.env.json",
    "azu-resource-name": "override in http-client.private.env.json",
    "azu-deployment-id": "override in http-client.private.env.json"
  }
}
+650 -262
File diff suppressed because it is too large.
+17 -10
@@ -18,30 +18,36 @@
 "license": "MIT",
 "dependencies": {
 "@anthropic-ai/tokenizer": "^0.0.4",
-"@aws-crypto/sha256-js": "^5.1.0",
+"@aws-crypto/sha256-js": "^5.2.0",
+"@smithy/eventstream-codec": "^2.1.3",
+"@smithy/eventstream-serde-node": "^2.1.3",
-"@smithy/protocol-http": "^3.0.6",
+"@smithy/protocol-http": "^3.2.1",
-"@smithy/signature-v4": "^2.0.10",
+"@smithy/signature-v4": "^2.1.3",
-"@smithy/types": "^2.3.4",
+"@smithy/types": "^2.10.1",
+"@smithy/util-utf8": "^2.1.1",
 "axios": "^1.3.5",
+"check-disk-space": "^3.4.0",
 "cookie-parser": "^1.4.6",
 "copyfiles": "^2.4.1",
 "cors": "^2.8.5",
 "csrf-csrf": "^2.3.0",
-"dotenv": "^16.0.3",
+"dotenv": "^16.3.1",
 "ejs": "^3.1.9",
 "express": "^4.18.2",
 "express-session": "^1.17.3",
 "firebase-admin": "^11.10.1",
 "googleapis": "^122.0.0",
 "http-proxy-middleware": "^3.0.0-beta.1",
-"lifion-aws-event-stream": "^1.0.7",
 "memorystore": "^1.6.7",
 "multer": "^1.4.5-lts.1",
 "node-schedule": "^2.1.1",
 "pino": "^8.11.0",
 "pino-http": "^8.3.3",
-"sanitize-html": "^2.11.0",
+"sanitize-html": "2.12.1",
+"sharp": "^0.32.6",
 "showdown": "^2.1.0",
+"source-map-support": "^0.5.21",
+"stream-json": "^1.8.0",
 "tiktoken": "^1.0.10",
 "uuid": "^9.0.0",
 "zlib": "^1.0.5",
@@ -57,6 +63,7 @@
 "@types/node-schedule": "^2.1.0",
 "@types/sanitize-html": "^2.9.0",
 "@types/showdown": "^2.0.0",
+"@types/stream-json": "^1.7.7",
 "@types/uuid": "^9.0.1",
 "concurrently": "^8.0.1",
 "esbuild": "^0.17.16",
@@ -65,12 +72,12 @@
 "nodemon": "^3.0.1",
 "pino-pretty": "^10.2.3",
 "prettier": "^3.0.3",
-"source-map-support": "^0.5.21",
 "ts-node": "^10.9.1",
-"typescript": "^5.1.3"
+"typescript": "^5.4.2"
 },
 "overrides": {
 "google-gax": "^3.6.1",
-"postcss": "^8.4.31"
+"postcss": "^8.4.31",
+"follow-redirects": "^1.15.4"
 }
 }
+276
@@ -0,0 +1,276 @@
# OAI Reverse Proxy
###
# @name OpenAI -- Chat Completions
POST https://api.openai.com/v1/chat/completions
Authorization: Bearer {{oai-key-1}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo",
"max_tokens": 30,
"stream": false,
"messages": [
{
"role": "user",
"content": "This is a test prompt."
}
]
}
###
# @name OpenAI -- Text Completions
POST https://api.openai.com/v1/completions
Authorization: Bearer {{oai-key-1}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo-instruct",
"max_tokens": 30,
"stream": false,
"prompt": "This is a test prompt where"
}
###
# @name OpenAI -- Create Embedding
POST https://api.openai.com/v1/embeddings
Authorization: Bearer {{oai-key-1}}
Content-Type: application/json
{
"model": "text-embedding-ada-002",
"input": "This is a test embedding input."
}
###
# @name OpenAI -- Get Organizations
GET https://api.openai.com/v1/organizations
Authorization: Bearer {{oai-key-1}}
###
# @name OpenAI -- Get Models
GET https://api.openai.com/v1/models
Authorization: Bearer {{oai-key-1}}
###
# @name Azure OpenAI -- Chat Completions
POST https://{{azu-resource-name}}.openai.azure.com/openai/deployments/{{azu-deployment-id}}/chat/completions?api-version=2023-09-01-preview
api-key: {{azu-key-1}}
Content-Type: application/json
{
"max_tokens": 1,
"stream": false,
"messages": [
{
"role": "user",
"content": "This is a test prompt."
}
]
}
###
# @name Proxy / OpenAI -- Get Models
GET {{proxy-host}}/proxy/openai/v1/models
Authorization: Bearer {{proxy-key}}
###
# @name Proxy / OpenAI -- Native Chat Completions
POST {{proxy-host}}/proxy/openai/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-4-1106-preview",
"max_tokens": 20,
"stream": true,
"temperature": 1,
"seed": 123,
"messages": [
{
"role": "user",
"content": "phrase one"
}
]
}
###
# @name Proxy / OpenAI -- Native Text Completions
POST {{proxy-host}}/proxy/openai/v1/turbo-instruct/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo-instruct",
"max_tokens": 20,
"temperature": 0,
"prompt": "Genshin Impact is a game about",
"stream": false
}
###
# @name Proxy / OpenAI -- Chat-to-Text API Translation
# Accepts a chat completion request and reformats it to work with the text completion API. `model` is ignored.
POST {{proxy-host}}/proxy/openai/turbo-instruct/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-4",
"max_tokens": 20,
"stream": true,
"messages": [
{
"role": "user",
"content": "What is the name of the fourth president of the united states?"
},
{
"role": "assistant",
"content": "That would be George Washington."
},
{
"role": "user",
"content": "I don't think that's right..."
}
]
}
###
# @name Proxy / OpenAI -- Create Embedding
POST {{proxy-host}}/proxy/openai/embeddings
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "text-embedding-ada-002",
"input": "This is a test embedding input."
}
###
# @name Proxy / Anthropic -- Native Completion (old API)
POST {{proxy-host}}/proxy/anthropic/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-01-01
Content-Type: application/json
{
"model": "claude-v1.3",
"max_tokens_to_sample": 20,
"temperature": 0.2,
"stream": true,
"prompt": "What is genshin impact\n\n:Assistant:"
}
###
# @name Proxy / Anthropic -- Native Completion (2023-06-01 API)
POST {{proxy-host}}/proxy/anthropic/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-06-01
Content-Type: application/json
{
"model": "claude-v1.3",
"max_tokens_to_sample": 20,
"temperature": 0.2,
"stream": true,
"prompt": "What is genshin impact\n\n:Assistant:"
}
###
# @name Proxy / Anthropic -- OpenAI-to-Anthropic API Translation
POST {{proxy-host}}/proxy/anthropic/v1/chat/completions
Authorization: Bearer {{proxy-key}}
#anthropic-version: 2023-06-01
Content-Type: application/json
{
"model": "gpt-3.5-turbo",
"max_tokens": 20,
"stream": false,
"temperature": 0,
"messages": [
{
"role": "user",
"content": "What is genshin impact"
}
]
}
###
# @name Proxy / AWS Claude -- Native Completion
POST {{proxy-host}}/proxy/aws/claude/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-01-01
Content-Type: application/json
{
"model": "claude-v2",
"max_tokens_to_sample": 10,
"temperature": 0,
"stream": true,
"prompt": "What is genshin impact\n\n:Assistant:"
}
###
# @name Proxy / AWS Claude -- OpenAI-to-Anthropic API Translation
POST {{proxy-host}}/proxy/aws/claude/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo",
"max_tokens": 50,
"stream": true,
"messages": [
{
"role": "user",
"content": "What is genshin impact?"
}
]
}
###
# @name Proxy / Azure OpenAI -- Native Chat Completions
POST {{proxy-host}}/proxy/azure/openai/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-4",
"max_tokens": 20,
"stream": true,
"temperature": 1,
"seed": 2,
"messages": [
{
"role": "user",
"content": "Hi what is the name of the fourth president of the united states?"
},
{
"role": "assistant",
"content": "That would be George Washington."
},
{
"role": "user",
"content": "That's not right."
}
]
}
###
# @name Proxy / Google AI -- OpenAI-to-Google AI API Translation
POST {{proxy-host}}/proxy/google-ai/v1/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json

{
"model": "gpt-4",
"max_tokens": 42,
"messages": [
{
"role": "user",
"content": "Hi what is the name of the fourth president of the united states?"
}
]
}
--- new file: +45 ---
@@ -0,0 +1,45 @@
// Simple concurrency smoke test: fires a batch of requests at a locally
// running proxy instance and logs each result as it settles.
const axios = require("axios");

const concurrentRequests = 75;

const headers = {
  Authorization: "Bearer test",
  "Content-Type": "application/json",
};

const payload = {
  model: "gpt-4",
  max_tokens: 1,
  stream: false,
  messages: [{ role: "user", content: "Hi" }],
};

const makeRequest = async (i) => {
  try {
    const response = await axios.post(
      "http://localhost:7860/proxy/google-ai/v1/chat/completions",
      payload,
      { headers }
    );
    console.log(
      `Req ${i} finished with status code ${response.status} and response:`,
      response.data
    );
  } catch (error) {
    // Prefer the API's error payload when the server responded at all.
    const msg = error.response?.data;
    console.error(`Error in req ${i}:`, error.message, msg || "");
  }
};

const executeRequestsConcurrently = () => {
  const promises = [];
  for (let i = 1; i <= concurrentRequests; i++) {
    console.log(`Starting request ${i}`);
    promises.push(makeRequest(i));
  }
  Promise.all(promises).then(() => {
    console.log("All requests finished");
  });
};

executeRequestsConcurrently();
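A note on running it: assuming the script is saved as load-test.js (the diff does not give a filename), `node load-test.js` fires all 75 requests at once. The hard-coded `Bearer test` token and localhost:7860 target presume a local dev instance configured to accept them.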
--- file changed: +7 ---
@@ -4,6 +4,8 @@ import { HttpError } from "../shared/errors";
import { injectLocals } from "../shared/inject-locals";
import { withSession } from "../shared/with-session";
import { injectCsrfToken, checkCsrfToken } from "../shared/inject-csrf";
+import { renderPage } from "../info-page";
+import { buildInfo } from "../service-info";
import { loginRouter } from "./login";
import { usersApiRouter as apiRouter } from "./api/users";
import { usersWebRouter as webRouter } from "./web/manage";
@@ -23,6 +25,11 @@ adminRouter.use(checkCsrfToken);
adminRouter.use(injectLocals);
adminRouter.use("/", loginRouter);
adminRouter.use("/manage", authorize({ via: "cookie" }), webRouter);
+adminRouter.use("/service-info", authorize({ via: "cookie" }), (req, res) => {
+  return res.send(
+    renderPage(buildInfo(req.protocol + "://" + req.get("host"), true))
+  );
+});
adminRouter.use(
  (
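This route pairs with the staticServiceInfo option introduced in the config changes below: even when the public info page is reduced to static content, an admin signed in via cookie can still load the full stats view at /admin/service-info (buildInfo's second argument is presumably the flag that unlocks the non-static rendering).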
--- file changed: +20 -6 ---
@@ -6,7 +6,7 @@ import { HttpError } from "../../shared/errors";
import * as userStore from "../../shared/users/user-store";
import { parseSort, sortBy, paginate } from "../../shared/utils";
import { keyPool } from "../../shared/key-management";
-import { MODEL_FAMILIES } from "../../shared/models";
+import { LLMService, MODEL_FAMILIES } from "../../shared/models";
import { getTokenCostUsd, prettyTokens } from "../../shared/stats";
import {
  User,
@@ -14,6 +14,7 @@ import {
  UserSchema,
  UserTokenCounts,
} from "../../shared/users/schema";
+import { getLastNImages } from "../../shared/file-storage/image-history";

const router = Router();
@@ -196,13 +197,14 @@ router.post("/maintenance", (req, res) => {
  let flash = { type: "", message: "" };
  switch (action) {
    case "recheck": {
-      keyPool.recheck("openai");
-      keyPool.recheck("anthropic");
-      const size = keyPool
+      const checkable: LLMService[] = ["openai", "anthropic", "aws", "azure"];
+      checkable.forEach((s) => keyPool.recheck(s));
+      const keyCount = keyPool
        .list()
-        .filter((k) => k.service !== "google-palm").length;
+        .filter((k) => checkable.includes(k.service)).length;
      flash.type = "success";
-      flash.message = `Scheduled recheck of ${size} keys for OpenAI and Anthropic.`;
+      flash.message = `Scheduled recheck of ${keyCount} keys.`;
      break;
    }
    case "resetQuotas": {
@@ -220,6 +222,18 @@ router.post("/maintenance", (req, res) => {
      flash.message = `All users' token usage records reset.`;
      break;
    }
+    case "downloadImageMetadata": {
+      const data = JSON.stringify({
+        exportedAt: new Date().toISOString(),
+        generations: getLastNImages()
+      }, null, 2);
+      res.setHeader(
+        "Content-Disposition",
+        `attachment; filename=image-metadata-${new Date().toISOString()}.json`
+      );
+      res.setHeader("Content-Type", "application/json");
+      return res.send(data);
+    }
    default: {
      throw new HttpError(400, "Invalid action");
    }
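For reference, the new maintenance action streams the JSON back as an attachment whose name embeds an ISO-8601 timestamp, e.g. image-metadata-2024-03-16T00:00:00.000Z.json (illustrative value).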
--- file changed: +14 ---
@@ -1,5 +1,11 @@
<%- include("partials/shared_header", { title: "OAI Reverse Proxy Admin" }) %>
<h1>OAI Reverse Proxy Admin</h1>
+<% if (!usersEnabled) { %>
+<p style="color: red; background-color: #eedddd; padding: 1em">
+  <strong>🚨 <code>user_token</code> gatekeeper is not enabled.</strong><br />
+  <br />None of the user management features will do anything.
+</p>
+<% } %>
<% if (!persistenceEnabled) { %>
<p style="color: red; background-color: #eedddd; padding: 1em">
  <strong>⚠️ Users will be lost when the server restarts because persistence is not configured.</strong><br />
@@ -19,6 +25,7 @@
  <li><a href="/admin/manage/import-users">Import Users</a></li>
  <li><a href="/admin/manage/export-users">Export Users</a></li>
  <li><a href="/admin/manage/download-stats">Download Rentry Stats</a>
+  <li><a href="/admin/service-info">Service Info</a></li>
</ul>
<h3>Maintenance</h3>
<form id="maintenanceForm" action="/admin/manage/maintenance" method="post">
@@ -43,6 +50,13 @@
  </p>
</fieldset>
<% } %>
+<% if (imageGenerationEnabled) { %>
+<fieldset>
+  <legend>Image Generation</legend>
+  <button id="download-image-metadata" type="button" onclick="submitForm('downloadImageMetadata')">Download Image Metadata</button>
+  <label for="download-image-metadata">Downloads a metadata file containing URL, prompt, and truncated user token for all cached images.</label>
+</fieldset>
+<% } %>
</div>
</form>
--- file changed: +1 -1 ---
@@ -6,7 +6,7 @@
<% } else { %>
<input type="checkbox" id="toggle-nicknames" onchange="toggleNicknames()" />
<label for="toggle-nicknames">Show Nicknames</label>
-<table>
+<table class="striped">
  <thead>
    <tr>
      <th>User</th>
--- file changed: +203 -39 ---
@@ -1,21 +1,37 @@
import dotenv from "dotenv";
import type firebase from "firebase-admin";
+import path from "path";
import pino from "pino";
import type { ModelFamily } from "./shared/models";
+import { MODEL_FAMILIES } from "./shared/models";

dotenv.config();

const startupLogger = pino({ level: "debug" }).child({ module: "startup" });
const isDev = process.env.NODE_ENV !== "production";

+export const DATA_DIR = path.join(__dirname, "..", "data");
+export const USER_ASSETS_DIR = path.join(DATA_DIR, "user-files");
+
type Config = {
  /** The port the proxy server will listen on. */
  port: number;
+  /** The network interface the proxy server will listen on. */
+  bindAddress: string;
  /** Comma-delimited list of OpenAI API keys. */
  openaiKey?: string;
  /** Comma-delimited list of Anthropic API keys. */
  anthropicKey?: string;
-  /** Comma-delimited list of Google PaLM API keys. */
-  googlePalmKey?: string;
+  /**
+   * Comma-delimited list of Google AI API keys. Note that these are not the
+   * same as the GCP keys/credentials used for Vertex AI; the models are the
+   * same but the APIs are different. Vertex is the GCP product for enterprise.
+   **/
+  googleAIKey?: string;
+  /**
+   * Comma-delimited list of Mistral AI API keys.
+   */
+  mistralAIKey?: string;
  /**
   * Comma-delimited list of AWS credentials. Each credential item should be a
   * colon-delimited list of access key, secret key, and AWS region.
@@ -28,6 +44,17 @@ type Config = {
   * @example `AWS_CREDENTIALS=access_key_1:secret_key_1:us-east-1,access_key_2:secret_key_2:us-west-2`
   */
  awsCredentials?: string;
+  /**
+   * Comma-delimited list of Azure OpenAI credentials. Each credential item
+   * should be a colon-delimited list of Azure resource name, deployment ID, and
+   * API key.
+   *
+   * The resource name is the subdomain in your Azure OpenAI deployment's URL,
+   * e.g. `https://resource-name.openai.azure.com`
+   *
+   * @example `AZURE_CREDENTIALS=resource_name_1:deployment_id_1:api_key_1,resource_name_2:deployment_id_2:api_key_2`
+   */
+  azureCredentials?: string;
  /**
   * The proxy key to require for requests. Only applicable if the user
   * management mode is set to 'proxy_key', and required if so.
@@ -38,6 +65,11 @@ type Config = {
   * management mode is set to 'user_token'.
   */
  adminKey?: string;
+  /**
+   * The password required to view the service info/status page. If not set, the
+   * info page will be publicly accessible.
+   */
+  serviceInfoPassword?: string;
  /**
   * Which user management mode to use.
   * - `none`: No user management. Proxy is open to all requests with basic
@@ -65,13 +97,20 @@ type Config = {
   */
  firebaseKey?: string;
  /**
-   * Maximum number of IPs per user, after which their token is disabled.
+   * Maximum number of IPs allowed per user token.
   * Users with the manually-assigned `special` role are exempt from this limit.
   * - Defaults to 0, which means that users are not IP-limited.
   */
  maxIpsPerUser: number;
-  /** Per-IP limit for requests per minute to OpenAI's completions endpoint. */
-  modelRateLimit: number;
+  /**
+   * Whether a user token should be automatically disabled if it exceeds the
+   * `maxIpsPerUser` limit, or whether only connections from new IPs should be
+   * rejected.
+   */
+  maxIpsAutoBan: boolean;
+  /** Per-IP limit for requests per minute to text and chat models. */
+  textModelRateLimit: number;
+  /** Per-IP limit for requests per minute to image generation models. */
+  imageModelRateLimit: number;
  /**
   * For OpenAI, the maximum number of context tokens (prompt + max output) a
   * user can request before their request is rejected.
@@ -90,10 +129,10 @@ type Config = {
  maxOutputTokensOpenAI: number;
  /** For Anthropic, the maximum number of sampled tokens a user can request. */
  maxOutputTokensAnthropic: number;
-  /** Whether requests containing disallowed characters should be rejected. */
-  rejectDisallowed?: boolean;
+  /** Whether requests containing the following phrases should be rejected. */
+  rejectPhrases: string[];
  /** Message to return when rejecting requests. */
-  rejectMessage?: string;
+  rejectMessage: string;
  /** Verbosity level of diagnostic logging. */
  logLevel: "trace" | "debug" | "info" | "warn" | "error";
  /**
@@ -152,32 +191,101 @@ type Config = {
  quotaRefreshPeriod?: "hourly" | "daily" | string;
  /** Whether to allow users to change their own nicknames via the UI. */
  allowNicknameChanges: boolean;
+  /** Whether to show recent DALL-E image generations on the homepage. */
+  showRecentImages: boolean;
+  /**
+   * If true, cookies will be set without the `Secure` attribute, allowing
+   * the admin UI to be used over HTTP.
+   */
+  useInsecureCookies: boolean;
+  /**
+   * Whether to use a more minimal public Service Info page with static content.
+   * Disables all stats pertaining to traffic, prompt/token usage, and queues.
+   * The full info page will appear if you have signed in as an admin using the
+   * configured ADMIN_KEY and go to /admin/service-info.
+   **/
+  staticServiceInfo?: boolean;
+  /**
+   * Trusted proxy hops. If you are deploying the server behind a reverse proxy
+   * (Nginx, Cloudflare Tunnel, AWS WAF, etc.) the IP address of incoming
+   * requests will be the IP address of the proxy, not the actual user.
+   *
+   * Depending on your hosting configuration, there may be multiple proxies/load
+   * balancers between your server and the user. Each one will append the
+   * incoming IP address to the `X-Forwarded-For` header. The user's real IP
+   * address will be the first one in the list, assuming the header has not been
+   * tampered with. Setting this value correctly ensures that the server doesn't
+   * trust values in `X-Forwarded-For` not added by trusted proxies.
+   *
+   * In order for the server to determine the user's real IP address, you need
+   * to tell it how many proxies are between the user and the server so it can
+   * select the correct IP address from the `X-Forwarded-For` header.
+   *
+   * *WARNING:* If you set it incorrectly, the proxy will either record the
+   * wrong IP address, or it will be possible for users to spoof their IP
+   * addresses and bypass rate limiting. Check the request logs to see what
+   * incoming X-Forwarded-For values look like.
+   *
+   * Examples:
+   * - X-Forwarded-For: "34.1.1.1, 172.1.1.1, 10.1.1.1" => trustedProxies: 3
+   * - X-Forwarded-For: "34.1.1.1" => trustedProxies: 1
+   * - no X-Forwarded-For header => trustedProxies: 0 (the actual IP of the incoming request will be used)
+   *
+   * As of 2024/01/08:
+   * For HuggingFace or Cloudflare Tunnel, use 1.
+   * For Render, use 3.
+   * For deployments not behind a load balancer, use 0.
+   *
+   * You should double-check against your actual request logs to be sure.
+   *
+   * Defaults to 1, as most deployments are on HuggingFace or Cloudflare Tunnel.
+   */
+  trustedProxies?: number;
+  /**
+   * Whether to allow OpenAI tool usage. The proxy doesn't implement any
+   * support for tools/function calling but can pass requests and responses as
+   * is. Note that the proxy also cannot accurately track quota usage for
+   * requests involving tools, so you must opt in to this feature at your own
+   * risk.
+   */
+  allowOpenAIToolUsage?: boolean;
+  /**
+   * Allows overriding the default proxy endpoint route. Defaults to /proxy.
+   * A leading slash is required.
+   */
+  proxyEndpointRoute: string;
};
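To make the trustedProxies setting above concrete: Express's built-in "trust proxy" setting accepts exactly this kind of numeric hop count, so consuming the value can be as simple as the sketch below (an assumption about how the server wires it up, not code from this commit):

import express from "express";
import { config } from "./config";

const app = express();
// With a numeric value N, Express trusts N hops in front of the server and
// resolves req.ip to the first address beyond them in X-Forwarded-For.
app.set("trust proxy", config.trustedProxies);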
// To change configs, create a file called .env in the root directory.
// See .env.example for an example.
export const config: Config = {
  port: getEnvWithDefault("PORT", 7860),
+  bindAddress: getEnvWithDefault("BIND_ADDRESS", "0.0.0.0"),
  openaiKey: getEnvWithDefault("OPENAI_KEY", ""),
  anthropicKey: getEnvWithDefault("ANTHROPIC_KEY", ""),
-  googlePalmKey: getEnvWithDefault("GOOGLE_PALM_KEY", ""),
+  googleAIKey: getEnvWithDefault("GOOGLE_AI_KEY", ""),
+  mistralAIKey: getEnvWithDefault("MISTRAL_AI_KEY", ""),
  awsCredentials: getEnvWithDefault("AWS_CREDENTIALS", ""),
+  azureCredentials: getEnvWithDefault("AZURE_CREDENTIALS", ""),
  proxyKey: getEnvWithDefault("PROXY_KEY", ""),
  adminKey: getEnvWithDefault("ADMIN_KEY", ""),
+  serviceInfoPassword: getEnvWithDefault("SERVICE_INFO_PASSWORD", ""),
  gatekeeper: getEnvWithDefault("GATEKEEPER", "none"),
  gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory"),
  maxIpsPerUser: getEnvWithDefault("MAX_IPS_PER_USER", 0),
+  maxIpsAutoBan: getEnvWithDefault("MAX_IPS_AUTO_BAN", true),
  firebaseRtdbUrl: getEnvWithDefault("FIREBASE_RTDB_URL", undefined),
  firebaseKey: getEnvWithDefault("FIREBASE_KEY", undefined),
-  modelRateLimit: getEnvWithDefault("MODEL_RATE_LIMIT", 4),
-  maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 0),
+  textModelRateLimit: getEnvWithDefault("TEXT_MODEL_RATE_LIMIT", 4),
+  imageModelRateLimit: getEnvWithDefault("IMAGE_MODEL_RATE_LIMIT", 4),
+  maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 16384),
  maxContextTokensAnthropic: getEnvWithDefault(
    "MAX_CONTEXT_TOKENS_ANTHROPIC",
    0
  ),
  maxOutputTokensOpenAI: getEnvWithDefault(
    ["MAX_OUTPUT_TOKENS_OPENAI", "MAX_OUTPUT_TOKENS"],
-    300
+    400
  ),
  maxOutputTokensAnthropic: getEnvWithDefault(
    ["MAX_OUTPUT_TOKENS_ANTHROPIC", "MAX_OUTPUT_TOKENS"],
@@ -187,11 +295,21 @@ export const config: Config = {
    "turbo",
    "gpt4",
    "gpt4-32k",
+    "gpt4-turbo",
    "claude",
-    "bison",
+    "claude-opus",
+    "gemini-pro",
+    "mistral-tiny",
+    "mistral-small",
+    "mistral-medium",
+    "mistral-large",
    "aws-claude",
+    "azure-turbo",
+    "azure-gpt4",
+    "azure-gpt4-turbo",
+    "azure-gpt4-32k",
  ]),
-  rejectDisallowed: getEnvWithDefault("REJECT_DISALLOWED", false),
+  rejectPhrases: parseCsv(getEnvWithDefault("REJECT_PHRASES", "")),
  rejectMessage: getEnvWithDefault(
    "REJECT_MESSAGE",
    "This content violates /aicg/'s acceptable use policy."
@@ -213,16 +331,24 @@ export const config: Config = {
    "You must be over the age of majority in your country to use this service."
  ),
  blockRedirect: getEnvWithDefault("BLOCK_REDIRECT", "https://www.9gag.com"),
-  tokenQuota: {
-    turbo: getEnvWithDefault("TOKEN_QUOTA_TURBO", 0),
-    gpt4: getEnvWithDefault("TOKEN_QUOTA_GPT4", 0),
-    "gpt4-32k": getEnvWithDefault("TOKEN_QUOTA_GPT4_32K", 0),
-    claude: getEnvWithDefault("TOKEN_QUOTA_CLAUDE", 0),
-    bison: getEnvWithDefault("TOKEN_QUOTA_BISON", 0),
-    "aws-claude": getEnvWithDefault("TOKEN_QUOTA_AWS_CLAUDE", 0),
-  },
+  tokenQuota: MODEL_FAMILIES.reduce(
+    (acc, family: ModelFamily) => {
+      acc[family] = getEnvWithDefault(
+        `TOKEN_QUOTA_${family.toUpperCase().replace(/-/g, "_")}`,
+        0
+      ) as number;
+      return acc;
+    },
+    {} as { [key in ModelFamily]: number }
+  ),
  quotaRefreshPeriod: getEnvWithDefault("QUOTA_REFRESH_PERIOD", undefined),
  allowNicknameChanges: getEnvWithDefault("ALLOW_NICKNAME_CHANGES", true),
+  showRecentImages: getEnvWithDefault("SHOW_RECENT_IMAGES", true),
+  useInsecureCookies: getEnvWithDefault("USE_INSECURE_COOKIES", isDev),
+  staticServiceInfo: getEnvWithDefault("STATIC_SERVICE_INFO", false),
+  trustedProxies: getEnvWithDefault("TRUSTED_PROXIES", 1),
+  allowOpenAIToolUsage: getEnvWithDefault("ALLOW_OPENAI_TOOL_USAGE", false),
+  proxyEndpointRoute: getEnvWithDefault("PROXY_ENDPOINT_ROUTE", "/proxy"),
} as const;
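Since the quota table is now derived from MODEL_FAMILIES, each family reads its own variable by upper-casing the family name and replacing dashes with underscores. For example, hypothetical .env lines:

TOKEN_QUOTA_GPT4_32K=500000
TOKEN_QUOTA_AWS_CLAUDE=1000000

set quotas for the gpt4-32k and aws-claude families; any family without a variable falls back to 0, as before.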
function generateCookieSecret() {
@@ -238,12 +364,16 @@ function generateCookieSecret() {
export const COOKIE_SECRET = generateCookieSecret();

export async function assertConfigIsValid() {
-  if (process.env.TURBO_ONLY === "true") {
+  if (process.env.MODEL_RATE_LIMIT !== undefined) {
+    const limit =
+      parseInt(process.env.MODEL_RATE_LIMIT, 10) || config.textModelRateLimit;
+    config.textModelRateLimit = limit;
+    config.imageModelRateLimit = Math.max(Math.floor(limit / 2), 1);
    startupLogger.warn(
-      "TURBO_ONLY is deprecated. Use ALLOWED_MODEL_FAMILIES=turbo instead."
-    );
-    config.allowedModelFamilies = config.allowedModelFamilies.filter(
-      (f) => !f.includes("gpt4")
+      { textLimit: limit, imageLimit: config.imageModelRateLimit },
+      "MODEL_RATE_LIMIT is deprecated. Use TEXT_MODEL_RATE_LIMIT and IMAGE_MODEL_RATE_LIMIT instead."
    );
  }
@@ -284,7 +414,8 @@ export async function assertConfigIsValid() {
  // them to users.
  for (const key of getKeys(config)) {
    const maybeSensitive = ["key", "credentials", "secret", "password"].some(
-      (sensitive) => key.toLowerCase().includes(sensitive)
+      (sensitive) =>
+        key.toLowerCase().includes(sensitive) && !["checkKeys"].includes(key)
    );
    const secured = new Set([...SENSITIVE_KEYS, ...OMITTED_KEYS]);
    if (maybeSensitive && !secured.has(key))
@@ -306,16 +437,21 @@ export const SENSITIVE_KEYS: (keyof Config)[] = ["googleSheetsSpreadsheetId"];
 * Config keys that are not displayed on the info page at all, generally because
 * they are not relevant to the user or can be inferred from other config.
 */
-export const OMITTED_KEYS: (keyof Config)[] = [
+export const OMITTED_KEYS = [
  "port",
+  "bindAddress",
  "logLevel",
  "openaiKey",
  "anthropicKey",
-  "googlePalmKey",
+  "googleAIKey",
+  "mistralAIKey",
  "awsCredentials",
+  "azureCredentials",
  "proxyKey",
  "adminKey",
-  "checkKeys",
+  "serviceInfoPassword",
+  "rejectPhrases",
+  "rejectMessage",
  "showTokenCosts",
  "googleSheetsKey",
  "firebaseKey",
@@ -326,34 +462,53 @@ export const OMITTED_KEYS = [
  "blockMessage",
  "blockRedirect",
  "allowNicknameChanges",
-];
+  "showRecentImages",
+  "useInsecureCookies",
+  "staticServiceInfo",
+  "checkKeys",
+  "allowedModelFamilies",
+  "trustedProxies",
+  "proxyEndpointRoute",
+] satisfies (keyof Config)[];
+
+type OmitKeys = (typeof OMITTED_KEYS)[number];
+type Printable<T> = {
+  [P in keyof T as Exclude<P, OmitKeys>]: T[P] extends object
+    ? Printable<T[P]>
+    : string;
+};
+type PublicConfig = Printable<Config>;

const getKeys = Object.keys as <T extends object>(obj: T) => Array<keyof T>;

-export function listConfig(obj: Config = config): Record<string, any> {
-  const result: Record<string, any> = {};
+export function listConfig(obj: Config = config) {
+  const result: Record<string, unknown> = {};
  for (const key of getKeys(obj)) {
    const value = obj[key]?.toString() || "";

-    const shouldOmit =
-      OMITTED_KEYS.includes(key) || value === "" || value === "undefined";
    const shouldMask = SENSITIVE_KEYS.includes(key);
+    const shouldOmit =
+      OMITTED_KEYS.includes(key as OmitKeys) ||
+      value === "" ||
+      value === "undefined";

    if (shouldOmit) {
      continue;
    }

+    const validKey = key as keyof Printable<Config>;
    if (value && shouldMask) {
-      result[key] = "********";
+      result[validKey] = "********";
    } else {
-      result[key] = value;
+      result[validKey] = value;
    }

    if (typeof obj[key] === "object" && !Array.isArray(obj[key])) {
      result[key] = listConfig(obj[key] as unknown as Config);
    }
  }
-  return result;
+  return result as PublicConfig;
}

/**
@@ -372,8 +527,9 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
    [
      "OPENAI_KEY",
      "ANTHROPIC_KEY",
-      "GOOGLE_PALM_KEY",
+      "GOOGLE_AI_KEY",
      "AWS_CREDENTIALS",
+      "AZURE_CREDENTIALS",
    ].includes(String(env))
  ) {
    return value as unknown as T;
@@ -415,3 +571,11 @@ export function getFirebaseApp(): firebase.app.App {
  }
  return firebaseApp;
}
+
+function parseCsv(val: string): string[] {
+  if (!val) return [];
+  const regex = /(".*?"|[^",]+)(?=\s*,|\s*$)/g;
+  const matches = val.match(regex) || [];
+  return matches.map((item) => item.replace(/^"|"$/g, "").trim());
+}
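A quick illustration of parseCsv's quoting behavior (not from the diff itself):

parseCsv(`"no, really",bad phrase`);
// => ["no, really", "bad phrase"]
// Quoted items may contain commas; surrounding quotes and whitespace are stripped.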
--- file changed: +170 -413 ---
@@ -1,136 +1,90 @@
-/** This whole module kinda sucks */
import fs from "fs";
-import { Request, Response } from "express";
+import express, { Router, Request, Response } from "express";
import showdown from "showdown";
-import { config, listConfig } from "./config";
-import {
-  AnthropicKey,
-  GooglePalmKey,
-  OpenAIKey,
-  AwsBedrockKey,
-  keyPool,
-} from "./shared/key-management";
-import { ModelFamily, OpenAIModelFamily } from "./shared/models";
-import { getUniqueIps } from "./proxy/rate-limit";
-import { getEstimatedWaitTime, getQueueLength } from "./proxy/queue";
-import { getTokenCostUsd, prettyTokens } from "./shared/stats";
-import { assertNever } from "./shared/utils";
+import { config } from "./config";
+import { buildInfo, ServiceInfo } from "./service-info";
+import { getLastNImages } from "./shared/file-storage/image-history";
+import { keyPool } from "./shared/key-management";
+import { MODEL_FAMILY_SERVICE, ModelFamily } from "./shared/models";
+import { withSession } from "./shared/with-session";
+import { checkCsrfToken, injectCsrfToken } from "./shared/inject-csrf";

const INFO_PAGE_TTL = 2000;
+const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
+  turbo: "GPT-3.5 Turbo",
+  gpt4: "GPT-4",
+  "gpt4-32k": "GPT-4 32k",
+  "gpt4-turbo": "GPT-4 Turbo",
+  "dall-e": "DALL-E",
+  claude: "Claude (Sonnet)",
+  "claude-opus": "Claude (Opus)",
+  "gemini-pro": "Gemini Pro",
+  "mistral-tiny": "Mistral 7B",
+  // Originally 8x7B, but that now refers to the older open-weight version.
+  // Mixtral Small is a newer closed-weight update to the 8x7B model.
+  "mistral-small": "Mixtral Small",
+  "mistral-medium": "Mistral Medium",
+  "mistral-large": "Mistral Large",
+  "aws-claude": "AWS Claude (Sonnet)",
+  "azure-turbo": "Azure GPT-3.5 Turbo",
+  "azure-gpt4": "Azure GPT-4",
+  "azure-gpt4-32k": "Azure GPT-4 32k",
+  "azure-gpt4-turbo": "Azure GPT-4 Turbo",
+  "azure-dall-e": "Azure DALL-E",
+};
+
+const converter = new showdown.Converter();
-const customGreeting = fs.existsSync("greeting.md")
-  ? `\n## Server Greeting\n${fs.readFileSync("greeting.md", "utf8")}`
-  : "";

let infoPageHtml: string | undefined;
let infoPageLastUpdated = 0;

-type KeyPoolKey = ReturnType<typeof keyPool.list>[0];
-const keyIsOpenAIKey = (k: KeyPoolKey): k is OpenAIKey =>
-  k.service === "openai";
-const keyIsAnthropicKey = (k: KeyPoolKey): k is AnthropicKey =>
-  k.service === "anthropic";
-const keyIsGooglePalmKey = (k: KeyPoolKey): k is GooglePalmKey =>
-  k.service === "google-palm";
-const keyIsAwsKey = (k: KeyPoolKey): k is AwsBedrockKey => k.service === "aws";
-
-type ModelAggregates = {
-  active: number;
-  trial?: number;
-  revoked?: number;
-  overQuota?: number;
-  pozzed?: number;
-  awsLogged?: number;
-  queued: number;
-  queueTime: string;
-  tokens: number;
-};
-type ModelAggregateKey = `${ModelFamily}__${keyof ModelAggregates}`;
-
-type ServiceAggregates = {
-  status?: string;
-  openaiKeys?: number;
-  openaiOrgs?: number;
-  anthropicKeys?: number;
-  palmKeys?: number;
-  awsKeys?: number;
-  proompts: number;
-  tokens: number;
-  tokenCost: number;
-  openAiUncheckedKeys?: number;
-  anthropicUncheckedKeys?: number;
-} & {
-  [modelFamily in ModelFamily]?: ModelAggregates;
-};
-
-const modelStats = new Map<ModelAggregateKey, number>();
-const serviceStats = new Map<keyof ServiceAggregates, number>();
export const handleInfoPage = (req: Request, res: Response) => {
  if (infoPageLastUpdated + INFO_PAGE_TTL > Date.now()) {
-    res.send(infoPageHtml);
-    return;
+    return res.send(infoPageHtml);
  }

+  // Sometimes huggingface doesn't send the host header and makes us guess.
  const baseUrl =
    process.env.SPACE_ID && !req.get("host")?.includes("hf.space")
      ? getExternalUrlForHuggingfaceSpaceId(process.env.SPACE_ID)
      : req.protocol + "://" + req.get("host");

-  res.send(cacheInfoPageHtml(baseUrl));
+  const info = buildInfo(baseUrl + config.proxyEndpointRoute);
+  infoPageHtml = renderPage(info);
+  infoPageLastUpdated = Date.now();
+
+  res.send(infoPageHtml);
};
-function getCostString(cost: number) {
-  if (!config.showTokenCosts) return "";
-  return ` ($${cost.toFixed(2)})`;
-}
-
-function cacheInfoPageHtml(baseUrl: string) {
-  const keys = keyPool.list();
-  modelStats.clear();
-  serviceStats.clear();
-  keys.forEach(addKeyToAggregates);
-
-  const openaiKeys = serviceStats.get("openaiKeys") || 0;
-  const anthropicKeys = serviceStats.get("anthropicKeys") || 0;
-  const palmKeys = serviceStats.get("palmKeys") || 0;
-  const awsKeys = serviceStats.get("awsKeys") || 0;
-  const proompts = serviceStats.get("proompts") || 0;
-  const tokens = serviceStats.get("tokens") || 0;
-  const tokenCost = serviceStats.get("tokenCost") || 0;
-
-  const info = {
-    uptime: Math.floor(process.uptime()),
-    endpoints: {
-      ...(openaiKeys ? { openai: baseUrl + "/proxy/openai" } : {}),
-      ...(openaiKeys
-        ? { ["openai2"]: baseUrl + "/proxy/openai/turbo-instruct" }
-        : {}),
-      ...(anthropicKeys ? { anthropic: baseUrl + "/proxy/anthropic" } : {}),
-      ...(palmKeys ? { "google-palm": baseUrl + "/proxy/google-palm" } : {}),
-      ...(awsKeys ? { aws: baseUrl + "/proxy/aws/claude" } : {}),
-    },
-    proompts,
-    tookens: `${prettyTokens(tokens)}${getCostString(tokenCost)}`,
-    ...(config.modelRateLimit ? { proomptersNow: getUniqueIps() } : {}),
-    openaiKeys,
-    anthropicKeys,
-    palmKeys,
-    awsKeys,
-    ...(openaiKeys ? getOpenAIInfo() : {}),
-    ...(anthropicKeys ? getAnthropicInfo() : {}),
-    ...(palmKeys ? { "palm-bison": getPalmInfo() } : {}),
-    ...(awsKeys ? { "aws-claude": getAwsInfo() } : {}),
-    config: listConfig(),
-    build: process.env.BUILD_INFO || "dev",
-  };
-
+export function renderPage(info: ServiceInfo) {
  const title = getServerTitle();
-  const headerHtml = buildInfoPageHeader(new showdown.Converter(), title);
+  const headerHtml = buildInfoPageHeader(info);

-  const pageBody = `<!DOCTYPE html>
+  return `<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <meta name="robots" content="noindex" />
    <title>${title}</title>
+    <style>
+      body {
+        font-family: sans-serif;
+        background-color: #f0f0f0;
+        padding: 1em;
+      }
+      @media (prefers-color-scheme: dark) {
+        body {
+          background-color: #222;
+          color: #eee;
+        }
+        a:link, a:visited {
+          color: #bbe;
+        }
+      }
+    </style>
  </head>
-  <body style="font-family: sans-serif; background-color: #f0f0f0; padding: 1em;">
+  <body>
    ${headerHtml}
    <hr />
    <h2>Service Info</h2>
@@ -138,317 +92,52 @@ function cacheInfoPageHtml(baseUrl: string) {
    ${getSelfServiceLinks()}
  </body>
</html>`;
-
-  infoPageHtml = pageBody;
-  infoPageLastUpdated = Date.now();
-  return pageBody;
}
-function getUniqueOpenAIOrgs(keys: KeyPoolKey[]) {
-  const orgIds = new Set(
-    keys.filter((k) => k.service === "openai").map((k: any) => k.organizationId)
-  );
-  return orgIds.size;
-}
-
-function increment<T extends keyof ServiceAggregates | ModelAggregateKey>(
-  map: Map<T, number>,
-  key: T,
-  delta = 1
-) {
-  map.set(key, (map.get(key) || 0) + delta);
-}
-
-function addKeyToAggregates(k: KeyPoolKey) {
-  increment(serviceStats, "proompts", k.promptCount);
-  increment(serviceStats, "openaiKeys", k.service === "openai" ? 1 : 0);
-  increment(serviceStats, "anthropicKeys", k.service === "anthropic" ? 1 : 0);
-  increment(serviceStats, "palmKeys", k.service === "google-palm" ? 1 : 0);
-  increment(serviceStats, "awsKeys", k.service === "aws" ? 1 : 0);
-
-  let sumTokens = 0;
-  let sumCost = 0;
-  let family: ModelFamily;
-  const families = k.modelFamilies.filter((f) =>
-    config.allowedModelFamilies.includes(f)
-  );
-
-  switch (k.service) {
-    case "openai":
-      if (!keyIsOpenAIKey(k)) throw new Error("Invalid key type");
-      increment(
-        serviceStats,
-        "openAiUncheckedKeys",
-        Boolean(k.lastChecked) ? 0 : 1
-      );
-      // Technically this would not account for keys that have tokens recorded
-      // on models they aren't provisioned for, but that would be strange
-      k.modelFamilies.forEach((f) => {
-        const tokens = k[`${f}Tokens`];
-        sumTokens += tokens;
-        sumCost += getTokenCostUsd(f, tokens);
-        increment(modelStats, `${f}__tokens`, tokens);
-      });
-      if (families.includes("gpt4-32k")) {
-        family = "gpt4-32k";
-      } else if (families.includes("gpt4")) {
-        family = "gpt4";
-      } else {
-        family = "turbo";
-      }
-      increment(modelStats, `${family}__trial`, k.isTrial ? 1 : 0);
-      break;
-    case "anthropic":
-      if (!keyIsAnthropicKey(k)) throw new Error("Invalid key type");
-      family = "claude";
-      sumTokens += k.claudeTokens;
-      sumCost += getTokenCostUsd(family, k.claudeTokens);
-      increment(modelStats, `${family}__tokens`, k.claudeTokens);
-      increment(modelStats, `${family}__pozzed`, k.isPozzed ? 1 : 0);
-      increment(
-        serviceStats,
-        "anthropicUncheckedKeys",
-        Boolean(k.lastChecked) ? 0 : 1
-      );
-      break;
-    case "google-palm":
-      if (!keyIsGooglePalmKey(k)) throw new Error("Invalid key type");
-      family = "bison";
-      sumTokens += k.bisonTokens;
-      sumCost += getTokenCostUsd(family, k.bisonTokens);
-      increment(modelStats, `${family}__tokens`, k.bisonTokens);
-      break;
-    case "aws":
-      if (!keyIsAwsKey(k)) throw new Error("Invalid key type");
-      family = "aws-claude";
-      sumTokens += k["aws-claudeTokens"];
-      sumCost += getTokenCostUsd(family, k["aws-claudeTokens"]);
-      increment(modelStats, `${family}__tokens`, k["aws-claudeTokens"]);
-      // Ignore revoked keys for aws logging stats, but include keys where the
-      // logging status is unknown.
-      const countAsLogged =
-        k.lastChecked && !k.isDisabled && k.awsLoggingStatus !== "disabled";
-      increment(modelStats, `${family}__awsLogged`, countAsLogged ? 1 : 0);
-      break;
-    default:
-      assertNever(k.service);
-  }
-
-  increment(serviceStats, "tokens", sumTokens);
-  increment(serviceStats, "tokenCost", sumCost);
-  increment(modelStats, `${family}__active`, k.isDisabled ? 0 : 1);
-  if ("isRevoked" in k) {
-    increment(modelStats, `${family}__revoked`, k.isRevoked ? 1 : 0);
-  }
-  if ("isOverQuota" in k) {
-    increment(modelStats, `${family}__overQuota`, k.isOverQuota ? 1 : 0);
-  }
-}
-
-function getOpenAIInfo() {
-  const info: { status?: string; openaiKeys?: number; openaiOrgs?: number } & {
-    [modelFamily in OpenAIModelFamily]?: {
-      usage?: string;
-      activeKeys: number;
-      trialKeys?: number;
-      revokedKeys?: number;
-      overQuotaKeys?: number;
-      proomptersInQueue?: number;
-      estimatedQueueTime?: string;
-    };
-  } = {};
-
-  const allowedFamilies = new Set(config.allowedModelFamilies);
-  let families = new Set<OpenAIModelFamily>();
-  const keys = keyPool.list().filter((k) => {
-    const isOpenAI = keyIsOpenAIKey(k);
-    if (isOpenAI) k.modelFamilies.forEach((f) => families.add(f));
-    return isOpenAI;
-  }) as Omit<OpenAIKey, "key">[];
-  families = new Set([...families].filter((f) => allowedFamilies.has(f)));
-
-  if (config.checkKeys) {
-    const unchecked = serviceStats.get("openAiUncheckedKeys") || 0;
-    if (unchecked > 0) {
-      info.status = `Checking ${unchecked} keys...`;
-    }
-    info.openaiKeys = keys.length;
-    info.openaiOrgs = getUniqueOpenAIOrgs(keys);
-
-    families.forEach((f) => {
-      const tokens = modelStats.get(`${f}__tokens`) || 0;
-      const cost = getTokenCostUsd(f, tokens);
-
-      info[f] = {
-        usage: `${prettyTokens(tokens)} tokens${getCostString(cost)}`,
-        activeKeys: modelStats.get(`${f}__active`) || 0,
-        trialKeys: modelStats.get(`${f}__trial`) || 0,
-        revokedKeys: modelStats.get(`${f}__revoked`) || 0,
-        overQuotaKeys: modelStats.get(`${f}__overQuota`) || 0,
-      };
-    });
-  } else {
-    info.status = "Key checking is disabled.";
-    info.turbo = { activeKeys: keys.filter((k) => !k.isDisabled).length };
-    info.gpt4 = {
-      activeKeys: keys.filter(
-        (k) => !k.isDisabled && k.modelFamilies.includes("gpt4")
-      ).length,
-    };
-  }
-
-  families.forEach((f) => {
-    if (info[f]) {
-      const { estimatedQueueTime, proomptersInQueue } = getQueueInformation(f);
-      info[f]!.proomptersInQueue = proomptersInQueue;
-      info[f]!.estimatedQueueTime = estimatedQueueTime;
-    }
-  });
-
-  return info;
-}
-
-function getAnthropicInfo() {
-  const claudeInfo: Partial<ModelAggregates> = {
-    active: modelStats.get("claude__active") || 0,
-    pozzed: modelStats.get("claude__pozzed") || 0,
-  };
-  const queue = getQueueInformation("claude");
-  claudeInfo.queued = queue.proomptersInQueue;
-  claudeInfo.queueTime = queue.estimatedQueueTime;
-  const tokens = modelStats.get("claude__tokens") || 0;
-  const cost = getTokenCostUsd("claude", tokens);
-  const unchecked =
-    (config.checkKeys && serviceStats.get("anthropicUncheckedKeys")) || 0;
-  return {
-    claude: {
-      usage: `${prettyTokens(tokens)} tokens${getCostString(cost)}`,
-      ...(unchecked > 0 ? { status: `Checking ${unchecked} keys...` } : {}),
-      activeKeys: claudeInfo.active,
-      ...(config.checkKeys ? { pozzedKeys: claudeInfo.pozzed } : {}),
-      proomptersInQueue: claudeInfo.queued,
-      estimatedQueueTime: claudeInfo.queueTime,
-    },
-  };
-}
-
-function getPalmInfo() {
-  const bisonInfo: Partial<ModelAggregates> = {
-    active: modelStats.get("bison__active") || 0,
-  };
-  const queue = getQueueInformation("bison");
-  bisonInfo.queued = queue.proomptersInQueue;
-  bisonInfo.queueTime = queue.estimatedQueueTime;
-  const tokens = modelStats.get("bison__tokens") || 0;
-  const cost = getTokenCostUsd("bison", tokens);
-  return {
-    usage: `${prettyTokens(tokens)} tokens${getCostString(cost)}`,
-    activeKeys: bisonInfo.active,
-    proomptersInQueue: bisonInfo.queued,
-    estimatedQueueTime: bisonInfo.queueTime,
-  };
-}
-
-function getAwsInfo() {
-  const awsInfo: Partial<ModelAggregates> = {
-    active: modelStats.get("aws-claude__active") || 0,
-  };
-  const queue = getQueueInformation("aws-claude");
-  awsInfo.queued = queue.proomptersInQueue;
-  awsInfo.queueTime = queue.estimatedQueueTime;
-  const tokens = modelStats.get("aws-claude__tokens") || 0;
-  const cost = getTokenCostUsd("aws-claude", tokens);
-  const logged = modelStats.get("aws-claude__awsLogged") || 0;
-  const logMsg = config.allowAwsLogging
-    ? `${logged} active keys are potentially logged.`
-    : `${logged} active keys are potentially logged and can't be used.`;
-  return {
-    usage: `${prettyTokens(tokens)} tokens${getCostString(cost)}`,
-    activeKeys: awsInfo.active,
-    proomptersInQueue: awsInfo.queued,
-    estimatedQueueTime: awsInfo.queueTime,
-    ...(logged > 0 ? { privacy: logMsg } : {}),
-  };
-}
-
+const customGreeting = fs.existsSync("greeting.md")
+  ? fs.readFileSync("greeting.md", "utf8")
+  : null;
/**
 * If the server operator provides a `greeting.md` file, it will be included in
 * the rendered info page.
 **/
-function buildInfoPageHeader(converter: showdown.Converter, title: string) {
+function buildInfoPageHeader(info: ServiceInfo) {
+  const title = getServerTitle();
  // TODO: use some templating engine instead of this mess
-  let infoBody = `<!-- Header for Showdown's parser, don't remove this line -->
-# ${title}`;
+  let infoBody = `# ${title}`;
  if (config.promptLogging) {
-    infoBody += `\n## Prompt logging is enabled!
-The server operator has enabled prompt logging. The prompts you send to this proxy and the AI responses you receive may be saved.
-Logs are anonymous and do not contain IP addresses or timestamps. [You can see the type of data logged here, along with the rest of the code.](https://gitgud.io/khanon/oai-reverse-proxy/-/blob/main/src/prompt-logging/index.ts).
+    infoBody += `\n## Prompt Logging Enabled
+This proxy keeps full logs of all prompts and AI responses. Prompt logs are anonymous and do not contain IP addresses or timestamps.
+[You can see the type of data logged here, along with the rest of the code.](https://gitgud.io/khanon/oai-reverse-proxy/-/blob/main/src/shared/prompt-logging/index.ts).
**If you are uncomfortable with this, don't send prompts to this proxy!**`;
  }

+  if (config.staticServiceInfo) {
+    return converter.makeHtml(infoBody + customGreeting);
+  }
+
  const waits: string[] = [];
+  infoBody += `\n## Estimated Wait Times\nIf the AI is busy, your prompt will be processed when a slot frees up.`;

-  if (config.openaiKey) {
-    // TODO: un-fuck this
-    const keys = keyPool.list().filter((k) => k.service === "openai");
-
-    const turboWait = getQueueInformation("turbo").estimatedQueueTime;
-    waits.push(`**Turbo:** ${turboWait}`);
-
-    const gpt4Wait = getQueueInformation("gpt4").estimatedQueueTime;
-    const hasGpt4 = keys.some((k) => k.modelFamilies.includes("gpt4"));
-    const allowedGpt4 = config.allowedModelFamilies.includes("gpt4");
-    if (hasGpt4 && allowedGpt4) {
-      waits.push(`**GPT-4:** ${gpt4Wait}`);
-    }
-
-    const gpt432kWait = getQueueInformation("gpt4-32k").estimatedQueueTime;
-    const hasGpt432k = keys.some((k) => k.modelFamilies.includes("gpt4-32k"));
-    const allowedGpt432k = config.allowedModelFamilies.includes("gpt4-32k");
-    if (hasGpt432k && allowedGpt432k) {
-      waits.push(`**GPT-4-32k:** ${gpt432kWait}`);
-    }
-  }
-
-  if (config.anthropicKey) {
-    const claudeWait = getQueueInformation("claude").estimatedQueueTime;
-    waits.push(`**Claude:** ${claudeWait}`);
-  }
-
-  if (config.awsCredentials) {
-    const awsClaudeWait = getQueueInformation("aws-claude").estimatedQueueTime;
-    waits.push(`**Claude (AWS):** ${awsClaudeWait}`);
-  }
+  for (const modelFamily of config.allowedModelFamilies) {
+    const service = MODEL_FAMILY_SERVICE[modelFamily];
+    const hasKeys = keyPool.list().some((k) => {
+      return k.service === service && k.modelFamilies.includes(modelFamily);
+    });
+    const wait = info[modelFamily]?.estimatedQueueTime;
+    if (hasKeys && wait) {
+      waits.push(
+        `**${MODEL_FAMILY_FRIENDLY_NAME[modelFamily] || modelFamily}**: ${wait}`
+      );
+    }
+  }

  infoBody += "\n\n" + waits.join(" / ");

-  if (customGreeting) {
-    infoBody += `\n## Server Greeting\n${customGreeting}`;
-  }
+  infoBody += customGreeting;
+
+  infoBody += buildRecentImageSection();

  return converter.makeHtml(infoBody);
}
@@ -457,21 +146,6 @@ function getSelfServiceLinks() {
  return `<footer style="font-size: 0.8em;"><hr /><a target="_blank" href="/user/lookup">Check your user token info</a></footer>`;
}

-/** Returns queue time in seconds, or minutes + seconds if over 60 seconds. */
-function getQueueInformation(partition: ModelFamily) {
-  const waitMs = getEstimatedWaitTime(partition);
-  const waitTime =
-    waitMs < 60000
-      ? `${Math.round(waitMs / 1000)}sec`
-      : `${Math.round(waitMs / 60000)}min, ${Math.round(
-          (waitMs % 60000) / 1000
-        )}sec`;
-  return {
-    proomptersInQueue: getQueueLength(partition),
-    estimatedQueueTime: waitMs > 2000 ? waitTime : "no wait",
-  };
-}
-
function getServerTitle() {
  // Use manually set title if available
  if (process.env.SERVER_TITLE) {
@@ -491,9 +165,46 @@ function getServerTitle() {
  return "OAI Reverse Proxy";
}

+function buildRecentImageSection() {
+  const dalleModels: ModelFamily[] = ["azure-dall-e", "dall-e"];
+  if (
+    !config.showRecentImages ||
+    dalleModels.every((f) => !config.allowedModelFamilies.includes(f))
+  ) {
+    return "";
+  }
+
+  let html = `<h2>Recent DALL-E Generations</h2>`;
+  const recentImages = getLastNImages(12).reverse();
+  if (recentImages.length === 0) {
+    html += `<p>No images yet.</p>`;
+    return html;
+  }
+
+  html += `<div style="display: flex; flex-wrap: wrap;" id="recent-images">`;
+  for (const { url, prompt } of recentImages) {
+    const thumbUrl = url.replace(/\.png$/, "_t.jpg");
+    const escapedPrompt = escapeHtml(prompt);
+    html += `<div style="margin: 0.5em;" class="recent-image">
+<a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}" alt="${escapedPrompt}" style="max-width: 150px; max-height: 150px;" /></a>
+</div>`;
+  }
+  html += `</div>`;
+
+  html += `<p style="clear: both; text-align: center;"><a href="/user/image-history">View all recent images</a></p>`;
+  return html;
+}
+
+function escapeHtml(unsafe: string) {
+  return unsafe
+    .replace(/&/g, "&amp;")
+    .replace(/</g, "&lt;")
+    .replace(/>/g, "&gt;")
+    .replace(/"/g, "&quot;")
+    .replace(/'/g, "&#39;");
+}
+
function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
+  // Huggingface broke their amazon elb config and no longer sends the
+  // x-forwarded-host header. This is a workaround.
  try {
    const [username, spacename] = spaceId.split("/");
    return `https://${username}-${spacename.replace(/_/g, "-")}.hf.space`;
@@ -501,3 +212,49 @@ function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
    return "";
  }
}
+
+function checkIfUnlocked(
+  req: Request,
+  res: Response,
+  next: express.NextFunction
+) {
+  if (config.serviceInfoPassword?.length && !req.session?.unlocked) {
+    return res.redirect("/unlock-info");
+  }
+  next();
+}
+
+const infoPageRouter = Router();
+if (config.serviceInfoPassword?.length) {
+  infoPageRouter.use(
+    express.json({ limit: "1mb" }),
+    express.urlencoded({ extended: true, limit: "1mb" })
+  );
+  infoPageRouter.use(withSession);
+  infoPageRouter.use(injectCsrfToken, checkCsrfToken);
+  infoPageRouter.post("/unlock-info", (req, res) => {
+    if (req.body.password !== config.serviceInfoPassword) {
+      return res.status(403).send("Incorrect password");
+    }
+    req.session!.unlocked = true;
+    res.redirect("/");
+  });
+  infoPageRouter.get("/unlock-info", (_req, res) => {
+    if (_req.session?.unlocked) return res.redirect("/");
+    res.send(`
+      <form method="post" action="/unlock-info">
+        <h1>Unlock Service Info</h1>
+        <input type="hidden" name="_csrf" value="${res.locals.csrfToken}" />
+        <input type="password" name="password" placeholder="Password" />
+        <button type="submit">Unlock</button>
+      </form>
+    `);
+  });
+  infoPageRouter.use(checkIfUnlocked);
+}
+infoPageRouter.get("/", handleInfoPage);
+infoPageRouter.get("/status", (req, res) => {
+  res.json(buildInfo(req.protocol + "://" + req.get("host"), false));
+});
+export { infoPageRouter };
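The same service info is also exposed as machine-readable JSON via the new /status route, so (assuming the router is mounted at the app root on the default port) a health check can be as simple as: curl http://localhost:7860/status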
--- file changed: +211 -55 ---
@@ -1,4 +1,4 @@
-import { Request, RequestHandler, Router } from "express";
+import { Request, Response, RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { logger } from "../logger";
@@ -7,18 +7,16 @@ import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
  addKey,
-  applyQuotaLimits,
  addAnthropicPreamble,
-  blockZoomerOrigins,
  createPreprocessorMiddleware,
  finalizeBody,
-  languageFilter,
-  stripHeaders,
+  createOnProxyReqHandler,
} from "./middleware/request";
import {
  ProxyResHandlerWithBody,
  createOnProxyResHandler,
} from "./middleware/response";
+import { sendErrorToClient } from "./middleware/response/error-generator";

let modelsCache: any = null;
let modelsCacheTime = 0;
@@ -42,8 +40,12 @@
    "claude-instant-v1.1",
    "claude-instant-v1.1-100k",
    "claude-instant-v1.0",
-    "claude-2", // claude-2 is 100k by default it seems
+    "claude-2",
    "claude-2.0",
+    "claude-2.1",
+    "claude-3-haiku-20240307",
+    "claude-3-opus-20240229",
+    "claude-3-sonnet-20240229",
  ];

  const models = claudeVariants.map((id) => ({
@@ -77,31 +79,56 @@
    throw new Error("Expected body to be an object");
  }

-  if (config.promptLogging) {
-    const host = req.get("host");
-    body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
-  }
+  let newBody = body;
+  switch (`${req.inboundApi}<-${req.outboundApi}`) {
+    case "openai<-anthropic-text":
+      req.log.info("Transforming Anthropic Text back to OpenAI format");
+      newBody = transformAnthropicTextResponseToOpenAI(body, req);
+      break;
+    case "openai<-anthropic-chat":
+      req.log.info("Transforming Anthropic Chat back to OpenAI format");
+      newBody = transformAnthropicChatResponseToOpenAI(body);
+      break;
+    case "anthropic-text<-anthropic-chat":
+      req.log.info("Transforming Anthropic Chat back to Anthropic Text format");
+      newBody = transformAnthropicChatResponseToAnthropicText(body);
+      break;
+  }

-  if (req.inboundApi === "openai") {
-    req.log.info("Transforming Anthropic response to OpenAI format");
-    body = transformAnthropicResponse(body, req);
-  }
-
-  // TODO: Remove once tokenization is stable
-  if (req.debug) {
-    body.proxy_tokenizer_debug_info = req.debug;
-  }
-
-  res.status(200).json(body);
+  res.status(200).json({ ...newBody, proxy: body.proxy });
};

+function flattenChatResponse(
+  content: { type: string; text: string }[]
+): string {
+  return content
+    .map((part: { type: string; text: string }) =>
+      part.type === "text" ? part.text : ""
+    )
+    .join("\n");
+}
+
+export function transformAnthropicChatResponseToAnthropicText(
+  anthropicBody: Record<string, any>
+): Record<string, any> {
+  return {
+    type: "completion",
+    id: "ant-" + anthropicBody.id,
+    completion: flattenChatResponse(anthropicBody.content),
+    stop_reason: anthropicBody.stop_reason,
+    stop: anthropicBody.stop_sequence,
+    model: anthropicBody.model,
+    usage: anthropicBody.usage,
+  };
+}
+
/**
 * Transforms a model response from the Anthropic API to match those from the
 * OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
 * is only used for non-streaming requests as streaming requests are handled
 * on-the-fly.
 */
-function transformAnthropicResponse(
+function transformAnthropicTextResponseToOpenAI(
  anthropicBody: Record<string, any>,
  req: Request
): Record<string, any> {
@@ -129,71 +156,200 @@
  };
}

-const anthropicProxy = createQueueMiddleware(
-  createProxyMiddleware({
+function transformAnthropicChatResponseToOpenAI(
+  anthropicBody: Record<string, any>
+): Record<string, any> {
+  return {
+    id: "ant-" + anthropicBody.id,
+    object: "chat.completion",
+    created: Date.now(),
+    model: anthropicBody.model,
+    usage: anthropicBody.usage,
+    choices: [
+      {
+        message: {
+          role: "assistant",
+          content: flattenChatResponse(anthropicBody.content),
+        },
+        finish_reason: anthropicBody.stop_reason,
+        index: 0,
+      },
+    ],
+  };
+}
+
+const anthropicProxy = createQueueMiddleware({
+  proxyMiddleware: createProxyMiddleware({
    target: "https://api.anthropic.com",
    changeOrigin: true,
    selfHandleResponse: true,
    logger,
    on: {
      proxyReq: createOnProxyReqHandler({
-        pipeline: [
-          applyQuotaLimits,
-          addKey,
-          addAnthropicPreamble,
-          languageFilter,
-          blockZoomerOrigins,
-          stripHeaders,
-          finalizeBody,
-        ],
+        pipeline: [addKey, addAnthropicPreamble, finalizeBody],
      }),
      proxyRes: createOnProxyResHandler([anthropicResponseHandler]),
      error: handleProxyError,
    },
-    pathRewrite: {
-      // Send OpenAI-compat requests to the real Anthropic endpoint.
-      "^/v1/chat/completions": "/v1/complete",
+    // Abusing pathFilter to rewrite the paths dynamically.
+    pathFilter: (pathname, req) => {
+      const isText = req.outboundApi === "anthropic-text";
+      const isChat = req.outboundApi === "anthropic-chat";
+      if (isChat && pathname === "/v1/complete") {
+        req.url = "/v1/messages";
+      }
+      if (isText && pathname === "/v1/chat/completions") {
+        req.url = "/v1/complete";
+      }
+      if (isChat && pathname === "/v1/chat/completions") {
+        req.url = "/v1/messages";
+      }
+      if (isChat && ["sonnet", "opus"].includes(req.params.type)) {
+        req.url = "/v1/messages";
+      }
+      return true;
    },
-  })
-);
+  }),
+});
+const nativeTextPreprocessor = createPreprocessorMiddleware({
+  inApi: "anthropic-text",
+  outApi: "anthropic-text",
+  service: "anthropic",
+});
+
+const textToChatPreprocessor = createPreprocessorMiddleware({
+  inApi: "anthropic-text",
+  outApi: "anthropic-chat",
+  service: "anthropic",
+});
+
+/**
+ * Routes text completion prompts to anthropic-chat if they need translation
+ * (claude-3 based models do not support the old text completion endpoint).
+ */
+const preprocessAnthropicTextRequest: RequestHandler = (req, res, next) => {
+  if (req.body.model?.startsWith("claude-3")) {
+    textToChatPreprocessor(req, res, next);
+  } else {
+    nativeTextPreprocessor(req, res, next);
+  }
+};
+
+const oaiToTextPreprocessor = createPreprocessorMiddleware({
+  inApi: "openai",
+  outApi: "anthropic-text",
+  service: "anthropic",
+});
+
+const oaiToChatPreprocessor = createPreprocessorMiddleware({
+  inApi: "openai",
+  outApi: "anthropic-chat",
+  service: "anthropic",
+});
+
+/**
+ * Routes an OpenAI prompt to either the legacy Claude text completion endpoint
+ * or the new Claude chat completion endpoint, based on the requested model.
+ */
+const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
+  maybeReassignModel(req);
+  if (req.body.model?.includes("claude-3")) {
+    oaiToChatPreprocessor(req, res, next);
+  } else {
+    oaiToTextPreprocessor(req, res, next);
+  }
+};
const anthropicRouter = Router();
anthropicRouter.get("/v1/models", handleModelRequest);
// Native Anthropic chat completion endpoint.
anthropicRouter.post(
-  "/v1/complete",
+  "/v1/messages",
  ipLimiter,
  createPreprocessorMiddleware({
-    inApi: "anthropic",
-    outApi: "anthropic",
+    inApi: "anthropic-chat",
+    outApi: "anthropic-chat",
    service: "anthropic",
  }),
  anthropicProxy
);
+// Anthropic text completion endpoint. Translates to Anthropic chat completion
+// if the requested model is a Claude 3 model.
+anthropicRouter.post(
+  "/v1/complete",
+  ipLimiter,
+  preprocessAnthropicTextRequest,
+  anthropicProxy
+);
-// OpenAI-to-Anthropic compatibility endpoint.
+// OpenAI-to-Anthropic compatibility endpoint. Accepts an OpenAI chat completion
+// request and transforms/routes it to the appropriate Anthropic format and
+// endpoint based on the requested model.
anthropicRouter.post(
  "/v1/chat/completions",
  ipLimiter,
-  createPreprocessorMiddleware(
-    { inApi: "openai", outApi: "anthropic", service: "anthropic" },
-    { afterTransform: [maybeReassignModel] }
-  ),
+  preprocessOpenAICompatRequest,
+  anthropicProxy
+);
+// Temporarily force Anthropic Text to Anthropic Chat for frontends which do not
+// yet support the new model. Forces claude-3. Will be removed once common
+// frontends have been updated.
+anthropicRouter.post(
+  "/v1/:type(sonnet|opus)/:action(complete|messages)",
+  ipLimiter,
+  handleAnthropicTextCompatRequest,
+  createPreprocessorMiddleware({
+    inApi: "anthropic-text",
+    outApi: "anthropic-chat",
+    service: "anthropic",
+  }),
  anthropicProxy
);
function handleAnthropicTextCompatRequest(
req: Request,
res: Response,
next: any
) {
const type = req.params.type;
const action = req.params.action;
const alreadyInChatFormat = Boolean(req.body.messages);
const compatModel = `claude-3-${type}-20240229`;
req.log.info(
{ type, inputModel: req.body.model, compatModel, alreadyInChatFormat },
"Handling Anthropic compatibility request"
);
if (action === "messages" || alreadyInChatFormat) {
return sendErrorToClient({
req,
res,
options: {
title: "Unnecessary usage of compatibility endpoint",
message: `Your client seems to already support the new Claude API format. This endpoint is intended for clients that do not yet support the new format.\nUse the normal \`/anthropic\` proxy endpoint instead.`,
format: "unknown",
statusCode: 400,
reqId: req.id,
obj: {
requested_endpoint: "/anthropic/" + type,
correct_endpoint: "/anthropic",
},
},
});
}
req.body.model = compatModel;
next();
}
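For illustration, the effect on hypothetical requests to the compat route (parameter values assumed):
// POST /v1/opus/complete with body { prompt: "\n\nHuman: hi\n\nAssistant:" }
//   -> req.body.model is forced to "claude-3-opus-20240229" and the request
//      proceeds to the anthropic-text -> anthropic-chat preprocessor.
// POST /v1/opus/messages, or any body that already contains `messages`,
//   -> rejected with the 400 "Unnecessary usage" error above.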
/**
* If a client using the OpenAI compatibility endpoint requests an actual OpenAI
* model, reassigns it to Claude 3 Sonnet.
*/
function maybeReassignModel(req: Request) {
  const model = req.body.model;
  if (!model.startsWith("gpt-")) return;
-  const bigModel = process.env.CLAUDE_BIG_MODEL || "claude-v1-100k";
-  const contextSize = req.promptTokens! + req.outputTokens!;
-  if (contextSize > 8500) {
-    req.log.debug(
-      { model: bigModel, contextSize },
-      "Using Claude 100k model for OpenAI-to-Anthropic request"
-    );
-    req.body.model = bigModel;
-  }
+  req.body.model = "claude-3-sonnet-20240229";
}
export const anthropic = anthropicRouter;
+165 -54
@@ -1,4 +1,4 @@
-import { Request, RequestHandler, Router } from "express";
+import { Request, RequestHandler, Response, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { config } from "../config";
@@ -7,19 +7,19 @@ import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
-  applyQuotaLimits,
  createPreprocessorMiddleware,
-  stripHeaders,
  signAwsRequest,
-  finalizeAwsRequest,
+  finalizeSignedRequest,
  createOnProxyReqHandler,
-  languageFilter,
-  blockZoomerOrigins,
} from "./middleware/request";
import {
  ProxyResHandlerWithBody,
  createOnProxyResHandler,
} from "./middleware/response";
+import { transformAnthropicChatResponseToAnthropicText } from "./anthropic";
+import { sendErrorToClient } from "./middleware/response/error-generator";
+const LATEST_AWS_V2_MINOR_VERSION = "1";
let modelsCache: any = null;
let modelsCacheTime = 0;
@@ -31,7 +31,13 @@ const getModelsResponse = () => {
  if (!config.awsCredentials) return { object: "list", data: [] };
-  const variants = ["anthropic.claude-v1", "anthropic.claude-v2"];
+  // https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
+  const variants = [
+    "anthropic.claude-v2",
+    "anthropic.claude-v2:1",
+    "anthropic.claude-3-haiku-20240307-v1:0",
+    "anthropic.claude-3-sonnet-20240229-v1:0",
+  ];
  const models = variants.map((id) => ({
    id,
@@ -64,25 +70,26 @@ const awsResponseHandler: ProxyResHandlerWithBody = async (
throw new Error("Expected body to be an object"); throw new Error("Expected body to be an object");
} }
if (config.promptLogging) { let newBody = body;
const host = req.get("host"); switch (`${req.inboundApi}<-${req.outboundApi}`) {
body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`; case "openai<-anthropic-text":
req.log.info("Transforming Anthropic Text back to OpenAI format");
newBody = transformAwsTextResponseToOpenAI(body, req);
break;
// case "openai<-anthropic-chat":
// todo: implement this
case "anthropic-text<-anthropic-chat":
req.log.info("Transforming AWS Anthropic Chat back to Text format");
newBody = transformAnthropicChatResponseToAnthropicText(body);
break;
} }
if (req.inboundApi === "openai") { // AWS does not always confirm the model in the response, so we have to add it
req.log.info("Transforming AWS Claude response to OpenAI format"); if (!newBody.model && req.body.model) {
body = transformAwsResponse(body, req); newBody.model = req.body.model;
} }
// TODO: Remove once tokenization is stable res.status(200).json({ ...newBody, proxy: body.proxy });
if (req.debug) {
body.proxy_tokenizer_debug_info = req.debug;
}
// AWS does not confirm the model in the response, so we have to add it
body.model = req.body.model;
res.status(200).json(body);
}; };
/**
@@ -91,7 +98,7 @@ const awsResponseHandler: ProxyResHandlerWithBody = async (
 * is only used for non-streaming requests as streaming requests are handled
 * on-the-fly.
 */
-function transformAwsResponse(
+function transformAwsTextResponseToOpenAI(
  awsBody: Record<string, any>,
  req: Request
): Record<string, any> {
@@ -119,54 +126,81 @@ function transformAwsResponse(
  };
}
-const awsProxy = createQueueMiddleware(
-  createProxyMiddleware({
+const awsProxy = createQueueMiddleware({
+  beforeProxy: signAwsRequest,
+  proxyMiddleware: createProxyMiddleware({
    target: "bad-target-will-be-rewritten",
    router: ({ signedRequest }) => {
-      if (!signedRequest) {
-        throw new Error("AWS requests must go through signAwsRequest first");
-      }
+      if (!signedRequest) throw new Error("Must sign request before proxying");
      return `${signedRequest.protocol}//${signedRequest.hostname}`;
    },
    changeOrigin: true,
    selfHandleResponse: true,
    logger,
    on: {
-      proxyReq: createOnProxyReqHandler({
-        pipeline: [
-          applyQuotaLimits,
-          // Credentials are added by signAwsRequest preprocessor
-          languageFilter,
-          blockZoomerOrigins,
-          stripHeaders,
-          finalizeAwsRequest,
-        ],
-      }),
+      proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
      proxyRes: createOnProxyResHandler([awsResponseHandler]),
      error: handleProxyError,
    },
-  })
-);
+  }),
+});
+const nativeTextPreprocessor = createPreprocessorMiddleware(
+  { inApi: "anthropic-text", outApi: "anthropic-text", service: "aws" },
+  { afterTransform: [maybeReassignModel] }
+);
+const textToChatPreprocessor = createPreprocessorMiddleware(
+  { inApi: "anthropic-text", outApi: "anthropic-chat", service: "aws" },
+  { afterTransform: [maybeReassignModel] }
+);
+/**
+ * Routes text completion prompts to aws anthropic-chat if they need translation
+ * (claude-3 based models do not support the old text completion endpoint).
+ */
+const awsTextCompletionRouter: RequestHandler = (req, res, next) => {
+  if (req.body.model?.includes("claude-3")) {
+    textToChatPreprocessor(req, res, next);
+  } else {
+    nativeTextPreprocessor(req, res, next);
+  }
+};
const awsRouter = Router();
awsRouter.get("/v1/models", handleModelRequest);
-// Native(ish) Anthropic chat completion endpoint.
+// Native(ish) Anthropic text completion endpoint.
+awsRouter.post("/v1/complete", ipLimiter, awsTextCompletionRouter, awsProxy);
+// Native Anthropic chat completion endpoint.
awsRouter.post(
-  "/v1/complete",
+  "/v1/messages",
  ipLimiter,
  createPreprocessorMiddleware(
-    { inApi: "anthropic", outApi: "anthropic", service: "aws" },
-    { afterTransform: [maybeReassignModel, signAwsRequest] }
+    { inApi: "anthropic-chat", outApi: "anthropic-chat", service: "aws" },
+    { afterTransform: [maybeReassignModel] }
  ),
  awsProxy
);
+// Temporary force-Claude3 endpoint
+awsRouter.post(
+  "/v1/sonnet/:action(complete|messages)",
+  ipLimiter,
+  handleCompatibilityRequest,
+  createPreprocessorMiddleware({
+    inApi: "anthropic-text",
+    outApi: "anthropic-chat",
+    service: "aws",
+  }),
+  awsProxy
+);
// OpenAI-to-AWS Anthropic compatibility endpoint.
awsRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware(
-    { inApi: "openai", outApi: "anthropic", service: "aws" },
-    { afterTransform: [maybeReassignModel, signAwsRequest] }
+    { inApi: "openai", outApi: "anthropic-text", service: "aws" },
+    { afterTransform: [maybeReassignModel] }
  ),
  awsProxy
);
@@ -181,16 +215,93 @@ awsRouter.post(
 */
function maybeReassignModel(req: Request) {
  const model = req.body.model;
-  // User's client sent an AWS model already
-  if (model.includes("anthropic.claude")) return;
-  // User's client is sending Anthropic-style model names, check for v1
-  if (model.match(/^claude-v?1/)) {
-    req.body.model = "anthropic.claude-v1";
-  } else {
-    // User's client requested v2 or possibly some OpenAI model, default to v2
-    req.body.model = "anthropic.claude-v2";
-  }
+  // If client already specified an AWS Claude model ID, use it
+  if (model.includes("anthropic.claude")) {
+    return;
+  }
// TODO: Handle claude-instant
const pattern =
/^(claude-)?(instant-)?(v)?(\d+)(\.(\d+))?(-\d+k)?(-sonnet-?|-opus-?)(\d*)/i;
const match = model.match(pattern);
// If there's no match, return the latest v2 model
if (!match) {
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
}
const instant = match[2];
const major = match[4];
const minor = match[6];
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
}
// There's only one v1 model
if (major === "1") {
req.body.model = "anthropic.claude-v1";
return;
}
// Try to map Anthropic API v2 models to AWS v2 models
if (major === "2") {
if (minor === "0") {
req.body.model = "anthropic.claude-v2";
return;
}
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
}
// AWS currently only supports one v3 model.
const variant = match[8]; // sonnet or opus
const variantVersion = match[9];
if (major === "3") {
req.body.model = "anthropic.claude-3-sonnet-20240229-v1:0";
return;
}
// Fallback to latest v2 model
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
}
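A few mappings implied by the logic above, shown as a hedged sketch (the inputs are hypothetical client-supplied names):
// "anthropic.claude-v2:1"      -> unchanged (already an AWS model ID)
// "claude-3-sonnet-20240229"   -> "anthropic.claude-3-sonnet-20240229-v1:0"
// "gpt-4" (no regex match)     -> "anthropic.claude-v2:1" (latest v2 fallback)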
export function handleCompatibilityRequest(
req: Request,
res: Response,
next: any
) {
const action = req.params.action;
const alreadyInChatFormat = Boolean(req.body.messages);
const compatModel = "anthropic.claude-3-sonnet-20240229-v1:0";
req.log.info(
{ inputModel: req.body.model, compatModel, alreadyInChatFormat },
"Handling AWS compatibility request"
);
if (action === "messages" || alreadyInChatFormat) {
return sendErrorToClient({
req,
res,
options: {
title: "Unnecessary usage of compatibility endpoint",
message: `Your client seems to already support the new Claude API format. This endpoint is intended for clients that do not yet support the new format.\nUse the normal \`/aws/claude\` proxy endpoint instead.`,
format: "unknown",
statusCode: 400,
reqId: req.id,
obj: {
requested_endpoint: "/aws/claude/sonnet",
correct_endpoint: "/aws/claude",
},
},
});
}
req.body.model = compatModel;
next();
}
export const aws = awsRouter;
+129
@@ -0,0 +1,129 @@
import { RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { keyPool } from "../shared/key-management";
import {
AzureOpenAIModelFamily,
getAzureOpenAIModelFamily,
ModelFamily,
} from "../shared/models";
import { logger } from "../logger";
import { KNOWN_OPENAI_MODELS } from "./openai";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addAzureKey,
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeSignedRequest,
} from "./middleware/request";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
let modelsCache: any = null;
let modelsCacheTime = 0;
function getModelsResponse() {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
let available = new Set<AzureOpenAIModelFamily>();
for (const key of keyPool.list()) {
if (key.isDisabled || key.service !== "azure") continue;
key.modelFamilies.forEach((family) =>
available.add(family as AzureOpenAIModelFamily)
);
}
const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
available = new Set([...available].filter((x) => allowed.has(x)));
const models = KNOWN_OPENAI_MODELS.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "azure",
permission: [
{
id: "modelperm-" + id,
object: "model_permission",
created: new Date().getTime(),
organization: "*",
group: null,
is_blocking: false,
},
],
root: id,
parent: null,
})).filter((model) => available.has(getAzureOpenAIModelFamily(model.id)));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
}
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
res.status(200).json({ ...body, proxy: body.proxy });
};
const azureOpenAIProxy = createQueueMiddleware({
beforeProxy: addAzureKey,
proxyMiddleware: createProxyMiddleware({
target: "will be set by router",
router: (req) => {
if (!req.signedRequest) throw new Error("signedRequest not set");
const { hostname, path } = req.signedRequest;
return `https://${hostname}${path}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([azureOpenaiResponseHandler]),
error: handleProxyError,
},
}),
});
const azureOpenAIRouter = Router();
azureOpenAIRouter.get("/v1/models", handleModelRequest);
azureOpenAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware({
inApi: "openai",
outApi: "openai",
service: "azure",
}),
azureOpenAIProxy
);
azureOpenAIRouter.post(
"/v1/images/generations",
ipLimiter,
createPreprocessorMiddleware({
inApi: "openai-image",
outApi: "openai-image",
service: "azure",
}),
azureOpenAIProxy
);
export const azure = azureOpenAIRouter;
+19 -8
@@ -46,18 +46,29 @@ export const gatekeeper: RequestHandler = (req, res, next) => {
  }
  if (GATEKEEPER === "user_token" && token) {
-    const user = authenticate(token, req.ip);
-    if (user) {
-      req.user = user;
-      return next();
-    } else {
-      const maybeBannedUser = getUser(token);
-      if (maybeBannedUser?.disabledAt) {
-        return res.status(403).json({
-          error: `Forbidden: ${
-            maybeBannedUser.disabledReason || "Token disabled"
-          }`,
-        });
-      }
-    }
+    // RisuAI users all come from a handful of aws lambda IPs so we cannot use
+    // IP alone to distinguish between them and prevent usertoken sharing.
+    // Risu sends a signed token in the request headers with an anonymous user
+    // ID that we can instead use to associate requests with an individual.
+    const ip = req.risuToken?.length ?
+      `risu${req.risuToken}-${req.ip}` :
+      req.ip;
+    const { user, result } = authenticate(token, ip);
+    switch (result) {
+      case "success":
+        req.user = user;
+        return next();
+      case "limited":
+        return res.status(403).json({
+          error: `Forbidden: no more IPs can authenticate with this token`,
+        });
+      case "disabled":
+        const bannedUser = getUser(token);
+        if (bannedUser?.disabledAt) {
+          const reason = bannedUser.disabledReason || "Token disabled";
+          return res.status(403).json({ error: `Forbidden: ${reason}` });
+        }
    }
  }
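A minimal sketch of the identifier construction above, with hypothetical values:
// risuToken = "a1b2c3", req.ip = "3.101.52.7"
// -> authenticate() is called with "risua1b2c3-3.101.52.7", so two Risu users
//    behind the same lambda IP count as distinct identities for the token.
// With no risuToken, the plain req.ip is used as before.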
+135
@@ -0,0 +1,135 @@
import { Request, RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeSignedRequest,
} from "./middleware/request";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
import { addGoogleAIKey } from "./middleware/request/preprocessors/add-google-ai-key";
let modelsCache: any = null;
let modelsCacheTime = 0;
// https://ai.google.dev/models/gemini
// TODO: list models https://ai.google.dev/tutorials/rest_quickstart#list_models
const getModelsResponse = () => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
if (!config.googleAIKey) return { object: "list", data: [] };
const googleAIVariants = ["gemini-pro", "gemini-1.0-pro", "gemini-1.5-pro"];
const models = googleAIVariants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "google",
permission: [],
root: "google",
parent: null,
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
/** Only used for non-streaming requests. */
const googleAIResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
if (req.inboundApi === "openai") {
req.log.info("Transforming Google AI response to OpenAI format");
newBody = transformGoogleAIResponse(body, req);
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
function transformGoogleAIResponse(
resBody: Record<string, any>,
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
const parts = resBody.candidates[0].content?.parts ?? [{ text: "" }];
const content = parts[0].text.replace(/^(.{0,50}?): /, () => "");
return {
id: "goo-" + v4(),
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage: {
prompt_tokens: req.promptTokens,
completion_tokens: req.outputTokens,
total_tokens: totalTokens,
},
choices: [
{
message: { role: "assistant", content },
finish_reason: resBody.candidates[0].finishReason,
index: 0,
},
],
};
}
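The replace() above appears intended to strip a short leading "Speaker: " prefix that the model sometimes echoes back; a minimal sketch of its behavior (example strings are hypothetical):
const strip = (s: string) => s.replace(/^(.{0,50}?): /, () => "");
strip("Assistant: Hello there!"); // "Hello there!"
strip("No prefix in this one");   // unchanged (no ": " to match)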
const googleAIProxy = createQueueMiddleware({
beforeProxy: addGoogleAIKey,
proxyMiddleware: createProxyMiddleware({
target: "bad-target-will-be-rewritten",
router: ({ signedRequest }) => {
const { protocol, hostname, path } = signedRequest;
return `${protocol}//${hostname}${path}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([googleAIResponseHandler]),
error: handleProxyError,
},
}),
});
const googleAIRouter = Router();
googleAIRouter.get("/v1/models", handleModelRequest);
// OpenAI-to-Google AI compatibility endpoint.
googleAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware({
inApi: "openai",
outApi: "google-ai",
service: "google-ai",
}),
googleAIProxy
);
export const googleAI = googleAIRouter;
+133 -47
@@ -1,63 +1,73 @@
import { Request, Response } from "express"; import { Request, Response } from "express";
import http from "http";
import httpProxy from "http-proxy"; import httpProxy from "http-proxy";
import { ZodError } from "zod"; import { ZodError } from "zod";
import { generateErrorMessage } from "zod-error"; import { generateErrorMessage } from "zod-error";
import { buildFakeSse } from "../../shared/streaming";
import { assertNever } from "../../shared/utils"; import { assertNever } from "../../shared/utils";
import { QuotaExceededError } from "./request/apply-quota-limits"; import { QuotaExceededError } from "./request/preprocessors/apply-quota-limits";
import { sendErrorToClient } from "./response/error-generator";
import { HttpError } from "../../shared/errors";
const OPENAI_CHAT_COMPLETION_ENDPOINT = "/v1/chat/completions"; const OPENAI_CHAT_COMPLETION_ENDPOINT = "/v1/chat/completions";
const OPENAI_TEXT_COMPLETION_ENDPOINT = "/v1/completions"; const OPENAI_TEXT_COMPLETION_ENDPOINT = "/v1/completions";
const OPENAI_EMBEDDINGS_ENDPOINT = "/v1/embeddings"; const OPENAI_EMBEDDINGS_ENDPOINT = "/v1/embeddings";
const OPENAI_IMAGE_COMPLETION_ENDPOINT = "/v1/images/generations";
const ANTHROPIC_COMPLETION_ENDPOINT = "/v1/complete"; const ANTHROPIC_COMPLETION_ENDPOINT = "/v1/complete";
const ANTHROPIC_MESSAGES_ENDPOINT = "/v1/messages";
const ANTHROPIC_SONNET_COMPAT_ENDPOINT = "/v1/sonnet";
const ANTHROPIC_OPUS_COMPAT_ENDPOINT = "/v1/opus";
/** Returns true if we're making a request to a completion endpoint. */ export function isTextGenerationRequest(req: Request) {
export function isCompletionRequest(req: Request) {
// 99% sure this function is not needed anymore
return ( return (
req.method === "POST" && req.method === "POST" &&
[ [
OPENAI_CHAT_COMPLETION_ENDPOINT, OPENAI_CHAT_COMPLETION_ENDPOINT,
OPENAI_TEXT_COMPLETION_ENDPOINT, OPENAI_TEXT_COMPLETION_ENDPOINT,
ANTHROPIC_COMPLETION_ENDPOINT, ANTHROPIC_COMPLETION_ENDPOINT,
ANTHROPIC_MESSAGES_ENDPOINT,
ANTHROPIC_SONNET_COMPAT_ENDPOINT,
ANTHROPIC_OPUS_COMPAT_ENDPOINT,
].some((endpoint) => req.path.startsWith(endpoint)) ].some((endpoint) => req.path.startsWith(endpoint))
); );
} }
export function isImageGenerationRequest(req: Request) {
return (
req.method === "POST" &&
req.path.startsWith(OPENAI_IMAGE_COMPLETION_ENDPOINT)
);
}
export function isEmbeddingsRequest(req: Request) { export function isEmbeddingsRequest(req: Request) {
return ( return (
req.method === "POST" && req.path.startsWith(OPENAI_EMBEDDINGS_ENDPOINT) req.method === "POST" && req.path.startsWith(OPENAI_EMBEDDINGS_ENDPOINT)
); );
} }
export function writeErrorResponse( export function sendProxyError(
req: Request, req: Request,
res: Response, res: Response,
statusCode: number, statusCode: number,
statusMessage: string,
errorPayload: Record<string, any> errorPayload: Record<string, any>
) { ) {
const errorSource = errorPayload.error?.type?.startsWith("proxy") const msg =
? "proxy" statusCode === 500
: "upstream"; ? `The proxy encountered an error while trying to process your prompt.`
: `The proxy encountered an error while trying to send your prompt to the upstream service.`;
// If we're mid-SSE stream, send a data event with the error payload and end sendErrorToClient({
// the stream. Otherwise just send a normal error response. options: {
if ( format: req.inboundApi,
res.headersSent || title: `Proxy error (HTTP ${statusCode} ${statusMessage})`,
String(res.getHeader("content-type")).startsWith("text/event-stream") message: `${msg} Further technical details are provided below.`,
) { obj: errorPayload,
const errorTitle = `${errorSource} error (${statusCode})`; reqId: req.id,
const errorContent = JSON.stringify(errorPayload, null, 2); model: req.body?.model,
const msg = buildFakeSse(errorTitle, errorContent, req); },
res.write(msg); req,
res.write(`data: [DONE]\n\n`); res,
res.end(); });
} else {
if (req.debug && errorPayload.error) {
errorPayload.error.proxy_tokenizer_debug_info = req.debug;
}
res.status(statusCode).json(errorPayload);
}
} }
export const handleProxyError: httpProxy.ErrorCallback = (err, req, res) => {
@@ -71,31 +81,65 @@ export const classifyErrorAndSend = (
  res: Response
) => {
  try {
-    const { status, userMessage, ...errorDetails } = classifyError(err);
-    writeErrorResponse(req, res, status, {
+    const { statusCode, statusMessage, userMessage, ...errorDetails } =
+      classifyError(err);
+    sendProxyError(req, res, statusCode, statusMessage, {
      error: { message: userMessage, ...errorDetails },
    });
  } catch (error) {
    req.log.error(error, `Error writing error response headers, giving up.`);
+    res.end();
  }
};
function classifyError(err: Error): {
  /** HTTP status code returned to the client. */
-  status: number;
+  statusCode: number;
+  /** HTTP status message returned to the client. */
+  statusMessage: string;
  /** Message displayed to the user. */
  userMessage: string;
  /** Short error type, e.g. "proxy_validation_error". */
  type: string;
} & Record<string, any> {
  const defaultError = {
-    status: 500,
-    userMessage: `Reverse proxy encountered an unexpected error. (${err.message})`,
+    statusCode: 500,
+    statusMessage: "Internal Server Error",
+    userMessage: `Reverse proxy error: ${err.message}`,
    type: "proxy_internal_error",
    stack: err.stack,
  };
  switch (err.constructor.name) {
+    case "HttpError":
+      const statusCode = (err as HttpError).status;
+      return {
+        statusCode,
+        statusMessage: `HTTP ${statusCode} ${http.STATUS_CODES[statusCode]}`,
+        userMessage: `Reverse proxy error: ${err.message}`,
+        type: "proxy_http_error",
+      };
+    case "BadRequestError":
+      return {
+        statusCode: 400,
+        statusMessage: "Bad Request",
+        userMessage: `Request is not valid. (${err.message})`,
+        type: "proxy_bad_request",
+      };
+    case "NotFoundError":
+      return {
+        statusCode: 404,
+        statusMessage: "Not Found",
+        userMessage: `Requested resource not found. (${err.message})`,
+        type: "proxy_not_found",
+      };
+    case "PaymentRequiredError":
+      return {
+        statusCode: 402,
+        statusMessage: "No Keys Available",
+        userMessage: err.message,
+        type: "proxy_no_keys_available",
+      };
    case "ZodError":
      const userMessage = generateErrorMessage((err as ZodError).issues, {
        prefix: "Request validation failed. ",
@@ -103,22 +147,36 @@ function classifyError(err: Error): {
        code: { enabled: false },
        maxErrors: 3,
        transform: ({ issue, ...rest }) => {
-          return `At '${rest.pathComponent}', ${issue.message}`;
+          return `At '${rest.pathComponent}': ${issue.message}`;
        },
      });
-      return { status: 400, userMessage, type: "proxy_validation_error" };
-    case "ForbiddenError":
+      return {
+        statusCode: 400,
+        statusMessage: "Bad Request",
+        userMessage,
+        type: "proxy_validation_error",
+      };
+    case "ZoomerForbiddenError":
      // Mimics a ban notice from OpenAI, thrown when blockZoomerOrigins blocks
      // a request.
      return {
-        status: 403,
+        statusCode: 403,
+        statusMessage: "Forbidden",
        userMessage: `Your account has been disabled for violating our terms of service.`,
        type: "organization_account_disabled",
        code: "policy_violation",
      };
+    case "ForbiddenError":
+      return {
+        statusCode: 403,
+        statusMessage: "Forbidden",
+        userMessage: `Request is not allowed. (${err.message})`,
+        type: "proxy_forbidden",
+      };
    case "QuotaExceededError":
      return {
-        status: 429,
+        statusCode: 429,
+        statusMessage: "Too Many Requests",
        userMessage: `You've exceeded your token quota for this model type.`,
        type: "proxy_quota_exceeded",
        info: (err as QuotaExceededError).quotaInfo,
@@ -128,21 +186,24 @@ function classifyError(err: Error): {
      switch (err.code) {
        case "ENOTFOUND":
          return {
-            status: 502,
+            statusCode: 502,
+            statusMessage: "Bad Gateway",
            userMessage: `Reverse proxy encountered a DNS error while trying to connect to the upstream service.`,
            type: "proxy_network_error",
            code: err.code,
          };
        case "ECONNREFUSED":
          return {
-            status: 502,
+            statusCode: 502,
+            statusMessage: "Bad Gateway",
            userMessage: `Reverse proxy couldn't connect to the upstream service.`,
            type: "proxy_network_error",
            code: err.code,
          };
        case "ECONNRESET":
          return {
-            status: 504,
+            statusCode: 504,
+            statusMessage: "Gateway Timeout",
            userMessage: `Reverse proxy timed out while waiting for the upstream service to respond.`,
            type: "proxy_network_error",
            code: err.code,
@@ -159,20 +220,41 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
  const format = req.outboundApi;
  switch (format) {
    case "openai":
+    case "mistral-ai":
-      return body.choices[0].message.content;
+      // Can be null if the model wants to invoke tools rather than return a
+      // completion.
+      return body.choices[0].message.content || "";
    case "openai-text":
      return body.choices[0].text;
-    case "anthropic":
+    case "anthropic-chat":
+      if (!body.content) {
+        req.log.error(
+          { body: JSON.stringify(body) },
+          "Received empty Anthropic chat completion"
+        );
+        return "";
+      }
+      return body.content
+        .map(({ text, type }: { type: string; text: string }) =>
+          type === "text" ? text : `[Unsupported content type: ${type}]`
+        )
+        .join("\n");
+    case "anthropic-text":
      if (!body.completion) {
        req.log.error(
          { body: JSON.stringify(body) },
-          "Received empty Anthropic completion"
+          "Received empty Anthropic text completion"
        );
        return "";
      }
      return body.completion.trim();
-    case "google-palm":
-      return body.candidates[0].output;
+    case "google-ai":
+      if ("choices" in body) {
+        return body.choices[0].message.content;
+      }
+      return body.candidates[0].content.parts[0].text;
+    case "openai-image":
+      return body.data?.map((item: any) => item.url).join("\n");
    default:
      assertNever(format);
  }
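For the anthropic-chat branch above, a hedged example of the flattening (the body is hypothetical):
// body.content = [{ type: "text", text: "Hi" }, { type: "image", source: {} }]
// -> "Hi\n[Unsupported content type: image]"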
@@ -183,11 +265,15 @@ export function getModelFromBody(req: Request, body: Record<string, any>) {
  switch (format) {
    case "openai":
    case "openai-text":
+    case "mistral-ai":
      return body.model;
-    case "anthropic":
+    case "openai-image":
+      return req.body.model;
+    case "anthropic-chat":
+    case "anthropic-text":
      // Anthropic confirms the model in the response, but AWS Claude doesn't.
      return body.model || req.body.model;
-    case "google-palm":
+    case "google-ai":
      // Google doesn't confirm the model in the response.
      return req.body.model;
    default:
-137
@@ -1,137 +0,0 @@
import { Key, OpenAIKey, keyPool } from "../../../shared/key-management";
import { isCompletionRequest, isEmbeddingsRequest } from "../common";
import { ProxyRequestMiddleware } from ".";
import { assertNever } from "../../../shared/utils";
/** Add a key that can service this request to the request object. */
export const addKey: ProxyRequestMiddleware = (proxyReq, req) => {
let assignedKey: Key;
if (!isCompletionRequest(req)) {
// Horrible, horrible hack to stop the proxy from complaining about clients
// not sending a model when they are requesting the list of models (which
// requires a key, but obviously not a model).
// I don't think this is needed anymore since models requests are no longer
// proxied to the upstream API. Everything going through this is either a
// completion request or a special case like OpenAI embeddings.
req.log.warn({ path: req.path }, "addKey called on non-completion request");
req.body.model = "gpt-3.5-turbo";
}
if (!req.inboundApi || !req.outboundApi) {
const err = new Error(
"Request API format missing. Did you forget to add the request preprocessor to your router?"
);
req.log.error(
{ in: req.inboundApi, out: req.outboundApi, path: req.path },
err.message
);
throw err;
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
// TODO: use separate middleware to deal with stream flags
req.isStreaming = req.body.stream === true || req.body.stream === "true";
req.body.stream = req.isStreaming;
if (req.inboundApi === req.outboundApi) {
assignedKey = keyPool.get(req.body.model);
} else {
switch (req.outboundApi) {
// If we are translating between API formats we may need to select a model
// for the user, because the provided model is for the inbound API.
case "anthropic":
assignedKey = keyPool.get("claude-v1");
break;
case "google-palm":
assignedKey = keyPool.get("text-bison-001");
delete req.body.stream;
break;
case "openai-text":
assignedKey = keyPool.get("gpt-3.5-turbo-instruct");
break;
case "openai":
throw new Error(
"OpenAI Chat as an API translation target is not supported"
);
default:
assertNever(req.outboundApi);
}
}
req.key = assignedKey;
req.log.info(
{
key: assignedKey.hash,
model: req.body?.model,
fromApi: req.inboundApi,
toApi: req.outboundApi,
},
"Assigned key to request"
);
// TODO: KeyProvider should assemble all necessary headers
switch (assignedKey.service) {
case "anthropic":
proxyReq.setHeader("X-API-Key", assignedKey.key);
break;
case "openai":
const key: OpenAIKey = assignedKey as OpenAIKey;
if (key.organizationId) {
proxyReq.setHeader("OpenAI-Organization", key.organizationId);
}
proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "google-palm":
const originalPath = proxyReq.path;
proxyReq.path = originalPath.replace(
/(\?.*)?$/,
`?key=${assignedKey.key}`
);
break;
case "aws":
throw new Error(
"add-key should not be used for AWS security credentials. Use sign-aws-request instead."
);
default:
assertNever(assignedKey.service);
}
};
/**
* Special case for embeddings requests which don't go through the normal
* request pipeline.
*/
export const addKeyForEmbeddingsRequest: ProxyRequestMiddleware = (
proxyReq,
req
) => {
if (!isEmbeddingsRequest(req)) {
throw new Error(
"addKeyForEmbeddingsRequest called on non-embeddings request"
);
}
if (req.inboundApi !== "openai") {
throw new Error("Embeddings requests must be from OpenAI");
}
req.body = { input: req.body.input, model: "text-embedding-ada-002" }
const key = keyPool.get("text-embedding-ada-002") as OpenAIKey;
req.key = key;
req.log.info(
{ key: key.hash, toApi: req.outboundApi },
"Assigned Turbo key to embeddings request"
);
proxyReq.setHeader("Authorization", `Bearer ${key.key}`);
if (key.organizationId) {
proxyReq.setHeader("OpenAI-Organization", key.organizationId);
}
};
@@ -1,30 +0,0 @@
import { hasAvailableQuota } from "../../../shared/users/user-store";
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";
export class QuotaExceededError extends Error {
public quotaInfo: any;
constructor(message: string, quotaInfo: any) {
super(message);
this.name = "QuotaExceededError";
this.quotaInfo = quotaInfo;
}
}
export const applyQuotaLimits: ProxyRequestMiddleware = (_proxyReq, req) => {
if (!isCompletionRequest(req) || !req.user) {
return;
}
const requestedTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
if (!hasAvailableQuota(req.user.token, req.body.model, requestedTokens)) {
throw new QuotaExceededError(
"You have exceeded your proxy token quota for this model.",
{
quota: req.user.tokenLimits,
used: req.user.tokenCounts,
requested: requestedTokens,
}
);
}
};
@@ -1,48 +0,0 @@
import { RequestPreprocessor } from "./index";
import { countTokens, OpenAIPromptMessage } from "../../../shared/tokenization";
import { assertNever } from "../../../shared/utils";
/**
* Given a request with an already-transformed body, counts the number of
* tokens and assigns the count to the request.
*/
export const countPromptTokens: RequestPreprocessor = async (req) => {
const service = req.outboundApi;
let result;
switch (service) {
case "openai": {
req.outputTokens = req.body.max_tokens;
const prompt: OpenAIPromptMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
case "openai-text": {
req.outputTokens = req.body.max_tokens;
const prompt: string = req.body.prompt;
result = await countTokens({ req, prompt, service });
break;
}
case "anthropic": {
req.outputTokens = req.body.max_tokens_to_sample;
const prompt: string = req.body.prompt;
result = await countTokens({ req, prompt, service });
break;
}
case "google-palm": {
req.outputTokens = req.body.maxOutputTokens;
const prompt: string = req.body.prompt.text;
result = await countTokens({ req, prompt, service });
break;
}
default:
assertNever(service);
}
req.promptTokens = result.token_count;
// TODO: Remove once token counting is stable
req.log.debug({ result: result }, "Counted prompt tokens.");
req.debug = req.debug ?? {};
req.debug = { ...req.debug, ...result };
};
+20 -19
@@ -2,29 +2,30 @@ import type { Request } from "express";
import type { ClientRequest } from "http"; import type { ClientRequest } from "http";
import type { ProxyReqCallback } from "http-proxy"; import type { ProxyReqCallback } from "http-proxy";
export { createOnProxyReqHandler } from "./rewrite"; export { createOnProxyReqHandler } from "./onproxyreq-factory";
export { export {
createPreprocessorMiddleware, createPreprocessorMiddleware,
createEmbeddingsPreprocessorMiddleware, createEmbeddingsPreprocessorMiddleware,
} from "./preprocess"; } from "./preprocessor-factory";
// Express middleware (runs before http-proxy-middleware, can be async) // Express middleware (runs before http-proxy-middleware, can be async)
export { applyQuotaLimits } from "./apply-quota-limits"; export { addAzureKey } from "./preprocessors/add-azure-key";
export { validateContextSize } from "./validate-context-size"; export { applyQuotaLimits } from "./preprocessors/apply-quota-limits";
export { countPromptTokens } from "./count-prompt-tokens"; export { validateContextSize } from "./preprocessors/validate-context-size";
export { setApiFormat } from "./set-api-format"; export { countPromptTokens } from "./preprocessors/count-prompt-tokens";
export { signAwsRequest } from "./sign-aws-request"; export { languageFilter } from "./preprocessors/language-filter";
export { transformOutboundPayload } from "./transform-outbound-payload"; export { setApiFormat } from "./preprocessors/set-api-format";
export { signAwsRequest } from "./preprocessors/sign-aws-request";
export { transformOutboundPayload } from "./preprocessors/transform-outbound-payload";
// HPM middleware (runs on onProxyReq, cannot be async) // http-proxy-middleware callbacks (runs on onProxyReq, cannot be async)
export { addKey, addKeyForEmbeddingsRequest } from "./add-key"; export { addKey, addKeyForEmbeddingsRequest } from "./onproxyreq/add-key";
export { addAnthropicPreamble } from "./add-anthropic-preamble"; export { addAnthropicPreamble } from "./onproxyreq/add-anthropic-preamble";
export { blockZoomerOrigins } from "./block-zoomer-origins"; export { blockZoomerOrigins } from "./onproxyreq/block-zoomer-origins";
export { finalizeBody } from "./finalize-body"; export { checkModelFamily } from "./onproxyreq/check-model-family";
export { finalizeAwsRequest } from "./finalize-aws-request"; export { finalizeBody } from "./onproxyreq/finalize-body";
export { languageFilter } from "./language-filter"; export { finalizeSignedRequest } from "./onproxyreq/finalize-signed-request";
export { limitCompletions } from "./limit-completions"; export { stripHeaders } from "./onproxyreq/strip-headers";
export { stripHeaders } from "./strip-headers";
/** /**
* Middleware that runs prior to the request being handled by http-proxy- * Middleware that runs prior to the request being handled by http-proxy-
@@ -43,7 +44,7 @@ export { stripHeaders } from "./strip-headers";
export type RequestPreprocessor = (req: Request) => void | Promise<void>; export type RequestPreprocessor = (req: Request) => void | Promise<void>;
/** /**
* Middleware that runs immediately before the request is sent to the API in * Callbacks that run immediately before the request is sent to the API in
* response to http-proxy-middleware's `proxyReq` event. * response to http-proxy-middleware's `proxyReq` event.
* *
* Async functions cannot be used here as HPM's event emitter is not async and * Async functions cannot be used here as HPM's event emitter is not async and
@@ -53,7 +54,7 @@ export type RequestPreprocessor = (req: Request) => void | Promise<void>;
* first attempt is rate limited and the request is automatically retried by the * first attempt is rate limited and the request is automatically retried by the
* request queue middleware. * request queue middleware.
*/ */
export type ProxyRequestMiddleware = ProxyReqCallback<ClientRequest, Request>; export type HPMRequestCallback = ProxyReqCallback<ClientRequest, Request>;
export const forceModel = (model: string) => (req: Request) => export const forceModel = (model: string) => (req: Request) =>
void (req.body.model = model); void (req.body.model = model);
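forceModel yields a tiny RequestPreprocessor; for example (the model name is hypothetical), it could be passed to a preprocessor pipeline to pin every request to one model:
const forceSonnet = forceModel("claude-3-sonnet-20240229");
// forceSonnet(req) sets req.body.model = "claude-3-sonnet-20240229"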
@@ -1,56 +0,0 @@
import { Request } from "express";
import { config } from "../../../config";
import { logger } from "../../../logger";
import { assertNever } from "../../../shared/utils";
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";
const DISALLOWED_REGEX =
/[\u2E80-\u2E99\u2E9B-\u2EF3\u2F00-\u2FD5\u3005\u3007\u3021-\u3029\u3038-\u303B\u3400-\u4DB5\u4E00-\u9FD5\uF900-\uFA6D\uFA70-\uFAD9]/;
// Our shitty free-tier VMs will fall over if we test every single character in
// each 15k character request ten times a second. So we'll just sample 20% of
// the characters and hope that's enough.
const containsDisallowedCharacters = (text: string) => {
const sampleSize = Math.ceil(text.length * 0.2);
const sample = text
.split("")
.sort(() => 0.5 - Math.random())
.slice(0, sampleSize)
.join("");
return DISALLOWED_REGEX.test(sample);
};
/** Block requests containing too many disallowed characters. */
export const languageFilter: ProxyRequestMiddleware = (_proxyReq, req) => {
if (!config.rejectDisallowed) {
return;
}
if (isCompletionRequest(req)) {
const combinedText = getPromptFromRequest(req);
if (containsDisallowedCharacters(combinedText)) {
logger.warn(`Blocked request containing bad characters`);
_proxyReq.destroy(new Error(config.rejectMessage));
}
}
};
function getPromptFromRequest(req: Request) {
const service = req.outboundApi;
const body = req.body;
switch (service) {
case "anthropic":
return body.prompt;
case "openai":
return body.messages
.map((m: { content: string }) => m.content)
.join("\n");
case "openai-text":
return body.prompt;
case "google-palm":
return body.prompt.text;
default:
assertNever(service);
}
}
@@ -1,16 +0,0 @@
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";
/**
* Don't allow multiple completions to be requested to prevent abuse.
* OpenAI-only, Anthropic provides no such parameter.
**/
export const limitCompletions: ProxyRequestMiddleware = (_proxyReq, req) => {
if (isCompletionRequest(req) && req.outboundApi === "openai") {
const originalN = req.body?.n || 1;
req.body.n = 1;
if (originalN !== req.body.n) {
req.log.warn(`Limiting completion choices from ${originalN} to 1`);
}
}
};
@@ -0,0 +1,45 @@
import {
applyQuotaLimits,
blockZoomerOrigins,
checkModelFamily,
HPMRequestCallback,
stripHeaders,
} from "./index";
type ProxyReqHandlerFactoryOptions = { pipeline: HPMRequestCallback[] };
/**
* Returns an http-proxy-middleware request handler that runs the given set of
* onProxyReq callback functions in sequence.
*
* These will run each time a request is proxied, including on automatic retries
* by the queue after encountering a rate limit.
*/
export const createOnProxyReqHandler = ({
pipeline,
}: ProxyReqHandlerFactoryOptions): HPMRequestCallback => {
const callbackPipeline = [
checkModelFamily,
applyQuotaLimits,
blockZoomerOrigins,
stripHeaders,
...pipeline,
];
return (proxyReq, req, res, options) => {
// The streaming flag must be set before any other onProxyReq handler runs,
// as it may influence the behavior of subsequent handlers.
// Image generation requests can't be streamed.
// TODO: this flag is set in too many places
req.isStreaming =
req.isStreaming || req.body.stream === true || req.body.stream === "true";
req.body.stream = req.isStreaming;
try {
for (const fn of callbackPipeline) {
fn(proxyReq, req, res, options);
}
} catch (error) {
proxyReq.destroy(error);
}
};
};
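A hedged usage sketch matching how this factory is invoked elsewhere in this changeset:
const onProxyReq = createOnProxyReqHandler({
  pipeline: [addKey, addAnthropicPreamble, finalizeBody],
});
// checkModelFamily, applyQuotaLimits, blockZoomerOrigins, and stripHeaders run
// first on every attempt (including queue retries), followed by the handlers
// given in `pipeline`; any throw destroys the proxied request.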
@@ -1,24 +1,25 @@
-import { AnthropicKey, Key } from "../../../shared/key-management";
-import { isCompletionRequest } from "../common";
-import { ProxyRequestMiddleware } from ".";
+import { AnthropicKey, Key } from "../../../../shared/key-management";
+import { isTextGenerationRequest } from "../../common";
+import { HPMRequestCallback } from "../index";
/**
 * Some keys require the prompt to start with `\n\nHuman:`. There is no way to
 * know this without trying to send the request and seeing if it fails. If a
 * key is marked as requiring a preamble, it will be added here.
 */
-export const addAnthropicPreamble: ProxyRequestMiddleware = (
-  _proxyReq,
-  req
-) => {
-  if (!isCompletionRequest(req) || req.key?.service !== "anthropic") {
+export const addAnthropicPreamble: HPMRequestCallback = (_proxyReq, req) => {
+  if (
+    !isTextGenerationRequest(req) ||
+    req.key?.service !== "anthropic" ||
+    req.outboundApi !== "anthropic-text"
+  ) {
    return;
  }
  let preamble = "";
  let prompt = req.body.prompt;
  assertAnthropicKey(req.key);
-  if (req.key.requiresPreamble) {
+  if (req.key.requiresPreamble && prompt) {
    preamble = prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
    req.log.debug({ key: req.key.hash, preamble }, "Adding preamble to prompt");
  }
@@ -0,0 +1,116 @@
import { Key, OpenAIKey, keyPool } from "../../../../shared/key-management";
import { isEmbeddingsRequest } from "../../common";
import { HPMRequestCallback } from "../index";
import { assertNever } from "../../../../shared/utils";
export const addKey: HPMRequestCallback = (proxyReq, req) => {
let assignedKey: Key;
const { service, inboundApi, outboundApi, body } = req;
if (!inboundApi || !outboundApi) {
const err = new Error(
"Request API format missing. Did you forget to add the request preprocessor to your router?"
);
req.log.error({ inboundApi, outboundApi, path: req.path }, err.message);
throw err;
}
if (!body?.model) {
throw new Error("You must specify a model with your request.");
}
if (inboundApi === outboundApi) {
assignedKey = keyPool.get(body.model, service);
} else {
switch (outboundApi) {
// If we are translating between API formats we may need to select a model
// for the user, because the provided model is for the inbound API.
// TODO: This whole else condition is probably no longer needed since API
// translation now reassigns the model earlier in the request pipeline.
case "anthropic-chat":
case "anthropic-text":
assignedKey = keyPool.get("claude-v1", service);
break;
case "openai-text":
assignedKey = keyPool.get("gpt-3.5-turbo-instruct", service);
break;
case "openai-image":
assignedKey = keyPool.get("dall-e-3", service);
break;
case "openai":
case "google-ai":
case "mistral-ai":
throw new Error(
`add-key should not be called for outbound API ${outboundApi}`
);
default:
assertNever(outboundApi);
}
}
req.key = assignedKey;
req.log.info(
{ key: assignedKey.hash, model: body.model, inboundApi, outboundApi },
"Assigned key to request"
);
// TODO: KeyProvider should assemble all necessary headers
switch (assignedKey.service) {
case "anthropic":
proxyReq.setHeader("X-API-Key", assignedKey.key);
break;
case "openai":
const key: OpenAIKey = assignedKey as OpenAIKey;
if (key.organizationId) {
proxyReq.setHeader("OpenAI-Organization", key.organizationId);
}
proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "mistral-ai":
proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "azure":
const azureKey = assignedKey.key;
proxyReq.setHeader("api-key", azureKey);
break;
case "aws":
case "google-ai":
throw new Error("add-key should not be used for this service.");
default:
assertNever(assignedKey.service);
}
};
/**
* Special case for embeddings requests which don't go through the normal
* request pipeline.
*/
export const addKeyForEmbeddingsRequest: HPMRequestCallback = (
proxyReq,
req
) => {
if (!isEmbeddingsRequest(req)) {
throw new Error(
"addKeyForEmbeddingsRequest called on non-embeddings request"
);
}
if (req.inboundApi !== "openai") {
throw new Error("Embeddings requests must be from OpenAI");
}
req.body = { input: req.body.input, model: "text-embedding-ada-002" };
const key = keyPool.get("text-embedding-ada-002", "openai") as OpenAIKey;
req.key = key;
req.log.info(
{ key: key.hash, toApi: req.outboundApi },
"Assigned Turbo key to embeddings request"
);
proxyReq.setHeader("Authorization", `Bearer ${key.key}`);
if (key.organizationId) {
proxyReq.setHeader("OpenAI-Organization", key.organizationId);
}
};
@@ -1,12 +1,11 @@
import { isCompletionRequest } from "../common"; import { HPMRequestCallback } from "../index";
import { ProxyRequestMiddleware } from ".";
const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai".split(","); const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai".split(",");
class ForbiddenError extends Error { class ZoomerForbiddenError extends Error {
constructor(message: string) { constructor(message: string) {
super(message); super(message);
this.name = "ForbiddenError"; this.name = "ZoomerForbiddenError";
} }
} }
@@ -14,11 +13,7 @@ class ForbiddenError extends Error {
* Blocks requests from Janitor AI users with a fake, scary error message so I * Blocks requests from Janitor AI users with a fake, scary error message so I
* stop getting emails asking for tech support. * stop getting emails asking for tech support.
*/ */
export const blockZoomerOrigins: ProxyRequestMiddleware = (_proxyReq, req) => { export const blockZoomerOrigins: HPMRequestCallback = (_proxyReq, req) => {
if (!isCompletionRequest(req)) {
return;
}
const origin = req.headers.origin || req.headers.referer; const origin = req.headers.origin || req.headers.referer;
if (origin && DISALLOWED_ORIGIN_SUBSTRINGS.some((s) => origin.includes(s))) { if (origin && DISALLOWED_ORIGIN_SUBSTRINGS.some((s) => origin.includes(s))) {
// Venus-derivatives send a test prompt to check if the proxy is working. // Venus-derivatives send a test prompt to check if the proxy is working.
@@ -27,7 +22,7 @@ export const blockZoomerOrigins: ProxyRequestMiddleware = (_proxyReq, req) => {
return; return;
} }
throw new ForbiddenError( throw new ZoomerForbiddenError(
`Your access was terminated due to violation of our policies, please check your email for more information. If you believe this is in error and would like to appeal, please contact us through our help center at help.openai.com.` `Your access was terminated due to violation of our policies, please check your email for more information. If you believe this is in error and would like to appeal, please contact us through our help center at help.openai.com.`
); );
} }
@@ -0,0 +1,14 @@
import { HPMRequestCallback } from "../index";
import { config } from "../../../../config";
import { ForbiddenError } from "../../../../shared/errors";
import { getModelFamilyForRequest } from "../../../../shared/models";
/**
* Ensures the selected model family is enabled by the proxy configuration.
**/
export const checkModelFamily: HPMRequestCallback = (_proxyReq, req, res) => {
const family = getModelFamilyForRequest(req);
if (!config.allowedModelFamilies.includes(family)) {
throw new ForbiddenError(`Model family '${family}' is not enabled on this proxy`);
}
};
@@ -1,9 +1,18 @@
import { fixRequestBody } from "http-proxy-middleware";
-import type { ProxyRequestMiddleware } from ".";
+import type { HPMRequestCallback } from "../index";
/** Finalize the rewritten request body. Must be the last rewriter. */
-export const finalizeBody: ProxyRequestMiddleware = (proxyReq, req) => {
+export const finalizeBody: HPMRequestCallback = (proxyReq, req) => {
  if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
+    // For image generation requests, remove stream flag.
+    if (req.outboundApi === "openai-image") {
+      delete req.body.stream;
+    }
+    // For anthropic text to chat requests, remove undefined prompt.
+    if (req.outboundApi === "anthropic-chat") {
+      delete req.body.prompt;
+    }
    const updatedBody = JSON.stringify(req.body);
    proxyReq.setHeader("Content-Length", Buffer.byteLength(updatedBody));
    (req as any).rawBody = Buffer.from(updatedBody);
@@ -1,11 +1,11 @@
import type { ProxyRequestMiddleware } from "."; import type { HPMRequestCallback } from "../index";
/** /**
* For AWS requests, the body is signed earlier in the request pipeline, before * For AWS/Azure/Google requests, the body is signed earlier in the request
* the proxy middleware. This function just assigns the path and headers to the * pipeline, before the proxy middleware. This function just assigns the path
* proxy request. * and headers to the proxy request.
*/ */
export const finalizeAwsRequest: ProxyRequestMiddleware = (proxyReq, req) => { export const finalizeSignedRequest: HPMRequestCallback = (proxyReq, req) => {
if (!req.signedRequest) { if (!req.signedRequest) {
throw new Error("Expected req.signedRequest to be set"); throw new Error("Expected req.signedRequest to be set");
} }
@@ -1,10 +1,10 @@
import { ProxyRequestMiddleware } from "."; import { HPMRequestCallback } from "../index";
/** /**
* Removes origin and referer headers before sending the request to the API for * Removes origin and referer headers before sending the request to the API for
* privacy reasons. * privacy reasons.
**/ **/
export const stripHeaders: ProxyRequestMiddleware = (proxyReq) => { export const stripHeaders: HPMRequestCallback = (proxyReq) => {
proxyReq.setHeader("origin", ""); proxyReq.setHeader("origin", "");
proxyReq.setHeader("referer", ""); proxyReq.setHeader("referer", "");
@@ -1,4 +1,5 @@
  import { RequestHandler } from "express";
+ import { ZodIssue } from "zod";
  import { initializeSseStream } from "../../../shared/streaming";
  import { classifyErrorAndSend } from "../common";
  import {
@@ -7,6 +8,7 @@ import {
    countPromptTokens,
    setApiFormat,
    transformOutboundPayload,
+   languageFilter,
  } from ".";

  type RequestPreprocessorOptions = {
@@ -27,6 +29,14 @@ type RequestPreprocessorOptions = {
  /**
   * Returns a middleware function that processes the request body into the given
   * API format, and then sequentially runs the given additional preprocessors.
+  *
+  * These run first in the request lifecycle, a single time per request before it
+  * is added to the request queue. They aren't run again if the request is
+  * re-attempted after a rate limit.
+  *
+  * To run a preprocessor on every re-attempt, pass it to createQueueMiddleware.
+  * It will run after these preprocessors, but before the request is sent to
+  * http-proxy-middleware.
   */
  export const createPreprocessorMiddleware = (
    apiFormat: Parameters<typeof setApiFormat>[0],
@@ -37,6 +47,7 @@ export const createPreprocessorMiddleware = (
    ...(beforeTransform ?? []),
    transformOutboundPayload,
    countPromptTokens,
+   languageFilter,
    ...(afterTransform ?? []),
    validateContextSize,
  ];
@@ -60,20 +71,88 @@ async function executePreprocessors(
    preprocessors: RequestPreprocessor[],
    [req, res, next]: Parameters<RequestHandler>
  ) {
+   handleTestMessage(req, res, next);
+   if (res.headersSent) return;
    try {
      for (const preprocessor of preprocessors) {
        await preprocessor(req);
      }
      next();
    } catch (error) {
+     if (error.constructor.name === "ZodError") {
+       const msg = error?.issues
+         ?.map((issue: ZodIssue) => issue.message)
+         .join("; ");
+       req.log.info(msg, "Prompt validation failed.");
+     } else {
        req.log.error(error, "Error while executing request preprocessor");
+     }
      // If the request has opted into streaming, the client probably won't
      // handle a non-eventstream response, but we haven't initialized the SSE
      // stream yet as that is typically done later by the request queue. We'll
      // do that here and then call classifyErrorAndSend to use the streaming
      // error handler.
-     initializeSseStream(res)
+     const { stream } = req.body;
+     const isStreaming = stream === "true" || stream === true;
+     if (isStreaming && !res.headersSent) {
+       initializeSseStream(res);
+     }
      classifyErrorAndSend(error as Error, req, res);
    }
  }
/**
* Bypasses the API call and returns a test message response if the request body
* is a known test message from SillyTavern. Otherwise these messages just waste
* API request quota and confuse users when the proxy is busy, because ST always
* makes them with `stream: false` (which is not allowed when the proxy is busy)
*/
const handleTestMessage: RequestHandler = (req, res) => {
const { method, body } = req;
if (method !== "POST") {
return;
}
if (isTestMessage(body)) {
req.log.info({ body }, "Received test message. Skipping API call.");
res.json({
id: "test-message",
object: "chat.completion",
created: Date.now(),
model: body.model,
// openai chat
choices: [
{
message: { role: "assistant", content: "Hello!" },
finish_reason: "stop",
index: 0,
},
],
// anthropic text
completion: "Hello!",
// anthropic chat
content: [{ type: "text", text: "Hello!" }],
proxy_note:
"This response was generated by the proxy's test message handler and did not go to the API.",
});
}
};
function isTestMessage(body: any) {
const { messages, prompt } = body;
if (messages) {
return (
messages.length === 1 &&
messages[0].role === "user" &&
messages[0].content === "Hi"
);
} else {
return (
prompt?.trim() === "Human: Hi\n\nAssistant:" ||
prompt?.startsWith("Hi\n\n")
);
}
}
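Note: for reference, the two request shapes this check matches; model names are hypothetical, and the bodies mirror SillyTavern's connection test as described in the comment above.
```
const chatTest = {
  model: "gpt-4",
  messages: [{ role: "user", content: "Hi" }],
}; // isTestMessage(chatTest) === true: single "Hi" user message

const textTest = {
  model: "claude-v1",
  prompt: "Human: Hi\n\nAssistant:",
}; // isTestMessage(textTest) === true: matches the known test prompt
```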
@@ -0,0 +1,78 @@
import {
APIFormat,
AzureOpenAIKey,
keyPool,
} from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
export const addAzureKey: RequestPreprocessor = (req) => {
const validAPIs: APIFormat[] = ["openai", "openai-image"];
const apisValid = [req.outboundApi, req.inboundApi].every((api) =>
validAPIs.includes(api)
);
const serviceValid = req.service === "azure";
if (!apisValid || !serviceValid) {
throw new Error("addAzureKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const model = req.body.model.startsWith("azure-")
? req.body.model
: `azure-${req.body.model}`;
req.key = keyPool.get(model, "azure");
req.body.model = model;
// Handles the sole Azure API deviation from the OpenAI spec (that I know of)
const notNullOrUndefined = (x: any) => x !== null && x !== undefined;
if ([req.body.logprobs, req.body.top_logprobs].some(notNullOrUndefined)) {
// OpenAI wants logprobs: true/false and top_logprobs: number
// Azure seems to just want to combine them into logprobs: number
// if (typeof req.body.logprobs === "boolean") {
// req.body.logprobs = req.body.top_logprobs || undefined;
// delete req.body.top_logprobs
// }
// Temporarily just disabling logprobs for Azure because their model support
// is random: `This model does not support the 'logprobs' parameter.`
delete req.body.logprobs;
delete req.body.top_logprobs;
}
req.log.info(
{ key: req.key.hash, model },
"Assigned Azure OpenAI key to request"
);
const cred = req.key as AzureOpenAIKey;
const { resourceName, deploymentId, apiKey } = getCredentialsFromKey(cred);
const operation =
req.outboundApi === "openai" ? "/chat/completions" : "/images/generations";
const apiVersion =
req.outboundApi === "openai" ? "2023-09-01-preview" : "2024-02-15-preview";
req.signedRequest = {
method: "POST",
protocol: "https:",
hostname: `${resourceName}.openai.azure.com`,
path: `/openai/deployments/${deploymentId}${operation}?api-version=${apiVersion}`,
headers: {
["host"]: `${resourceName}.openai.azure.com`,
["content-type"]: "application/json",
["api-key"]: apiKey,
},
body: JSON.stringify(req.body),
};
};
function getCredentialsFromKey(key: AzureOpenAIKey) {
const [resourceName, deploymentId, apiKey] = key.key.split(":");
if (!resourceName || !deploymentId || !apiKey) {
throw new Error("Assigned Azure OpenAI key is not in the correct format.");
}
return { resourceName, deploymentId, apiKey };
}
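Note: a sketch of the colon-delimited key format getCredentialsFromKey expects and the URL assembled from it (all values hypothetical):
```
const key = "my-resource:my-deployment:0123456789abcdef";
const [resourceName, deploymentId, apiKey] = key.split(":");

const url =
  `https://${resourceName}.openai.azure.com` +
  `/openai/deployments/${deploymentId}/chat/completions` +
  `?api-version=2023-09-01-preview`;
// https://my-resource.openai.azure.com/openai/deployments/my-deployment/chat/completions?api-version=2023-09-01-preview
```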
@@ -0,0 +1,40 @@
import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
export const addGoogleAIKey: RequestPreprocessor = (req) => {
const apisValid = req.inboundApi === "openai" && req.outboundApi === "google-ai";
const serviceValid = req.service === "google-ai";
if (!apisValid || !serviceValid) {
throw new Error("addGoogleAIKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const model = req.body.model;
req.key = keyPool.get(model, "google-ai");
req.log.info(
{ key: req.key.hash, model },
"Assigned Google AI API key to request"
);
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:generateContent?key=$API_KEY
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}
req.isStreaming = req.isStreaming || req.body.stream;
delete req.body.stream;
req.signedRequest = {
method: "POST",
protocol: "https:",
hostname: "generativelanguage.googleapis.com",
path: `/v1beta/models/${model}:${req.isStreaming ? "streamGenerateContent" : "generateContent"}?key=${req.key.key}`,
headers: {
["host"]: `generativelanguage.googleapis.com`,
["content-type"]: "application/json",
},
body: JSON.stringify(req.body),
};
};
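Note: the two Generative Language API paths selected above, spelled out with hypothetical model and key values:
```
const model = "gemini-pro"; // hypothetical model id
const apiKey = "AIza-EXAMPLE";
const nonStreaming = `/v1beta/models/${model}:generateContent?key=${apiKey}`;
const streaming = `/v1beta/models/${model}:streamGenerateContent?key=${apiKey}`;
```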
@@ -0,0 +1,37 @@
import { hasAvailableQuota } from "../../../../shared/users/user-store";
import { isImageGenerationRequest, isTextGenerationRequest } from "../../common";
import { HPMRequestCallback } from "../index";
export class QuotaExceededError extends Error {
public quotaInfo: any;
constructor(message: string, quotaInfo: any) {
super(message);
this.name = "QuotaExceededError";
this.quotaInfo = quotaInfo;
}
}
export const applyQuotaLimits: HPMRequestCallback = (_proxyReq, req) => {
const subjectToQuota =
isTextGenerationRequest(req) || isImageGenerationRequest(req);
if (!subjectToQuota || !req.user) return;
const requestedTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
if (
!hasAvailableQuota({
userToken: req.user.token,
model: req.body.model,
api: req.outboundApi,
requested: requestedTokens,
})
) {
throw new QuotaExceededError(
"You have exceeded your proxy token quota for this model.",
{
quota: req.user.tokenLimits,
used: req.user.tokenCounts,
requested: requestedTokens,
}
);
}
};
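Note: a standalone sketch of the arithmetic: a request reserves its prompt tokens plus the maximum it may generate. The final comparison is an assumption about hasAvailableQuota's semantics, not code from this commit.
```
const promptTokens = 1200;
const outputTokens = 300; // max_tokens requested by the client
const requested = promptTokens + outputTokens; // 1500 tokens reserved up front

const limit = 100_000; // hypothetical per-model quota for this user
const used = 99_000;
const hasQuota = used + requested <= limit; // false -> QuotaExceededError
```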
@@ -0,0 +1,70 @@
import { RequestPreprocessor } from "../index";
import { countTokens } from "../../../../shared/tokenization";
import { assertNever } from "../../../../shared/utils";
import {
AnthropicChatMessage,
GoogleAIChatMessage,
MistralAIChatMessage,
OpenAIChatMessage,
} from "../../../../shared/api-support";
/**
* Given a request with an already-transformed body, counts the number of
* tokens and assigns the count to the request.
*/
export const countPromptTokens: RequestPreprocessor = async (req) => {
const service = req.outboundApi;
let result;
switch (service) {
case "openai": {
req.outputTokens = req.body.max_tokens;
const prompt: OpenAIChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
case "openai-text": {
req.outputTokens = req.body.max_tokens;
const prompt: string = req.body.prompt;
result = await countTokens({ req, prompt, service });
break;
}
case "anthropic-chat": {
req.outputTokens = req.body.max_tokens;
const prompt: AnthropicChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
case "anthropic-text": {
req.outputTokens = req.body.max_tokens_to_sample;
const prompt: string = req.body.prompt;
result = await countTokens({ req, prompt, service });
break;
}
case "google-ai": {
req.outputTokens = req.body.generationConfig.maxOutputTokens;
const prompt: GoogleAIChatMessage[] = req.body.contents;
result = await countTokens({ req, prompt, service });
break;
}
case "mistral-ai": {
req.outputTokens = req.body.max_tokens;
const prompt: MistralAIChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
case "openai-image": {
req.outputTokens = 1;
result = await countTokens({ req, service });
break;
}
default:
assertNever(service);
}
req.promptTokens = result.token_count;
req.log.debug({ result: result }, "Counted prompt tokens.");
req.tokenizerInfo = req.tokenizerInfo ?? {};
req.tokenizerInfo = { ...req.tokenizerInfo, ...result };
};
@@ -0,0 +1,83 @@
import { Request } from "express";
import { config } from "../../../../config";
import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
import { BadRequestError } from "../../../../shared/errors";
import {
MistralAIChatMessage,
OpenAIChatMessage,
flattenAnthropicMessages,
} from "../../../../shared/api-support";
const rejectedClients = new Map<string, number>();
setInterval(() => {
rejectedClients.forEach((count, ip) => {
if (count > 0) {
rejectedClients.set(ip, Math.floor(count / 2));
} else {
rejectedClients.delete(ip);
}
});
}, 30000);
/**
* Block requests containing blacklisted phrases. Repeated rejections from the
* same IP address will be throttled.
*/
export const languageFilter: RequestPreprocessor = async (req) => {
if (!config.rejectPhrases.length) return;
const prompt = getPromptFromRequest(req);
const match = config.rejectPhrases.find((phrase) =>
prompt.match(new RegExp(phrase, "i"))
);
if (match) {
const ip = req.ip;
const rejections = (rejectedClients.get(req.ip) || 0) + 1;
const delay = Math.min(60000, Math.pow(2, rejections - 1) * 1000);
rejectedClients.set(ip, rejections);
req.log.warn(
{ match, ip, rejections, delay },
"Prompt contains rejected phrase"
);
await new Promise((resolve) => {
req.res!.once("close", resolve);
setTimeout(resolve, delay);
});
throw new BadRequestError(config.rejectMessage);
}
};
function getPromptFromRequest(req: Request) {
const service = req.outboundApi;
const body = req.body;
switch (service) {
case "anthropic-chat":
return flattenAnthropicMessages(body.messages);
case "anthropic-text":
return body.prompt;
case "openai":
case "mistral-ai":
return body.messages
.map((msg: OpenAIChatMessage | MistralAIChatMessage) => {
const text = Array.isArray(msg.content)
? msg.content
.map((c) => {
if ("text" in c) return c.text;
})
.join()
: msg.content;
return `${msg.role}: ${text}`;
})
.join("\n\n");
case "openai-text":
case "openai-image":
return body.prompt;
case "google-ai":
return body.prompt.text;
default:
assertNever(service);
}
}
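Note: the throttling curve above, worked out; the delay doubles per rejection and is capped at 60 seconds, while counts halve every 30 seconds.
```
const delayFor = (rejections: number) =>
  Math.min(60_000, Math.pow(2, rejections - 1) * 1000);

console.log(delayFor(1)); // 1000  (1s)
console.log(delayFor(4)); // 8000  (8s)
console.log(delayFor(8)); // 60000 (capped)
```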
@@ -1,13 +1,14 @@
  import { Request } from "express";
- import { APIFormat, LLMService } from "../../../shared/key-management";
- import { RequestPreprocessor } from ".";
+ import { APIFormat } from "../../../../shared/key-management";
+ import { LLMService } from "../../../../shared/models";
+ import { RequestPreprocessor } from "../index";

  export const setApiFormat = (api: {
    inApi: Request["inboundApi"];
    outApi: APIFormat;
-   service: LLMService,
+   service: LLMService;
  }): RequestPreprocessor => {
-   return function configureRequestApiFormat (req) {
+   return function configureRequestApiFormat(req) {
      req.inboundApi = api.inApi;
      req.outboundApi = api.outApi;
      req.service = api.service;
@@ -2,37 +2,63 @@ import express from "express";
  import { Sha256 } from "@aws-crypto/sha256-js";
  import { SignatureV4 } from "@smithy/signature-v4";
  import { HttpRequest } from "@smithy/protocol-http";
- import { keyPool } from "../../../shared/key-management";
- import { RequestPreprocessor } from ".";
- import { AnthropicV1CompleteSchema } from "./transform-outbound-payload";
+ import {
+   AnthropicV1TextSchema,
+   AnthropicV1MessagesSchema,
+ } from "../../../../shared/api-support";
+ import { keyPool } from "../../../../shared/key-management";
+ import { RequestPreprocessor } from "../index";

  const AMZ_HOST =
-   process.env.AMZ_HOST || "invoke-bedrock.%REGION%.amazonaws.com";
+   process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";

  /**
   * Signs an outgoing AWS request with the appropriate headers and modifies the
   * request object in place to fix the path.
+  * This happens AFTER request transformation.
   */
  export const signAwsRequest: RequestPreprocessor = async (req) => {
-   req.key = keyPool.get("anthropic.claude-v2");
    const { model, stream } = req.body;
+   req.key = keyPool.get(model, "aws");
    req.isStreaming = stream === true || stream === "true";

+   // same as addAnthropicPreamble for non-AWS requests, but has to happen here
+   if (req.outboundApi === "anthropic-text") {
      let preamble = req.body.prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
      req.body.prompt = preamble + req.body.prompt;
+   }

-   // AWS supports only a subset of Anthropic's parameters and is more strict
-   // about unknown parameters.
+   // AWS uses mostly the same parameters as Anthropic, with a few removed params
+   // and much stricter validation on unused parameters. Rather than treating it
+   // as a separate schema we will use the anthropic ones and strip the unused
+   // parameters.
    // TODO: This should happen in transform-outbound-payload.ts
-   const strippedParams = AnthropicV1CompleteSchema.pick({
+   let strippedParams: Record<string, unknown>;
+   if (req.outboundApi === "anthropic-chat") {
+     strippedParams = AnthropicV1MessagesSchema.pick({
+       messages: true,
+       max_tokens: true,
+       stop_sequences: true,
+       temperature: true,
+       top_k: true,
+       top_p: true,
+     })
+       .strip()
+       .parse(req.body);
+     strippedParams.anthropic_version = "bedrock-2023-05-31";
+   } else {
+     strippedParams = AnthropicV1TextSchema.pick({
        prompt: true,
        max_tokens_to_sample: true,
        stop_sequences: true,
        temperature: true,
        top_k: true,
        top_p: true,
-     }).parse(req.body);
+     })
+       .strip()
+       .parse(req.body);
+   }

    const credential = getCredentialParts(req);
    const host = AMZ_HOST.replace("%REGION%", credential.region);
@@ -60,6 +86,12 @@ export const signAwsRequest: RequestPreprocessor = async (req) => {
newRequest.headers["accept"] = "*/*"; newRequest.headers["accept"] = "*/*";
} }
const { key, body, inboundApi, outboundApi } = req;
req.log.info(
{ key: key.hash, model: body.model, inboundApi, outboundApi },
"Assigned AWS credentials to request"
);
req.signedRequest = await sign(newRequest, getCredentialParts(req)); req.signedRequest = await sign(newRequest, getCredentialParts(req));
}; };
@@ -68,6 +100,7 @@ type Credential = {
  secretAccessKey: string;
  region: string;
};

function getCredentialParts(req: express.Request): Credential {
  const [accessKeyId, secretAccessKey, region] = req.key!.key.split(":");
@@ -0,0 +1,57 @@
import {
API_REQUEST_VALIDATORS,
API_REQUEST_TRANSFORMERS,
} from "../../../../shared/api-support";
import { BadRequestError } from "../../../../shared/errors";
import {
isImageGenerationRequest,
isTextGenerationRequest,
} from "../../common";
import { RequestPreprocessor } from "../index";
import { fixMistralPrompt } from "../../../../shared/api-support/kits/mistral-ai/request-transformers";
/** Transforms an incoming request body to one that matches the target API. */
export const transformOutboundPayload: RequestPreprocessor = async (req) => {
const sameService = req.inboundApi === req.outboundApi;
const alreadyTransformed = req.retryCount > 0;
const notTransformable =
!isTextGenerationRequest(req) && !isImageGenerationRequest(req);
if (alreadyTransformed || notTransformable) return;
// TODO: this should be an APIFormatTransformer
if (req.inboundApi === "mistral-ai") {
const messages = req.body.messages;
req.body.messages = fixMistralPrompt(messages);
req.log.info(
{ old: messages.length, new: req.body.messages.length },
"Fixed Mistral prompt"
);
}
if (sameService) {
const result = API_REQUEST_VALIDATORS[req.inboundApi].safeParse(req.body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body: req.body },
"Request validation failed"
);
throw result.error;
}
req.body = result.data;
return;
}
const transformation = `${req.inboundApi}->${req.outboundApi}` as const;
const transFn = API_REQUEST_TRANSFORMERS[transformation];
if (transFn) {
req.log.info({ transformation }, "Transforming request");
req.body = await transFn(req);
return;
}
throw new BadRequestError(
`${transformation} proxying is not supported. Make sure your client is configured to send requests in the correct format and to the correct endpoint.`
);
};
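Note: a sketch of the transformer lookup keyed by `inbound->outbound`; the map contents here are hypothetical stand-ins for the real table in shared/api-support.
```
type TransformerFn = (req: { body: any }) => Promise<Record<string, unknown>>;

const API_REQUEST_TRANSFORMERS: Partial<Record<string, TransformerFn>> = {
  "openai->anthropic-chat": async (req) => ({ ...req.body /* translated */ }),
};

const supported = API_REQUEST_TRANSFORMERS["openai->anthropic-chat"];   // defined
const unsupported = API_REQUEST_TRANSFORMERS["anthropic-chat->openai"]; // undefined -> BadRequestError
```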
@@ -1,12 +1,13 @@
  import { Request } from "express";
  import { z } from "zod";
- import { config } from "../../../config";
- import { assertNever } from "../../../shared/utils";
- import { RequestPreprocessor } from ".";
+ import { config } from "../../../../config";
+ import { assertNever } from "../../../../shared/utils";
+ import { RequestPreprocessor } from "../index";

  const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
  const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
- const BISON_MAX_CONTEXT = 8100;
+ const GOOGLE_AI_MAX_CONTEXT = 32000;
+ const MISTRAL_AI_MAX_CONTENT = 32768;

  /**
   * Assigns `req.promptTokens` and `req.outputTokens` based on the request body
@@ -28,12 +29,18 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
case "openai-text": case "openai-text":
proxyMax = OPENAI_MAX_CONTEXT; proxyMax = OPENAI_MAX_CONTEXT;
break; break;
case "anthropic": case "anthropic-chat":
case "anthropic-text":
proxyMax = CLAUDE_MAX_CONTEXT; proxyMax = CLAUDE_MAX_CONTEXT;
break; break;
case "google-palm": case "google-ai":
proxyMax = BISON_MAX_CONTEXT; proxyMax = GOOGLE_AI_MAX_CONTEXT;
break; break;
case "mistral-ai":
proxyMax = MISTRAL_AI_MAX_CONTENT;
break;
case "openai-image":
return;
default: default:
assertNever(req.outboundApi); assertNever(req.outboundApi);
} }
@@ -42,6 +49,12 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
  let modelMax: number;
  if (model.match(/gpt-3.5-turbo-16k/)) {
    modelMax = 16384;
+ } else if (model.match(/gpt-4-turbo(-preview)?$/)) {
+   modelMax = 131072;
+ } else if (model.match(/gpt-4-(0125|1106)(-preview)?$/)) {
+   modelMax = 131072;
+ } else if (model.match(/^gpt-4(-\d{4})?-vision(-preview)?$/)) {
+   modelMax = 131072;
  } else if (model.match(/gpt-3.5-turbo/)) {
    modelMax = 4096;
  } else if (model.match(/gpt-4-32k/)) {
@@ -52,18 +65,26 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
    modelMax = 100000;
  } else if (model.match(/^claude-(?:instant-)?v1(?:\.\d)?$/)) {
    modelMax = 9000;
- } else if (model.match(/^claude-2/)) {
+ } else if (model.match(/^claude-2\.0/)) {
    modelMax = 100000;
- } else if (model.match(/^text-bison-\d{3}$/)) {
-   modelMax = BISON_MAX_CONTEXT;
+ } else if (model.match(/^claude-2/)) {
+   modelMax = 200000;
+ } else if (model.match(/^claude-3/)) {
+   modelMax = 200000;
+ } else if (model.match(/^gemini-\d{3}$/)) {
+   modelMax = GOOGLE_AI_MAX_CONTEXT;
+ } else if (model.match(/^mistral-(tiny|small|medium)$/)) {
+   modelMax = MISTRAL_AI_MAX_CONTENT;
+ } else if (model.match(/^anthropic\.claude-3-sonnet/)) {
+   modelMax = 200000;
+ } else if (model.match(/^anthropic\.claude-v2:\d/)) {
+   modelMax = 200000;
  } else if (model.match(/^anthropic\.claude/)) {
    // Not sure if AWS Claude has the same context limit as Anthropic Claude.
    modelMax = 100000;
  } else {
-   // Don't really want to throw here because I don't want to have to update
-   // this ASAP every time a new model is released.
-   req.log.warn({ model }, "Unknown model, using 100k token limit.");
-   modelMax = 100000;
+   req.log.warn({ model }, "Unknown model, using 200k token limit.");
+   modelMax = 200000;
  }

  const finalMax = Math.min(proxyMax, modelMax);
@@ -81,10 +102,10 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
"Prompt size validated" "Prompt size validated"
); );
req.debug.prompt_tokens = promptTokens; req.tokenizerInfo.prompt_tokens = promptTokens;
req.debug.completion_tokens = outputTokens; req.tokenizerInfo.completion_tokens = outputTokens;
req.debug.max_model_tokens = modelMax; req.tokenizerInfo.max_model_tokens = modelMax;
req.debug.max_proxy_tokens = proxyMax; req.tokenizerInfo.max_proxy_tokens = proxyMax;
}; };
function assertRequestHasTokenCounts( function assertRequestHasTokenCounts(
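Note: the effective limit is the smaller of the proxy's configured ceiling and the model's own window, e.g.:
```
const proxyMax = 32768;  // hypothetical config.maxContextTokensOpenAI
const modelMax = 131072; // gpt-4-turbo, per the table above
const finalMax = Math.min(proxyMax, modelMax); // 32768: the proxy config wins
```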
@@ -1,35 +0,0 @@
import { Request } from "express";
import { ClientRequest } from "http";
import httpProxy from "http-proxy";
import { ProxyRequestMiddleware } from "./index";
type ProxyReqCallback = httpProxy.ProxyReqCallback<ClientRequest, Request>;
type RewriterOptions = {
beforeRewrite?: ProxyReqCallback[];
pipeline: ProxyRequestMiddleware[];
};
export const createOnProxyReqHandler = ({
beforeRewrite = [],
pipeline,
}: RewriterOptions): ProxyReqCallback => {
return (proxyReq, req, res, options) => {
try {
for (const validator of beforeRewrite) {
validator(proxyReq, req, res, options);
}
} catch (error) {
req.log.error(error, "Error while executing proxy request validator");
proxyReq.destroy(error);
}
try {
for (const rewriter of pipeline) {
rewriter(proxyReq, req, res, options);
}
} catch (error) {
req.log.error(error, "Error while executing proxy request rewriter");
proxyReq.destroy(error);
}
};
};
@@ -1,332 +0,0 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../config";
import { OpenAIPromptMessage } from "../../../shared/tokenization";
import { isCompletionRequest } from "../common";
import { RequestPreprocessor } from ".";
import { APIFormat } from "../../../shared/key-management";
const CLAUDE_OUTPUT_MAX = config.maxOutputTokensAnthropic;
const OPENAI_OUTPUT_MAX = config.maxOutputTokensOpenAI;
// https://console.anthropic.com/docs/api/reference#-v1-complete
export const AnthropicV1CompleteSchema = z.object({
model: z.string(),
prompt: z.string({
required_error:
"No prompt found. Are you sending an OpenAI-formatted request to the Claude endpoint?",
}),
max_tokens_to_sample: z.coerce
.number()
.int()
.transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
stop_sequences: z.array(z.string()).optional(),
stream: z.boolean().optional().default(false),
temperature: z.coerce.number().optional().default(1),
top_k: z.coerce.number().optional(),
top_p: z.coerce.number().optional(),
metadata: z.any().optional(),
});
// https://platform.openai.com/docs/api-reference/chat/create
const OpenAIV1ChatCompletionSchema = z.object({
model: z.string(),
messages: z.array(
z.object({
role: z.enum(["system", "user", "assistant"]),
content: z.string(),
name: z.string().optional(),
}),
{
required_error:
"No `messages` found. Ensure you've set the correct completion endpoint.",
invalid_type_error:
"Messages were not formatted correctly. Refer to the OpenAI Chat API documentation for more information.",
}
),
temperature: z.number().optional().default(1),
top_p: z.number().optional().default(1),
n: z
.literal(1, {
errorMap: () => ({
message: "You may only request a single completion at a time.",
}),
})
.optional(),
stream: z.boolean().optional().default(false),
stop: z.union([z.string(), z.array(z.string())]).optional(),
max_tokens: z.coerce
.number()
.int()
.nullish()
.default(16)
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
frequency_penalty: z.number().optional().default(0),
presence_penalty: z.number().optional().default(0),
logit_bias: z.any().optional(),
user: z.string().optional(),
});
const OpenAIV1TextCompletionSchema = z
.object({
model: z
.string()
.regex(
/^gpt-3.5-turbo-instruct/,
"Model must start with 'gpt-3.5-turbo-instruct'"
),
prompt: z.string({
required_error:
"No `prompt` found. Ensure you've set the correct completion endpoint.",
}),
logprobs: z.number().int().nullish().default(null),
echo: z.boolean().optional().default(false),
best_of: z.literal(1).optional(),
stop: z.union([z.string(), z.array(z.string()).max(4)]).optional(),
suffix: z.string().optional(),
})
.merge(OpenAIV1ChatCompletionSchema.omit({ messages: true }));
// https://developers.generativeai.google/api/rest/generativelanguage/models/generateText
const PalmV1GenerateTextSchema = z.object({
model: z.string(),
prompt: z.object({ text: z.string() }),
temperature: z.number().optional(),
maxOutputTokens: z.coerce
.number()
.int()
.optional()
.default(16)
.transform((v) => Math.min(v, 1024)), // TODO: Add config
candidateCount: z.literal(1).optional(),
topP: z.number().optional(),
topK: z.number().optional(),
safetySettings: z.array(z.object({})).max(0).optional(),
stopSequences: z.array(z.string()).max(5).optional(),
});
const VALIDATORS: Record<APIFormat, z.ZodSchema<any>> = {
anthropic: AnthropicV1CompleteSchema,
openai: OpenAIV1ChatCompletionSchema,
"openai-text": OpenAIV1TextCompletionSchema,
"google-palm": PalmV1GenerateTextSchema,
};
/** Transforms an incoming request body to one that matches the target API. */
export const transformOutboundPayload: RequestPreprocessor = async (req) => {
const sameService = req.inboundApi === req.outboundApi;
const alreadyTransformed = req.retryCount > 0;
const notTransformable = !isCompletionRequest(req);
if (alreadyTransformed || notTransformable) {
return;
}
if (sameService) {
const result = VALIDATORS[req.inboundApi].safeParse(req.body);
if (!result.success) {
req.log.error(
{ issues: result.error.issues, body: req.body },
"Request validation failed"
);
throw result.error;
}
req.body = result.data;
return;
}
if (req.inboundApi === "openai" && req.outboundApi === "anthropic") {
req.body = openaiToAnthropic(req);
return;
}
if (req.inboundApi === "openai" && req.outboundApi === "google-palm") {
req.body = openaiToPalm(req);
return;
}
if (req.inboundApi === "openai" && req.outboundApi === "openai-text") {
req.body = openaiToOpenaiText(req);
return;
}
throw new Error(
`'${req.inboundApi}' -> '${req.outboundApi}' request proxying is not supported. Make sure your client is configured to use the correct API.`
);
};
function openaiToAnthropic(req: Request) {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-Anthropic request"
);
throw result.error;
}
req.headers["anthropic-version"] = "2023-06-01";
const { messages, ...rest } = result.data;
const prompt = openAIMessagesToClaudePrompt(messages);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
// Recommended by Anthropic
stops.push("\n\nHuman:");
// Helps with jailbreak prompts that send fake system messages and multi-bot
// chats that prefix bot messages with "System: Respond as <bot name>".
stops.push("\n\nSystem:");
// Remove duplicates
stops = [...new Set(stops)];
return {
// Model may be overridden in `calculate-context-size.ts` to avoid having
// a circular dependency (`calculate-context-size.ts` needs an already-
// transformed request body to count tokens, but this function would like
// to know the count to select a model).
model: process.env.CLAUDE_SMALL_MODEL || "claude-v1",
prompt: prompt,
max_tokens_to_sample: rest.max_tokens,
stop_sequences: stops,
stream: rest.stream,
temperature: rest.temperature,
top_p: rest.top_p,
};
}
function openaiToOpenaiText(req: Request) {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-OpenAI-text request"
);
throw result.error;
}
const { messages, ...rest } = result.data;
const prompt = flattenOpenAiChatMessages(messages);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
stops.push("\n\nUser:");
stops = [...new Set(stops)];
const transformed = { ...rest, prompt: prompt, stop: stops };
return OpenAIV1TextCompletionSchema.parse(transformed);
}
function openaiToPalm(req: Request): z.infer<typeof PalmV1GenerateTextSchema> {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse({
...body,
model: "gpt-3.5-turbo",
});
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-Palm request"
);
throw result.error;
}
const { messages, ...rest } = result.data;
const prompt = flattenOpenAiChatMessages(messages);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
stops.push("\n\nUser:");
stops = [...new Set(stops)];
z.array(z.string()).max(5).parse(stops);
return {
prompt: { text: prompt },
maxOutputTokens: rest.max_tokens,
stopSequences: stops,
model: "text-bison-001",
topP: rest.top_p,
temperature: rest.temperature,
safetySettings: [
{ category: "HARM_CATEGORY_UNSPECIFIED", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_DEROGATORY", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_TOXICITY", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_VIOLENCE", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_SEXUAL", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_MEDICAL", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_DANGEROUS", threshold: "BLOCK_NONE" },
],
};
}
export function openAIMessagesToClaudePrompt(messages: OpenAIPromptMessage[]) {
return (
messages
.map((m) => {
let role: string = m.role;
if (role === "assistant") {
role = "Assistant";
} else if (role === "system") {
role = "System";
} else if (role === "user") {
role = "Human";
}
// https://console.anthropic.com/docs/prompt-design
// `name` isn't supported by Anthropic but we can still try to use it.
return `\n\n${role}: ${m.name?.trim() ? `(as ${m.name}) ` : ""}${
m.content
}`;
})
.join("") + "\n\nAssistant:"
);
}
function flattenOpenAiChatMessages(messages: OpenAIPromptMessage[]) {
// Temporary to allow experimenting with prompt strategies
const PROMPT_VERSION: number = 1;
switch (PROMPT_VERSION) {
case 1:
return (
messages
.map((m) => {
// Claude-style human/assistant turns
let role: string = m.role;
if (role === "assistant") {
role = "Assistant";
} else if (role === "system") {
role = "System";
} else if (role === "user") {
role = "User";
}
return `\n\n${role}: ${m.content}`;
})
.join("") + "\n\nAssistant:"
);
case 2:
return messages
.map((m) => {
// Claude without prefixes (except system) and no Assistant priming
let role: string = "";
if (role === "system") {
role = "System: ";
}
return `\n\n${role}${m.content}`;
})
.join("");
default:
throw new Error(`Unknown prompt version: ${PROMPT_VERSION}`);
}
}
@@ -0,0 +1,339 @@
import express from "express";
import { APIFormat } from "../../../shared/key-management";
import { assertNever } from "../../../shared/utils";
import { initializeSseStream } from "../../../shared/streaming";
function getMessageContent({
title,
message,
obj,
}: {
title: string;
message: string;
obj?: Record<string, any>;
}) {
/*
Constructs a Markdown-formatted message that renders semi-nicely in most chat
frontends. For example:
**Proxy error (HTTP 404 Not Found)**
The proxy encountered an error while trying to send your prompt to the upstream service. Further technical details are provided below.
***
*The requested Claude model might not exist, or the key might not be provisioned for it.*
```
{
"type": "error",
"error": {
"type": "not_found_error",
"message": "model: some-invalid-model-id",
},
"proxy_note": "The requested Claude model might not exist, or the key might not be provisioned for it."
}
```
*/
const note = obj?.proxy_note || obj?.error?.message || "";
const friendlyMessage = note ? `${message}\n\n***\n\n*${note}*` : message;
const details = JSON.parse(JSON.stringify(obj ?? {}));
let stack = "";
if (details.stack) {
stack = `\n\nInclude this trace when reporting an issue.\n\`\`\`\n${details.stack}\n\`\`\``;
delete details.stack;
}
return `\n\n**${title}**\n${friendlyMessage}${
obj ? `\n\`\`\`\n${JSON.stringify(details, null, 2)}\n\`\`\`\n${stack}` : ""
}`;
}
type ErrorGeneratorOptions = {
format: APIFormat | "unknown";
title: string;
message: string;
obj?: object;
reqId: string | number | object;
model?: string;
statusCode?: number;
};
export function tryInferFormat(body: any): APIFormat | "unknown" {
if (typeof body !== "object" || !body.model) {
return "unknown";
}
if (body.model.includes("gpt")) {
return "openai";
}
if (body.model.includes("mistral")) {
return "mistral-ai";
}
if (body.model.includes("claude")) {
return body.messages?.length ? "anthropic-chat" : "anthropic-text";
}
if (body.model.includes("gemini")) {
return "google-ai";
}
return "unknown";
}
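Note: how the heuristic above classifies a few bodies (model ids hypothetical):
```
console.log(tryInferFormat({ model: "gpt-4" }));                         // "openai"
console.log(tryInferFormat({ model: "claude-3-opus", messages: [{}] })); // "anthropic-chat"
console.log(tryInferFormat({ model: "claude-3-opus" }));                 // "anthropic-text"
console.log(tryInferFormat({ model: "gemini-pro" }));                    // "google-ai"
console.log(tryInferFormat({}));                                         // "unknown"
```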
export function sendErrorToClient({
options,
req,
res,
}: {
options: ErrorGeneratorOptions;
req: express.Request;
res: express.Response;
}) {
const { format: inputFormat } = options;
// This is an error thrown before we know the format of the request, so we
// can't send a response in the format the client expects.
const format =
inputFormat === "unknown" ? tryInferFormat(req.body) : inputFormat;
if (format === "unknown") {
return res.status(options.statusCode || 400).json({
error: options.message,
details: options.obj,
});
}
const completion = buildSpoofedCompletion({ ...options, format });
const event = buildSpoofedSSE({ ...options, format });
const isStreaming =
req.isStreaming || req.body.stream === true || req.body.stream === "true";
if (isStreaming) {
if (!res.headersSent) {
initializeSseStream(res);
}
res.write(event);
res.write(`data: [DONE]\n\n`);
res.end();
} else {
res.status(200).json(completion);
}
}
/**
* Returns a non-streaming completion object that looks like it came from the
* service that the request is being proxied to. Used to send error messages to
* the client and have them look like normal responses, for clients with poor
* error handling.
*/
export function buildSpoofedCompletion({
format,
title,
message,
obj,
reqId,
model = "unknown",
}: ErrorGeneratorOptions & { format: Exclude<APIFormat, "unknown"> }) {
const id = String(reqId);
const content = getMessageContent({ title, message, obj });
switch (format) {
case "openai":
case "mistral-ai":
return {
id: "error-" + id,
object: "chat.completion",
created: Date.now(),
model,
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
choices: [
{
message: { role: "assistant", content },
finish_reason: title,
index: 0,
},
],
};
case "openai-text":
return {
id: "error-" + id,
object: "text_completion",
created: Date.now(),
model,
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
choices: [
{ text: content, index: 0, logprobs: null, finish_reason: title },
],
};
case "anthropic-text":
return {
id: "error-" + id,
type: "completion",
completion: content,
stop_reason: title,
stop: null,
model,
};
case "anthropic-chat":
return {
id: "error-" + id,
type: "message",
role: "assistant",
content: [{ type: "text", text: content }],
model,
stop_reason: title,
stop_sequence: null,
};
case "google-ai":
// TODO: Native Google AI non-streaming responses are not supported, this
// is an untested guess at what the response should look like.
return {
id: "error-" + id,
object: "chat.completion",
created: Date.now(),
model,
candidates: [
{
content: { parts: [{ text: content }], role: "model" },
finishReason: title,
index: 0,
tokenCount: null,
safetyRatings: [],
},
],
};
case "openai-image":
return obj;
default:
assertNever(format);
}
}
/**
* Returns an SSE message that looks like a completion event for the service
* that the request is being proxied to. Used to send error messages to the
* client in the middle of a streaming request.
*/
export function buildSpoofedSSE({
format,
title,
message,
obj,
reqId,
model = "unknown",
}: ErrorGeneratorOptions & { format: Exclude<APIFormat, "unknown"> }) {
const id = String(reqId);
const content = getMessageContent({ title, message, obj });
let event;
switch (format) {
case "openai":
case "mistral-ai":
event = {
id: "chatcmpl-" + id,
object: "chat.completion.chunk",
created: Date.now(),
model,
choices: [{ delta: { content }, index: 0, finish_reason: title }],
};
break;
case "openai-text":
event = {
id: "cmpl-" + id,
object: "text_completion",
created: Date.now(),
choices: [
{ text: content, index: 0, logprobs: null, finish_reason: title },
],
model,
};
break;
case "anthropic-text":
event = {
completion: content,
stop_reason: title,
truncated: false,
stop: null,
model,
log_id: "proxy-req-" + id,
};
break;
case "anthropic-chat":
event = {
type: "content_block_delta",
index: 0,
delta: { type: "text_delta", text: content },
};
break;
case "google-ai":
return JSON.stringify({
candidates: [
{
content: { parts: [{ text: content }], role: "model" },
finishReason: title,
index: 0,
tokenCount: null,
safetyRatings: [],
},
],
});
case "openai-image":
return JSON.stringify(obj);
default:
assertNever(format);
}
if (format === "anthropic-text") {
return (
["event: completion", `data: ${JSON.stringify(event)}`].join("\n") +
"\n\n"
);
}
// ugh.
if (format === "anthropic-chat") {
return (
[
[
"event: message_start",
`data: ${JSON.stringify({
type: "message_start",
message: {
id: "error-" + id,
type: "message",
role: "assistant",
content: [],
model,
},
})}`,
].join("\n"),
[
"event: content_block_start",
`data: ${JSON.stringify({
type: "content_block_start",
index: 0,
content_block: { type: "text", text: "" },
})}`,
].join("\n"),
["event: content_block_delta", `data: ${JSON.stringify(event)}`].join(
"\n"
),
[
"event: content_block_stop",
`data: ${JSON.stringify({ type: "content_block_stop", index: 0 })}`,
].join("\n"),
[
"event: message_delta",
`data: ${JSON.stringify({
type: "message_delta",
delta: { stop_reason: title, stop_sequence: null, usage: null },
})}`,
].join("\n"),
[
"event: message_stop",
`data: ${JSON.stringify({ type: "message_stop" })}`,
].join("\n"),
].join("\n\n") + "\n\n"
);
}
return `data: ${JSON.stringify(event)}\n\n`;
}
@@ -1,26 +1,38 @@
- import { pipeline } from "stream";
+ import express from "express";
+ import { pipeline, Readable, Transform } from "stream";
+ import StreamArray from "stream-json/streamers/StreamArray";
+ import { StringDecoder } from "string_decoder";
  import { promisify } from "util";
+ import { APIFormat, keyPool } from "../../../shared/key-management";
  import {
-   buildFakeSse,
    copySseResponseHeaders,
-   initializeSseStream
+   initializeSseStream,
  } from "../../../shared/streaming";
- import { decodeResponseBody, RawResponseBodyHandler } from ".";
- import { SSEStreamAdapter } from "./streaming/sse-stream-adapter";
- import { SSEMessageTransformer } from "./streaming/sse-message-transformer";
+ import type { logger } from "../../../logger";
+ import { enqueue } from "../../queue";
+ import { decodeResponseBody, RawResponseBodyHandler, RetryableError } from ".";
+ import { getAwsEventStreamDecoder } from "./streaming/aws-event-stream-decoder";
  import { EventAggregator } from "./streaming/event-aggregator";
+ import { SSEMessageTransformer } from "./streaming/sse-message-transformer";
+ import { SSEStreamAdapter } from "./streaming/sse-stream-adapter";
+ import { buildSpoofedSSE, sendErrorToClient } from "./error-generator";
+ import { BadRequestError } from "../../../shared/errors";

  const pipelineAsync = promisify(pipeline);

  /**
-  * Consume the SSE stream and forward events to the client. Once the stream is
-  * stream is closed, resolve with the full response body so that subsequent
-  * middleware can work with it.
+  * `handleStreamedResponse` consumes and transforms a streamed response from the
+  * upstream service, forwarding events to the client in their requested format.
+  * After the entire stream has been consumed, it resolves with the full response
+  * body so that subsequent middleware in the chain can process it as if it were
+  * a non-streaming response.
   *
-  * Typically we would only need of the raw response handlers to execute, but
-  * in the event a streamed request results in a non-200 response, we need to
-  * fall back to the non-streaming response handler so that the error handler
-  * can inspect the error response.
+  * In the event of an error, the request's streaming flag is unset and the non-
+  * streaming response handler is called instead.
+  *
+  * If the error is retryable, that handler will re-enqueue the request and also
+  * reset the streaming flag. Unfortunately the streaming flag is set and unset
+  * in multiple places, so it's hard to keep track of.
   */
  export const handleStreamedResponse: RawResponseBodyHandler = async (
    proxyRes,
@@ -33,7 +45,7 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
  }

  if (proxyRes.statusCode! > 201) {
-   req.isStreaming = false; // Forces non-streaming response handler to execute
+   req.isStreaming = false;
    req.log.warn(
      { statusCode: proxyRes.statusCode, key: hash },
      `Streaming request returned error status code. Falling back to non-streaming response handler.`
@@ -41,25 +53,34 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
    return decodeResponseBody(proxyRes, req, res);
  }

- req.log.debug(
-   { headers: proxyRes.headers, key: hash },
-   `Starting to proxy SSE stream.`
- );
+ req.log.debug({ headers: proxyRes.headers }, `Starting to proxy SSE stream.`);

- // Users waiting in the queue already have a SSE connection open for the
- // heartbeat, so we can't always send the stream headers.
+ // Typically, streaming will have already been initialized by the request
+ // queue to send heartbeat pings.
  if (!res.headersSent) {
    copySseResponseHeaders(proxyRes, res);
    initializeSseStream(res);
  }

  const prefersNativeEvents = req.inboundApi === req.outboundApi;
- const contentType = proxyRes.headers["content-type"];
+ const streamOptions = {
+   contentType: proxyRes.headers["content-type"],
+   api: req.outboundApi,
+   logger: req.log,
+ };

- const adapter = new SSEStreamAdapter({ contentType });
+ // Decoder turns the raw response stream into a stream of events in some
+ // format (text/event-stream, vnd.amazon.event-stream, streaming JSON, etc).
+ const decoder = getDecoder({ ...streamOptions, input: proxyRes });
+ // Adapter transforms the decoded events into server-sent events.
+ const adapter = new SSEStreamAdapter(streamOptions);
+ // Aggregator compiles all events into a single response object.
  const aggregator = new EventAggregator({ format: req.outboundApi });
+ // Transformer converts server-sent events from one vendor's API message
+ // format to another.
  const transformer = new SSEMessageTransformer({
-   inputFormat: req.outboundApi, // outbound from the request's perspective
+   inputFormat: req.outboundApi, // The format of the upstream service's events
+   outputFormat: req.inboundApi, // The format the client requested
    inputApiVersion: String(req.headers["anthropic-version"]),
    logger: req.log,
    requestId: String(req.id),
@@ -74,14 +95,87 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
  });

  try {
-   await pipelineAsync(proxyRes, adapter, transformer);
-   req.log.debug({ key: hash }, `Finished proxying SSE stream.`);
+   await Promise.race([
+     handleAbortedStream(req, res),
+     pipelineAsync(proxyRes, decoder, adapter, transformer),
+   ]);
+   req.log.debug(`Finished proxying SSE stream.`);
    res.end();
    return aggregator.getFinalResponse();
  } catch (err) {
-   const errorEvent = buildFakeSse("stream-error", err.message, req);
-   res.write(`${errorEvent}data: [DONE]\n\n`);
+   if (err instanceof RetryableError) {
+     keyPool.markRateLimited(req.key!);
+     req.log.warn(
+       { key: req.key!.hash, retryCount: req.retryCount },
+       `Re-enqueueing request due to retryable error during streaming response.`
+     );
+     req.retryCount++;
+     await enqueue(req);
+   } else if (err instanceof BadRequestError) {
+     sendErrorToClient({
+       req,
+       res,
+       options: {
+         format: req.inboundApi,
+         title: "Proxy streaming error (Bad Request)",
+         message: `The API returned an error while streaming your request. Your prompt might not be formatted correctly.\n\n*${err.message}*`,
+         reqId: req.id,
+         model: req.body?.model,
+       },
+     });
+   } else {
+     const { message, stack, lastEvent } = err;
+     const eventText = JSON.stringify(lastEvent, null, 2) ?? "undefined";
+     const errorEvent = buildSpoofedSSE({
+       format: req.inboundApi,
+       title: "Proxy stream error",
+       message: "An unexpected error occurred while streaming the response.",
+       obj: { message, stack, lastEvent: eventText },
+       reqId: req.id,
+       model: req.body?.model,
+     });
+     res.write(errorEvent);
+     res.write(`data: [DONE]\n\n`);
      res.end();
+   }
    throw err;
  }
};
function handleAbortedStream(req: express.Request, res: express.Response) {
return new Promise<void>((resolve) =>
res.on("close", () => {
if (!res.writableEnded) {
req.log.info("Client prematurely closed connection during stream.");
}
resolve();
})
);
}
function getDecoder(options: {
input: Readable;
api: APIFormat;
logger: typeof logger;
contentType?: string;
}) {
const { api, contentType, input, logger } = options;
if (contentType?.includes("application/vnd.amazon.eventstream")) {
return getAwsEventStreamDecoder({ input, logger });
} else if (api === "google-ai") {
return StreamArray.withParser();
} else {
// Passthrough stream, but ensures split chunks across multi-byte characters
// are handled correctly.
const stringDecoder = new StringDecoder("utf8");
return new Transform({
readableObjectMode: true,
writableObjectMode: false,
transform(chunk, _encoding, callback) {
const text = stringDecoder.write(chunk);
if (text) this.push(text);
callback();
},
});
}
}
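Note: a standalone demo of why the passthrough branch needs StringDecoder: a multi-byte UTF-8 character split across chunks must not be emitted as a mangled partial character.
```
import { StringDecoder } from "string_decoder";

const bytes = Buffer.from("café");   // "é" encodes as two bytes, 0xc3 0xa9
const chunkA = bytes.subarray(0, 4); // ends mid-character
const chunkB = bytes.subarray(4);

console.log(chunkA.toString("utf8")); // "caf\ufffd": naive decoding corrupts it

const decoder = new StringDecoder("utf8");
console.log(decoder.write(chunkA)); // "caf": the partial byte is buffered
console.log(decoder.write(chunkB)); // "é": completed by the next chunk
```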
@@ -3,7 +3,6 @@ import { Request, Response } from "express";
import * as http from "http"; import * as http from "http";
import util from "util"; import util from "util";
import zlib from "zlib"; import zlib from "zlib";
import { logger } from "../../../logger";
import { enqueue, trackWaitTime } from "../../queue"; import { enqueue, trackWaitTime } from "../../queue";
import { HttpError } from "../../../shared/errors"; import { HttpError } from "../../../shared/errors";
import { keyPool } from "../../../shared/key-management"; import { keyPool } from "../../../shared/key-management";
@@ -14,13 +13,17 @@ import {
    incrementTokenCount,
  } from "../../../shared/users/user-store";
  import { assertNever } from "../../../shared/utils";
+ import { refundLastAttempt } from "../../rate-limit";
  import {
    getCompletionFromBody,
-   isCompletionRequest,
-   writeErrorResponse,
+   isImageGenerationRequest,
+   isTextGenerationRequest,
+   sendProxyError,
  } from "../common";
  import { handleStreamedResponse } from "./handle-streamed-response";
  import { logPrompt } from "./log-prompt";
+ import { saveImage } from "./save-image";
+ import { config } from "../../../config";

  const DECODER_MAP = {
    gzip: util.promisify(zlib.gunzip),
@@ -34,7 +37,7 @@ const isSupportedContentEncoding = (
    return contentEncoding in DECODER_MAP;
  };

- class RetryableError extends Error {
+ export class RetryableError extends Error {
    constructor(message: string) {
      super(message);
      this.name = "RetryableError";
@@ -103,10 +106,12 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
  } else {
    middlewareStack.push(
      trackRateLimit,
+     addProxyInfo,
      handleUpstreamErrors,
      countResponseTokens,
      incrementUsage,
      copyHttpHeaders,
+     saveImage,
      logPrompt,
      ...apiMiddleware
    );
@@ -149,13 +154,13 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
  };
};

- function reenqueueRequest(req: Request) {
+ async function reenqueueRequest(req: Request) {
    req.log.info(
      { key: req.key?.hash, retryCount: req.retryCount },
      `Re-enqueueing request due to retryable error`
    );
    req.retryCount++;
-   enqueue(req);
+   await enqueue(req);
  }

  /**
@@ -185,15 +190,17 @@ export const decodeResponseBody: RawResponseBodyHandler = async (
  if (contentEncoding) {
    if (isSupportedContentEncoding(contentEncoding)) {
      const decoder = DECODER_MAP[contentEncoding];
+     // @ts-ignore - started failing after upgrading TypeScript, don't care
+     // as it was never a problem.
      body = await decoder(body);
    } else {
-     const errorMessage = `Proxy received response with unsupported content-encoding: ${contentEncoding}`;
-     logger.warn({ contentEncoding, key: req.key?.hash }, errorMessage);
-     writeErrorResponse(req, res, 500, {
-       error: errorMessage,
+     const error = `Proxy received response with unsupported content-encoding: ${contentEncoding}`;
+     req.log.warn({ contentEncoding, key: req.key?.hash }, error);
+     sendProxyError(req, res, 500, "Internal Server Error", {
+       error,
        contentEncoding,
      });
-     return reject(errorMessage);
+     return reject(error);
    }
  }
@@ -203,11 +210,11 @@ export const decodeResponseBody: RawResponseBodyHandler = async (
      return resolve(json);
    }
    return resolve(body.toString());
- } catch (error: any) {
-   const errorMessage = `Proxy received response with invalid JSON: ${error.message}`;
-   logger.warn({ error: error.stack, key: req.key?.hash }, errorMessage);
-   writeErrorResponse(req, res, 500, { error: errorMessage });
-   return reject(errorMessage);
+ } catch (e) {
+   const msg = `Proxy received response with invalid JSON: ${e.message}`;
+   req.log.warn({ error: e.stack, key: req.key?.hash }, msg);
+   sendProxyError(req, res, 500, "Internal Server Error", { error: msg });
+   return reject(msg);
  }
  });
});
@@ -234,6 +241,7 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
  body
) => {
  const statusCode = proxyRes.statusCode || 500;
+ const statusMessage = proxyRes.statusMessage || "Internal Server Error";

  if (statusCode < 400) {
    return;
@@ -250,16 +258,16 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
  } catch (parseError) {
    // Likely Bad Gateway or Gateway Timeout from upstream's reverse proxy
    const hash = req.key?.hash;
-   const statusMessage = proxyRes.statusMessage || "Unknown error";
-   logger.warn({ statusCode, statusMessage, key: hash }, parseError.message);
+   req.log.warn({ statusCode, statusMessage, key: hash }, parseError.message);

    const errorObject = {
-     statusCode,
-     statusMessage: proxyRes.statusMessage,
      error: parseError.message,
-     proxy_note: `This is likely a temporary error with the upstream service.`,
+     status: statusCode,
+     statusMessage,
+     proxy_note: `Proxy got back an error, but it was not in JSON format. This is likely a temporary problem with the upstream service.`,
    };

-   writeErrorResponse(req, res, statusCode, errorObject);
+   sendProxyError(req, res, statusCode, statusMessage, errorObject);
    throw new HttpError(statusCode, parseError.message);
  }
@@ -268,7 +276,7 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
     errorPayload.error?.type ||
     getAwsErrorType(proxyRes.headers["x-amzn-errortype"]);
-  logger.warn(
+  req.log.warn(
     { statusCode, type: errorType, errorPayload, key: req.key?.hash },
     `Received error response from upstream. (${proxyRes.statusMessage})`
   );
@@ -285,12 +293,24 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
     // For Anthropic, this is usually due to missing preamble.
     switch (service) {
       case "openai":
-      case "google-palm":
-        errorPayload.proxy_note = `Upstream service rejected the request as invalid. Your prompt may be too long for ${req.body?.model}.`;
+      case "google-ai":
+      case "mistral-ai":
+      case "azure":
+        const filteredCodes = ["content_policy_violation", "content_filter"];
+        if (filteredCodes.includes(errorPayload.error?.code)) {
+          errorPayload.proxy_note = `Request was filtered by the upstream API's content moderation system. Modify your prompt and try again.`;
+          refundLastAttempt(req);
+        } else if (errorPayload.error?.code === "billing_hard_limit_reached") {
+          // For some reason, some models return this 400 error instead of the
+          // same 429 billing error that other models return.
+          await handleOpenAIRateLimitError(req, tryAgainMessage, errorPayload);
+        } else {
+          errorPayload.proxy_note = `The upstream API rejected the request. Your prompt may be too long for ${req.body?.model}.`;
+        }
         break;
       case "anthropic":
       case "aws":
-        maybeHandleMissingPreambleError(req, errorPayload);
+        await handleAnthropicBadRequestError(req, errorPayload);
         break;
       default:
         assertNever(service);
@@ -300,7 +320,11 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
     keyPool.disable(req.key!, "revoked");
     errorPayload.proxy_note = `API key is invalid or revoked. ${tryAgainMessage}`;
   } else if (statusCode === 403) {
-    // Amazon is the only service that returns 403.
+    if (service === "anthropic") {
+      keyPool.disable(req.key!, "revoked");
+      errorPayload.proxy_note = `API key is invalid or revoked. ${tryAgainMessage}`;
+      return;
+    }
     switch (errorType) {
       case "UnrecognizedClientException":
         // Key is invalid.
@@ -308,12 +332,16 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
         errorPayload.proxy_note = `API key is invalid or revoked. ${tryAgainMessage}`;
         break;
       case "AccessDeniedException":
+        const isModelAccessError =
+          errorPayload.error?.message?.includes(`specified model ID`);
+        if (!isModelAccessError) {
           req.log.error(
             { key: req.key?.hash, model: req.body?.model },
             "Disabling key due to AccessDeniedException when invoking model. If credentials are valid, check IAM permissions."
           );
           keyPool.disable(req.key!, "revoked");
-        errorPayload.proxy_note = `API key doesn't have access to the requested resource.`;
+        }
+        errorPayload.proxy_note = `API key doesn't have access to the requested resource. Model ID: ${req.body?.model}`;
         break;
       default:
         errorPayload.proxy_note = `Received 403 error. Key may be invalid.`;
@@ -321,16 +349,21 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
   } else if (statusCode === 429) {
     switch (service) {
       case "openai":
-        handleOpenAIRateLimitError(req, tryAgainMessage, errorPayload);
+        await handleOpenAIRateLimitError(req, tryAgainMessage, errorPayload);
         break;
       case "anthropic":
-        handleAnthropicRateLimitError(req, errorPayload);
+        await handleAnthropicRateLimitError(req, errorPayload);
         break;
       case "aws":
-        handleAwsRateLimitError(req, errorPayload);
+        await handleAwsRateLimitError(req, errorPayload);
+        break;
+      case "azure":
+      case "mistral-ai":
+        await handleAzureRateLimitError(req, errorPayload);
+        break;
+      case "google-ai":
+        await handleGoogleAIRateLimitError(req, errorPayload);
         break;
-      case "google-palm":
-        throw new Error("Rate limit handling not implemented for PaLM");
       default:
         assertNever(service);
     }
@@ -351,12 +384,18 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
       case "anthropic":
         errorPayload.proxy_note = `The requested Claude model might not exist, or the key might not be provisioned for it.`;
         break;
-      case "google-palm":
-        errorPayload.proxy_note = `The requested Google PaLM model might not exist, or the key might not be provisioned for it.`;
+      case "google-ai":
+        errorPayload.proxy_note = `The requested Google AI model might not exist, or the key might not be provisioned for it.`;
+        break;
+      case "mistral-ai":
+        errorPayload.proxy_note = `The requested Mistral AI model might not exist, or the key might not be provisioned for it.`;
         break;
       case "aws":
         errorPayload.proxy_note = `The requested AWS resource might not exist, or the key might not have access to it.`;
         break;
+      case "azure":
+        errorPayload.proxy_note = `The assigned Azure deployment does not support the requested model.`;
+        break;
       default:
         assertNever(service);
     }
@@ -372,63 +411,75 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
     );
   }

-  writeErrorResponse(req, res, statusCode, errorPayload);
+  sendProxyError(req, res, statusCode, statusMessage, errorPayload);
+  // This is bubbled up to onProxyRes's handler for logging but will not trigger
+  // a write to the response as `sendProxyError` has just done that.
   throw new HttpError(statusCode, errorPayload.error?.message);
 };

-/**
- * This is a workaround for a very strange issue where certain API keys seem to
- * enforce more strict input validation than others -- specifically, they will
- * require a `\n\nHuman:` prefix on the prompt, perhaps to prevent the key from
- * being used as a generic text completion service and to enforce the use of
- * the chat RLHF. This is not documented anywhere, and it's not clear why some
- * keys enforce this and others don't.
- * This middleware checks for that specific error and marks the key as being
- * one that requires the prefix, and then re-enqueues the request.
- * The exact error is:
- * ```
- * {
- *   "error": {
- *     "type": "invalid_request_error",
- *     "message": "prompt must start with \"\n\nHuman:\" turn"
- *   }
- * }
- * ```
- */
-function maybeHandleMissingPreambleError(
+async function handleAnthropicBadRequestError(
   req: Request,
   errorPayload: ProxiedErrorPayload
 ) {
-  if (
-    errorPayload.error?.type === "invalid_request_error" &&
-    errorPayload.error?.message === 'prompt must start with "\n\nHuman:" turn'
-  ) {
+  const { error } = errorPayload;
+  const isMissingPreamble = error?.message.startsWith(
+    `prompt must start with "\n\nHuman:" turn`
+  );
+
+  // Some keys mandate a \n\nHuman: preamble, which we can add and retry
+  if (isMissingPreamble) {
     req.log.warn(
       { key: req.key?.hash },
       "Request failed due to missing preamble. Key will be marked as such for subsequent requests."
     );
     keyPool.update(req.key!, { requiresPreamble: true });
-    reenqueueRequest(req);
+    await reenqueueRequest(req);
     throw new RetryableError("Claude request re-enqueued to add preamble.");
-  } else {
-    errorPayload.proxy_note = `Proxy received unrecognized error from Anthropic. Check the specific error for more information.`;
   }
+
+  // {"type":"error","error":{"type":"invalid_request_error","message":"Usage blocked until 2024-03-01T00:00:00+00:00 due to user specified spend limits."}}
+  // {"type":"error","error":{"type":"invalid_request_error","message":"Your credit balance is too low to access the Claude API. Please go to Plans & Billing to upgrade or purchase credits."}}
+  const isOverQuota =
+    error?.message?.match(/usage blocked until/i) ||
+    error?.message?.match(/credit balance is too low/i);
+  if (isOverQuota) {
+    req.log.warn(
+      { key: req.key?.hash, message: error?.message },
+      "Anthropic key has hit spending limit and will be disabled."
+    );
+    keyPool.disable(req.key!, "quota");
+    errorPayload.proxy_note = `Assigned key has hit its spending limit. ${error?.message}`;
+    return;
+  }
+
+  const isDisabled = error?.message?.match(/organization has been disabled/i);
+  if (isDisabled) {
+    req.log.warn(
+      { key: req.key?.hash, message: error?.message },
+      "Anthropic key has been disabled."
+    );
+    keyPool.disable(req.key!, "revoked");
+    errorPayload.proxy_note = `Assigned key has been disabled. ${error?.message}`;
+    return;
+  }
+
+  errorPayload.proxy_note = `Unrecognized error from the API. (${error?.message})`;
 }
-function handleAnthropicRateLimitError(
+async function handleAnthropicRateLimitError(
   req: Request,
   errorPayload: ProxiedErrorPayload
 ) {
   if (errorPayload.error?.type === "rate_limit_error") {
     keyPool.markRateLimited(req.key!);
-    reenqueueRequest(req);
+    await reenqueueRequest(req);
     throw new RetryableError("Claude rate-limited request re-enqueued.");
   } else {
-    errorPayload.proxy_note = `Unrecognized rate limit error from Anthropic. Key may be over quota.`;
+    errorPayload.proxy_note = `Unrecognized 429 Too Many Requests error from the API.`;
   }
 }
-function handleAwsRateLimitError(
+async function handleAwsRateLimitError(
   req: Request,
   errorPayload: ProxiedErrorPayload
 ) {
@@ -436,7 +487,7 @@ function handleAwsRateLimitError(
   switch (errorType) {
     case "ThrottlingException":
       keyPool.markRateLimited(req.key!);
-      reenqueueRequest(req);
+      await reenqueueRequest(req);
       throw new RetryableError("AWS rate-limited request re-enqueued.");
     case "ModelNotReadyException":
       errorPayload.proxy_note = `The requested model is overloaded. Try again in a few seconds.`;
@@ -446,14 +497,15 @@ function handleAwsRateLimitError(
   }
 }
-function handleOpenAIRateLimitError(
+async function handleOpenAIRateLimitError(
   req: Request,
   tryAgainMessage: string,
   errorPayload: ProxiedErrorPayload
-): Record<string, any> {
+): Promise<Record<string, any>> {
   const type = errorPayload.error?.type;
   switch (type) {
     case "insufficient_quota":
+    case "invalid_request_error": // this is the billing_hard_limit_reached error seen in some cases
       // Billing quota exceeded (key is dead, disable it)
       keyPool.disable(req.key!, "quota");
       errorPayload.proxy_note = `Assigned key's quota has been exceeded. ${tryAgainMessage}`;
@@ -470,10 +522,66 @@ function handleOpenAIRateLimitError(
       break;
     case "requests":
     case "tokens":
-      // Per-minute request or token rate limit is exceeded, which we can retry
       keyPool.markRateLimited(req.key!);
-      reenqueueRequest(req);
+      if (errorPayload.error?.message?.match(/on requests per day/)) {
+        // This key has a very low rate limit, so we can't re-enqueue it.
+        errorPayload.proxy_note = `Assigned key has reached its per-day request limit for this model. Try another model.`;
+        break;
+      }
+      // Per-minute request or token rate limit is exceeded, which we can retry
+      await reenqueueRequest(req);
       throw new RetryableError("Rate-limited request re-enqueued.");
+    // WIP/nonfunctional
+    // case "tokens_usage_based":
+    //   // Weird new rate limit type that seems limited to preview models.
+    //   // Distinct from `tokens` type. Can be per-minute or per-day.
+    //
+    //   // I've seen reports of this error for 500k tokens/day and 10k tokens/min.
+    //   // 10k tokens per minute is problematic, because this is much less than
+    //   // GPT4-Turbo's max context size for a single prompt and is effectively a
+    //   // cap on the max context size for just that key+model, which the app is
+    //   // not able to deal with.
+    //
+    //   // Similarly if there is a 500k tokens per day limit and 450k tokens have
+    //   // been used today, the max context for that key becomes 50k tokens until
+    //   // the next day and becomes progressively smaller as more tokens are used.
+    //
+    //   // To work around these keys we will first retry the request a few times.
+    //   // After that we will reject the request, and if it's a per-day limit we
+    //   // will also disable the key.
+    //
+    //   // "Rate limit reached for gpt-4-1106-preview in organization org-xxxxxxxxxxxxxxxxxxx on tokens_usage_based per day: Limit 500000, Used 460000, Requested 50000"
+    //   // "Rate limit reached for gpt-4-1106-preview in organization org-xxxxxxxxxxxxxxxxxxx on tokens_usage_based per min: Limit 10000, Requested 40000"
+    //
+    //   const regex =
+    //     /Rate limit reached for .+ in organization .+ on \w+ per (day|min): Limit (\d+)(?:, Used (\d+))?, Requested (\d+)/;
+    //   const [, period, limit, used, requested] =
+    //     errorPayload.error?.message?.match(regex) || [];
+    //
+    //   req.log.warn(
+    //     { key: req.key?.hash, period, limit, used, requested },
+    //     "Received `tokens_usage_based` rate limit error from OpenAI."
+    //   );
+    //
+    //   if (!period || !limit || !requested) {
+    //     errorPayload.proxy_note = `Unrecognized rate limit error from OpenAI. (${errorPayload.error?.message})`;
+    //     break;
+    //   }
+    //
+    //   if (req.retryCount < 2) {
+    //     await reenqueueRequest(req);
+    //     throw new RetryableError("Rate-limited request re-enqueued.");
+    //   }
+    //
+    //   if (period === "min") {
+    //     errorPayload.proxy_note = `Assigned key can't be used for prompts longer than ${limit} tokens, and no other keys are available right now. Reduce the length of your prompt or try again in a few minutes.`;
+    //   } else {
+    //     errorPayload.proxy_note = `Assigned key has reached its per-day request limit for this model. Try another model.`;
+    //   }
+    //
+    //   keyPool.markRateLimited(req.key!);
+    //   break;
     default:
       errorPayload.proxy_note = `This is likely a temporary error with OpenAI. Try again in a few seconds.`;
       break;
@@ -481,14 +589,56 @@ function handleOpenAIRateLimitError(
   return errorPayload;
 }

+async function handleAzureRateLimitError(
+  req: Request,
+  errorPayload: ProxiedErrorPayload
+) {
+  const code = errorPayload.error?.code;
+  switch (code) {
+    case "429":
+      keyPool.markRateLimited(req.key!);
+      await reenqueueRequest(req);
+      throw new RetryableError("Rate-limited request re-enqueued.");
+    default:
+      errorPayload.proxy_note = `Unrecognized rate limit error from Azure (${code}). Please report this.`;
+      break;
+  }
+}

+//{"error":{"code":429,"message":"Resource has been exhausted (e.g. check quota).","status":"RESOURCE_EXHAUSTED"}
+async function handleGoogleAIRateLimitError(
+  req: Request,
+  errorPayload: ProxiedErrorPayload
+) {
+  const status = errorPayload.error?.status;
+  switch (status) {
+    case "RESOURCE_EXHAUSTED":
+      keyPool.markRateLimited(req.key!);
+      await reenqueueRequest(req);
+      throw new RetryableError("Rate-limited request re-enqueued.");
+    default:
+      errorPayload.proxy_note = `Unrecognized rate limit error from Google AI (${status}). Please report this.`;
+      break;
+  }
+}
 const incrementUsage: ProxyResHandlerWithBody = async (_proxyRes, req) => {
-  if (isCompletionRequest(req)) {
+  if (isTextGenerationRequest(req) || isImageGenerationRequest(req)) {
     const model = req.body.model;
     const tokensUsed = req.promptTokens! + req.outputTokens!;
+    req.log.debug(
+      {
+        model,
+        tokensUsed,
+        promptTokens: req.promptTokens,
+        outputTokens: req.outputTokens,
+      },
+      `Incrementing usage for model`
+    );
     keyPool.incrementUsage(req.key!, model, tokensUsed);
     if (req.user) {
       incrementPromptCount(req.user.token);
-      incrementTokenCount(req.user.token, model, tokensUsed);
+      incrementTokenCount(req.user.token, model, req.outboundApi, tokensUsed);
     }
   }
 };
@@ -499,6 +649,12 @@ const countResponseTokens: ProxyResHandlerWithBody = async (
   _res,
   body
 ) => {
+  if (req.outboundApi === "openai-image") {
+    req.outputTokens = req.promptTokens;
+    req.promptTokens = 0;
+    return;
+  }
   // This function is prone to breaking if the upstream API makes even minor
   // changes to the response format, especially for SSE responses. If you're
   // seeing errors in this function, check the reassembled response body from
@@ -513,8 +669,8 @@ const countResponseTokens: ProxyResHandlerWithBody = async (
     { service, tokens, prevOutputTokens: req.outputTokens },
     `Counted tokens for completion`
   );
-  if (req.debug) {
-    req.debug.completion_tokens = tokens;
+  if (req.tokenizerInfo) {
+    req.tokenizerInfo.completion_tokens = tokens;
   }
   req.outputTokens = tokens.token_count;
@@ -552,6 +708,38 @@ const copyHttpHeaders: ProxyResHandlerWithBody = async (
   });
 };

+/**
+ * Injects metadata into the response, such as the tokenizer used, logging
+ * status, upstream API endpoint used, and whether the input prompt was modified
+ * or transformed.
+ * Only used for non-streaming requests.
+ */
+const addProxyInfo: ProxyResHandlerWithBody = async (
+  _proxyRes,
+  req,
+  res,
+  body
+) => {
+  const { service, inboundApi, outboundApi, tokenizerInfo } = req;
+  const native = inboundApi === outboundApi;
+  const info: any = {
+    logged: config.promptLogging,
+    tokens: tokenizerInfo,
+    service,
+    in_api: inboundApi,
+    out_api: outboundApi,
+    prompt_transformed: !native,
+  };
+  if (req.query?.debug?.length) {
+    info.final_request_body = req.signedRequest?.body || req.body;
+  }
+  if (typeof body === "object") {
+    body.proxy = info;
+  }
+};

 function getAwsErrorType(header: string | string[] | undefined) {
   const val = String(header).match(/^(\w+):?/)?.[1];
   return val || String(header);
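For reference, a minimal sketch of what a non-streaming JSON response might look like after addProxyInfo runs. The field values here are invented for illustration; only the field names come from the handler above.

// Illustrative shape only; values are made up for the example.
const exampleResponseBody = {
  id: "chatcmpl-xxxx",
  choices: [{ index: 0, message: { role: "assistant", content: "Hi!" } }],
  proxy: {
    logged: false, // config.promptLogging
    tokens: { prompt_tokens: 12, completion_tokens: 3 }, // req.tokenizerInfo
    service: "openai",
    in_api: "openai",
    out_api: "openai",
    prompt_transformed: false, // true when in_api !== out_api
    // final_request_body appears only when the ?debug=true query param is set
  },
};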
@@ -4,10 +4,18 @@ import { logQueue } from "../../../shared/prompt-logging";
 import {
   getCompletionFromBody,
   getModelFromBody,
-  isCompletionRequest,
+  isImageGenerationRequest,
+  isTextGenerationRequest,
 } from "../common";
 import { ProxyResHandlerWithBody } from ".";
 import { assertNever } from "../../../shared/utils";
+import {
+  AnthropicChatMessage,
+  flattenAnthropicMessages,
+  MistralAIChatMessage,
+  OpenAIChatMessage,
+} from "../../../shared/api-support";
+import { APIFormat } from "../../../shared/key-management";

 /** If prompt logging is enabled, enqueues the prompt for logging. */
 export const logPrompt: ProxyResHandlerWithBody = async (
@@ -23,12 +31,12 @@ export const logPrompt: ProxyResHandlerWithBody = async (
     throw new Error("Expected body to be an object");
   }

-  if (!isCompletionRequest(req)) {
-    return;
-  }
+  const loggable =
+    isTextGenerationRequest(req) || isImageGenerationRequest(req);
+  if (!loggable) return;

-  const promptPayload = getPromptForRequest(req);
-  const promptFlattened = flattenMessages(promptPayload);
+  const promptPayload = getPromptForRequest(req, responseBody);
+  const promptFlattened = flattenMessages(promptPayload, req.outboundApi);
   const response = getCompletionFromBody(req, responseBody);
   const model = getModelFromBody(req, responseBody);
@@ -41,32 +49,81 @@ export const logPrompt: ProxyResHandlerWithBody = async (
   });
 };

-type OaiMessage = {
-  role: "user" | "assistant" | "system";
-  content: string;
+type OaiImageResult = {
+  prompt: string;
+  size: string;
+  style: string;
+  quality: string;
+  revisedPrompt?: string;
 };

-const getPromptForRequest = (req: Request): string | OaiMessage[] => {
+const getPromptForRequest = (
+  req: Request,
+  responseBody: Record<string, any>
+):
+  | string
+  | OpenAIChatMessage[]
+  | AnthropicChatMessage[]
+  | MistralAIChatMessage[]
+  | OaiImageResult => {
   // Since the prompt logger only runs after the request has been proxied, we
   // can assume the body has already been transformed to the target API's
   // format.
   switch (req.outboundApi) {
     case "openai":
+    case "mistral-ai":
+    case "anthropic-chat":
       return req.body.messages;
     case "openai-text":
       return req.body.prompt;
-    case "anthropic":
+    case "openai-image":
+      return {
+        prompt: req.body.prompt,
+        size: req.body.size,
+        style: req.body.style,
+        quality: req.body.quality,
+        revisedPrompt: responseBody.data[0].revised_prompt,
+      };
+    case "anthropic-text":
       return req.body.prompt;
-    case "google-palm":
+    case "google-ai":
       return req.body.prompt.text;
     default:
       assertNever(req.outboundApi);
   }
 };

-const flattenMessages = (messages: string | OaiMessage[]): string => {
-  if (typeof messages === "string") {
-    return messages.trim();
+const flattenMessages = (
+  val:
+    | string
+    | OaiImageResult
+    | OpenAIChatMessage[]
+    | AnthropicChatMessage[]
+    | MistralAIChatMessage[],
+  format: APIFormat
+): string => {
+  if (typeof val === "string") {
+    return val.trim();
   }
-  return messages.map((m) => `${m.role}: ${m.content}`).join("\n");
+  if (format === "anthropic-chat") {
+    return flattenAnthropicMessages(val as AnthropicChatMessage[]);
+  }
+  if (Array.isArray(val)) {
+    return val
+      .map(({ content, role }) => {
+        const text = Array.isArray(content)
+          ? content
+              .map((c) => {
+                if ("text" in c) return c.text;
+                if ("image_url" in c) return "(( Attached Image ))";
+                if ("source" in c) return "(( Attached Image ))";
+                return "(( Unsupported Content ))";
+              })
+              .join("\n")
+          : content;
+        return `${role}: ${text}`;
+      })
+      .join("\n");
+  }
+  return val.prompt.trim();
 };
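To illustrate the flattening logic above, here is a hypothetical multimodal OpenAI-format message list and the string flattenMessages would produce for the prompt log. The input values are invented for the example.

const exampleMessages = [
  { role: "system", content: "Be concise." },
  {
    role: "user",
    content: [
      { type: "text", text: "What is in this picture?" },
      { type: "image_url", image_url: { url: "https://example.com/cat.png" } },
    ],
  },
];
// flattenMessages(exampleMessages, "openai") yields:
// "system: Be concise.\nuser: What is in this picture?\n(( Attached Image ))"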
@@ -0,0 +1,33 @@
import { ProxyResHandlerWithBody } from "./index";
import {
mirrorGeneratedImage,
OpenAIImageGenerationResult,
} from "../../../shared/file-storage/mirror-generated-image";
export const saveImage: ProxyResHandlerWithBody = async (
_proxyRes,
req,
_res,
body
) => {
if (req.outboundApi !== "openai-image") {
return;
}
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
if (body.data) {
const prompt = body.data[0].revised_prompt ?? req.body.prompt;
const res = await mirrorGeneratedImage(
req,
prompt,
body as OpenAIImageGenerationResult
);
req.log.info(
{ urls: res.data.map((item) => item.url) },
"Saved generated image to user_content"
);
}
};
@@ -0,0 +1,49 @@
import { OpenAIChatCompletionStreamEvent } from "../index";
export type AnthropicChatCompletionResponse = {
id: string;
type: "message";
role: "assistant";
content: { type: "text"; text: string }[];
model: string;
stop_reason: string | null;
stop_sequence: string | null;
usage: { input_tokens: number; output_tokens: number };
};
/**
* Given a list of OpenAI chat completion events, compiles them into a single
* finalized Anthropic chat completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForAnthropicChat(
events: OpenAIChatCompletionStreamEvent[]
): AnthropicChatCompletionResponse {
let merged: AnthropicChatCompletionResponse = {
id: "",
type: "message",
role: "assistant",
content: [],
model: "",
stop_reason: null,
stop_sequence: null,
usage: { input_tokens: 0, output_tokens: 0 },
};
merged = events.reduce((acc, event, i) => {
// The first event will only contain role assignment and response metadata
if (i === 0) {
acc.id = event.id;
acc.model = event.model;
acc.content = [{ type: "text", text: "" }];
return acc;
}
acc.stop_reason = event.choices[0].finish_reason ?? "";
if (event.choices[0].delta.content) {
acc.content[0].text += event.choices[0].delta.content;
}
return acc;
}, merged);
return merged;
}
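As a usage sketch for the aggregator above: three hypothetical OpenAI-format chunks collapse into one blocking-style Anthropic chat response. Event contents are invented; only mergeEventsForAnthropicChat comes from this file.

const chunk = (delta: object, finish: string | null) => ({
  id: "chatcmpl-1",
  object: "chat.completion.chunk" as const,
  created: 0,
  model: "claude-3-sonnet",
  choices: [{ index: 0, delta, finish_reason: finish }],
});
const merged = mergeEventsForAnthropicChat([
  chunk({ role: "assistant" }, null), // first event carries only metadata
  chunk({ content: "Hello" }, null),
  chunk({}, "stop"),
]);
// merged.content[0].text === "Hello"; merged.stop_reason === "stop"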
@@ -1,6 +1,6 @@
import { OpenAIChatCompletionStreamEvent } from "../index"; import { OpenAIChatCompletionStreamEvent } from "../index";
export type AnthropicCompletionResponse = { export type AnthropicTextCompletionResponse = {
completion: string; completion: string;
stop_reason: string; stop_reason: string;
truncated: boolean; truncated: boolean;
@@ -15,10 +15,10 @@ export type AnthropicCompletionResponse = {
* finalized Anthropic completion response so that non-streaming middleware * finalized Anthropic completion response so that non-streaming middleware
* can operate on it as if it were a blocking response. * can operate on it as if it were a blocking response.
*/ */
export function mergeEventsForAnthropic( export function mergeEventsForAnthropicText(
events: OpenAIChatCompletionStreamEvent[] events: OpenAIChatCompletionStreamEvent[]
): AnthropicCompletionResponse { ): AnthropicTextCompletionResponse {
let merged: AnthropicCompletionResponse = { let merged: AnthropicTextCompletionResponse = {
log_id: "", log_id: "",
exception: null, exception: null,
model: "", model: "",
@@ -0,0 +1,93 @@
import pino from "pino";
import { Duplex, Readable } from "stream";
import { EventStreamMarshaller } from "@smithy/eventstream-serde-node";
import { fromUtf8, toUtf8 } from "@smithy/util-utf8";
import { Message } from "@smithy/eventstream-codec";
/**
* Decodes a Readable stream, such as a proxied HTTP response, into a stream of
* Message objects using the AWS SDK's EventStreamMarshaller. Error events in
* the amazon eventstream protocol are decoded as Message objects and will not
* emit an error event on the decoder stream.
*/
export function getAwsEventStreamDecoder(params: {
input: Readable;
logger: pino.Logger;
}): Duplex {
const { input, logger } = params;
const config = { utf8Encoder: toUtf8, utf8Decoder: fromUtf8 };
const eventStream = new EventStreamMarshaller(config).deserialize(
input,
async (input: Record<string, Message>) => {
const eventType = Object.keys(input)[0];
let result;
if (eventType === "chunk") {
result = input[eventType];
} else {
// AWS unmarshaller treats non-chunk (errors and exceptions) oddly.
result = { [eventType]: input[eventType] } as any;
}
return result;
}
);
return new AWSEventStreamDecoder(eventStream, { logger });
}
class AWSEventStreamDecoder extends Duplex {
private readonly asyncIterable: AsyncIterable<Message>;
private iterator: AsyncIterator<Message>;
private reading: boolean;
private logger: pino.Logger;
constructor(
asyncIterable: AsyncIterable<Message>,
options: { logger: pino.Logger }
) {
super({ ...options, objectMode: true });
this.asyncIterable = asyncIterable;
this.iterator = this.asyncIterable[Symbol.asyncIterator]();
this.reading = false;
this.logger = options.logger.child({ module: "aws-eventstream-decoder" });
}
async _read(_size: number) {
if (this.reading) return;
this.reading = true;
try {
while (true) {
const { value, done } = await this.iterator.next();
if (done) {
this.push(null);
break;
}
if (!this.push(value)) break;
}
} catch (err) {
// AWS SDK's EventStreamMarshaller emits errors in the stream itself as
// whatever our deserializer returns, which will not be Error objects
// because we want to pass the Message to the next stream for processing.
// Any actual Error thrown here is some failure during deserialization.
const isAwsError = !(err instanceof Error);
if (isAwsError) {
this.logger.warn({ err: err.headers }, "Received AWS error event");
this.push(err);
this.push(null);
} else {
this.logger.error(err, "Error during AWS stream deserialization");
this.destroy(err);
}
} finally {
this.reading = false;
}
}
_write(_chunk: any, _encoding: string, callback: () => void) {
callback();
}
_final(callback: () => void) {
callback();
}
}
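A rough wiring sketch for the decoder above. The upstream response stream is a placeholder and error handling is omitted; only getAwsEventStreamDecoder is from this file.

import { Readable } from "stream";
import pino from "pino";

declare const upstreamResponse: Readable; // e.g. the proxied HTTP response

const decoder = getAwsEventStreamDecoder({
  input: upstreamResponse,
  logger: pino(),
});
decoder.on("data", (message) => {
  // `message` is a decoded eventstream Message (or an error pseudo-event),
  // typically piped onward into an SSE adapter for further processing.
});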
@@ -1,10 +1,13 @@
 import { APIFormat } from "../../../../shared/key-management";
 import { assertNever } from "../../../../shared/utils";
 import {
-  mergeEventsForAnthropic,
+  anthropicV2ToOpenAI,
+  mergeEventsForAnthropicChat,
+  mergeEventsForAnthropicText,
   mergeEventsForOpenAIChat,
   mergeEventsForOpenAIText,
-  OpenAIChatCompletionStreamEvent
+  AnthropicV2StreamEvent,
+  OpenAIChatCompletionStreamEvent,
 } from "./index";

 /**
@@ -20,22 +23,54 @@ export class EventAggregator {
     this.format = format;
   }

-  addEvent(event: OpenAIChatCompletionStreamEvent) {
-    this.events.push(event);
+  addEvent(event: OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent) {
+    if (eventIsOpenAIEvent(event)) {
+      this.events.push(event);
+    } else {
+      // horrible special case. previously all transformers' target format was
+      // openai, so the event aggregator could conveniently assume all incoming
+      // events were in openai format.
+      // now we have added anthropic-chat-to-text, so aggregator needs to know
+      // how to collapse events from two formats.
+      // because that is annoying, we will simply transform anthropic events to
+      // openai (even if the client didn't ask for openai) so we don't have to
+      // write aggregation logic for anthropic chat (which is also a troublesome
+      // stateful format).
+      const openAIEvent = anthropicV2ToOpenAI({
+        data: `event: completion\ndata: ${JSON.stringify(event)}\n\n`,
+        lastPosition: -1,
+        index: 0,
+        fallbackId: event.log_id || "event-aggregator-fallback",
+        fallbackModel: event.model || "claude-3-fallback",
+      });
+      if (openAIEvent.event) {
+        this.events.push(openAIEvent.event);
+      }
+    }
   }

   getFinalResponse() {
     switch (this.format) {
       case "openai":
+      case "google-ai":
+      case "mistral-ai":
         return mergeEventsForOpenAIChat(this.events);
       case "openai-text":
         return mergeEventsForOpenAIText(this.events);
-      case "anthropic":
-        return mergeEventsForAnthropic(this.events);
-      case "google-palm":
-        throw new Error("Google PaLM API does not support streaming responses");
+      case "anthropic-text":
+        return mergeEventsForAnthropicText(this.events);
+      case "anthropic-chat":
+        return mergeEventsForAnthropicChat(this.events);
+      case "openai-image":
+        throw new Error(`SSE aggregation not supported for ${this.format}`);
       default:
         assertNever(this.format);
     }
   }
 }

+function eventIsOpenAIEvent(
+  event: any
+): event is OpenAIChatCompletionStreamEvent {
+  return event?.object === "chat.completion.chunk";
+}
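The special case in addEvent can be seen in isolation: an Anthropic V2 text event is re-serialized as an SSE frame and pushed through anthropicV2ToOpenAI, so the aggregator only ever stores OpenAI-format chunks. A sketch with invented values, mirroring the call in addEvent:

const anthropicEvent = {
  log_id: "compl_123",
  model: "claude-2.1",
  completion: "Hello",
  stop_reason: null,
};
const { event } = anthropicV2ToOpenAI({
  data: `event: completion\ndata: ${JSON.stringify(anthropicEvent)}\n\n`,
  lastPosition: -1,
  index: 0,
  fallbackId: anthropicEvent.log_id,
  fallbackModel: anthropicEvent.model,
});
// `event` is now a chat.completion.chunk whose delta.content is "Hello".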
@@ -1,9 +1,17 @@
export type SSEResponseTransformArgs = { export type SSEResponseTransformArgs<S = Record<string, any>> = {
data: string; data: string;
lastPosition: number; lastPosition: number;
index: number; index: number;
fallbackId: string; fallbackId: string;
fallbackModel: string; fallbackModel: string;
state?: S;
};
export type AnthropicV2StreamEvent = {
log_id?: string;
model?: string;
completion: string;
stop_reason: string | null;
}; };
export type OpenAIChatCompletionStreamEvent = { export type OpenAIChatCompletionStreamEvent = {
@@ -16,15 +24,25 @@ export type OpenAIChatCompletionStreamEvent = {
delta: { role?: string; content?: string }; delta: { role?: string; content?: string };
finish_reason: string | null; finish_reason: string | null;
}[]; }[];
} };
export type StreamingCompletionTransformer = ( export type StreamingCompletionTransformer<
params: SSEResponseTransformArgs T = OpenAIChatCompletionStreamEvent,
) => { position: number; event?: OpenAIChatCompletionStreamEvent }; S = any,
> = (params: SSEResponseTransformArgs<S>) => {
position: number;
event?: T;
state?: S;
};
export { openAITextToOpenAIChat } from "./transformers/openai-text-to-openai"; export { openAITextToOpenAIChat } from "./transformers/openai-text-to-openai";
export { anthropicV1ToOpenAI } from "./transformers/anthropic-v1-to-openai"; export { anthropicV1ToOpenAI } from "./transformers/anthropic-v1-to-openai";
export { anthropicV2ToOpenAI } from "./transformers/anthropic-v2-to-openai"; export { anthropicV2ToOpenAI } from "./transformers/anthropic-v2-to-openai";
export { anthropicChatToAnthropicV2 } from "./transformers/anthropic-chat-to-anthropic-v2";
export { anthropicChatToOpenAI } from "./transformers/anthropic-chat-to-openai";
export { googleAIToOpenAI } from "./transformers/google-ai-to-openai";
export { passthroughToOpenAI } from "./transformers/passthrough-to-openai";
export { mergeEventsForOpenAIChat } from "./aggregators/openai-chat"; export { mergeEventsForOpenAIChat } from "./aggregators/openai-chat";
export { mergeEventsForOpenAIText } from "./aggregators/openai-text"; export { mergeEventsForOpenAIText } from "./aggregators/openai-text";
export { mergeEventsForAnthropic } from "./aggregators/anthropic"; export { mergeEventsForAnthropicText } from "./aggregators/anthropic-text";
export { mergeEventsForAnthropicChat } from "./aggregators/anthropic-chat";
@@ -3,27 +3,27 @@ export type ServerSentEvent = { id?: string; type?: string; data: string };
 /** Given a string of SSE data, parse it into a `ServerSentEvent` object. */
 export function parseEvent(event: string) {
   const buffer: ServerSentEvent = { data: "" };
-  return event.split(/\r?\n/).reduce(parseLine, buffer)
+  return event.split(/\r?\n/).reduce(parseLine, buffer);
 }

 function parseLine(event: ServerSentEvent, line: string) {
   const separator = line.indexOf(":");
-  const field = separator === -1 ? line : line.slice(0,separator);
+  const field = separator === -1 ? line : line.slice(0, separator);
   const value = separator === -1 ? "" : line.slice(separator + 1);

   switch (field) {
-    case 'id':
-      event.id = value.trim()
-      break
-    case 'event':
-      event.type = value.trim()
-      break
-    case 'data':
-      event.data += value.trimStart()
-      break
+    case "id":
+      event.id = value.trim();
+      break;
+    case "event":
+      event.type = value.trim();
+      break;
+    case "data":
+      event.data += value.trimStart();
+      break;
     default:
-      break
+      break;
   }
-  return event
+  return event;
 }
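For example, feeding a typical frame through parseEvent (frame contents invented):

const parsed = parseEvent('event: completion\ndata: {"completion":"Hi"}');
// parsed => { data: '{"completion":"Hi"}', type: "completion" }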
@@ -3,22 +3,25 @@ import { logger } from "../../../../logger";
 import { APIFormat } from "../../../../shared/key-management";
 import { assertNever } from "../../../../shared/utils";
 import {
+  anthropicChatToOpenAI,
+  anthropicChatToAnthropicV2,
   anthropicV1ToOpenAI,
+  AnthropicV2StreamEvent,
   anthropicV2ToOpenAI,
+  googleAIToOpenAI,
   OpenAIChatCompletionStreamEvent,
   openAITextToOpenAIChat,
+  passthroughToOpenAI,
   StreamingCompletionTransformer,
 } from "./index";
-import { passthroughToOpenAI } from "./transformers/passthrough-to-openai";
-
-const genlog = logger.child({ module: "sse-transformer" });

 type SSEMessageTransformerOptions = TransformOptions & {
   requestedModel: string;
   requestId: string;
   inputFormat: APIFormat;
   inputApiVersion?: string;
-  logger?: typeof logger;
+  outputFormat?: APIFormat;
+  logger: typeof logger;
 };

 /**
@@ -27,21 +30,28 @@ type SSEMessageTransformerOptions = TransformOptions & {
  */
 export class SSEMessageTransformer extends Transform {
   private lastPosition: number;
+  private transformState: any;
   private msgCount: number;
-  private readonly transformFn: StreamingCompletionTransformer;
+  private readonly inputFormat: APIFormat;
+  private readonly transformFn: StreamingCompletionTransformer<
+    // TODO: Refactor transformers to not assume only OpenAI events as output
+    OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent
+  >;
   private readonly log;
   private readonly fallbackId: string;
   private readonly fallbackModel: string;

   constructor(options: SSEMessageTransformerOptions) {
     super({ ...options, readableObjectMode: true });
-    this.log = options.logger?.child({ module: "sse-transformer" }) ?? genlog;
+    this.log = options.logger?.child({ module: "sse-transformer" });
     this.lastPosition = 0;
     this.msgCount = 0;
     this.transformFn = getTransformer(
       options.inputFormat,
-      options.inputApiVersion
+      options.inputApiVersion,
+      options.outputFormat
     );
+    this.inputFormat = options.inputFormat;
     this.fallbackId = options.requestId;
     this.fallbackModel = options.requestedModel;
     this.log.debug(
@@ -57,48 +67,85 @@ export class SSEMessageTransformer extends Transform {
   _transform(chunk: Buffer, _encoding: BufferEncoding, callback: Function) {
     try {
       const originalMessage = chunk.toString();
-      const { event: transformedMessage, position: newPosition } =
-        this.transformFn({
+      const {
+        event: transformedMessage,
+        position: newPosition,
+        state,
+      } = this.transformFn({
         data: originalMessage,
         lastPosition: this.lastPosition,
         index: this.msgCount++,
         fallbackId: this.fallbackId,
         fallbackModel: this.fallbackModel,
+        state: this.transformState,
       });
       this.lastPosition = newPosition;
+      this.transformState = state;

+      // Special case for Azure OpenAI, which is 99% the same as OpenAI but
+      // sometimes emits an extra event at the beginning of the stream with the
+      // content moderation system's response to the prompt. A lot of frontends
+      // don't expect this and neither does our event aggregator so we drop it.
+      if (this.inputFormat === "openai" && this.msgCount <= 1) {
+        if (originalMessage.includes("prompt_filter_results")) {
+          this.log.debug("Dropping Azure OpenAI content moderation SSE event");
+          return callback();
+        }
+      }

       this.emit("originalMessage", originalMessage);

       // Some events may not be transformed, e.g. ping events
       if (!transformedMessage) return callback();

-      if (this.msgCount === 1) {
+      if (this.msgCount === 1 && eventIsOpenAIEvent(transformedMessage)) {
+        // TODO: does this need to be skipped for passthroughToOpenAI?
         this.push(createInitialMessage(transformedMessage));
       }
       this.push(transformedMessage);
       callback();
     } catch (err) {
+      err.lastEvent = chunk?.toString();
       this.log.error(err, "Error transforming SSE message");
       callback(err);
     }
   }
 }

+function eventIsOpenAIEvent(
+  event: any
+): event is OpenAIChatCompletionStreamEvent {
+  return event?.object === "chat.completion.chunk";
+}

 function getTransformer(
   responseApi: APIFormat,
-  version?: string
-): StreamingCompletionTransformer {
+  version?: string,
+  // There's only one case where we're not transforming back to OpenAI, which is
+  // Anthropic Chat response -> Anthropic Text request. This parameter is only
+  // used for that case.
+  requestApi: APIFormat = "openai"
+): StreamingCompletionTransformer<
+  OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent
+> {
   switch (responseApi) {
     case "openai":
+    case "mistral-ai":
       return passthroughToOpenAI;
     case "openai-text":
       return openAITextToOpenAIChat;
-    case "anthropic":
+    case "anthropic-text":
       return version === "2023-01-01"
         ? anthropicV1ToOpenAI
         : anthropicV2ToOpenAI;
-    case "google-palm":
-      throw new Error("Google PaLM does not support streaming responses");
+    case "anthropic-chat":
+      return requestApi === "anthropic-text"
+        ? anthropicChatToAnthropicV2
+        : anthropicChatToOpenAI;
+    case "google-ai":
+      return googleAIToOpenAI;
+    case "openai-image":
+      throw new Error(`SSE transformation not supported for ${responseApi}`);
     default:
       assertNever(responseApi);
   }
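A quick sketch of the factory's dispatch, using only identifiers from this file (illustrative calls, not part of the source):

// Anthropic Chat response flowing back to an Anthropic Text client is the
// one case that does not target OpenAI format:
const toText = getTransformer("anthropic-chat", undefined, "anthropic-text");
// => anthropicChatToAnthropicV2
const toOpenAI = getTransformer("anthropic-chat");
// => anthropicChatToOpenAI (requestApi defaults to "openai")
const legacyClaude = getTransformer("anthropic-text", "2023-01-01");
// => anthropicV1ToOpenAI; any other version yields anthropicV2ToOpenAI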
@@ -1,97 +1,174 @@
+import pino from "pino";
 import { Transform, TransformOptions } from "stream";
-// @ts-ignore
-import { Parser } from "lifion-aws-event-stream";
-import { logger } from "../../../../logger";
+import { Message } from "@smithy/eventstream-codec";
+import { APIFormat } from "../../../../shared/key-management";
+import { RetryableError } from "../index";
+import { buildSpoofedSSE } from "../error-generator";
+import { BadRequestError } from "../../../../shared/errors";

-const log = logger.child({ module: "sse-stream-adapter" });
-
-type SSEStreamAdapterOptions = TransformOptions & { contentType?: string };
-type AwsEventStreamMessage = {
-  headers: { ":message-type": "event" | "exception" };
-  payload: { message?: string /** base64 encoded */; bytes?: string };
+type SSEStreamAdapterOptions = TransformOptions & {
+  contentType?: string;
+  api: APIFormat;
+  logger: pino.Logger;
 };

 /**
- * Receives either text chunks or AWS binary event stream chunks and emits
- * full SSE events.
+ * Receives a stream of events in a variety of formats and transforms them into
+ * Server-Sent Events.
+ *
+ * This is an object-mode stream, so it expects to receive objects and will emit
+ * strings.
  */
 export class SSEStreamAdapter extends Transform {
   private readonly isAwsStream;
-  private parser = new Parser();
+  private readonly isGoogleStream;
+  private api: APIFormat;
   private partialMessage = "";
+  private textDecoder = new TextDecoder("utf8");
+  private log: pino.Logger;

-  constructor(options?: SSEStreamAdapterOptions) {
-    super(options);
+  constructor(options: SSEStreamAdapterOptions) {
+    super({ ...options, objectMode: true });
     this.isAwsStream =
       options?.contentType === "application/vnd.amazon.eventstream";
-
-    this.parser.on("data", (data: AwsEventStreamMessage) => {
-      const message = this.processAwsEvent(data);
-      if (message) {
-        this.push(Buffer.from(message + "\n\n"), "utf8");
-      }
-    });
+    this.isGoogleStream = options?.api === "google-ai";
+    this.api = options.api;
+    this.log = options.logger.child({ module: "sse-stream-adapter" });
   }

-  protected processAwsEvent(event: AwsEventStreamMessage): string | null {
-    const { payload, headers } = event;
-    if (headers[":message-type"] === "exception" || !payload.bytes) {
-      log.error(
-        { event: JSON.stringify(event) },
-        "Received bad streaming event from AWS"
-      );
-      const message = JSON.stringify(event);
-      return getFakeErrorCompletion("proxy AWS error", message);
-    } else {
-      const { bytes } = payload;
-      // technically this is a transformation but we don't really distinguish
-      // between aws claude and anthropic claude at the APIFormat level, so
-      // these will short circuit the message transformer
-      return [
-        "event: completion",
-        `data: ${Buffer.from(bytes, "base64").toString("utf8")}`,
-      ].join("\n");
-    }
-  }
+  protected processAwsMessage(message: Message): string | null {
+    // Per amazon, headers and body are always present. headers is an object,
+    // body is a Uint8Array, potentially zero-length.
+    const { headers, body } = message;
+    const eventType = headers[":event-type"]?.value;
+    const messageType = headers[":message-type"]?.value;
+    const contentType = headers[":content-type"]?.value;
+    const exceptionType = headers[":exception-type"]?.value;
+    const errorCode = headers[":error-code"]?.value;
+    const bodyStr = this.textDecoder.decode(body);
+    switch (messageType) {
+      case "event":
+        if (contentType === "application/json" && eventType === "chunk") {
+          const { bytes } = JSON.parse(bodyStr);
+          const event = Buffer.from(bytes, "base64").toString("utf8");
+          const eventObj = JSON.parse(event);
+          if ("completion" in eventObj) {
+            return ["event: completion", `data: ${event}`].join(`\n`);
+          } else {
+            return [`event: ${eventObj.type}`, `data: ${event}`].join(`\n`);
+          }
+        }
+      // noinspection FallThroughInSwitchStatementJS -- non-JSON data is unexpected
+      case "exception":
+      case "error":
+        const type = String(
+          exceptionType || errorCode || "UnknownError"
+        ).toLowerCase();
+        switch (type) {
+          case "throttlingexception":
+            this.log.warn(
+              "AWS request throttled after streaming has already started; retrying"
+            );
+            throw new RetryableError("AWS request throttled mid-stream");
+          case "validationexception":
+            try {
+              const { message } = JSON.parse(bodyStr);
+              this.log.error({ message }, "Received AWS validation error");
+              this.emit(
+                "error",
+                new BadRequestError(`AWS validation error: ${message}`)
+              );
+              return null;
+            } catch (error) {
+              this.log.error(
+                { body: bodyStr, error },
+                "Could not parse AWS validation error"
+              );
+            }
+          // noinspection FallThroughInSwitchStatementJS -- who knows what this is
+          default:
+            let text;
+            try {
+              text = JSON.parse(bodyStr).message;
+            } catch (error) {
+              text = bodyStr;
+            }
+            const error: any = new Error(
+              `Got mysterious error chunk: [${type}] ${text}`
+            );
+            error.lastEvent = text;
+            this.emit("error", error);
+            return null;
+        }
+      default:
+        // Amazon says this can't ever happen...
+        this.log.error({ message }, "Received very bad AWS stream event");
+        return null;
+    }
+  }

+  /** Processes an incoming array element from the Google AI JSON stream. */
+  protected processGoogleObject(data: any): string | null {
+    // Sometimes data has fields key and value, sometimes it's just the
+    // candidates array.
+    const candidates = data.value?.candidates ?? data.candidates ?? [{}];
+    try {
+      const hasParts = candidates[0].content?.parts?.length > 0;
+      if (hasParts) {
+        return `data: ${JSON.stringify(data)}`;
+      } else {
+        this.log.error({ event: data }, "Received bad Google AI event");
+        return `data: ${buildSpoofedSSE({
+          format: "google-ai",
+          title: "Proxy stream error",
+          message:
+            "The proxy received malformed or unexpected data from Google AI while streaming.",
+          obj: data,
+          reqId: "proxy-sse-adapter-message",
+          model: "",
+        })}`;
+      }
+    } catch (error) {
+      error.lastEvent = data;
+      this.emit("error", error);
+    }
+    return null;
+  }

-  _transform(chunk: Buffer, _encoding: BufferEncoding, callback: Function) {
+  _transform(data: any, _enc: string, callback: (err?: Error | null) => void) {
     try {
       if (this.isAwsStream) {
-        this.parser.write(chunk);
+        // `data` is a Message object
+        const message = this.processAwsMessage(data);
+        if (message) this.push(message + "\n\n");
+      } else if (this.isGoogleStream) {
+        // `data` is an element from the Google AI JSON stream
+        const message = this.processGoogleObject(data);
+        if (message) this.push(message + "\n\n");
       } else {
-        // We may receive multiple (or partial) SSE messages in a single chunk,
-        // so we need to buffer and emit separate stream events for full
-        // messages so we can parse/transform them properly.
-        const str = chunk.toString("utf8");
-        const fullMessages = (this.partialMessage + str).split(/\r?\n\r?\n/);
+        // `data` is a string, but possibly only a partial message
+        const fullMessages = (this.partialMessage + data).split(
+          /\r\r|\n\n|\r\n\r\n/
+        );
         this.partialMessage = fullMessages.pop() || "";
         for (const message of fullMessages) {
           // Mixing line endings will break some clients and our request queue
           // will have already sent \n for heartbeats, so we need to normalize
           // to \n.
-          this.push(message.replace(/\r\n/g, "\n") + "\n\n");
+          this.push(message.replace(/\r\n?/g, "\n") + "\n\n");
         }
       }
       callback();
     } catch (error) {
-      this.emit("error", error);
+      error.lastEvent = data?.toString() ?? "[SSEStreamAdapter] no data";
       callback(error);
     }
   }
-}

-function getFakeErrorCompletion(type: string, message: string) {
-  const content = `\`\`\`\n[${type}: ${message}]\n\`\`\`\n`;
-  const fakeEvent = JSON.stringify({
-    log_id: "aws-proxy-sse-message",
-    stop_reason: type,
-    completion:
-      "\nProxy encountered an error during streaming response.\n" + content,
-    truncated: false,
-    stop: null,
-    model: "",
-  });
-  return ["event: completion", `data: ${fakeEvent}\n\n`].join("\n");
+
+  _flush(callback: (err?: Error | null) => void) {
+    callback();
+  }
 }
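Putting the last two pieces together, a hypothetical AWS streaming pipeline might look like this. The stream source and logger are placeholders, and the exact integration in the proxy may differ:

import { Readable } from "stream";
import pino from "pino";

declare const awsResponse: Readable; // proxied Bedrock response
const logger = pino();

getAwsEventStreamDecoder({ input: awsResponse, logger })
  .pipe(
    new SSEStreamAdapter({
      contentType: "application/vnd.amazon.eventstream",
      api: "anthropic-text",
      logger,
    })
  )
  .on("data", (sse: string) => {
    // full SSE messages, each terminated with "\n\n"
  });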
@@ -0,0 +1,129 @@
import {
AnthropicV2StreamEvent,
StreamingCompletionTransformer,
} from "../index";
import { parseEvent, ServerSentEvent } from "../parse-sse";
import { logger } from "../../../../../logger";
const log = logger.child({
module: "sse-transformer",
transformer: "anthropic-chat-to-anthropic-v2",
});
export type AnthropicChatEventType =
| "message_start"
| "content_block_start"
| "content_block_delta"
| "content_block_stop"
| "message_delta"
| "message_stop";
type AnthropicChatStartEvent = {
type: "message_start";
message: {
id: string;
type: "message";
role: "assistant";
content: [];
model: string;
stop_reason: null;
stop_sequence: null;
usage: { input_tokens: number; output_tokens: number };
};
};
type AnthropicChatContentBlockStartEvent = {
type: "content_block_start";
index: number;
content_block: { type: "text"; text: string };
};
export type AnthropicChatContentBlockDeltaEvent = {
type: "content_block_delta";
index: number;
delta: { type: "text_delta"; text: string };
};
type AnthropicChatContentBlockStopEvent = {
type: "content_block_stop";
index: number;
};
type AnthropicChatMessageDeltaEvent = {
type: "message_delta";
delta: {
stop_reason: string;
stop_sequence: null;
usage: { output_tokens: number };
};
};
type AnthropicChatMessageStopEvent = {
type: "message_stop";
};
type AnthropicChatTransformerState = { content: string };
/**
* Transforms an incoming Anthropic Chat SSE to an equivalent Anthropic V2
* Text SSE.
* For now we assume there is only one content block and message delta. In the
* future Anthropic may add multi-turn responses or multiple content blocks
* (probably for multimodal responses, image generation, etc) but as far as I
* can tell this is not yet implemented.
*/
export const anthropicChatToAnthropicV2: StreamingCompletionTransformer<
AnthropicV2StreamEvent,
AnthropicChatTransformerState
> = (params) => {
const { data } = params;
const rawEvent = parseEvent(data);
if (!rawEvent.data || !rawEvent.type) {
return { position: -1 };
}
const deltaEvent = asAnthropicChatDelta(rawEvent);
if (!deltaEvent) {
return { position: -1 };
}
const newEvent = {
log_id: params.fallbackId,
model: params.fallbackModel,
completion: deltaEvent.delta.text,
stop_reason: null,
};
return { position: -1, event: newEvent };
};
export function asAnthropicChatDelta(
event: ServerSentEvent
): AnthropicChatContentBlockDeltaEvent | null {
if (
!event.type ||
!["content_block_start", "content_block_delta"].includes(event.type)
) {
return null;
}
try {
const parsed = JSON.parse(event.data);
if (parsed.type === "content_block_delta") {
return parsed;
} else if (parsed.type === "content_block_start") {
return {
type: "content_block_delta",
index: parsed.index,
delta: { type: "text_delta", text: parsed.content_block?.text ?? "" },
};
} else {
// noinspection ExceptionCaughtLocallyJS
throw new Error("Invalid event type");
}
} catch (error) {
log.warn({ error: error.stack, event }, "Received invalid event");
}
return null;
}
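Feeding a single content_block_delta frame through the transformer above gives the following; frame contents and IDs are invented for the example:

const result = anthropicChatToAnthropicV2({
  data:
    "event: content_block_delta\n" +
    'data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hi"}}',
  lastPosition: -1,
  index: 0,
  fallbackId: "msg_123",
  fallbackModel: "claude-3-sonnet-20240229",
});
// result.event => { log_id: "msg_123", model: "claude-3-sonnet-20240229",
//                   completion: "Hi", stop_reason: null }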
@@ -0,0 +1,45 @@
import { StreamingCompletionTransformer } from "../index";
import { parseEvent } from "../parse-sse";
import { logger } from "../../../../../logger";
import { asAnthropicChatDelta } from "./anthropic-chat-to-anthropic-v2";
const log = logger.child({
module: "sse-transformer",
transformer: "anthropic-chat-to-openai",
});
/**
* Transforms an incoming Anthropic Chat SSE to an equivalent OpenAI
* chat.completion.chunks SSE.
*/
export const anthropicChatToOpenAI: StreamingCompletionTransformer = (
params
) => {
const { data } = params;
const rawEvent = parseEvent(data);
if (!rawEvent.data || !rawEvent.type) {
return { position: -1 };
}
const deltaEvent = asAnthropicChatDelta(rawEvent);
if (!deltaEvent) {
return { position: -1 };
}
const newEvent = {
id: params.fallbackId,
object: "chat.completion.chunk" as const,
created: Date.now(),
model: params.fallbackModel,
choices: [
{
index: params.index,
delta: { content: deltaEvent.delta.text },
finish_reason: null,
},
],
};
return { position: -1, event: newEvent };
};
@@ -1,4 +1,7 @@
-import { StreamingCompletionTransformer } from "../index";
+import {
+  AnthropicV2StreamEvent,
+  StreamingCompletionTransformer,
+} from "../index";
 import { parseEvent, ServerSentEvent } from "../parse-sse";
 import { logger } from "../../../../../logger";
@@ -7,13 +10,6 @@ const log = logger.child({
   transformer: "anthropic-v2-to-openai",
 });

-type AnthropicV2StreamEvent = {
-  log_id?: string;
-  model?: string;
-  completion: string;
-  stop_reason: string;
-};
-
 /**
  * Transforms an incoming Anthropic SSE (2023-06-01 API) to an equivalent
  * OpenAI chat.completion.chunk SSE.
@@ -0,0 +1,76 @@
import { StreamingCompletionTransformer } from "../index";
import { parseEvent, ServerSentEvent } from "../parse-sse";
import { logger } from "../../../../../logger";
const log = logger.child({
module: "sse-transformer",
transformer: "google-ai-to-openai",
});
type GoogleAIStreamEvent = {
candidates: {
content: { parts: { text: string }[]; role: string };
finishReason?: "STOP" | "MAX_TOKENS" | "SAFETY" | "RECITATION" | "OTHER";
index: number;
tokenCount?: number;
safetyRatings: { category: string; probability: string }[];
}[];
};
/**
* Transforms an incoming Google AI SSE to an equivalent OpenAI
* chat.completion.chunk SSE.
*/
export const googleAIToOpenAI: StreamingCompletionTransformer = (params) => {
const { data, index } = params;
const rawEvent = parseEvent(data);
if (!rawEvent.data || rawEvent.data === "[DONE]") {
return { position: -1 };
}
const completionEvent = asCompletion(rawEvent);
if (!completionEvent) {
return { position: -1 };
}
const parts = completionEvent.candidates[0].content.parts;
let content = parts[0]?.text ?? "";
// If this is the first chunk, try stripping speaker names from the response
// e.g. "John: Hello" -> "Hello"
if (index === 0) {
content = content.replace(/^(.*?): /, "").trim();
}
const newEvent = {
id: "goo-" + params.fallbackId,
object: "chat.completion.chunk" as const,
created: Date.now(),
model: params.fallbackModel,
choices: [
{
index: 0,
delta: { content },
finish_reason: completionEvent.candidates[0].finishReason ?? null,
},
],
};
return { position: -1, event: newEvent };
};
function asCompletion(event: ServerSentEvent): GoogleAIStreamEvent | null {
try {
const parsed = JSON.parse(event.data) as GoogleAIStreamEvent;
if (parsed.candidates?.length > 0) {
return parsed;
} else {
// noinspection ExceptionCaughtLocallyJS
throw new Error("Missing required fields");
}
} catch (error) {
log.warn({ error: error.stack, event }, "Received invalid event");
}
return null;
}
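To make the transformation concrete, an illustrative event based on the GoogleAIStreamEvent type above (payload text is made up):
// Illustration only:
// incoming: {"candidates":[{"content":{"parts":[{"text":"Assistant: Hello there"}],
//   "role":"model"},"index":0,"safetyRatings":[]}]}
// Because this is the first chunk (index === 0), the speaker prefix is stripped,
// so the emitted chunk carries delta { content: "Hello there" } with a null
// finish_reason until the candidate reports one.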
+125
@@ -0,0 +1,125 @@
import { RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { keyPool } from "../shared/key-management";
import {
getMistralAIModelFamily,
MistralAIModelFamily,
ModelFamily,
} from "../shared/models";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addKey,
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeBody,
} from "./middleware/request";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
// https://docs.mistral.ai/platform/endpoints
export const KNOWN_MISTRAL_AI_MODELS = [
// Mistral 7b (open weight, legacy)
"open-mistral-7b",
"mistral-tiny-2312",
// Mixtral 8x7b (open weight, legacy)
"open-mixtral-8x7b",
"mistral-small-2312",
// Mixtral Small (newer 8x7b, closed weight)
"mistral-small-latest",
"mistral-small-2402",
// Mistral Medium
"mistral-medium-latest",
"mistral-medium-2312",
// Mistral Large
"mistral-large-latest",
"mistral-large-2402",
// Deprecated identifiers (2024-05-01)
"mistral-tiny",
"mistral-small",
"mistral-medium",
];
let modelsCache: any = null;
let modelsCacheTime = 0;
export function generateModelList(models = KNOWN_MISTRAL_AI_MODELS) {
let available = new Set<MistralAIModelFamily>();
for (const key of keyPool.list()) {
if (key.isDisabled || key.service !== "mistral-ai") continue;
key.modelFamilies.forEach((family) =>
available.add(family as MistralAIModelFamily)
);
}
const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
available = new Set([...available].filter((x) => allowed.has(x)));
return models
.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "mistral-ai",
}))
.filter((model) => available.has(getMistralAIModelFamily(model.id)));
}
const handleModelRequest: RequestHandler = (_req, res) => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return res.status(200).json(modelsCache);
}
const result = generateModelList();
modelsCache = { object: "list", data: result };
modelsCacheTime = new Date().getTime();
res.status(200).json(modelsCache);
};
const mistralAIResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
res.status(200).json({ ...body, proxy: body.proxy });
};
const mistralAIProxy = createQueueMiddleware({
proxyMiddleware: createProxyMiddleware({
target: "https://api.mistral.ai",
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({
pipeline: [addKey, finalizeBody],
}),
proxyRes: createOnProxyResHandler([mistralAIResponseHandler]),
error: handleProxyError,
},
}),
});
const mistralAIRouter = Router();
mistralAIRouter.get("/v1/models", handleModelRequest);
// General chat completion endpoint.
mistralAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware({
inApi: "mistral-ai",
outApi: "mistral-ai",
service: "mistral-ai",
}),
mistralAIProxy
);
export const mistralAI = mistralAIRouter;
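As a usage sketch (the /proxy mount point is assumed from the routes file later in this commit), a client would POST an OpenAI-style body; the addKey/finalizeBody pipeline attaches a Mistral key before the request is forwarded to api.mistral.ai:
// Usage sketch (mount point assumed from routes.ts below):
//   POST /proxy/mistral-ai/v1/chat/completions
//   {
//     "model": "mistral-large-latest",
//     "messages": [{ "role": "user", "content": "Hello" }]
//   }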
+136
@@ -0,0 +1,136 @@
import { RequestHandler, Router, Request } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addKey,
createPreprocessorMiddleware,
finalizeBody,
createOnProxyReqHandler,
} from "./middleware/request";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
import { generateModelList } from "./openai";
import { OpenAIImageGenerationResult } from "../shared/file-storage/mirror-generated-image";
const KNOWN_MODELS = ["dall-e-2", "dall-e-3"];
let modelListCache: any = null;
let modelListValid = 0;
const handleModelRequest: RequestHandler = (_req, res) => {
if (new Date().getTime() - modelListValid < 1000 * 60) {
return res.status(200).json(modelListCache);
}
const result = generateModelList(KNOWN_MODELS);
modelListCache = { object: "list", data: result };
modelListValid = new Date().getTime();
res.status(200).json(modelListCache);
};
const openaiImagesResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
if (req.inboundApi === "openai") {
req.log.info("Transforming OpenAI image response to OpenAI chat format");
newBody = transformResponseForChat(
body as OpenAIImageGenerationResult,
req
);
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
/**
* Transforms a DALL-E image generation response into a chat response, simply
* embedding the image URL into the chat message as a Markdown image.
*/
function transformResponseForChat(
imageBody: OpenAIImageGenerationResult,
req: Request
): Record<string, any> {
const prompt = imageBody.data[0].revised_prompt ?? req.body.prompt;
const content = imageBody.data
.map((item) => {
const { url, b64_json } = item;
if (b64_json) {
return `![${prompt}](data:image/png;base64,${b64_json})`;
} else {
return `![${prompt}](${url})`;
}
})
.join("\n\n");
return {
id: "dalle-" + req.id,
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage: {
prompt_tokens: 0,
completion_tokens: req.outputTokens,
total_tokens: req.outputTokens,
},
choices: [
{
message: { role: "assistant", content },
finish_reason: "stop",
index: 0,
},
],
};
}
const openaiImagesProxy = createQueueMiddleware({
proxyMiddleware: createProxyMiddleware({
target: "https://api.openai.com",
changeOrigin: true,
selfHandleResponse: true,
logger,
pathRewrite: {
"^/v1/chat/completions": "/v1/images/generations",
},
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [addKey, finalizeBody] }),
proxyRes: createOnProxyResHandler([openaiImagesResponseHandler]),
error: handleProxyError,
},
}),
});
const openaiImagesRouter = Router();
openaiImagesRouter.get("/v1/models", handleModelRequest);
openaiImagesRouter.post(
"/v1/images/generations",
ipLimiter,
createPreprocessorMiddleware({
inApi: "openai-image",
outApi: "openai-image",
service: "openai",
}),
openaiImagesProxy
);
openaiImagesRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware({
inApi: "openai",
outApi: "openai-image",
service: "openai",
}),
openaiImagesProxy
);
export const openaiImage = openaiImagesRouter;
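Note the pathRewrite above: a chat-format request POSTed to this router's /v1/chat/completions is rewritten upstream to /v1/images/generations, and transformResponseForChat returns the generation as a chat completion whose content is a Markdown image. A sketch of the result (URL illustrative):
// Illustration only:
//   { "choices": [{ "message": { "role": "assistant",
//       "content": "![a red fox](https://example.com/gen.png)" },
//     "finish_reason": "stop", "index": 0 }] }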
+54 -62
@@ -1,52 +1,43 @@
 import { RequestHandler, Router } from "express";
 import { createProxyMiddleware } from "http-proxy-middleware";
 import { config } from "../config";
-import { keyPool } from "../shared/key-management";
+import { keyPool, OpenAIKey } from "../shared/key-management";
 import {
-  getOpenAIModelFamily,
   ModelFamily,
   OpenAIModelFamily,
+  getOpenAIModelFamily,
 } from "../shared/models";
 import { logger } from "../logger";
 import { createQueueMiddleware } from "./queue";
 import { ipLimiter } from "./rate-limit";
 import { handleProxyError } from "./middleware/common";
 import {
-  RequestPreprocessor,
   addKey,
   addKeyForEmbeddingsRequest,
-  applyQuotaLimits,
-  blockZoomerOrigins,
   createEmbeddingsPreprocessorMiddleware,
+  createOnProxyReqHandler,
   createPreprocessorMiddleware,
   finalizeBody,
   forceModel,
-  languageFilter,
-  limitCompletions,
-  stripHeaders,
-  createOnProxyReqHandler,
+  RequestPreprocessor,
 } from "./middleware/request";
 import {
   createOnProxyResHandler,
   ProxyResHandlerWithBody,
 } from "./middleware/response";
-let modelsCache: any = null;
-let modelsCacheTime = 0;
-function getModelsResponse() {
-  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
-    return modelsCache;
-  }
-  // https://platform.openai.com/docs/models/overview
-  const knownModels = [
+// https://platform.openai.com/docs/models/overview
+export const KNOWN_OPENAI_MODELS = [
+  "gpt-4-turbo-preview",
+  "gpt-4-0125-preview",
+  "gpt-4-1106-preview",
+  "gpt-4-vision-preview",
   "gpt-4",
   "gpt-4-0613",
   "gpt-4-0314", // EOL 2024-06-13
   "gpt-4-32k",
-  "gpt-4-32k-0613",
   "gpt-4-32k-0314", // EOL 2024-06-13
+  "gpt-4-32k-0613",
   "gpt-3.5-turbo",
   "gpt-3.5-turbo-0301", // EOL 2024-06-13
   "gpt-3.5-turbo-0613",
@@ -55,19 +46,29 @@ function getModelsResponse() {
   "gpt-3.5-turbo-instruct",
   "gpt-3.5-turbo-instruct-0914",
   "text-embedding-ada-002",
 ];
-  let available = new Set<OpenAIModelFamily>();
+let modelsCache: any = null;
+let modelsCacheTime = 0;
+export function generateModelList(models = KNOWN_OPENAI_MODELS) {
+  // Get available families and snapshots
+  let availableFamilies = new Set<OpenAIModelFamily>();
+  const availableSnapshots = new Set<string>();
   for (const key of keyPool.list()) {
     if (key.isDisabled || key.service !== "openai") continue;
-    key.modelFamilies.forEach((family) =>
-      available.add(family as OpenAIModelFamily)
-    );
+    const asOpenAIKey = key as OpenAIKey;
+    asOpenAIKey.modelFamilies.forEach((f) => availableFamilies.add(f));
+    asOpenAIKey.modelSnapshots.forEach((s) => availableSnapshots.add(s));
   }
-  const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
-  available = new Set([...available].filter((x) => allowed.has(x)));
-  const models = knownModels
+  // Remove disabled families
+  const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
+  availableFamilies = new Set(
+    [...availableFamilies].filter((x) => allowed.has(x))
+  );
+  return models
     .map((id) => ({
       id,
       object: "model",
@@ -86,16 +87,26 @@ function getModelsResponse() {
       root: id,
       parent: null,
     }))
-    .filter((model) => available.has(getOpenAIModelFamily(model.id)));
-  modelsCache = { object: "list", data: models };
-  modelsCacheTime = new Date().getTime();
-  return modelsCache;
+    .filter((model) => {
+      // First check if the family is available
+      const hasFamily = availableFamilies.has(getOpenAIModelFamily(model.id));
+      if (!hasFamily) return false;
+      // Then for snapshots, ensure the specific snapshot is available
+      const isSnapshot = model.id.match(/-\d{4}(-preview)?$/);
+      if (!isSnapshot) return true;
+      return availableSnapshots.has(model.id);
+    });
 }
 const handleModelRequest: RequestHandler = (_req, res) => {
-  res.status(200).json(getModelsResponse());
+  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
+    return res.status(200).json(modelsCache);
+  }
+  const result = generateModelList();
+  modelsCache = { object: "list", data: result };
+  modelsCacheTime = new Date().getTime();
+  res.status(200).json(modelsCache);
 };
 /** Handles some turbo-instruct special cases. */
@@ -127,22 +138,13 @@ const openaiResponseHandler: ProxyResHandlerWithBody = async (
     throw new Error("Expected body to be an object");
   }
-  if (config.promptLogging) {
-    const host = req.get("host");
-    body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
-  }
+  let newBody = body;
   if (req.outboundApi === "openai-text" && req.inboundApi === "openai") {
     req.log.info("Transforming Turbo-Instruct response to Chat format");
-    body = transformTurboInstructResponse(body);
+    newBody = transformTurboInstructResponse(body);
   }
-  // TODO: Remove once tokenization is stable
-  if (req.debug) {
-    body.proxy_tokenizer_debug_info = req.debug;
-  }
-  res.status(200).json(body);
+  res.status(200).json({ ...newBody, proxy: body.proxy });
 };
 /** Only used for non-streaming responses. */
@@ -163,29 +165,19 @@ function transformTurboInstructResponse(
   return transformed;
 }
-const openaiProxy = createQueueMiddleware(
-  createProxyMiddleware({
+const openaiProxy = createQueueMiddleware({
+  proxyMiddleware: createProxyMiddleware({
     target: "https://api.openai.com",
     changeOrigin: true,
     selfHandleResponse: true,
     logger,
     on: {
-      proxyReq: createOnProxyReqHandler({
-        pipeline: [
-          applyQuotaLimits,
-          addKey,
-          languageFilter,
-          limitCompletions,
-          blockZoomerOrigins,
-          stripHeaders,
-          finalizeBody,
-        ],
-      }),
+      proxyReq: createOnProxyReqHandler({ pipeline: [addKey, finalizeBody] }),
       proxyRes: createOnProxyResHandler([openaiResponseHandler]),
       error: handleProxyError,
     },
-  })
-);
+  }),
+});
 const openaiEmbeddingsProxy = createProxyMiddleware({
   target: "https://api.openai.com",
@@ -194,7 +186,7 @@ const openaiEmbeddingsProxy = createProxyMiddleware({
   logger,
   on: {
     proxyReq: createOnProxyReqHandler({
-      pipeline: [addKeyForEmbeddingsRequest, stripHeaders, finalizeBody],
+      pipeline: [addKeyForEmbeddingsRequest, finalizeBody],
     }),
     error: handleProxyError,
   },
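A note on the snapshot filter introduced above: the regex /-\d{4}(-preview)?$/ matches IDs ending in a four-digit suffix, optionally followed by -preview.
// Illustration of the snapshot filter:
//   "gpt-4-0613" and "gpt-4-0125-preview" are snapshots and are listed only
//   when some key explicitly reports that snapshot;
//   "gpt-4" and "gpt-4-turbo-preview" (no four-digit suffix) pass through on
//   family availability alone.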
-183
@@ -1,183 +0,0 @@
import { Request, RequestHandler, Router } from "express";
import * as http from "http";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addKey,
applyQuotaLimits,
blockZoomerOrigins,
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeBody,
forceModel,
languageFilter,
stripHeaders,
} from "./middleware/request";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
let modelsCache: any = null;
let modelsCacheTime = 0;
const getModelsResponse = () => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
if (!config.googlePalmKey) return { object: "list", data: [] };
const bisonVariants = ["text-bison-001"];
const models = bisonVariants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "google",
permission: [],
root: "palm",
parent: null,
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
/** Only used for non-streaming requests. */
const palmResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
if (config.promptLogging) {
const host = req.get("host");
body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
}
if (req.inboundApi === "openai") {
req.log.info("Transforming Google PaLM response to OpenAI format");
body = transformPalmResponse(body, req);
}
// TODO: Remove once tokenization is stable
if (req.debug) {
body.proxy_tokenizer_debug_info = req.debug;
}
// TODO: PaLM has no streaming capability which will pose a problem here if
// requests wait in the queue for too long. Probably need to fake streaming
// and return the entire completion in one stream event using the other
// response handler.
res.status(200).json(body);
};
/**
* Transforms a model response from the Anthropic API to match those from the
* OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
* is only used for non-streaming requests as streaming requests are handled
* on-the-fly.
*/
function transformPalmResponse(
palmRespBody: Record<string, any>,
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
return {
id: "plm-" + v4(),
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage: {
prompt_tokens: req.promptTokens,
completion_tokens: req.outputTokens,
total_tokens: totalTokens,
},
choices: [
{
message: {
role: "assistant",
content: palmRespBody.candidates[0].output,
},
finish_reason: null, // palm doesn't return this
index: 0,
},
],
};
}
function reassignPathForPalmModel(proxyReq: http.ClientRequest, req: Request) {
if (req.body.stream) {
throw new Error("Google PaLM API doesn't support streaming requests");
}
// PaLM API specifies the model in the URL path, not the request body. This
// doesn't work well with our rewriter architecture, so we need to manually
// fix it here.
// POST https://generativelanguage.googleapis.com/v1beta2/{model=models/*}:generateText
// POST https://generativelanguage.googleapis.com/v1beta2/{model=models/*}:generateMessage
// The chat api (generateMessage) is not very useful at this time as it has
// few params and no adjustable safety settings.
proxyReq.path = proxyReq.path.replace(
/^\/v1\/chat\/completions/,
`/v1beta2/models/${req.body.model}:generateText`
);
}
const googlePalmProxy = createQueueMiddleware(
createProxyMiddleware({
target: "https://generativelanguage.googleapis.com",
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({
beforeRewrite: [reassignPathForPalmModel],
pipeline: [
applyQuotaLimits,
addKey,
languageFilter,
blockZoomerOrigins,
stripHeaders,
finalizeBody,
],
}),
proxyRes: createOnProxyResHandler([palmResponseHandler]),
error: handleProxyError,
},
})
);
const palmRouter = Router();
palmRouter.get("/v1/models", handleModelRequest);
// OpenAI-to-Google PaLM compatibility endpoint.
palmRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "google-palm", service: "google-palm" },
{ afterTransform: [forceModel("text-bison-001")] }
),
googlePalmProxy
);
export const googlePalm = palmRouter;
+313 -152
@@ -4,10 +4,6 @@
  * a given key has generated, so our queue will simply retry requests that fail
  * with a non-billing related 429 over and over again until they succeed.
  *
- * Dequeueing can operate in one of two modes:
- * - 'fair': requests are dequeued in the order they were enqueued.
- * - 'random': requests are dequeued randomly, not really a queue at all.
- *
  * When a request to a proxied endpoint is received, we create a closure around
  * the call to http-proxy-middleware and attach it to the request. This allows
  * us to pause the request until we have a key available. Further, if the
@@ -15,18 +11,21 @@
  * back in the queue and it will be retried later using the same closure.
  */
+import crypto from "crypto";
 import type { Handler, Request } from "express";
-import { keyPool, SupportedModel } from "../shared/key-management";
+import { BadRequestError, TooManyRequestsError } from "../shared/errors";
+import { keyPool } from "../shared/key-management";
 import {
-  getClaudeModelFamily,
-  getGooglePalmModelFamily,
-  getOpenAIModelFamily,
+  getModelFamilyForRequest,
+  MODEL_FAMILIES,
   ModelFamily,
 } from "../shared/models";
-import { buildFakeSse, initializeSseStream } from "../shared/streaming";
-import { assertNever } from "../shared/utils";
+import { initializeSseStream } from "../shared/streaming";
 import { logger } from "../logger";
-import { AGNAI_DOT_CHAT_IP } from "./rate-limit";
+import { getUniqueIps, SHARED_IP_ADDRESSES } from "./rate-limit";
+import { RequestPreprocessor } from "./middleware/request";
+import { handleProxyError } from "./middleware/common";
+import { sendErrorToClient } from "./middleware/response/error-generator";
 const queue: Request[] = [];
 const log = logger.child({ module: "request-queue" });
@@ -35,57 +34,65 @@ const log = logger.child({ module: "request-queue" });
 const AGNAI_CONCURRENCY_LIMIT = 5;
 /** Maximum number of queue slots for individual users. */
 const USER_CONCURRENCY_LIMIT = 1;
+const MIN_HEARTBEAT_SIZE = parseInt(process.env.MIN_HEARTBEAT_SIZE_B ?? "512");
+const MAX_HEARTBEAT_SIZE =
+  1024 * parseInt(process.env.MAX_HEARTBEAT_SIZE_KB ?? "1024");
+const HEARTBEAT_INTERVAL =
+  1000 * parseInt(process.env.HEARTBEAT_INTERVAL_SEC ?? "5");
+const LOAD_THRESHOLD = parseFloat(process.env.LOAD_THRESHOLD ?? "50");
+const PAYLOAD_SCALE_FACTOR = parseFloat(
+  process.env.PAYLOAD_SCALE_FACTOR ?? "6"
+);
+const QUEUE_JOIN_TIMEOUT = 5000;
 /**
- * Returns a unique identifier for a request. This is used to determine if a
+ * Returns an identifier for a request. This is used to determine if a
  * request is already in the queue.
  *
  * This can be (in order of preference):
  * - user token assigned by the proxy operator
  * - x-risu-tk header, if the request is from RisuAI.xyz
+ * - 'shared-ip' if the request is from a shared IP address like Agnai.chat
  * - IP address
  */
 function getIdentifier(req: Request) {
-  if (req.user) {
-    return req.user.token;
-  }
-  if (req.risuToken) {
-    return req.risuToken;
-  }
+  if (req.user) return req.user.token;
+  if (req.risuToken) return req.risuToken;
+  if (isFromSharedIp(req)) return "shared-ip";
   return req.ip;
 }
-const sameUserPredicate = (incoming: Request) => (queued: Request) => {
-  const queuedId = getIdentifier(queued);
-  const incomingId = getIdentifier(incoming);
-  return queuedId === incomingId;
-};
+const sharesIdentifierWith = (incoming: Request) => (queued: Request) =>
+  getIdentifier(queued) === getIdentifier(incoming);
+const isFromSharedIp = (req: Request) => SHARED_IP_ADDRESSES.has(req.ip);
-export function enqueue(req: Request) {
-  const enqueuedRequestCount = queue.filter(sameUserPredicate(req)).length;
+export async function enqueue(req: Request) {
+  const enqueuedRequestCount = queue.filter(sharesIdentifierWith(req)).length;
   let isGuest = req.user?.token === undefined;
-  // All Agnai.chat requests come from the same IP, so we allow them to have
-  // more spots in the queue. Can't make it unlimited because people will
-  // intentionally abuse it.
-  // Authenticated users always get a single spot in the queue.
-  const isAgnai = AGNAI_DOT_CHAT_IP.includes(req.ip);
+  // Requests from shared IP addresses such as Agnai.chat are exempt from IP-
+  // based rate limiting but can only occupy a certain number of slots in the
+  // queue. Authenticated users always get a single spot in the queue.
+  const isSharedIp = isFromSharedIp(req);
   const maxConcurrentQueuedRequests =
-    isGuest && isAgnai ? AGNAI_CONCURRENCY_LIMIT : USER_CONCURRENCY_LIMIT;
+    isGuest && isSharedIp ? AGNAI_CONCURRENCY_LIMIT : USER_CONCURRENCY_LIMIT;
   if (enqueuedRequestCount >= maxConcurrentQueuedRequests) {
-    if (isAgnai) {
+    if (isSharedIp) {
       // Re-enqueued requests are not counted towards the limit since they
       // already made it through the queue once.
       if (req.retryCount === 0) {
-        throw new Error("Too many agnai.chat requests are already queued");
+        throw new TooManyRequestsError(
+          "Too many agnai.chat requests are already queued"
+        );
       }
     } else {
-      throw new Error("Your IP or token already has a request in the queue");
+      throw new TooManyRequestsError(
        "Your IP or user token already has another request in the queue."
+      );
     }
   }
-  queue.push(req);
-  req.queueOutTime = 0;
   // shitty hack to remove hpm's event listeners on retried requests
   removeProxyMiddlewareEventListeners(req);
@@ -96,33 +103,26 @@ export function enqueue(req: Request) {
   if (stream === "true" || stream === true || req.isStreaming) {
     const res = req.res!;
     if (!res.headersSent) {
-      initStreaming(req);
+      await initStreaming(req);
     }
-    req.heartbeatInterval = setInterval(() => {
-      if (process.env.NODE_ENV === "production") {
-        if (!req.query.badSseParser) req.res!.write(": queue heartbeat\n\n");
-      } else {
-        req.log.info(`Sending heartbeat to request in queue.`);
-        const partition = getPartitionForRequest(req);
-        const avgWait = Math.round(getEstimatedWaitTime(partition) / 1000);
-        const currentDuration = Math.round((Date.now() - req.startTime) / 1000);
-        const debugMsg = `queue length: ${queue.length}; elapsed time: ${currentDuration}s; avg wait: ${avgWait}s`;
-        req.res!.write(buildFakeSse("heartbeat", debugMsg, req));
-      }
-    }, 10000);
+    registerHeartbeat(req);
+  } else if (getProxyLoad() > LOAD_THRESHOLD) {
+    throw new BadRequestError(
+      "Due to heavy traffic on this proxy, you must enable streaming in your chat client to use this endpoint."
+    );
   }
-  // Register a handler to remove the request from the queue if the connection
-  // is aborted or closed before it is dequeued.
+  queue.push(req);
+  req.queueOutTime = 0;
   const removeFromQueue = () => {
     req.log.info(`Removing aborted request from queue.`);
     const index = queue.indexOf(req);
     if (index !== -1) {
       queue.splice(index, 1);
     }
-    if (req.heartbeatInterval) {
-      clearInterval(req.heartbeatInterval);
-    }
+    if (req.heartbeatInterval) clearInterval(req.heartbeatInterval);
+    if (req.monitorInterval) clearInterval(req.monitorInterval);
   };
   req.onAborted = removeFromQueue;
   req.res!.once("close", removeFromQueue);
@@ -130,37 +130,26 @@ export function enqueue(req: Request) {
   if (req.retryCount ?? 0 > 0) {
     req.log.info({ retries: req.retryCount }, `Enqueued request for retry.`);
   } else {
-    req.log.info(`Enqueued new request.`);
+    const size = req.socket.bytesRead;
+    const endpoint = req.url?.split("?")[0];
+    req.log.info({ size, endpoint }, `Enqueued new request.`);
   }
 }
-function getPartitionForRequest(req: Request): ModelFamily {
-  // There is a single request queue, but it is partitioned by model family.
-  // Model families are typically separated on cost/rate limit boundaries so
-  // they should be treated as separate queues.
-  const model = (req.body.model as SupportedModel) ?? "gpt-3.5-turbo";
-  // Weird special case for AWS because they serve multiple models from
-  // different vendors, even if currently only one is supported.
-  if (req.service === "aws") {
-    return "aws-claude";
-  }
-  switch (req.outboundApi) {
-    case "anthropic":
-      return getClaudeModelFamily(model);
-    case "openai":
-    case "openai-text":
-      return getOpenAIModelFamily(model);
-    case "google-palm":
-      return getGooglePalmModelFamily(model);
-    default:
-      assertNever(req.outboundApi);
-  }
-}
 function getQueueForPartition(partition: ModelFamily): Request[] {
-  return queue.filter((req) => getPartitionForRequest(req) === partition);
+  return queue
+    .filter((req) => getModelFamilyForRequest(req) === partition)
+    .sort((a, b) => {
+      // Certain requests are exempted from IP-based rate limiting because they
+      // come from a shared IP address. To prevent these requests from starving
+      // out other requests during periods of high traffic, we sort them to the
+      // end of the queue.
+      const aIsExempted = isFromSharedIp(a);
+      const bIsExempted = isFromSharedIp(b);
+      if (aIsExempted && !bIsExempted) return 1;
+      if (!aIsExempted && bIsExempted) return -1;
+      return 0;
+    });
 }
 export function dequeue(partition: ModelFamily): Request | undefined {
@@ -180,9 +169,8 @@ export function dequeue(partition: ModelFamily): Request | undefined {
     req.onAborted = undefined;
   }
-  if (req.heartbeatInterval) {
-    clearInterval(req.heartbeatInterval);
-  }
+  if (req.heartbeatInterval) clearInterval(req.heartbeatInterval);
+  if (req.monitorInterval) clearInterval(req.monitorInterval);
   // Track the time leaving the queue now, but don't add it to the wait times
   // yet because we don't know if the request will succeed or fail. We track
@@ -201,40 +189,23 @@
 function processQueue() {
   // This isn't completely correct, because a key can service multiple models.
   // Currently if a key is locked out on one model it will also stop servicing
-  // the others, because we only track one rate limit per key.
-  // TODO: `getLockoutPeriod` uses model names instead of model families
-  // TODO: genericize this it's really ugly
-  const gpt432kLockout = keyPool.getLockoutPeriod("gpt-4-32k");
-  const gpt4Lockout = keyPool.getLockoutPeriod("gpt-4");
-  const turboLockout = keyPool.getLockoutPeriod("gpt-3.5-turbo");
-  const claudeLockout = keyPool.getLockoutPeriod("claude-v1");
-  const palmLockout = keyPool.getLockoutPeriod("text-bison-001");
-  const awsClaudeLockout = keyPool.getLockoutPeriod("anthropic.claude-v2");
+  // the others, because we only track rate limits for the key as a whole.
   const reqs: (Request | undefined)[] = [];
-  if (gpt432kLockout === 0) {
-    reqs.push(dequeue("gpt4-32k"));
-  }
-  if (gpt4Lockout === 0) {
-    reqs.push(dequeue("gpt4"));
-  }
-  if (turboLockout === 0) {
-    reqs.push(dequeue("turbo"));
-  }
-  if (claudeLockout === 0) {
-    reqs.push(dequeue("claude"));
-  }
-  if (palmLockout === 0) {
-    reqs.push(dequeue("bison"));
-  }
-  if (awsClaudeLockout === 0) {
-    reqs.push(dequeue("aws-claude"));
-  }
+  MODEL_FAMILIES.forEach((modelFamily) => {
+    const lockout = keyPool.getLockoutPeriod(modelFamily);
+    if (lockout === 0) {
+      reqs.push(dequeue(modelFamily));
+    }
+  });
   reqs.filter(Boolean).forEach((req) => {
     if (req?.proceed) {
-      req.log.info({ retries: req.retryCount }, `Dequeuing request.`);
+      const modelFamily = getModelFamilyForRequest(req!);
+      req.log.info(
+        { retries: req.retryCount, partition: modelFamily },
+        `Dequeuing request.`
+      );
       req.proceed();
     }
   });
@@ -267,38 +238,93 @@ function cleanQueue() {
 }
 export function start() {
+  MODEL_FAMILIES.forEach((modelFamily) => {
+    historicalEmas.set(modelFamily, 0);
+    currentEmas.set(modelFamily, 0);
+    estimates.set(modelFamily, 0);
+  });
   processQueue();
   cleanQueue();
   log.info(`Started request queue.`);
 }
-let waitTimes: { partition: ModelFamily; start: number; end: number }[] = [];
+let waitTimes: {
+  partition: ModelFamily;
+  start: number;
+  end: number;
+  isDeprioritized: boolean;
+}[] = [];
 /** Adds a successful request to the list of wait times. */
 export function trackWaitTime(req: Request) {
   waitTimes.push({
-    partition: getPartitionForRequest(req),
+    partition: getModelFamilyForRequest(req),
     start: req.startTime!,
     end: req.queueOutTime ?? Date.now(),
+    isDeprioritized: isFromSharedIp(req),
   });
 }
-/** Returns average wait time in milliseconds. */
-export function getEstimatedWaitTime(partition: ModelFamily) {
-  const now = Date.now();
-  const recentWaits = waitTimes.filter(
-    (wt) => wt.partition === partition && now - wt.end < 300 * 1000
-  );
-  if (recentWaits.length === 0) {
-    return 0;
-  }
-  return (
-    recentWaits.reduce((sum, wt) => sum + wt.end - wt.start, 0) /
-    recentWaits.length
-  );
+const WAIT_TIME_INTERVAL = 3000;
+const ALPHA_HISTORICAL = 0.2;
+const ALPHA_CURRENT = 0.3;
+const historicalEmas: Map<ModelFamily, number> = new Map();
+const currentEmas: Map<ModelFamily, number> = new Map();
+const estimates: Map<ModelFamily, number> = new Map();
+export function getEstimatedWaitTime(partition: ModelFamily) {
+  return estimates.get(partition) ?? 0;
 }
+/**
+ * Returns estimated wait time for the given queue partition in milliseconds.
+ * Requests which are deprioritized are not included in the calculation as they
+ * would skew the results due to their longer wait times.
+ */
+function calculateWaitTime(partition: ModelFamily) {
+  const now = Date.now();
+  const recentWaits = waitTimes
+    .filter((wait) => {
+      const isSamePartition = wait.partition === partition;
+      const isRecent = now - wait.end < 300 * 1000;
+      const isNormalPriority = !wait.isDeprioritized;
+      return isSamePartition && isRecent && isNormalPriority;
+    })
+    .map((wait) => wait.end - wait.start);
+  const recentAverage = recentWaits.length
+    ? recentWaits.reduce((sum, wait) => sum + wait, 0) / recentWaits.length
+    : 0;
+  const historicalEma = historicalEmas.get(partition) ?? 0;
+  historicalEmas.set(
+    partition,
+    ALPHA_HISTORICAL * recentAverage + (1 - ALPHA_HISTORICAL) * historicalEma
+  );
+  const currentWaits = queue
+    .filter((req) => {
+      const isSamePartition = getModelFamilyForRequest(req) === partition;
+      const isNormalPriority = !isFromSharedIp(req);
+      return isSamePartition && isNormalPriority;
+    })
+    .map((req) => now - req.startTime!);
+  const longestCurrentWait = Math.max(...currentWaits, 0);
+  const currentEma = currentEmas.get(partition) ?? 0;
+  currentEmas.set(
+    partition,
+    ALPHA_CURRENT * longestCurrentWait + (1 - ALPHA_CURRENT) * currentEma
+  );
+  return (historicalEma + currentEma) / 2;
+}
+setInterval(() => {
+  MODEL_FAMILIES.forEach((modelFamily) => {
+    estimates.set(modelFamily, calculateWaitTime(modelFamily));
+  });
+}, WAIT_TIME_INTERVAL);
 export function getQueueLength(partition: ModelFamily | "all" = "all") {
   if (partition === "all") {
     return queue.length;
@@ -307,20 +333,47 @@ export function getQueueLength(partition: ModelFamily | "all" = "all") {
   return modelQueue.length;
 }
-export function createQueueMiddleware(proxyMiddleware: Handler): Handler {
-  return (req, res, next) => {
-    req.proceed = () => {
+export function createQueueMiddleware({
+  beforeProxy,
+  proxyMiddleware,
+}: {
+  beforeProxy?: RequestPreprocessor;
+  proxyMiddleware: Handler;
+}): Handler {
+  return async (req, res, next) => {
+    req.proceed = async () => {
+      if (beforeProxy) {
+        try {
+          // Hack to let us run asynchronous middleware before the
+          // http-proxy-middleware handler. This is used to sign AWS requests
+          // before they are proxied, as the signing is asynchronous.
+          // Unlike RequestPreprocessors, this runs every time the request is
+          // dequeued, not just the first time.
+          await beforeProxy(req);
+        } catch (err) {
+          return handleProxyError(err, req, res);
+        }
+      }
       proxyMiddleware(req, res, next);
     };
     try {
-      enqueue(req);
+      await enqueue(req);
     } catch (err: any) {
-      req.res!.status(429).json({
-        type: "proxy_error",
+      const title =
+        err.status === 429
+          ? "Proxy queue error (too many concurrent requests)"
+          : "Proxy queue error (streaming required)";
+      sendErrorToClient({
+        options: {
+          title,
          message: err.message,
-        stack: err.stack,
-        proxy_note: `Only one request can be queued at a time. If you don't have another request queued, your IP or user token might be in use by another request.`,
+          format: req.inboundApi,
+          reqId: req.id,
+          model: req.body?.model,
+        },
+        req,
+        res,
      });
    }
  };
@@ -329,35 +382,61 @@ export function createQueueMiddleware
 function killQueuedRequest(req: Request) {
   if (!req.res || req.res.writableEnded) {
     req.log.warn(`Attempted to terminate request that has already ended.`);
+    queue.splice(queue.indexOf(req), 1);
     return;
   }
   const res = req.res;
   try {
-    const message = `Your request has been terminated by the proxy because it has been in the queue for more than 5 minutes. The queue is currently ${queue.length} requests long.`;
-    if (res.headersSent) {
-      const fakeErrorEvent = buildFakeSse("proxy queue error", message, req);
-      res.write(fakeErrorEvent);
-      res.end();
-    } else {
-      res.status(500).json({ error: message });
-    }
+    const message = `Your request has been terminated by the proxy because it has been in the queue for more than 5 minutes.`;
+    sendErrorToClient({
+      options: {
+        title: "Proxy queue error (request killed)",
+        message,
+        format: req.inboundApi,
+        reqId: req.id,
+        model: req.body?.model,
+      },
+      req,
+      res,
+    });
   } catch (e) {
     req.log.error(e, `Error killing stalled request.`);
   }
 }
-function initStreaming(req: Request) {
+async function initStreaming(req: Request) {
   const res = req.res!;
   initializeSseStream(res);
-  if (req.query.badSseParser) {
-    // Some clients have a broken SSE parser that doesn't handle comments
-    // correctly. These clients can pass ?badSseParser=true to
-    // disable comments in the SSE stream.
-    return;
-  }
-  res.write(`: joining queue at position ${queue.length}\n\n`);
+  const joinMsg = `: joining queue at position ${
+    queue.length
+  }\n\n${getHeartbeatPayload()}`;
+  let drainTimeout: NodeJS.Timeout;
+  const welcome = new Promise<void>((resolve, reject) => {
+    const onDrain = () => {
+      clearTimeout(drainTimeout);
+      req.log.debug(`Client finished consuming join message.`);
+      res.off("drain", onDrain);
+      resolve();
+    };
+    drainTimeout = setTimeout(() => {
+      res.off("drain", onDrain);
+      res.destroy();
+      reject(new Error("Unresponsive streaming client; killing connection"));
+    }, QUEUE_JOIN_TIMEOUT);
+    if (!res.write(joinMsg)) {
+      req.log.warn("Kernel buffer is full; holding client request.");
+      res.once("drain", onDrain);
+    } else {
+      clearTimeout(drainTimeout);
+      resolve();
+    }
+  });
+  await welcome;
 }
 /**
@@ -413,3 +492,85 @@ function removeProxyMiddlewareEventListeners(req: Request) {
     req.removeListener("error", reqOnError as any);
   }
 }
+export function registerHeartbeat(req: Request) {
+  const res = req.res!;
+  let isBufferFull = false;
+  let bufferFullCount = 0;
+  req.heartbeatInterval = setInterval(() => {
+    if (isBufferFull) {
+      bufferFullCount++;
+      if (bufferFullCount >= 3) {
+        req.log.error("Heartbeat skipped too many times; killing connection.");
+        res.destroy();
+      } else {
+        req.log.warn({ bufferFullCount }, "Heartbeat skipped; buffer is full.");
+      }
+      return;
+    }
+    const data = getHeartbeatPayload();
+    if (!res.write(data)) {
+      isBufferFull = true;
+      res.once("drain", () => (isBufferFull = false));
+    }
+  }, HEARTBEAT_INTERVAL);
+  monitorHeartbeat(req);
+}
+function monitorHeartbeat(req: Request) {
+  const res = req.res!;
+  let lastBytesSent = 0;
+  req.monitorInterval = setInterval(() => {
+    const bytesSent = res.socket?.bytesWritten ?? 0;
+    const bytesSinceLast = bytesSent - lastBytesSent;
+    req.log.debug(
+      {
+        previousBytesSent: lastBytesSent,
+        currentBytesSent: bytesSent,
+      },
+      "Heartbeat monitor check."
+    );
+    lastBytesSent = bytesSent;
+    const minBytes = Math.floor(getHeartbeatSize() / 2);
+    if (bytesSinceLast < minBytes) {
+      req.log.warn(
+        { minBytes, bytesSinceLast },
+        "Queued request is not consuming heartbeat data fast enough; client is too slow or server is overloaded. Killing connection."
+      );
+      res.destroy();
+    }
+  }, HEARTBEAT_INTERVAL * 2);
+}
+/** Sends larger heartbeats when the queue is overloaded */
+function getHeartbeatSize() {
+  const load = getProxyLoad();
+  if (load <= LOAD_THRESHOLD) {
+    return MIN_HEARTBEAT_SIZE;
+  } else {
+    const excessLoad = load - LOAD_THRESHOLD;
+    const size =
+      MIN_HEARTBEAT_SIZE + Math.pow(excessLoad * PAYLOAD_SCALE_FACTOR, 2);
+    if (size > MAX_HEARTBEAT_SIZE) return MAX_HEARTBEAT_SIZE;
+    return size;
+  }
+}
+function getHeartbeatPayload() {
+  const size = getHeartbeatSize();
+  const data =
+    process.env.NODE_ENV === "production"
+      ? crypto.randomBytes(size).toString("base64")
+      : `payload size: ${size}`;
+  return `: queue heartbeat ${data}\n\n`;
+}
+function getProxyLoad() {
+  return Math.max(getUniqueIps(), queue.length);
+}
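The estimator above averages two exponential moving averages: one over recently completed waits and one over the longest wait still in the queue. A minimal sketch with assumed sample values (all numbers illustrative, not from the diff):
// Minimal sketch of the wait-time estimate, with illustrative inputs.
const ALPHA_HISTORICAL = 0.2;
const ALPHA_CURRENT = 0.3;
const prevHistoricalEma = 10_000; // ms, prior historical EMA for the partition
const recentAverage = 20_000; // mean wait of recently completed requests
const historicalEma =
  ALPHA_HISTORICAL * recentAverage +
  (1 - ALPHA_HISTORICAL) * prevHistoricalEma; // 12_000
const prevCurrentEma = 5_000;
const longestCurrentWait = 15_000; // oldest normal-priority request in queue
const currentEma =
  ALPHA_CURRENT * longestCurrentWait + (1 - ALPHA_CURRENT) * prevCurrentEma; // 8_000
const estimate = (historicalEma + currentEma) / 2; // 10_000 ms reported to clients
// Heartbeat sizing is quadratic past LOAD_THRESHOLD: with the defaults above
// (MIN 512 B, scale factor 6, threshold 50), a proxy load of 60 gives
// 512 + (10 * 6) ** 2 = 4112 bytes, capped at MAX_HEARTBEAT_SIZE.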
+67 -30
@@ -1,28 +1,34 @@
 import { Request, Response, NextFunction } from "express";
 import { config } from "../config";
-export const AGNAI_DOT_CHAT_IP = [
+export const SHARED_IP_ADDRESSES = new Set([
+  // Agnai.chat
   "157.230.249.32", // old
   "157.245.148.56",
   "174.138.29.50",
   "209.97.162.44",
-];
+]);
-const RATE_LIMIT_ENABLED = Boolean(config.modelRateLimit);
-const RATE_LIMIT = Math.max(1, config.modelRateLimit);
 const ONE_MINUTE_MS = 60 * 1000;
-const lastAttempts = new Map<string, number[]>();
+type Timestamp = number;
+/** Tracks time of last attempts from each IP address or token. */
+const lastAttempts = new Map<string, Timestamp[]>();
+/** Tracks time of exempted attempts from shared IPs like Agnai.chat. */
+const exemptedRequests: Timestamp[] = [];
-const expireOldAttempts = (now: number) => (attempt: number) =>
+const isRecentAttempt = (now: Timestamp) => (attempt: Timestamp) =>
   attempt > now - ONE_MINUTE_MS;
-const getTryAgainInMs = (ip: string) => {
+const getTryAgainInMs = (ip: string, type: "text" | "image") => {
   const now = Date.now();
   const attempts = lastAttempts.get(ip) || [];
-  const validAttempts = attempts.filter(expireOldAttempts(now));
-  if (validAttempts.length >= RATE_LIMIT) {
+  const validAttempts = attempts.filter(isRecentAttempt(now));
+  const limit =
+    type === "text" ? config.textModelRateLimit : config.imageModelRateLimit;
+  if (validAttempts.length >= limit) {
     return validAttempts[0] - now + ONE_MINUTE_MS;
   } else {
     lastAttempts.set(ip, [...validAttempts, now]);
@@ -30,21 +36,25 @@ const getTryAgainInMs = (ip: string) => {
   }
 };
-const getStatus = (ip: string) => {
+const getStatus = (ip: string, type: "text" | "image") => {
   const now = Date.now();
   const attempts = lastAttempts.get(ip) || [];
-  const validAttempts = attempts.filter(expireOldAttempts(now));
+  const validAttempts = attempts.filter(isRecentAttempt(now));
+  const limit =
+    type === "text" ? config.textModelRateLimit : config.imageModelRateLimit;
   return {
-    remaining: Math.max(0, RATE_LIMIT - validAttempts.length),
+    remaining: Math.max(0, limit - validAttempts.length),
     reset: validAttempts.length > 0 ? validAttempts[0] + ONE_MINUTE_MS : now,
   };
 };
-/** Prunes attempts and IPs that are no longer relevant after one minutes. */
+/** Prunes attempts and IPs that are no longer relevant after one minute. */
 const clearOldAttempts = () => {
   const now = Date.now();
   for (const [ip, attempts] of lastAttempts.entries()) {
-    const validAttempts = attempts.filter(expireOldAttempts(now));
+    const validAttempts = attempts.filter(isRecentAttempt(now));
     if (validAttempts.length === 0) {
       lastAttempts.delete(ip);
     } else {
@@ -54,8 +64,25 @@ const clearOldAttempts = () => {
 };
 setInterval(clearOldAttempts, 10 * 1000);
-export const getUniqueIps = () => {
-  return lastAttempts.size;
-};
+/** Prunes exempted requests which are older than one minute. */
+const clearOldExemptions = () => {
+  const now = Date.now();
+  const validExemptions = exemptedRequests.filter(isRecentAttempt(now));
+  exemptedRequests.splice(0, exemptedRequests.length, ...validExemptions);
+};
+setInterval(clearOldExemptions, 10 * 1000);
+export const getUniqueIps = () => lastAttempts.size;
+/**
+ * Can be used to manually remove the most recent attempt from an IP address,
+ * ie. in case a prompt triggered OpenAI's content filter and therefore did not
+ * result in a generation.
+ */
+export const refundLastAttempt = (req: Request) => {
+  const key = req.user?.token || req.risuToken || req.ip;
+  const attempts = lastAttempts.get(key) || [];
+  attempts.pop();
+};
 export const ipLimiter = async (
@@ -63,36 +90,46 @@ export const ipLimiter = async (
   res: Response,
   next: NextFunction
 ) => {
-  if (!RATE_LIMIT_ENABLED) return next();
+  const imageLimit = config.imageModelRateLimit;
+  const textLimit = config.textModelRateLimit;
+  if (!textLimit && !imageLimit) return next();
   if (req.user?.type === "special") return next();
-  // Exempt Agnai.chat from rate limiting since it's shared between a lot of
-  // users. Dunno how to prevent this from being abused without some sort of
-  // identifier sent from Agnaistic to identify specific users.
-  if (AGNAI_DOT_CHAT_IP.includes(req.ip)) {
-    req.log.info("Exempting Agnai request from rate limiting.");
-    next();
-    return;
+  // Exempts Agnai.chat from IP-based rate limiting because its IPs are shared
+  // by many users. Instead, the request queue will limit the number of such
+  // requests that may wait in the queue at a time, and sorts them to the end to
+  // let individual users go first.
+  if (SHARED_IP_ADDRESSES.has(req.ip)) {
+    exemptedRequests.push(Date.now());
+    req.log.info(
+      { ip: req.ip, recentExemptions: exemptedRequests.length },
+      "Exempting Agnai request from rate limiting."
+    );
+    return next();
   }
+  const type = (req.baseUrl + req.path).includes("openai-image")
+    ? "image"
+    : "text";
+  const limit = type === "image" ? imageLimit : textLimit;
   // If user is authenticated, key rate limiting by their token. Otherwise, key
   // rate limiting by their IP address. Mitigates key sharing.
   const rateLimitKey = req.user?.token || req.risuToken || req.ip;
-  const { remaining, reset } = getStatus(rateLimitKey);
-  res.set("X-RateLimit-Limit", config.modelRateLimit.toString());
+  const { remaining, reset } = getStatus(rateLimitKey, type);
+  res.set("X-RateLimit-Limit", limit.toString());
   res.set("X-RateLimit-Remaining", remaining.toString());
   res.set("X-RateLimit-Reset", reset.toString());
-  const tryAgainInMs = getTryAgainInMs(rateLimitKey);
+  const tryAgainInMs = getTryAgainInMs(rateLimitKey, type);
   if (tryAgainInMs > 0) {
     res.set("Retry-After", tryAgainInMs.toString());
     res.status(429).json({
       error: {
         type: "proxy_rate_limited",
-        message: `This proxy is rate limited to ${
-          config.modelRateLimit
-        } prompts per minute. Please try again in ${Math.ceil(
+        message: `This model type is rate limited to ${limit} prompts per minute. Please try again in ${Math.ceil(
          tryAgainInMs / 1000
        )} seconds.`,
      },
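A worked example of the sliding window above; the limit value is an assumed configuration, not taken from the diff:
// Worked example (assumed config: textModelRateLimit = 4).
// An IP has 4 attempts inside the last minute; the oldest was 45s ago.
// getTryAgainInMs returns validAttempts[0] - now + ONE_MINUTE_MS = 15_000,
// so the client gets HTTP 429 with Retry-After: "15000" (milliseconds, since
// the code writes tryAgainInMs directly into the header).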
+31 -4
@@ -2,9 +2,13 @@ import express, { Request, Response, NextFunction } from "express";
 import { gatekeeper } from "./gatekeeper";
 import { checkRisuToken } from "./check-risu-token";
 import { openai } from "./openai";
+import { openaiImage } from "./openai-image";
 import { anthropic } from "./anthropic";
-import { googlePalm } from "./palm";
+import { googleAI } from "./google-ai";
+import { mistralAI } from "./mistral-ai";
 import { aws } from "./aws";
+import { azure } from "./azure";
+import { sendErrorToClient } from "./middleware/response/error-generator";
 const proxyRouter = express.Router();
 proxyRouter.use((req, _res, next) => {
@@ -16,8 +20,8 @@ proxyRouter.use((req, _res, next) => {
   next();
 });
 proxyRouter.use(
-  express.json({ limit: "1536kb" }),
-  express.urlencoded({ extended: true, limit: "1536kb" })
+  express.json({ limit: "100mb" }),
+  express.urlencoded({ extended: true, limit: "100mb" })
 );
 proxyRouter.use(gatekeeper);
 proxyRouter.use(checkRisuToken);
@@ -27,9 +31,12 @@ proxyRouter.use((req, _res, next) => {
   next();
 });
 proxyRouter.use("/openai", addV1, openai);
+proxyRouter.use("/openai-image", addV1, openaiImage);
 proxyRouter.use("/anthropic", addV1, anthropic);
-proxyRouter.use("/google-palm", addV1, googlePalm);
+proxyRouter.use("/google-ai", addV1, googleAI);
+proxyRouter.use("/mistral-ai", addV1, mistralAI);
 proxyRouter.use("/aws/claude", addV1, aws);
+proxyRouter.use("/azure/openai", addV1, azure);
 // Redirect browser requests to the homepage.
 proxyRouter.get("*", (req, res, next) => {
   const isBrowser = req.headers["user-agent"]?.includes("Mozilla");
@@ -39,6 +46,26 @@ proxyRouter.get("*", (req, res, next) => {
     next();
   }
 });
+// Handle 404s.
+proxyRouter.use((req, res) => {
+  sendErrorToClient({
+    req,
+    res,
+    options: {
+      title: "Proxy error (HTTP 404 Not Found)",
+      message: "The requested proxy endpoint does not exist.",
+      model: req.body?.model,
+      reqId: req.id,
+      format: "unknown",
+      obj: {
+        proxy_note:
+          "Your chat client is using the wrong endpoint. Check the Service Info page for the list of available endpoints.",
+        requested_url: req.originalUrl,
+      },
+    },
+  });
+});
 export { proxyRouter as proxyRouter };
 function addV1(req: Request, res: Response, next: NextFunction) {
+72 -34
@@ -1,23 +1,29 @@
import { assertConfigIsValid, config } from "./config"; import { assertConfigIsValid, config, USER_ASSETS_DIR } from "./config";
import "source-map-support/register"; import "source-map-support/register";
import checkDiskSpace from "check-disk-space";
import express from "express"; import express from "express";
import cors from "cors"; import cors from "cors";
import path from "path"; import path from "path";
import pinoHttp from "pino-http"; import pinoHttp from "pino-http";
import os from "os";
import childProcess from "child_process"; import childProcess from "child_process";
import { logger } from "./logger"; import { logger } from "./logger";
import { setupAssetsDir } from "./shared/file-storage/setup-assets-dir";
import { keyPool } from "./shared/key-management"; import { keyPool } from "./shared/key-management";
import { adminRouter } from "./admin/routes"; import { adminRouter } from "./admin/routes";
import { proxyRouter } from "./proxy/routes"; import { proxyRouter } from "./proxy/routes";
import { handleInfoPage } from "./info-page"; import { infoPageRouter } from "./info-page";
import { IMAGE_GEN_MODELS } from "./shared/models";
import { userRouter } from "./user/routes";
import { logQueue } from "./shared/prompt-logging"; import { logQueue } from "./shared/prompt-logging";
import { start as startRequestQueue } from "./proxy/queue"; import { start as startRequestQueue } from "./proxy/queue";
import { init as initUserStore } from "./shared/users/user-store"; import { init as initUserStore } from "./shared/users/user-store";
import { init as initTokenizers } from "./shared/tokenization"; import { init as initTokenizers } from "./shared/tokenization";
import { checkOrigin } from "./proxy/check-origin"; import { checkOrigin } from "./proxy/check-origin";
import { userRouter } from "./user/routes"; import { sendErrorToClient } from "./proxy/middleware/response/error-generator";
const PORT = config.port; const PORT = config.port;
const BIND_ADDRESS = config.bindAddress;
const app = express(); const app = express();
// middleware // middleware
@@ -25,9 +31,7 @@ app.use(
pinoHttp({ pinoHttp({
quietReqLogger: true, quietReqLogger: true,
logger, logger,
autoLogging: { autoLogging: { ignore: ({ url }) => ["/health"].includes(url as string) },
ignore: ({ url }) => ["/health"].includes(url as string),
},
redact: { redact: {
paths: [ paths: [
"req.headers.cookie", "req.headers.cookie",
@@ -40,13 +44,15 @@ app.use(
     ],
     censor: "********",
   },
+  customProps: (req) => {
+    const user = (req as express.Request).user;
+    if (user) return { userToken: `...${user.token.slice(-5)}` };
+    return {};
+  },
 })
);

-// TODO: Detect (or support manual configuration of) whether the app is behind
-// a load balancer/reverse proxy, which is necessary to determine request IP
-// addresses correctly.
-app.set("trust proxy", true);
+app.set("trust proxy", Number(config.trustedProxies));

app.set("view engine", "ejs");
app.set("views", [
@@ -55,32 +61,42 @@ app.set("views", [
   path.join(__dirname, "shared/views"),
 ]);

+app.use("/user_content", express.static(USER_ASSETS_DIR, { maxAge: "2h" }));
+
 app.get("/health", (_req, res) => res.sendStatus(200));
 app.use(cors());
 app.use(checkOrigin);

-// routes
-app.get("/", handleInfoPage);
 app.use("/admin", adminRouter);
-app.use("/proxy", proxyRouter);
+app.use(config.proxyEndpointRoute, proxyRouter);
 app.use("/user", userRouter);
+if (config.staticServiceInfo) {
+  app.get("/", (_req, res) => res.sendStatus(200));
+} else {
+  app.use("/", infoPageRouter);
+}

-// 500 and 404
-app.use((err: any, _req: unknown, res: express.Response, _next: unknown) => {
-  if (err.status) {
-    res.status(err.status).json({ error: err.message });
-  } else {
-    logger.error(err);
-    res.status(500).json({
-      error: {
-        type: "proxy_error",
-        message: err.message,
-        stack: err.stack,
-        proxy_note: `Reverse proxy encountered an internal server error.`,
-      },
-    });
-  }
-});
+app.use(
+  (err: any, req: express.Request, res: express.Response, _next: unknown) => {
+    if (!err.status) {
+      logger.error(err, "Unhandled error in request");
+    }
+    sendErrorToClient({
+      req,
+      res,
+      options: {
+        title: `Proxy error (HTTP ${err.status})`,
+        message:
+          "Reverse proxy encountered an unexpected error while processing your request.",
+        reqId: req.id,
+        statusCode: err.status,
+        obj: { error: err.message, stack: err.stack },
+        format: "unknown",
+      },
+    });
+  }
+);
 app.use((_req: unknown, res: express.Response) => {
   res.status(404).json({ error: "Not found" });
 });
@@ -96,25 +112,36 @@ async function start() {
   await initTokenizers();

+  if (config.allowedModelFamilies.some((f) => IMAGE_GEN_MODELS.includes(f))) {
+    await setupAssetsDir();
+  }
+
   if (config.gatekeeper === "user_token") {
     await initUserStore();
   }

   if (config.promptLogging) {
     logger.info("Starting prompt logging...");
-    logQueue.start();
+    await logQueue.start();
   }

   logger.info("Starting request queue...");
   startRequestQueue();

-  app.listen(PORT, async () => {
-    logger.info({ port: PORT }, "Now listening for connections.");
+  const diskSpace = await checkDiskSpace(
+    __dirname.startsWith("/app") ? "/app" : os.homedir()
+  );
+
+  app.listen(PORT, BIND_ADDRESS, () => {
+    logger.info(
+      { port: PORT, interface: BIND_ADDRESS },
+      "Now listening for connections."
+    );
     registerUncaughtExceptionHandler();
   });

   logger.info(
-    { build: process.env.BUILD_INFO, nodeEnv: process.env.NODE_ENV },
+    { build: process.env.BUILD_INFO, nodeEnv: process.env.NODE_ENV, diskSpace },
     "Startup complete."
   );
 }
@@ -142,7 +169,18 @@ function registerUncaughtExceptionHandler() {
  * didn't set it to something misleading.
  */
 async function setBuildInfo() {
-  // Render .dockerignore's the .git directory but provides info in the env
+  // For CI builds, use the env vars set during the build process
+  if (process.env.GITGUD_BRANCH) {
+    const sha = process.env.GITGUD_COMMIT?.slice(0, 7) || "unknown SHA";
+    const branch = process.env.GITGUD_BRANCH;
+    const repo = process.env.GITGUD_PROJECT;
+    const buildInfo = `[ci] ${sha} (${branch}@${repo})`;
+    process.env.BUILD_INFO = buildInfo;
+    logger.info({ build: buildInfo }, "Using build info from CI image.");
+    return;
+  }
+
+  // For render, the git directory is dockerignore'd so we use env vars
   if (process.env.RENDER) {
     const sha = process.env.RENDER_GIT_COMMIT?.slice(0, 7) || "unknown SHA";
     const branch = process.env.RENDER_GIT_BRANCH || "unknown branch";
@@ -153,10 +191,10 @@ async function setBuildInfo() {
     return;
   }

+  // For huggingface and bare metal deployments, we can get the info from git
   try {
-    // Ignore git's complaints about dubious directory ownership on Huggingface
-    // (which evidently runs dockerized Spaces on Windows with weird NTFS perms)
     if (process.env.SPACE_ID) {
+      // TODO: may not be necessary anymore with adjusted Huggingface dockerfile
       childProcess.execSync("git config --global --add safe.directory /app");
     }
@@ -176,7 +214,7 @@ async function setBuildInfo() {
   let [sha, branch, remote, status] = await Promise.all(promises);

-  remote = remote.match(/.*[\/:]([\w-]+)\/([\w\-\.]+?)(?:\.git)?$/) || [];
+  remote = remote.match(/.*[\/:]([\w-]+)\/([\w\-.]+?)(?:\.git)?$/) || [];
   const repo = remote.slice(-2).join("/");

   status = status
     // ignore Dockerfile changes since that's how the user deploys the app
+476
@@ -0,0 +1,476 @@
import { config, listConfig } from "./config";
import {
AnthropicKey,
AwsBedrockKey,
AzureOpenAIKey,
GoogleAIKey,
keyPool,
OpenAIKey,
} from "./shared/key-management";
import {
AnthropicModelFamily,
assertIsKnownModelFamily,
AwsBedrockModelFamily,
AzureOpenAIModelFamily,
GoogleAIModelFamily,
LLM_SERVICES,
LLMService,
MistralAIModelFamily,
MODEL_FAMILY_SERVICE,
ModelFamily,
OpenAIModelFamily,
} from "./shared/models";
import { getCostSuffix, getTokenCostUsd, prettyTokens } from "./shared/stats";
import { getUniqueIps } from "./proxy/rate-limit";
import { assertNever } from "./shared/utils";
import { getEstimatedWaitTime, getQueueLength } from "./proxy/queue";
import { MistralAIKey } from "./shared/key-management/mistral-ai/provider";
const CACHE_TTL = 2000;
type KeyPoolKey = ReturnType<typeof keyPool.list>[0];
const keyIsOpenAIKey = (k: KeyPoolKey): k is OpenAIKey =>
k.service === "openai";
const keyIsAzureKey = (k: KeyPoolKey): k is AzureOpenAIKey =>
k.service === "azure";
const keyIsAnthropicKey = (k: KeyPoolKey): k is AnthropicKey =>
k.service === "anthropic";
const keyIsGoogleAIKey = (k: KeyPoolKey): k is GoogleAIKey =>
k.service === "google-ai";
const keyIsMistralAIKey = (k: KeyPoolKey): k is MistralAIKey =>
k.service === "mistral-ai";
const keyIsAwsKey = (k: KeyPoolKey): k is AwsBedrockKey => k.service === "aws";
/** Stats aggregated across all keys for a given service. */
type ServiceAggregate = "keys" | "uncheckedKeys" | "orgs";
/** Stats aggregated across all keys for a given model family. */
type ModelAggregates = {
active: number;
trial?: number;
revoked?: number;
overQuota?: number;
pozzed?: number;
awsLogged?: number;
awsSonnet?: number;
awsHaiku?: number;
queued: number;
queueTime: string;
tokens: number;
};
/** All possible combinations of model family and aggregate type. */
type ModelAggregateKey = `${ModelFamily}__${keyof ModelAggregates}`;
type AllStats = {
proompts: number;
tokens: number;
tokenCost: number;
} & { [modelFamily in ModelFamily]?: ModelAggregates } & {
[service in LLMService as `${service}__${ServiceAggregate}`]?: number;
};
type BaseFamilyInfo = {
usage?: string;
activeKeys: number;
revokedKeys?: number;
proomptersInQueue?: number;
estimatedQueueTime?: string;
};
type OpenAIInfo = BaseFamilyInfo & {
trialKeys?: number;
overQuotaKeys?: number;
};
type AnthropicInfo = BaseFamilyInfo & {
prefilledKeys?: number;
overQuotaKeys?: number;
};
type AwsInfo = BaseFamilyInfo & {
privacy?: string;
sonnetKeys?: number;
haikuKeys?: number;
};
// prettier-ignore
export type ServiceInfo = {
uptime: number;
endpoints: {
openai?: string;
openai2?: string;
anthropic?: string;
"anthropic-claude-3"?: string;
"google-ai"?: string;
"mistral-ai"?: string;
aws?: string;
azure?: string;
"openai-image"?: string;
"azure-image"?: string;
};
proompts?: number;
tookens?: string;
proomptersNow?: number;
status?: string;
config: ReturnType<typeof listConfig>;
build: string;
} & { [f in OpenAIModelFamily]?: OpenAIInfo }
& { [f in AnthropicModelFamily]?: AnthropicInfo; }
& { [f in AwsBedrockModelFamily]?: AwsInfo }
& { [f in AzureOpenAIModelFamily]?: BaseFamilyInfo; }
& { [f in GoogleAIModelFamily]?: BaseFamilyInfo }
& { [f in MistralAIModelFamily]?: BaseFamilyInfo };
// https://stackoverflow.com/a/66661477
// type DeepKeyOf<T> = (
// [T] extends [never]
// ? ""
// : T extends object
// ? {
// [K in Exclude<keyof T, symbol>]: `${K}${DotPrefix<DeepKeyOf<T[K]>>}`;
// }[Exclude<keyof T, symbol>]
// : ""
// ) extends infer D
// ? Extract<D, string>
// : never;
// type DotPrefix<T extends string> = T extends "" ? "" : `.${T}`;
// type ServiceInfoPath = `{${DeepKeyOf<ServiceInfo>}}`;
const SERVICE_ENDPOINTS: { [s in LLMService]: Record<string, string> } = {
openai: {
openai: `%BASE%/openai`,
openai2: `%BASE%/openai/turbo-instruct`,
"openai-image": `%BASE%/openai-image`,
},
anthropic: {
anthropic: `%BASE%/anthropic`,
"anthropic-sonnet (⚠️Temporary: for Claude 3 Sonnet)": `%BASE%/anthropic/sonnet`,
"anthropic-opus (⚠️Temporary: for Claude 3 Opus)": `%BASE%/anthropic/opus`,
},
"google-ai": {
"google-ai": `%BASE%/google-ai`,
},
"mistral-ai": {
"mistral-ai": `%BASE%/mistral-ai`,
},
aws: {
aws: `%BASE%/aws/claude`,
"aws-sonnet (⚠️Temporary: for AWS Claude 3 Sonnet)": `%BASE%/aws/claude/sonnet`,
},
azure: {
azure: `%BASE%/azure/openai`,
"azure-image": `%BASE%/azure/openai`,
},
};
const modelStats = new Map<ModelAggregateKey, number>();
const serviceStats = new Map<keyof AllStats, number>();
let cachedInfo: ServiceInfo | undefined;
let cacheTime = 0;
export function buildInfo(baseUrl: string, forAdmin = false): ServiceInfo {
if (cacheTime + CACHE_TTL > Date.now()) return cachedInfo!;
const keys = keyPool.list();
const accessibleFamilies = new Set(
keys
.flatMap((k) => k.modelFamilies)
.filter((f) => config.allowedModelFamilies.includes(f))
.concat("turbo")
);
modelStats.clear();
serviceStats.clear();
keys.forEach(addKeyToAggregates);
const endpoints = getEndpoints(baseUrl, accessibleFamilies);
const trafficStats = getTrafficStats();
const { serviceInfo, modelFamilyInfo } =
getServiceModelStats(accessibleFamilies);
const status = getStatus();
if (config.staticServiceInfo && !forAdmin) {
delete trafficStats.proompts;
delete trafficStats.tookens;
delete trafficStats.proomptersNow;
for (const family of Object.keys(modelFamilyInfo)) {
assertIsKnownModelFamily(family);
delete modelFamilyInfo[family]?.proomptersInQueue;
delete modelFamilyInfo[family]?.estimatedQueueTime;
delete modelFamilyInfo[family]?.usage;
}
}
cacheTime = Date.now();
return (cachedInfo = {
uptime: Math.floor(process.uptime()),
endpoints,
...trafficStats,
...serviceInfo,
status,
...modelFamilyInfo,
config: listConfig(),
build: process.env.BUILD_INFO || "dev",
});
}
function getStatus() {
if (!config.checkKeys) return "Key checking is disabled.";
let unchecked = 0;
for (const service of LLM_SERVICES) {
unchecked += serviceStats.get(`${service}__uncheckedKeys`) || 0;
}
return unchecked ? `Checking ${unchecked} keys...` : undefined;
}
function getEndpoints(baseUrl: string, accessibleFamilies: Set<ModelFamily>) {
const endpoints: Record<string, string> = {};
const keys = keyPool.list();
for (const service of LLM_SERVICES) {
if (!keys.some((k) => k.service === service)) {
continue;
}
for (const [name, url] of Object.entries(SERVICE_ENDPOINTS[service])) {
endpoints[name] = url.replace("%BASE%", baseUrl);
}
if (service === "openai" && !accessibleFamilies.has("dall-e")) {
delete endpoints["openai-image"];
}
if (service === "azure" && !accessibleFamilies.has("azure-dall-e")) {
delete endpoints["azure-image"];
}
}
return endpoints;
}
type TrafficStats = Pick<ServiceInfo, "proompts" | "tookens" | "proomptersNow">;
function getTrafficStats(): TrafficStats {
const tokens = serviceStats.get("tokens") || 0;
const tokenCost = serviceStats.get("tokenCost") || 0;
return {
proompts: serviceStats.get("proompts") || 0,
tookens: `${prettyTokens(tokens)}${getCostSuffix(tokenCost)}`,
...(config.textModelRateLimit ? { proomptersNow: getUniqueIps() } : {}),
};
}
function getServiceModelStats(accessibleFamilies: Set<ModelFamily>) {
const serviceInfo: {
[s in LLMService as `${s}${"Keys" | "Orgs"}`]?: number;
} = {};
const modelFamilyInfo: { [f in ModelFamily]?: BaseFamilyInfo } = {};
for (const service of LLM_SERVICES) {
const hasKeys = serviceStats.get(`${service}__keys`) || 0;
if (!hasKeys) continue;
serviceInfo[`${service}Keys`] = hasKeys;
accessibleFamilies.forEach((f) => {
if (MODEL_FAMILY_SERVICE[f] === service) {
modelFamilyInfo[f] = getInfoForFamily(f);
}
});
if (service === "openai" && config.checkKeys) {
serviceInfo.openaiOrgs = getUniqueOpenAIOrgs(keyPool.list());
}
}
return { serviceInfo, modelFamilyInfo };
}
function getUniqueOpenAIOrgs(keys: KeyPoolKey[]) {
const orgIds = new Set(
keys.filter((k) => k.service === "openai").map((k: any) => k.organizationId)
);
return orgIds.size;
}
function increment<T extends keyof AllStats | ModelAggregateKey>(
map: Map<T, number>,
key: T,
delta = 1
) {
map.set(key, (map.get(key) || 0) + delta);
}
function addKeyToAggregates(k: KeyPoolKey) {
increment(serviceStats, "proompts", k.promptCount);
increment(serviceStats, "openai__keys", k.service === "openai" ? 1 : 0);
increment(serviceStats, "anthropic__keys", k.service === "anthropic" ? 1 : 0);
increment(serviceStats, "google-ai__keys", k.service === "google-ai" ? 1 : 0);
increment(
serviceStats,
"mistral-ai__keys",
k.service === "mistral-ai" ? 1 : 0
);
increment(serviceStats, "aws__keys", k.service === "aws" ? 1 : 0);
increment(serviceStats, "azure__keys", k.service === "azure" ? 1 : 0);
let sumTokens = 0;
let sumCost = 0;
switch (k.service) {
case "openai":
if (!keyIsOpenAIKey(k)) throw new Error("Invalid key type");
increment(
serviceStats,
"openai__uncheckedKeys",
k.lastChecked ? 0 : 1
);
k.modelFamilies.forEach((f) => {
const tokens = k[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
increment(modelStats, `${f}__tokens`, tokens);
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${f}__trial`, k.isTrial ? 1 : 0);
increment(modelStats, `${f}__overQuota`, k.isOverQuota ? 1 : 0);
});
break;
case "azure":
if (!keyIsAzureKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
const tokens = k[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
increment(modelStats, `${f}__tokens`, tokens);
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
});
break;
case "anthropic": {
if (!keyIsAnthropicKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
const tokens = k[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
increment(modelStats, `${f}__tokens`, tokens);
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${f}__overQuota`, k.isOverQuota ? 1 : 0);
increment(modelStats, `${f}__pozzed`, k.isPozzed ? 1 : 0);
});
increment(
serviceStats,
"anthropic__uncheckedKeys",
k.lastChecked ? 0 : 1
);
break;
}
case "google-ai": {
if (!keyIsGoogleAIKey(k)) throw new Error("Invalid key type");
const family = "gemini-pro";
sumTokens += k["gemini-proTokens"];
sumCost += getTokenCostUsd(family, k["gemini-proTokens"]);
increment(modelStats, `${family}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${family}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${family}__tokens`, k["gemini-proTokens"]);
break;
}
case "mistral-ai": {
if (!keyIsMistralAIKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
const tokens = k[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
increment(modelStats, `${f}__tokens`, tokens);
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
});
break;
}
case "aws": {
if (!keyIsAwsKey(k)) throw new Error("Invalid key type");
const family = "aws-claude";
sumTokens += k["aws-claudeTokens"];
sumCost += getTokenCostUsd(family, k["aws-claudeTokens"]);
increment(modelStats, `${family}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${family}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${family}__tokens`, k["aws-claudeTokens"]);
increment(modelStats, `${family}__awsSonnet`, k.sonnetEnabled ? 1 : 0);
increment(modelStats, `${family}__awsHaiku`, k.haikuEnabled ? 1 : 0);
// Ignore revoked keys for aws logging stats, but include keys where the
// logging status is unknown.
const countAsLogged =
k.lastChecked && !k.isDisabled && k.awsLoggingStatus !== "disabled";
increment(modelStats, `${family}__awsLogged`, countAsLogged ? 1 : 0);
break;
}
default:
assertNever(k.service);
}
increment(serviceStats, "tokens", sumTokens);
increment(serviceStats, "tokenCost", sumCost);
}
function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
const tokens = modelStats.get(`${family}__tokens`) || 0;
const cost = getTokenCostUsd(family, tokens);
let info: BaseFamilyInfo & OpenAIInfo & AnthropicInfo & AwsInfo = {
usage: `${prettyTokens(tokens)} tokens${getCostSuffix(cost)}`,
activeKeys: modelStats.get(`${family}__active`) || 0,
revokedKeys: modelStats.get(`${family}__revoked`) || 0,
};
// Add service-specific stats to the info object.
if (config.checkKeys) {
const service = MODEL_FAMILY_SERVICE[family];
switch (service) {
case "openai":
info.overQuotaKeys = modelStats.get(`${family}__overQuota`) || 0;
info.trialKeys = modelStats.get(`${family}__trial`) || 0;
// Delete trial/revoked keys for non-turbo families.
// Trials are turbo 99% of the time, and if a key is invalid we don't
// know what models it might have had assigned to it.
if (family !== "turbo") {
delete info.trialKeys;
delete info.revokedKeys;
}
break;
case "anthropic":
info.overQuotaKeys = modelStats.get(`${family}__overQuota`) || 0;
info.prefilledKeys = modelStats.get(`${family}__pozzed`) || 0;
break;
case "aws":
info.sonnetKeys = modelStats.get(`${family}__awsSonnet`) || 0;
info.haikuKeys = modelStats.get(`${family}__awsHaiku`) || 0;
const logged = modelStats.get(`${family}__awsLogged`) || 0;
if (logged > 0) {
info.privacy = config.allowAwsLogging
? `${logged} active keys are potentially logged.`
: `${logged} active keys are potentially logged and can't be used. Set ALLOW_AWS_LOGGING=true to override.`;
}
break;
}
}
}
// Add queue stats to the info object.
const queue = getQueueInformation(family);
info.proomptersInQueue = queue.proomptersInQueue;
info.estimatedQueueTime = queue.estimatedQueueTime;
return info;
}
/** Returns queue time in seconds, or minutes + seconds if over 60 seconds. */
function getQueueInformation(partition: ModelFamily) {
const waitMs = getEstimatedWaitTime(partition);
const waitTime =
waitMs < 60000
? `${Math.round(waitMs / 1000)}sec`
: `${Math.round(waitMs / 60000)}min, ${Math.round(
(waitMs % 60000) / 1000
)}sec`;
return {
proomptersInQueue: getQueueLength(partition),
estimatedQueueTime: waitMs > 2000 ? waitTime : "no wait",
};
}
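For a sense of how this module is consumed, here is a minimal sketch of an Express handler calling buildInfo. This is illustrative only; the actual info-page router is not part of this diff, and the module path and route are assumptions.

import express from "express";
import { buildInfo } from "./service-info"; // module path assumed

const router = express.Router();
router.get("/status", (req, res) => {
  // buildInfo() caches its result for CACHE_TTL (2 seconds), so repeated
  // hits do not re-aggregate every key in the pool.
  const baseUrl = `${req.protocol}://${req.get("host")}`;
  res.json(buildInfo(baseUrl, /* forAdmin */ false));
});
export { router as statusRouter };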
+84
@@ -0,0 +1,84 @@
import type { Request, Response } from "express";
import { z } from "zod";
import { APIFormat } from "../key-management";
import { AnthropicV1MessagesSchema } from "./kits/anthropic-chat/schema";
import { AnthropicV1TextSchema } from "./kits/anthropic-text/schema";
import { transformOpenAIToAnthropicText } from "./kits/anthropic-text/request-transformers";
import {
transformAnthropicTextToAnthropicChat,
transformOpenAIToAnthropicChat,
} from "./kits/anthropic-chat/request-transformers";
import { GoogleAIV1GenerateContentSchema } from "./kits/google-ai/schema";
import { transformOpenAIToGoogleAI } from "./kits/google-ai/request-transformers";
import { MistralAIV1ChatCompletionsSchema } from "./kits/mistral-ai/schema";
import { OpenAIV1ChatCompletionSchema } from "./kits/openai/schema";
import { OpenAIV1ImagesGenerationSchema } from "./kits/openai-image/schema";
import { transformOpenAIToOpenAIImage } from "./kits/openai-image/request-transformers";
import { OpenAIV1TextCompletionSchema } from "./kits/openai-text/schema";
import { transformOpenAIToOpenAIText } from "./kits/openai-text/request-transformers";
export type APIRequestTransformer<Z extends z.ZodType<any, any>> = (
req: Request
) => Promise<z.infer<Z>>;
export type APIResponseTransformer<Z extends z.ZodType<any, any>> = (
res: Response
) => Promise<z.infer<Z>>;
/** Represents a transformation from one API format to another. */
type APITransformation = `${APIFormat}->${APIFormat}`;
type APIRequestTransformerMap = {
[key in APITransformation]?: APIRequestTransformer<any>;
};
type APIResponseTransformerMap = {
[key in APITransformation]?: APIResponseTransformer<any>;
};
export const API_REQUEST_TRANSFORMERS: APIRequestTransformerMap = {
"anthropic-text->anthropic-chat": transformAnthropicTextToAnthropicChat,
"openai->anthropic-chat": transformOpenAIToAnthropicChat,
"openai->anthropic-text": transformOpenAIToAnthropicText,
"openai->openai-text": transformOpenAIToOpenAIText,
"openai->openai-image": transformOpenAIToOpenAIImage,
"openai->google-ai": transformOpenAIToGoogleAI,
};
export const API_REQUEST_VALIDATORS: Record<APIFormat, z.ZodSchema<any>> = {
"anthropic-chat": AnthropicV1MessagesSchema,
"anthropic-text": AnthropicV1TextSchema,
openai: OpenAIV1ChatCompletionSchema,
"openai-text": OpenAIV1TextCompletionSchema,
"openai-image": OpenAIV1ImagesGenerationSchema,
"google-ai": GoogleAIV1GenerateContentSchema,
"mistral-ai": MistralAIV1ChatCompletionsSchema,
};
export { AnthropicChatMessage } from "./kits/anthropic-chat/schema";
export { AnthropicV1MessagesSchema } from "./kits/anthropic-chat/schema";
export { AnthropicV1TextSchema } from "./kits/anthropic-text/schema";
export interface APIFormatKit<T extends APIFormat, P> {
name: T;
/** Zod schema for validating requests in this format. */
requestValidator: z.ZodSchema<any>;
/** Flattens non-string prompts (such as message arrays) into a single string. */
promptStringifier: (prompt: P) => string;
/** Counts the number of tokens in a prompt. */
promptTokenCounter: (prompt: P, model: string) => Promise<number>;
/** Counts the number of tokens in a completion. */
completionTokenCounter: (
completion: string,
model: string
) => Promise<number>;
/** Functions which transform requests from other formats into this format. */
requestTransformers: APIRequestTransformerMap;
/** Functions which transform responses from this format into other formats. */
responseTransformers: APIResponseTransformerMap;
}
export { GoogleAIChatMessage } from "./kits/google-ai";
export { MistralAIChatMessage } from "./kits/mistral-ai";
export { OpenAIChatMessage } from "./kits/openai/schema";
export { flattenAnthropicMessages } from "./kits/anthropic-chat/stringifier";
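A sketch of how these maps are meant to be consumed; the translateRequestBody helper below is hypothetical and not part of this diff, but it shows the intended dispatch: validate in place when source and target formats match, otherwise look up the "from->to" transformer.

import type { Request } from "express";
import type { APIFormat } from "../key-management";
import { API_REQUEST_TRANSFORMERS, API_REQUEST_VALIDATORS } from "./index";

async function translateRequestBody(
  req: Request,
  from: APIFormat,
  to: APIFormat
) {
  // Same format: just validate against the target schema.
  if (from === to) return API_REQUEST_VALIDATORS[to].parse(req.body);
  const key: `${APIFormat}->${APIFormat}` = `${from}->${to}`;
  const transform = API_REQUEST_TRANSFORMERS[key];
  if (!transform) throw new Error(`No request transformer for ${key}`);
  return transform(req);
}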
+4
@@ -0,0 +1,4 @@
# API Kits
This directory contains "kits" for each supported language model API. Each kit implements the `APIFormatKit` interface and provides functionality that the proxy application needs to be able to validate requests, transform prompts and responses, tokenize text, and so forth.
## Structure
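For illustration, a kit skeleton might look roughly like this. This is a sketch only; the inline stringifier and the stubbed token counters are assumptions, not files from this diff.

import type { APIFormatKit, OpenAIChatMessage } from "../index";
import { OpenAIV1ChatCompletionSchema } from "./openai/schema";

// Hypothetical skeleton; real kits wire in actual tokenizer integrations.
const openAIKit: APIFormatKit<"openai", OpenAIChatMessage[]> = {
  name: "openai",
  requestValidator: OpenAIV1ChatCompletionSchema,
  promptStringifier: (messages) =>
    messages
      .map((m) =>
        `${m.role}: ${typeof m.content === "string" ? m.content : "[multimodal]"}`
      )
      .join("\n"),
  promptTokenCounter: async (_messages, _model) => 0, // stub
  completionTokenCounter: async (_completion, _model) => 0, // stub
  requestTransformers: {},
  responseTransformers: {},
};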
@@ -0,0 +1,290 @@
import { AnthropicChatMessage, AnthropicV1MessagesSchema } from "./schema";
import { AnthropicV1TextSchema, APIRequestTransformer, OpenAIChatMessage } from "../../index";
import { BadRequestError } from "../../../errors";
import { OpenAIV1ChatCompletionSchema } from "../openai/schema";
/**
* Represents the union of all content types without the `string` shorthand
* for `text` content.
*/
type AnthropicChatMessageContentWithoutString = Exclude<
AnthropicChatMessage["content"],
string
>;
/** Represents a message with all shorthand `string` content expanded. */
type ConvertedAnthropicChatMessage = AnthropicChatMessage & {
content: AnthropicChatMessageContentWithoutString;
};
export const transformOpenAIToAnthropicChat: APIRequestTransformer<
typeof AnthropicV1MessagesSchema
> = async (req) => {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-Anthropic Chat request"
);
throw result.error;
}
req.headers["anthropic-version"] = "2023-06-01";
const { messages, ...rest } = result.data;
const { messages: newMessages, system } =
openAIMessagesToClaudeChatPrompt(messages);
return {
system,
messages: newMessages,
model: rest.model,
max_tokens: rest.max_tokens,
stream: rest.stream,
temperature: rest.temperature,
top_p: rest.top_p,
stop_sequences: typeof rest.stop === "string" ? [rest.stop] : rest.stop,
...(rest.user ? { metadata: { user_id: rest.user } } : {}),
// Anthropic supports top_k, but OpenAI does not
// OpenAI supports frequency_penalty, presence_penalty, logit_bias, n, seed,
// and function calls, but Anthropic does not.
};
};
/**
* Converts an older Anthropic Text Completion prompt to the newer Messages API
* by splitting the flat text into messages.
*/
export const transformAnthropicTextToAnthropicChat: APIRequestTransformer<
typeof AnthropicV1MessagesSchema
> = async (req) => {
const { body } = req;
const result = AnthropicV1TextSchema.safeParse(body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid Anthropic Text-to-Anthropic Chat request"
);
throw result.error;
}
req.headers["anthropic-version"] = "2023-06-01";
const { model, max_tokens_to_sample, prompt, ...rest } = result.data;
validateAnthropicTextPrompt(prompt);
// Iteratively slice the prompt into messages. Start from the beginning and
// look for the next `\n\nHuman:` or `\n\nAssistant:`. Anything before the
// first human message is a system message.
let index = prompt.indexOf("\n\nHuman:");
let remaining = prompt.slice(index);
const system = prompt.slice(0, index);
const messages: AnthropicChatMessage[] = [];
while (remaining) {
const isHuman = remaining.startsWith("\n\nHuman:");
// Multiple messages from the same role are not permitted in Messages API.
// We collect all messages until the next message from the opposite role.
const thisRole = isHuman ? "\n\nHuman:" : "\n\nAssistant:";
const nextRole = isHuman ? "\n\nAssistant:" : "\n\nHuman:";
const nextIndex = remaining.indexOf(nextRole);
// Collect text up to the next message, or the end of the prompt for the
// Assistant prefill if present.
const msg = remaining
.slice(0, nextIndex === -1 ? undefined : nextIndex)
.replace(thisRole, "")
.trimStart();
const role = isHuman ? "user" : "assistant";
messages.push({ role, content: msg });
remaining = remaining.slice(nextIndex);
if (nextIndex === -1) break;
}
// fix "messages: final assistant content cannot end with trailing whitespace"
const lastMessage = messages[messages.length - 1];
if (
lastMessage.role === "assistant" &&
typeof lastMessage.content === "string"
) {
messages[messages.length - 1].content = lastMessage.content.trimEnd();
}
return {
model,
system,
messages,
max_tokens: max_tokens_to_sample,
...rest,
};
};
function validateAnthropicTextPrompt(prompt: string) {
if (!prompt.includes("\n\nHuman:") || !prompt.includes("\n\nAssistant:")) {
throw new BadRequestError(
"Prompt must contain at least one human and one assistant message."
);
}
// First human message must be before first assistant message
const firstHuman = prompt.indexOf("\n\nHuman:");
const firstAssistant = prompt.indexOf("\n\nAssistant:");
if (firstAssistant < firstHuman) {
throw new BadRequestError(
"First Assistant message must come after the first Human message."
);
}
}
function openAIMessagesToClaudeChatPrompt(messages: OpenAIChatMessage[]): {
messages: AnthropicChatMessage[];
system: string;
} {
// Similar formats, but Claude doesn't use `name` property and doesn't have
// a `system` role. Also, Claude does not allow consecutive messages from
// the same role, so we need to merge them.
// 1. Collect all system messages up to the first non-system message and set
// that as the `system` prompt.
// 2. Iterate through messages and:
// - If the message is from system, reassign it to assistant with System:
// prefix.
// - If message is from same role as previous, append it to the previous
// message rather than creating a new one.
// - Otherwise, create a new message and prefix with `name` if present.
// TODO: When a Claude message has multiple `text` contents, does the internal
// message flattening insert newlines between them? If not, we may need to
// do that here...
let firstNonSystem = -1;
const result: { messages: ConvertedAnthropicChatMessage[]; system: string } =
{ messages: [], system: "" };
for (let i = 0; i < messages.length; i++) {
const msg = messages[i];
const isSystem = isSystemOpenAIRole(msg.role);
if (firstNonSystem === -1 && isSystem) {
// Still merging initial system messages into the system prompt
result.system += getFirstTextContent(msg.content) + "\n";
continue;
}
if (firstNonSystem === -1 && !isSystem) {
// Encountered the first non-system message
firstNonSystem = i;
if (msg.role === "assistant") {
// There is an annoying rule that the first message must be from the user.
// This is commonly not the case with roleplay prompts that start with a
// block of system messages followed by an assistant message. We will try
// to reconcile this by splicing the last line of the system prompt into
// a beginning user message -- this is *commonly* ST's [Start a new chat]
// nudge, which works okay as a user message.
// Find the last non-empty line in the system prompt
const execResult = /(?:[^\r\n]*\r?\n)*([^\r\n]+)(?:\r?\n)*/d.exec(
result.system
);
let text = "";
if (execResult) {
text = execResult[1];
// Remove last line from system so it doesn't get duplicated
const [_, [lastLineStart]] = execResult.indices || [];
result.system = result.system.slice(0, lastLineStart);
} else {
// This is a bad prompt; there's no system content to move to user and
// it starts with assistant. We don't have any good options.
text = "[ Joining chat... ]";
}
result.messages.push({
role: "user",
content: [{ type: "text", text }],
});
}
}
const last = result.messages[result.messages.length - 1];
// I have to handle tools as system messages to be exhaustive here but the
// experience will be bad.
const role = isSystemOpenAIRole(msg.role) ? "assistant" : msg.role;
// Here we will lose the original name if it was a system message, but that
// is generally okay because the system message is usually a prompt and not
// a character in the chat.
const name = msg.role === "system" ? "System" : msg.name?.trim();
const content = convertOpenAIContent(msg.content);
// Prepend the display name to the first text content in the current message
// if it exists. We don't need to add the name to every content block.
if (name?.length) {
const firstTextContent = content.find((c) => c.type === "text");
if (firstTextContent && "text" in firstTextContent) {
// This mutates the element in `content`.
firstTextContent.text = `${name}: ${firstTextContent.text}`;
}
}
// Merge messages if necessary. If two assistant roles are consecutive but
// had different names, the final converted assistant message will have
// multiple characters in it, but the name prefixes should assist the model
// in differentiating between speakers.
if (last && last.role === role) {
last.content.push(...content);
} else {
result.messages.push({ role, content });
}
}
result.system = result.system.trimEnd();
return result;
}
function isSystemOpenAIRole(
role: OpenAIChatMessage["role"]
): role is "system" | "function" | "tool" {
return ["system", "function", "tool"].includes(role);
}
function getFirstTextContent(content: OpenAIChatMessage["content"]) {
if (typeof content === "string") return content;
for (const c of content) {
if ("text" in c) return c.text;
}
return "[ No text content in this message ]";
}
function convertOpenAIContent(
content: OpenAIChatMessage["content"]
): AnthropicChatMessageContentWithoutString {
if (typeof content === "string") {
return [{ type: "text", text: content.trimEnd() }];
}
return content.map((c) => {
if ("text" in c) {
return { type: "text", text: c.text.trimEnd() };
} else if ("image_url" in c) {
const url = c.image_url.url;
try {
const mimeType = url.split(";")[0].split(":")[1];
const data = url.split(",")[1];
return {
type: "image",
source: { type: "base64", media_type: mimeType, data },
};
} catch (e) {
return {
type: "text",
text: `[ Unsupported image URL: ${url.slice(0, 200)} ]`,
};
}
} else {
const type = String((c as any)?.type);
return { type: "text", text: `[ Unsupported content type: ${type} ]` };
}
});
}
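To make the text-to-chat slicing concrete, a worked example with illustrative values:

// Given this Text Completion prompt:
//   "You are a helpful bot.\n\nHuman: Hi!\n\nAssistant: Hello.\n\nHuman: Name?\n\nAssistant: I am"
// the transformer should yield approximately:
//   system:   "You are a helpful bot."
//   messages: [
//     { role: "user",      content: "Hi!" },
//     { role: "assistant", content: "Hello." },
//     { role: "user",      content: "Name?" },
//     { role: "assistant", content: "I am" }, // prefill kept; trailing whitespace trimmed
//   ]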
@@ -0,0 +1,52 @@
import { z } from "zod";
import { config } from "../../../../config";
const CLAUDE_OUTPUT_MAX = config.maxOutputTokensAnthropic;
export const AnthropicV1BaseSchema = z
.object({
model: z.string().max(100),
stop_sequences: z.array(z.string().max(500)).optional(),
stream: z.boolean().optional().default(false),
temperature: z.coerce.number().optional().default(1),
top_k: z.coerce.number().optional(),
top_p: z.coerce.number().optional(),
metadata: z.object({ user_id: z.string().optional() }).optional(),
})
.strip();
const AnthropicV1MessageMultimodalContentSchema = z.array(
z.union([
z.object({ type: z.literal("text"), text: z.string() }),
z.object({
type: z.literal("image"),
source: z.object({
type: z.literal("base64"),
media_type: z.string().max(100),
data: z.string(),
}),
}),
])
);
// https://docs.anthropic.com/claude/reference/messages_post
export const AnthropicV1MessagesSchema = AnthropicV1BaseSchema.merge(
z.object({
messages: z.array(
z.object({
role: z.enum(["user", "assistant"]),
content: z.union([
z.string(),
AnthropicV1MessageMultimodalContentSchema,
]),
})
),
max_tokens: z
.number()
.int()
.transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
system: z.string().optional(),
})
);
export type AnthropicChatMessage = z.infer<
typeof AnthropicV1MessagesSchema
>["messages"][0];
@@ -0,0 +1,21 @@
import { AnthropicChatMessage } from "./schema";
export function flattenAnthropicMessages(
messages: AnthropicChatMessage[]
): string {
return messages
  .map((msg) => {
    const name = msg.role === "user" ? "Human" : "Assistant";
    const parts = Array.isArray(msg.content)
      ? msg.content
      : [{ type: "text", text: msg.content }];
    return `\n\n${name}: ${parts
      .map((part) =>
        part.type === "text"
          ? part.text
          : `[Omitted multimodal content of type ${part.type}]`
      )
      .join("\n")}`;
  })
  .join("");
}
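Example usage, with a sketch of the expected output:

import { flattenAnthropicMessages } from "./stringifier"; // path assumed

const flat = flattenAnthropicMessages([
  { role: "user", content: "Hi!" },
  { role: "assistant", content: [{ type: "text", text: "Hello." }] },
]);
// flat === "\n\nHuman: Hi!\n\nAssistant: Hello."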
@@ -0,0 +1,73 @@
import {
AnthropicV1TextSchema,
APIRequestTransformer,
OpenAIChatMessage,
} from "../../index";
import { OpenAIV1ChatCompletionSchema } from "../openai/schema";
import { flattenOpenAIMessageContent } from "../openai/stringifier";
export const transformOpenAIToAnthropicText: APIRequestTransformer<
typeof AnthropicV1TextSchema
> = async (req) => {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-Anthropic Text request"
);
throw result.error;
}
req.headers["anthropic-version"] = "2023-06-01";
const { messages, ...rest } = result.data;
const prompt = openAIMessagesToClaudeTextPrompt(messages);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
// Recommended by Anthropic
stops.push("\n\nHuman:");
// Helps with jailbreak prompts that send fake system messages and multi-bot
// chats that prefix bot messages with "System: Respond as <bot name>".
stops.push("\n\nSystem:");
// Remove duplicates
stops = [...new Set(stops)];
return {
model: rest.model,
prompt: prompt,
max_tokens_to_sample: rest.max_tokens,
stop_sequences: stops,
stream: rest.stream,
temperature: rest.temperature,
top_p: rest.top_p,
};
};
function openAIMessagesToClaudeTextPrompt(messages: OpenAIChatMessage[]) {
return (
messages
.map((m) => {
let role: string = m.role;
if (role === "assistant") {
role = "Assistant";
} else if (role === "system") {
role = "System";
} else if (role === "user") {
role = "Human";
}
const name = m.name?.trim();
const content = flattenOpenAIMessageContent(m.content);
// https://console.anthropic.com/docs/prompt-design
// `name` isn't supported by Anthropic but we can still try to use it.
return `\n\n${role}: ${name ? `(as ${name}) ` : ""}${content}`;
})
.join("") + "\n\nAssistant:"
);
}
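Illustration of the flattened prompt this produces (values are examples):

// OpenAI messages:
//   { role: "system",    content: "Be terse." }
//   { role: "user",      content: "Hi", name: "Alice" }
//   { role: "assistant", content: "Hello" }
// become the single Text Completion prompt:
//   "\n\nSystem: Be terse.\n\nHuman: (as Alice) Hi\n\nAssistant: Hello\n\nAssistant:"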
@@ -0,0 +1,16 @@
import { z } from "zod";
import { AnthropicV1BaseSchema } from "../anthropic-chat/schema";
import { config } from "../../../../config";
const CLAUDE_OUTPUT_MAX = config.maxOutputTokensAnthropic;
// https://docs.anthropic.com/claude/reference/complete_post [deprecated]
export const AnthropicV1TextSchema = AnthropicV1BaseSchema.merge(
z.object({
prompt: z.string(),
max_tokens_to_sample: z.coerce
.number()
.int()
.transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
})
);
@@ -0,0 +1 @@
export { GoogleAIChatMessage } from "./schema";
@@ -0,0 +1,92 @@
import { APIRequestTransformer, GoogleAIChatMessage } from "../../index";
import { GoogleAIV1GenerateContentSchema } from "./schema";
import { OpenAIV1ChatCompletionSchema } from "../openai/schema";
import { flattenOpenAIMessageContent } from "../openai/stringifier";
export const transformOpenAIToGoogleAI: APIRequestTransformer<
typeof GoogleAIV1GenerateContentSchema
> = async (req) => {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse({
...body,
model: "gpt-3.5-turbo",
});
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-Google AI request"
);
throw result.error;
}
const { messages, ...rest } = result.data;
const foundNames = new Set<string>();
const contents = messages
.map((m) => {
const role = m.role === "assistant" ? "model" : "user";
// Detects character names so we can set stop sequences for them as Gemini
// is prone to continuing as the next character.
// If names are not available, we'll still try to prefix the message
// with generic names so we can set stops for them but they don't work
// as well as real names.
const text = flattenOpenAIMessageContent(m.content);
const propName = m.name?.trim();
const textName =
m.role === "system" ? "" : text.match(/^(.{0,50}?): /)?.[1]?.trim();
const name =
propName || textName || (role === "model" ? "Character" : "User");
foundNames.add(name);
// Prefixing messages with their character name seems to help avoid
// Gemini trying to continue as the next character, or at the very least
// ensures it will hit the stop sequence. Otherwise it will start a new
// paragraph and switch perspectives.
// The response will be very likely to include this prefix so frontends
// will need to strip it out.
const textPrefix = textName ? "" : `${name}: `;
return {
parts: [{ text: textPrefix + text }],
role: m.role === "assistant" ? ("model" as const) : ("user" as const),
};
})
.reduce<GoogleAIChatMessage[]>((acc, msg) => {
const last = acc[acc.length - 1];
if (last?.role === msg.role) {
last.parts[0].text += "\n\n" + msg.parts[0].text;
} else {
acc.push(msg);
}
return acc;
}, []);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
stops.push(...Array.from(foundNames).map((name) => `\n${name}:`));
stops = [...new Set(stops)].slice(0, 5);
return {
model: "gemini-pro",
stream: rest.stream,
contents,
tools: [],
generationConfig: {
maxOutputTokens: rest.max_tokens,
stopSequences: stops,
topP: rest.top_p,
topK: 40, // openai schema doesn't have this, google ai defaults to 40
temperature: rest.temperature,
},
safetySettings: [
{ category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_HATE_SPEECH", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "BLOCK_NONE" },
],
};
};
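A concrete illustration of the name detection and stop sequences (values are examples):

// Given OpenAI messages whose text already carries speaker prefixes:
//   user:      "Alice: Hi there"
//   assistant: "Bob: Hello!"
// the ^(.{0,50}?):  match detects "Alice" and "Bob", no extra prefix is
// added (the text already has one), and "\nAlice:" / "\nBob:" are appended
// to stopSequences (capped at 5 total) so Gemini stops before speaking as
// the next character.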
@@ -0,0 +1,34 @@
import { z } from "zod";
// https://developers.generativeai.google/api/rest/generativelanguage/models/generateContent
export const GoogleAIV1GenerateContentSchema = z
.object({
model: z.string().max(100), // actually specified in path but we need it for the router
stream: z.boolean().optional().default(false), // also used for router
contents: z.array(
z.object({
parts: z.array(z.object({ text: z.string() })),
role: z.enum(["user", "model"]),
})
),
tools: z.array(z.object({})).max(0).optional(),
safetySettings: z.array(z.object({})).max(0).optional(),
generationConfig: z.object({
temperature: z.number().optional(),
maxOutputTokens: z.coerce
.number()
.int()
.optional()
.default(16)
.transform((v) => Math.min(v, 1024)), // TODO: Add config
candidateCount: z.literal(1).optional(),
topP: z.number().optional(),
topK: z.number().optional(),
stopSequences: z.array(z.string().max(500)).max(5).optional(),
}),
})
.strip();
export type GoogleAIChatMessage = z.infer<
typeof GoogleAIV1GenerateContentSchema
>["contents"][0];
@@ -0,0 +1 @@
export { MistralAIChatMessage } from "./schema";
@@ -0,0 +1,35 @@
import { MistralAIChatMessage } from "./schema";
export function fixMistralPrompt(
messages: MistralAIChatMessage[]
): MistralAIChatMessage[] {
// Mistral uses OpenAI format but has some additional requirements:
// - Only one system message per request, and it must be the first message if
// present.
// - Final message must be a user message.
// - Cannot have multiple messages from the same role in a row.
// While frontends should be able to handle this, we can fix it here in the
// meantime.
return messages.reduce<MistralAIChatMessage[]>((acc, msg) => {
if (acc.length === 0) {
acc.push(msg);
return acc;
}
const copy = { ...msg };
// Reattribute subsequent system messages to the user
if (msg.role === "system") {
copy.role = "user";
}
// Consolidate multiple messages from the same role
const last = acc[acc.length - 1];
if (last.role === copy.role) {
last.content += "\n\n" + copy.content;
} else {
acc.push(copy);
}
return acc;
}, []);
}
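Example of the repair (the module path is an assumption):

import { fixMistralPrompt } from "./fixer"; // path assumed

const fixed = fixMistralPrompt([
  { role: "system", content: "Be brief." },
  { role: "system", content: "Use English." }, // reattributed to user
  { role: "user", content: "Hi" },             // merged with previous user msg
  { role: "user", content: "Anyone there?" },  // merged again
]);
// => [
//   { role: "system", content: "Be brief." },
//   { role: "user", content: "Use English.\n\nHi\n\nAnyone there?" },
// ]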
@@ -0,0 +1,28 @@
// https://docs.mistral.ai/api#operation/createChatCompletion
import { z } from "zod";
import { OPENAI_OUTPUT_MAX } from "../openai/schema";
export const MistralAIV1ChatCompletionsSchema = z.object({
model: z.string(),
messages: z.array(
z.object({
role: z.enum(["system", "user", "assistant"]),
content: z.string(),
})
),
temperature: z.number().optional().default(0.7),
top_p: z.number().optional().default(1),
max_tokens: z.coerce
.number()
.int()
.nullish()
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
stream: z.boolean().optional().default(false),
safe_prompt: z.boolean().optional().default(false),
random_seed: z.number().int().optional(),
});
export type MistralAIChatMessage = z.infer<
typeof MistralAIV1ChatCompletionsSchema
>["messages"][0];

Some files were not shown because too many files have changed in this diff.