# Compare commits

`sqlite-users` .. `main` (316 commits)
## `.env.example` (+111, -55)

```diff
@@ -8,12 +8,32 @@
 # Use production mode unless you are developing locally.
 NODE_ENV=production
 
+# Detail level of diagnostic logging. (trace | debug | info | warn | error)
+# LOG_LEVEL=info
+
 # ------------------------------------------------------------------------------
 # General settings:
 
 # The title displayed on the info page.
 # SERVER_TITLE=Coom Tunnel
 
+# URL for the image displayed on the login page.
+# If not set, no image will be displayed.
+# LOGIN_IMAGE_URL=https://example.com/your-logo.png
+
+# Whether to enable the token-based or password-based login for the main info page.
+# Defaults to true. Set to false to disable login and make the info page public.
+# ENABLE_INFO_PAGE_LOGIN=true
+
+# Authentication mode for the service info page. (token | password)
+# If 'token', any valid user token is used (requires GATEKEEPER='user_token' mode).
+# If 'password', SERVICE_INFO_PASSWORD is used.
+# Defaults to 'token' if ENABLE_INFO_PAGE_LOGIN is true.
+# SERVICE_INFO_AUTH_MODE=token
+
+# Password for the service info page if SERVICE_INFO_AUTH_MODE is 'password'.
+# SERVICE_INFO_PASSWORD=your-service-info-password
+
 # The route name used to proxy requests to APIs, relative to the Web site root.
 # PROXY_ENDPOINT_ROUTE=/proxy
 
@@ -24,27 +44,75 @@ NODE_ENV=production
 
 # Max number of context tokens a user can request at once.
 # Increase this if your proxy allow GPT 32k or 128k context
-# MAX_CONTEXT_TOKENS_OPENAI=16384
+# MAX_CONTEXT_TOKENS_OPENAI=32768
+# MAX_CONTEXT_TOKENS_ANTHROPIC=32768
 
 # Max number of output tokens a user can request at once.
-# MAX_OUTPUT_TOKENS_OPENAI=400
-# MAX_OUTPUT_TOKENS_ANTHROPIC=400
+# MAX_OUTPUT_TOKENS_OPENAI=1024
+# MAX_OUTPUT_TOKENS_ANTHROPIC=1024
 
 # Whether to show the estimated cost of consumed tokens on the info page.
 # SHOW_TOKEN_COSTS=false
 
 # Whether to automatically check API keys for validity.
-# Note: CHECK_KEYS is disabled by default in local development mode, but enabled
-# by default in production mode.
+# Disabled by default in local development mode, but enabled in production.
 # CHECK_KEYS=true
 
 # Which model types users are allowed to access.
 # The following model families are recognized:
-# turbo | gpt4 | gpt4-32k | gpt4-turbo | gpt4o | dall-e | claude | claude-opus | gemini-pro | mistral-tiny | mistral-small | mistral-medium | mistral-large | aws-claude | aws-claude-opus | azure-turbo | azure-gpt4 | azure-gpt4-32k | azure-gpt4-turbo | azure-gpt4o | azure-dall-e
-# By default, all models are allowed except for 'dall-e' / 'azure-dall-e'.
-# To allow DALL-E image generation, uncomment the line below and add 'dall-e' or
-# 'azure-dall-e' to the list of allowed model families.
-# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,gpt4o,claude,claude-opus,gemini-pro,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o
+# turbo | gpt4 | gpt4-32k | gpt4-turbo | gpt4o | o1 | dall-e | claude
+# | claude-opus | gemini-flash | gemini-pro | gemini-ultra | mistral-tiny |
+# | mistral-small | mistral-medium | mistral-large | aws-claude |
+# | aws-claude-opus | gcp-claude | gcp-claude-opus | azure-turbo | azure-gpt4
+# | azure-gpt4-32k | azure-gpt4-turbo | azure-gpt4o | azure-o1 | azure-dall-e
+# | azure-gpt45 | azure-o1-mini | azure-o3-mini | deepseek | xai | o3 | o4-mini | gpt41 | gpt41-mini | gpt41-nano
+# By default, all models are allowed
+# To dissalow any, uncomment line below and edit
+# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt45,gpt4-turbo,gpt4o,claude,claude-opus,gemini-flash,gemini-pro,gemini-ultra,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,gcp-claude,gcp-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o,azure-gpt45,azure-o1-mini,azure-o3-mini,deepseek
+
+# Which services can be used to process prompts containing images via multimodal
+# models. The following services are recognized:
+# openai | anthropic | aws | gcp | azure | google-ai | mistral-ai | xai
+# Do not enable this feature unless all users are trusted, as you will be liable
+# for any user-submitted images containing illegal content.
+# By default, no image services are allowed and image prompts are rejected.
+# ALLOWED_VISION_SERVICES=
+
+# Whether prompts should be logged to Google Sheets.
+# Requires additional setup. See `docs/google-sheets.md` for more information.
+# PROMPT_LOGGING=false
+
+# Specifies the number of proxies or load balancers in front of the server.
+# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
+# For any other deployments, please see config.ts as the correct configuration
+# depends on your setup. Misconfiguring this value can result in problems
+# accurately tracking IP addresses and enforcing rate limits.
+# TRUSTED_PROXIES=1
+
+# Whether cookies should be set without the Secure flag, for hosts that don't
+# support SSL. True by default in development, false in production.
+# USE_INSECURE_COOKIES=false
+
+# Reorganizes requests in the queue according to their token count, placing
+# larger prompts further back. The penalty is determined by (promptTokens *
+# TOKENS_PUNISHMENT_FACTOR). A value of 1.0 adds one second per 1000 tokens.
+# When there is no queue or it is very short, the effect is negligible (this
+# setting only reorders the queue, it does not artificially delay requests).
+# TOKENS_PUNISHMENT_FACTOR=0.0
+
+# Captcha verification settings. Refer to docs/pow-captcha.md for guidance.
+# CAPTCHA_MODE=none
+# POW_TOKEN_HOURS=24
+# POW_TOKEN_MAX_IPS=2
+# POW_DIFFICULTY_LEVEL=low
+# POW_CHALLENGE_TIMEOUT=30
+
+# -------------------------------------------------------------------------------
+# Blocking settings:
+# Allows blocking requests depending on content, referers, or IP addresses.
+# This is a convenience feature; if you need more robust functionality it is
+# highly recommended to put this application behind nginx or Cloudflare, as they
+# will have better performance.
+
 # IP addresses or CIDR blocks from which requests will be blocked.
 # IP_BLACKLIST=10.0.0.1/24
@@ -54,34 +122,12 @@ NODE_ENV=production
 # BLOCK_MESSAGE="You must be over the age of majority in your country to use this service."
 # Destination to redirect blocked requests to.
 # BLOCK_REDIRECT="https://roblox.com/"
-# Comma-separated list of phrases that will be rejected. Only whole words are matched.
-# Surround phrases with quotes if they contain commas.
-# Avoid short or common phrases as this tests the entire prompt.
+# Comma-separated list of phrases that will be rejected. Surround phrases with
+# quotes if they contain commas. You can use regular expression tokens.
+# Avoid overly broad phrases as will trigger on any match in the entire prompt.
 # REJECT_PHRASES="phrase one,phrase two,"phrase three, which has a comma",phrase four"
 # Message to show when requests are rejected.
-# REJECT_MESSAGE="This content violates /aicg/'s acceptable use policy."
+# REJECT_MESSAGE="You can't say that here."
 
-# Whether prompts should be logged to Google Sheets.
-# Requires additional setup. See `docs/google-sheets.md` for more information.
-# PROMPT_LOGGING=false
-
-# The port and network interface to listen on.
-# PORT=7860
-# BIND_ADDRESS=0.0.0.0
-
-# Whether cookies should be set without the Secure flag, for hosts that don't support SSL.
-# USE_INSECURE_COOKIES=false
-
-# Detail level of logging. (trace | debug | info | warn | error)
-# LOG_LEVEL=info
-
-# Captcha verification settings. Refer to docs/pow-captcha.md for guidance.
-# CAPTCHA_MODE=none
-# POW_TOKEN_HOURS=24
-# POW_TOKEN_MAX_IPS=2
-# POW_DIFFICULTY_LEVEL=low
-# POW_CHALLENGE_TIMEOUT=30
 
 # ------------------------------------------------------------------------------
 # Optional settings for user management, access control, and quota enforcement:
@@ -90,8 +136,11 @@ NODE_ENV=production
 
 # Which access control method to use. (none | proxy_key | user_token)
 # GATEKEEPER=none
-# Which persistence method to use. (memory | firebase_rtdb)
+# Which persistence method to use. (memory | firebase_rtdb | sqlite)
 # GATEKEEPER_STORE=memory
+# If using sqlite store, path to the SQLite database file for user data.
+# Defaults to data/user-store.sqlite in the project directory.
+# SQLITE_USER_STORE_PATH=data/user-store.sqlite3
 
 # Maximum number of unique IPs a user can connect from. (0 for unlimited)
 # MAX_IPS_PER_USER=0
@@ -102,29 +151,34 @@ NODE_ENV=production
 # ALLOW_NICKNAME_CHANGES=true
 
 # Default token quotas for each model family. (0 for unlimited)
-# DALL-E "tokens" are counted at a rate of 100000 tokens per US$1.00 generated,
-# which is similar to the cost of GPT-4 Turbo.
+# Specify as TOKEN_QUOTA_MODEL_FAMILY=value (replacing dashes with underscores).
+# eg. TOKEN_QUOTA_TURBO=0, TOKEN_QUOTA_GPT4=1000000, TOKEN_QUOTA_GPT4_32K=100000
+# "Tokens" for image-generation models are counted at a rate of 100000 tokens
+# per US$1.00 generated, which is similar to the cost of GPT-4 Turbo.
 # DALL-E 3 costs around US$0.10 per image (10000 tokens).
 # See `docs/dall-e-configuration.md` for more information.
-# TOKEN_QUOTA_TURBO=0
-# TOKEN_QUOTA_GPT4=0
-# TOKEN_QUOTA_GPT4_32K=0
-# TOKEN_QUOTA_GPT4_TURBO=0
 # TOKEN_QUOTA_DALL_E=0
-# TOKEN_QUOTA_CLAUDE=0
-# TOKEN_QUOTA_GEMINI_PRO=0
-# TOKEN_QUOTA_AWS_CLAUDE=0
 
 # How often to refresh token quotas. (hourly | daily)
 # Leave unset to never automatically refresh quotas.
 # QUOTA_REFRESH_PERIOD=daily
 
-# Specifies the number of proxies or load balancers in front of the server.
-# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
-# For any other deployments, please see config.ts as the correct configuration
-# depends on your setup. Misconfiguring this value can result in problems
-# accurately tracking IP addresses and enforcing rate limits.
-# TRUSTED_PROXIES=1
+# -------------------------------------------------------------------------------
+# HTTP agent settings:
+# If you need to change how the proxy makes requests to other servers, such
+# as when checking keys or forwarding users' requests to external services,
+# you can configure an alternative HTTP agent. Otherwise the default OS settings
+# will be used.
 
+# The name of the network interface to use. The first external IPv4 address
+# belonging to this interface will be used for outgoing requests.
+# HTTP_AGENT_INTERFACE=enp0s3
+
+# The URL of a proxy server to use. Supports SOCKS4, SOCKS5, HTTP, and HTTPS.
+# Note that if your proxy server issues a self-signed certificate, you may need
+# NODE_EXTRA_CA_CERTS set to the path to your certificate. You will need to set
+# that variable in your environment, not in this file.
+# HTTP_AGENT_PROXY_URL=http://test:test@127.0.0.1:8000
 
 # ------------------------------------------------------------------------------
 # Secrets and keys:
@@ -133,23 +187,25 @@ NODE_ENV=production
 
 # You can add multiple API keys by separating them with a comma.
 # For AWS credentials, separate the access key ID, secret key, and region with a colon.
+# For GCP credentials, separate the project ID, client email, region, and private key with a colon.
 OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 ANTHROPIC_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+GOOGLE_AI_KEY=AIzaxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 # See `docs/aws-configuration.md` for more information, there may be additional steps required to set up AWS.
 AWS_CREDENTIALS=myaccesskeyid:mysecretkey:us-east-1,anotheraccesskeyid:anothersecretkey:us-west-2
 # See `docs/azure-configuration.md` for more information, there may be additional steps required to set up Azure.
 AZURE_CREDENTIALS=azure-resource-name:deployment-id:api-key,another-azure-resource-name:another-deployment-id:another-api-key
+GCP_CREDENTIALS=project-id:client-email:region:private-key
 
 # With proxy_key gatekeeper, the password users must provide to access the API.
 # PROXY_KEY=your-secret-key
 
 # With user_token gatekeeper, the admin password used to manage users.
 # ADMIN_KEY=your-very-secret-key
-# To restrict access to the admin interface to specific IP addresses, set the
-# ADMIN_WHITELIST environment variable to a comma-separated list of CIDR blocks.
+# Restrict access to the admin interface to specific IP addresses, specified
+# as a comma-separated list of CIDR ranges.
 # ADMIN_WHITELIST=0.0.0.0/0
 
 # With firebase_rtdb gatekeeper storage, the Firebase project credentials.
 # FIREBASE_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 # FIREBASE_RTDB_URL=https://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.firebaseio.com
```
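The headline change for this branch is the new SQLite-backed user store. A minimal multi-user configuration using only the variables shown in the diff above might look like this (a sketch; values are placeholders):

```
# Sketch of a user_token setup backed by the new SQLite store.
# All variable names come from the diff above; values are placeholders.
NODE_ENV=production
GATEKEEPER=user_token
GATEKEEPER_STORE=sqlite
SQLITE_USER_STORE_PATH=data/user-store.sqlite3
ADMIN_KEY=your-very-secret-key
OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
```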
## `.gitignore` (+1, -1)

```diff
@@ -7,5 +7,5 @@
 build
 greeting.md
 node_modules
+.windsurfrules
 http-client.private.env.json
```
## `.windsurfrules` (new file, +33)

```markdown
You are a Senior Full Stack Developer and an Expert in ReactJS, NextJS, JavaScript, TypeScript, HTML, CSS and modern UI/UX frameworks (e.g., TailwindCSS, Shadcn, Radix). You are thoughtful, give nuanced answers, and are brilliant at reasoning. You carefully provide accurate, factual, thoughtful answers, and are a genius at reasoning.

- Follow the user’s requirements carefully & to the letter.
- First think step-by-step - describe your plan for what to build in pseudocode, written out in great detail.
- Confirm, then write code!
- Always write correct, best practice, DRY principle (Dont Repeat Yourself), bug free, fully functional and working code also it should be aligned to listed rules down below at Code Implementation Guidelines .
- Focus on easy and readability code, over being performant.
- Fully implement all requested functionality.
- Leave NO todo’s, placeholders or missing pieces.
- Ensure code is complete! Verify thoroughly finalised.
- Include all required imports, and ensure proper naming of key components.
- Be concise Minimize any other prose.
- If you think there might not be a correct answer, you say so.
- If you do not know the answer, say so, instead of guessing.

### Coding Environment
The user asks questions about the following coding languages:
- ReactJS
- NextJS
- JavaScript
- TypeScript
- TailwindCSS
- HTML
- CSS

### Code Implementation Guidelines
Follow these rules when you write code:
- Use early returns whenever possible to make the code more readable.
- Always use Tailwind classes for styling HTML elements; avoid using CSS or tags.
- Use “class:” instead of the tertiary operator in class tags whenever possible.
- Use descriptive variable and function/const names. Also, event functions should be named with a “handle” prefix, like “handleClick” for onClick and “handleKeyDown” for onKeyDown.
- Implement accessibility features on elements. For example, a tag should have a tabindex=“0”, aria-label, on:click, and on:keydown, and similar attributes.
- Use consts instead of functions, for example, “const toggle = () =>”. Also, define a type if possible.
```
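For illustration, a component written to these guidelines might look like the following (a hypothetical sketch, not a file from this changeset):

```tsx
import { useState, type KeyboardEvent } from "react";

type ToggleProps = {
  label: string;
};

// Hypothetical example following the rules above: a typed const arrow
// function, an early return, "handle"-prefixed event handlers, Tailwind-only
// styling, and tabIndex/aria attributes for accessibility.
const Toggle = ({ label }: ToggleProps) => {
  const [isOn, setIsOn] = useState(false);

  if (!label) return null; // early return when there is nothing to render

  const handleClick = () => setIsOn((previous) => !previous);
  const handleKeyDown = (event: KeyboardEvent<HTMLDivElement>) => {
    if (event.key === "Enter" || event.key === " ") handleClick();
  };

  return (
    <div
      role="button"
      tabIndex={0}
      aria-label={label}
      aria-pressed={isOn}
      onClick={handleClick}
      onKeyDown={handleKeyDown}
      className="cursor-pointer rounded bg-slate-700 px-3 py-1 text-white"
    >
      {label}: {isOn ? "on" : "off"}
    </div>
  );
};

export default Toggle;
```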
## `CODEBASE_GUIDE.md` (new file, +321)

````markdown
# Project Codebase Guide

This document serves as a guide and index for the project codebase, designed to help developers and AI agents quickly understand its structure, components, and how to contribute.

## Table of Contents

1. [Project Overview](#project-overview)
2. [Directory Structure](#directory-structure)
3. [Core Components](#core-components)
    * [Configuration (`src/config.ts`)](#configuration)
    * [Server Entry Point (`src/server.ts`)](#server-entry-point)
    * [Proxy Layer (`src/proxy/`)](#proxy-layer)
    * [User Management (`src/user/`)](#user-management)
    * [Admin Interface (`src/admin/`)](#admin-interface)
    * [Shared Utilities (`src/shared/`)](#shared-utilities)
4. [Proxy Functionality](#proxy-functionality)
    * [Routing (`src/proxy/routes.ts`)](#proxy-routing)
    * [Supported Models & Providers](#supported-models--providers)
    * [Middleware (`src/proxy/middleware/`)](#proxy-middleware)
    * [Adding New Models](#adding-new-models)
    * [Adding New APIs/Providers](#adding-new-apisproviders)
5. [Model Management](#model-management)
    * [Model Family Definitions](#model-family-definitions)
    * [Adding OpenAI Models](#adding-openai-models)
    * [Model Mapping & Routing](#model-mapping--routing)
    * [Service Information](#service-information)
    * [Step-by-Step Guide for Adding a New Model](#step-by-step-guide-for-adding-a-new-model)
    * [Model Patterns and Versioning](#model-patterns-and-versioning)
    * [Response Format Handling](#response-format-handling)
6. [Key Management](#key-management)
    * [Key Pool System](#key-pool-system)
    * [Provider-Specific Key Management](#provider-specific-key-management)
    * [Key Rotation and Health Checks](#key-rotation-and-health-checks)
7. [Data Management](#data-management)
    * [Database (`src/shared/database/`)](#database)
    * [File Storage (`src/shared/file-storage/`)](#file-storage)
8. [Authentication & Authorization](#authentication--authorization)
9. [Logging & Monitoring](#logging--monitoring)
10. [Deployment](#deployment)
11. [Contributing](#contributing)

## Project Overview

This project provides a proxy layer for various Large Language Models (LLMs) and potentially other AI APIs. It aims to offer a unified interface, manage API keys securely, handle rate limiting, usage tracking, and potentially add features like response caching or prompt modification.

## Directory Structure

```
.
├── .env.example          # Example environment variables
├── .gitattributes        # Git attributes
├── .gitignore            # Git ignore rules
├── .husky/               # Git hooks
├── .prettierrc           # Code formatting rules
├── CODEBASE_GUIDE.md     # This file
├── README.md             # Project README
├── data/                 # Data files (e.g., SQLite DB)
├── docker/               # Docker configuration
├── docs/                 # Documentation files
├── http-client.env.json  # HTTP client environment
├── package-lock.json     # NPM lock file
├── package.json          # Project dependencies and scripts
├── patches/              # Patches for dependencies
├── public/               # Static assets served by the web server
├── render.yaml           # Render deployment configuration
├── scripts/              # Utility scripts
├── src/                  # Source code
│   ├── admin/            # Admin interface logic
│   ├── config.ts         # Application configuration
│   ├── info-page.ts      # Logic for the info page
│   ├── logger.ts         # Logging setup
│   ├── proxy/            # Core proxy logic for different providers
│   ├── server.ts         # Express server setup and main entry point
│   ├── service-info.ts   # Service information logic
│   ├── shared/           # Shared utilities, types, and modules
│   └── user/             # User management logic
├── tsconfig.json         # TypeScript configuration
```

## Core Components

### Configuration (`src/config.ts`)

* Loads environment variables and defines application settings.
* Contains configuration for database connections, API keys (placeholders/retrieval methods), logging levels, rate limits, etc.
* Uses `dotenv` and potentially a schema validation library (like Zod) to ensure required variables are present.

### Server Entry Point (`src/server.ts`)

* Initializes the Express application.
* Sets up core middleware (e.g., body parsing, CORS, logging).
* Mounts routers for different parts of the application (admin, user, proxy).
* Starts the HTTP server.

### Proxy Layer (`src/proxy/`)

* The heart of the application, handling requests to downstream AI APIs.
* Contains individual modules for each supported provider (e.g., `openai.ts`, `anthropic.ts`).
* Handles request transformation, authentication against the target API, and response handling.
* Uses middleware for common proxy tasks.

### User Management (`src/user/`)

* Handles user registration, login, session management, and potentially API key generation/management for end-users.
* Likely interacts with the database (`src/shared/database/`).

### Admin Interface (`src/admin/`)

* Provides an interface for administrators to manage users, monitor usage, configure settings, etc.
* May have its own set of routes and views.

### Shared Utilities (`src/shared/`)

* Contains reusable code across different modules.
* `api-schemas/`: Zod schemas for API request/response validation.
* `database/`: Database connection, schemas (e.g., Prisma), and query logic.
* `errors.ts`: Custom error classes.
* `key-management/`: Logic for managing API keys (if applicable).
* `models.ts`: Core data models/types used throughout the application.
* `prompt-logging/`: Logic for logging prompts and responses.
* `tokenization/`: Utilities for counting tokens.
* `utils.ts`: General utility functions.

## Proxy Functionality

### Proxy Routing (`src/proxy/routes.ts`)

* Defines the API endpoints for the proxy service (e.g., `/v1/chat/completions`).
* Maps incoming requests to the appropriate provider-specific handler based on the request path, headers, or body content (e.g., model requested).
* Applies relevant middleware (authentication, rate limiting, queuing, etc.).

### Supported Models & Providers

* **OpenAI:** Handled in `src/proxy/openai.ts`. Supports models like GPT-4, GPT-3.5-turbo, as well as o-series models (o1, o1-mini, o1-pro, o3, o3-mini, o3-pro, o4-mini). Handles chat completions and potentially image generation (`src/proxy/openai-image.ts`).
* **Anthropic:** Handled in `src/proxy/anthropic.ts`. Supports Claude models. May use AWS Bedrock (`src/proxy/aws-claude.ts`) or Anthropic's direct API.
* **Google AI / Vertex AI:** Handled in `src/proxy/google-ai.ts` and `src/proxy/gcp.ts`. Supports Gemini models (gemini-flash, gemini-pro, gemini-ultra).
* **Mistral AI:** Handled in `src/proxy/mistral-ai.ts`. Supports Mistral models via their API or potentially AWS (`src/proxy/aws-mistral.ts`).
* **Azure OpenAI:** Handled in `src/proxy/azure.ts`. Provides an alternative endpoint for OpenAI models via Azure.
* **Deepseek:** Handled in `src/proxy/deepseek.ts`.
* **Xai:** Handled in `src/proxy/xai.ts`.
* **AWS (General):** `src/proxy/aws.ts` might contain shared AWS logic (e.g., authentication).

### Middleware (`src/proxy/middleware/`)

* **`gatekeeper.ts`:** Likely handles initial request validation, authentication, and authorization checks before hitting provider logic. Checks origin (`check-origin.ts`), potentially custom tokens (`check-risu-token.ts`).
* **`rate-limit.ts`:** Implements rate limiting logic, potentially per-user or per-key.
* **`queue.ts`:** Manages request queuing, possibly to handle concurrency limits or prioritize requests.

### Adding New Models

1. **Identify the Provider:** Determine if the new model belongs to an existing provider (e.g., a new OpenAI model) or a new one.
2. **Update Provider Logic (if existing):**
    * Modify the relevant provider file (e.g., `src/proxy/openai.ts`).
    * Update model lists or logic that selects/validates models.
    * Adjust any request/response transformations if the new model has a different API schema.
    * Update model information in shared files like `src/shared/models.ts` if necessary.
3. **Update Routing (if necessary):** Modify `src/proxy/routes.ts` if the new model requires a different endpoint or routing logic.
4. **Configuration:** Add any new API keys or configuration parameters to `.env.example` and `src/config.ts`.
5. **Testing:** Add unit or integration tests for the new model.

### Adding New APIs/Providers

1. **Create Provider Module:** Create a new file in `src/proxy/` (e.g., `src/proxy/new-provider.ts`).
2. **Implement Handler:**
    * Write the core logic to handle requests for this provider. This typically involves:
        * Receiving the standardized request from the router.
        * Transforming the request into the format expected by the new provider's API.
        * Authenticating with the new provider's API (fetching keys from config).
        * Making the API call (consider using a robust HTTP client like `axios` or `node-fetch`).
        * Handling streaming responses if applicable (using helpers from `src/shared/streaming.ts`).
        * Transforming the provider's response back into a standardized format.
        * Handling errors gracefully.
3. **Add Routing:**
    * Import the new handler in `src/proxy/routes.ts`.
    * Add new routes or modify existing routing logic to direct requests to the new handler based on model name, path, or other criteria.
    * Apply necessary middleware (gatekeeper, rate limiter, queue).
4. **Create Key Management:**
    * Create a new directory in `src/shared/key-management/` for the provider.
    * Implement provider-specific key management (key checkers, token counters).
5. **Configuration:**
    * Add configuration variables (API keys, base URLs) to `.env.example` and `src/config.ts`.
    * Update `src/config.ts` to load and validate the new variables.
6. **Model Information:** Add details about the new provider and its models to `src/shared/models.ts` or similar shared locations.
7. **Tokenization (if applicable):** If token counting is needed, add or update tokenization logic in `src/shared/tokenization/`.
8. **Testing:** Implement thorough tests for the new provider integration.
9. **Documentation:** Update this guide and any other relevant documentation.

## Model Management

### Model Family Definitions

* **Model Family Definitions:** The project uses a family-based approach to group similar models together. These are defined in `src/shared/models.ts`.
* Each model is part of a model family (e.g., "gpt4", "claude", "gemini-pro") which helps with routing, key management, and feature support.
* The `MODEL_FAMILIES` array contains all supported model families, and the `MODEL_FAMILY_SERVICE` mapping connects each family to its provider service.

### Adding OpenAI Models

When adding new OpenAI models to the codebase, there are several files that must be updated:

1. **Update Model Types (`src/shared/models.ts`):**
    - Add the new model to the `OpenAIModelFamily` type
    - Add the model to the `MODEL_FAMILIES` array
    - Add the Azure variants for the model if applicable
    - Add the model to `MODEL_FAMILY_SERVICE` mapping
    - Update `OPENAI_MODEL_FAMILY_MAP` with regex patterns to match the model names

2. **Update Context Size Limits (`src/proxy/middleware/request/preprocessors/validate-context-size.ts`):**
    - Add regex matching for the new model
    - Set the appropriate context token limit for the model

3. **Update Token Cost Tracking (`src/shared/stats.ts`):**
    - Add pricing information for the new model in the `getTokenCostUsd` function
    - Include both input and output prices in the comments for clarity

4. **Update Feature Support Checks (`src/proxy/openai.ts`):**
    - If the model supports special features like the reasoning API parameter (`isO1Model` function), update the appropriate function
    - For model feature detection, prefer using regex patterns over explicit lists when possible, as this handles date-stamped versions better

5. **Update Display Names (`src/info-page.ts`):**
    - Add friendly display names for the new models in the `MODEL_FAMILY_FRIENDLY_NAME` object

6. **Update Key Management Provider Files:**
    - For OpenAI keys in `src/shared/key-management/openai/provider.ts`, add token counters for the new models
    - For Azure OpenAI keys in `src/shared/key-management/azure/provider.ts`, add token counters for the Azure versions

### Model Patterns and Versioning

The codebase handles several patterns for model naming and versioning:

1. **Date-stamped Models:** Many models include date stamps (e.g., `gpt-4-0125-preview`). The regex patterns in `OPENAI_MODEL_FAMILY_MAP` account for these with patterns like `^gpt-4o(-\\d{4}-\\d{2}-\\d{2})?$`.

2. **O-Series Models:** OpenAI's o-series models (o1, o1-mini, o1-pro, o3, o3-mini, o3-pro, o4-mini) follow a different naming convention. The codebase handles these with dedicated model families and regex patterns.

3. **Preview/Non-Preview Variants:** Some models have preview variants (e.g., `gpt-4.5-preview`). The regex patterns in `OPENAI_MODEL_FAMILY_MAP` account for these with patterns like `^gpt-4\\.5(-preview)?(-\\d{4}-\\d{2}-\\d{2})?$`.

When adding new models, try to follow the existing patterns for consistency.

### Response Format Handling

The codebase includes special handling for different API response formats:

1. **Chat vs. Text Completions:** There's transformation logic in `openai.ts` to convert between chat completions and text completions formats (`transformTurboInstructResponse`).

2. **Newer API Formats:** For newer APIs like the Responses API, there's transformation logic (`transformResponsesApiResponse`) to convert responses to a format compatible with existing clients.

When adding support for new models or APIs, consider whether transformation is needed to maintain compatibility with existing clients.

## Key Management

### Key Pool System

The project uses a sophisticated key pool system (`src/shared/key-management/key-pool.ts`) to manage API keys for different providers. Key features include:

* **Key Selection:** The system selects the appropriate key based on model family, region preferences, and other criteria.
* **Rotation:** Keys are rotated to distribute usage and avoid hitting rate limits.
* **Health Checks:** Keys are checked periodically to ensure they're still valid and within rate limits.

### Provider-Specific Key Management

Each provider has its own key management module in `src/shared/key-management/`:

* **Key Checkers:** Each provider implements key checkers to validate keys and check their status.
* **Token Counters:** Providers implement token counting logic specific to their pricing model.
* **Models Support:** Keys are associated with specific model families they support.

When adding a new model or provider, you'll need to update or create the appropriate key management files.

### Key Rotation and Health Checks

The key pool system includes logic for:

* **Rotation Strategy:** Keys are selected based on a prioritization strategy (`prioritize-keys.ts`).
* **Disabling Unhealthy Keys:** Keys that fail health checks are temporarily disabled.
* **Rate Limit Awareness:** The system tracks usage to avoid hitting provider rate limits.

## Data Management

### Database (`src/shared/database/`)

* Likely uses Prisma or a similar ORM.
* Defines database schemas (e.g., for users, API keys, usage logs).
* Provides functions for interacting with the database.
* Configuration is managed in `src/config.ts`.

### File Storage (`src/shared/file-storage/`)

* May be used for storing logs, cached data, or user-uploaded files.
* Could integrate with local storage or cloud providers (e.g., S3, GCS).

## Authentication & Authorization

* **User Auth:** Handled in `src/user/` potentially using sessions (`src/shared/with-session.ts`) or JWTs.
* **Proxy Auth:** The `gatekeeper.ts` middleware likely verifies incoming requests to the proxy endpoints. This could involve checking:
    * Custom API keys stored in the database (`src/shared/database/`).
    * Specific tokens (`check-risu-token.ts`).
    * HMAC signatures (`src/shared/hmac-signing.ts`).
    * Origin checks (`check-origin.ts`).
* **Downstream Auth:** Each provider module (`src/proxy/*.ts`) handles authentication with the actual AI service API using keys from the configuration.

## Logging & Monitoring

* **Logging:** Configured in `src/logger.ts`, likely using a library like `pino` or `winston`. Logs requests, errors, and important events.
* **Prompt Logging:** Specific logic for logging prompts and responses might exist in `src/shared/prompt-logging/`.
* **Stats/Monitoring:** `src/shared/stats.ts` might handle collecting and exposing application metrics.

## Deployment

* **Docker:** The project likely includes Docker configuration for containerized deployment.
* **Render:** The `render.yaml` file suggests the project is or can be deployed on Render.
* **Environment Variables:** The `.env.example` file provides a template for required environment variables in production.

## Contributing

When contributing to this project:

1. **Follow Coding Standards:** Use the established patterns and standards in the codebase. The `.prettierrc` file defines code formatting rules.
2. **Update Documentation:** Keep this guide updated when adding new components or changing existing ones.
3. **Add Tests:** Ensure your changes are tested appropriately.
4. **Update Configuration:** If your changes require new environment variables, update `.env.example`.

*This guide provides a high-level overview. For detailed information, refer to the specific source code files.*
````
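The regex-based family mapping described in the guide can be sketched as follows (illustrative only, built from the patterns quoted in the guide; the real `OPENAI_MODEL_FAMILY_MAP` in `src/shared/models.ts` may be shaped differently):

```ts
// Illustrative sketch of a regex-keyed model-family map, based on the patterns
// quoted in the guide; the actual structure in src/shared/models.ts may differ.
type OpenAIModelFamily = "turbo" | "gpt4" | "gpt4o" | "gpt45" | "o1";

const OPENAI_MODEL_FAMILY_MAP: Record<string, OpenAIModelFamily> = {
  // Optional date-stamp suffix handles versions like gpt-4o-2024-05-13.
  "^gpt-4o(-\\d{4}-\\d{2}-\\d{2})?$": "gpt4o",
  // Optional -preview segment handles gpt-4.5-preview and dated variants.
  "^gpt-4\\.5(-preview)?(-\\d{4}-\\d{2}-\\d{2})?$": "gpt45",
  "^o1(-\\d{4}-\\d{2}-\\d{2})?$": "o1",
};

// Resolve a model name to its family by testing each pattern in order.
function getOpenAIModelFamily(model: string): OpenAIModelFamily | undefined {
  for (const [pattern, family] of Object.entries(OPENAI_MODEL_FAMILY_MAP)) {
    if (new RegExp(pattern).test(model)) return family;
  }
  return undefined;
}

// e.g. getOpenAIModelFamily("gpt-4o-2024-05-13") === "gpt4o"
```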
## `README.md`

```diff
@@ -1,16 +1,20 @@
-# OAI Reverse Proxy
+# OAI Reverse Proxy - just a shitty fork
 
 Reverse proxy server for various LLM APIs.
 
 ### Table of Contents
-- [What is this?](#what-is-this)
-- [Features](#features)
-- [Usage Instructions](#usage-instructions)
-- [Self-hosting](#self-hosting)
-- [Alternatives](#alternatives)
-- [Huggingface (outdated, not advised)](#huggingface-outdated-not-advised)
-- [Render (outdated, not advised)](#render-outdated-not-advised)
-- [Local Development](#local-development)
+<!-- TOC -->
+* [OAI Reverse Proxy](#oai-reverse-proxy)
+  * [Table of Contents](#table-of-contents)
+  * [What is this?](#what-is-this)
+  * [Features](#features)
+  * [Usage Instructions](#usage-instructions)
+    * [Personal Use (single-user)](#personal-use-single-user)
+      * [Updating](#updating)
+      * [Local Development](#local-development)
+  * [Self-hosting](#self-hosting)
+  * [Building](#building)
+  * [Forking](#forking)
+<!-- TOC -->
 
 ## What is this?
 This project allows you to run a reverse proxy server for various LLM APIs.
@@ -19,7 +23,8 @@ This project allows you to run a reverse proxy server for various LLM APIs.
 - [x] Support for multiple APIs
   - [x] [OpenAI](https://openai.com/)
   - [x] [Anthropic](https://www.anthropic.com/)
-  - [x] [AWS Bedrock](https://aws.amazon.com/bedrock/)
+  - [x] [AWS Bedrock](https://aws.amazon.com/bedrock/) (Claude4 is fucked, dont care)
+  - [x] [Vertex AI (GCP)](https://cloud.google.com/vertex-ai/)
   - [x] [Google MakerSuite/Gemini API](https://ai.google.dev/)
   - [x] [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
 - [x] Translation from OpenAI-formatted prompts to any other API, including streaming responses
@@ -28,43 +33,40 @@
 - [x] Simple role-based permissions
 - [x] Per-model token quotas
 - [x] Temporary user accounts
-- [x] Prompt and completion logging
+- [x] Event audit logging
+- [x] Optional full logging of prompts and completions
 - [x] Abuse detection and prevention
-
----
+  - [x] IP address and user token model invocation rate limits
+  - [x] IP blacklists
+  - [x] Proof-of-work challenge for access by anonymous users
 
 ## Usage Instructions
 If you'd like to run your own instance of this server, you'll need to deploy it somewhere and configure it with your API keys. A few easy options are provided below, though you can also deploy it to any other service you'd like if you know what you're doing and the service supports Node.js.
 
+### Personal Use (single-user)
+If you just want to run the proxy server to use yourself without hosting it for others:
+1. Install [Node.js](https://nodejs.org/en/download/) >= 18.0.0
+2. Clone this repository
+3. Create a `.env` file in the root of the project and add your API keys. See the [.env.example](./.env.example) file for an example.
+4. Install dependencies with `npm install`
+5. Run `npm run build`
+6. Run `npm start`
+
+#### Updating
+You must re-run `npm install` and `npm run build` whenever you pull new changes from the repository.
+
+#### Local Development
+Use `npm run start:dev` to run the proxy in development mode with watch mode enabled. Use `npm run type-check` to run the type checker across the project.
+
 ### Self-hosting
-[See here for instructions on how to self-host the application on your own VPS or local machine.](./docs/self-hosting.md)
+[See here for instructions on how to self-host the application on your own VPS or local machine and expose it to the internet for others to use.](./docs/self-hosting.md)
 
 **Ensure you set the `TRUSTED_PROXIES` environment variable according to your deployment.** Refer to [.env.example](./.env.example) and [config.ts](./src/config.ts) for more information.
 
-### Alternatives
-Fiz and Sekrit are working on some alternative ways to deploy this conveniently. While I'm not involved in this effort beyond providing technical advice regarding my code, I'll link to their work here for convenience: [Sekrit's rentry](https://rentry.org/sekrit)
-
-### Huggingface (outdated, not advised)
-[See here for instructions on how to deploy to a Huggingface Space.](./docs/deploy-huggingface.md)
-
-### Render (outdated, not advised)
-[See here for instructions on how to deploy to Render.com.](./docs/deploy-render.md)
-
-## Local Development
-To run the proxy locally for development or testing, install Node.js >= 18.0.0 and follow the steps below.
-
-1. Clone the repo
-2. Install dependencies with `npm install`
-3. Create a `.env` file in the root of the project and add your API keys. See the [.env.example](./.env.example) file for an example.
-4. Start the server in development mode with `npm run start:dev`.
-
-You can also use `npm run start:dev:tsc` to enable project-wide type checking at the cost of slower startup times. `npm run type-check` can be used to run type checking without starting the server.
-
 ## Building
-To build the project, run `npm run build`. This will compile the TypeScript code to JavaScript and output it to the `build` directory.
+To build the project, run `npm run build`. This will compile the TypeScript code to JavaScript and output it to the `build` directory. You should run this whenever you pull new changes from the repository.
 
 Note that if you are trying to build the server on a very memory-constrained (<= 1GB) VPS, you may need to run the build with `NODE_OPTIONS=--max_old_space_size=2048 npm run build` to avoid running out of memory during the build process, assuming you have swap enabled. The application itself should run fine on a 512MB VPS for most reasonable traffic levels.
 
 ## Forking
 
 If you are forking the repository on GitGud, you may wish to disable GitLab CI/CD or you will be spammed with emails about failed builds due not having any CI runners. You can do this by going to *Settings > General > Visibility, project features, permissions* and then disabling the "CI/CD" feature.
```
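Condensed into commands, the single-user setup added above is roughly (assuming Node.js >= 18 is installed; the clone URL is a placeholder):

```bash
# Personal-use quick start, per the steps added to the README above.
# The clone URL is a placeholder; use this fork's actual URL.
git clone https://gitgud.io/your-user/oai-reverse-proxy.git
cd oai-reverse-proxy
cp .env.example .env   # then edit .env and add your API keys
npm install            # re-run after every pull
npm run build          # re-run after every pull
npm start
```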
@@ -17,9 +17,8 @@ ARG GREETING_URL
|
|||||||
RUN if [ -n "$GREETING_URL" ]; then \
|
RUN if [ -n "$GREETING_URL" ]; then \
|
||||||
curl -sL "$GREETING_URL" > greeting.md; \
|
curl -sL "$GREETING_URL" > greeting.md; \
|
||||||
fi
|
fi
|
||||||
COPY package*.json greeting.md* ./
|
|
||||||
RUN npm install
|
|
||||||
COPY . .
|
COPY . .
|
||||||
|
RUN npm install
|
||||||
RUN npm run build
|
RUN npm run build
|
||||||
RUN --mount=type=secret,id=_env,dst=/etc/secrets/.env cat /etc/secrets/.env >> .env
|
RUN --mount=type=secret,id=_env,dst=/etc/secrets/.env cat /etc/secrets/.env >> .env
|
||||||
EXPOSE 10000
|
EXPOSE 10000
|
||||||
```diff
@@ -1,6 +1,6 @@
 # Deploy to Render.com
 
-**⚠️ This method is no longer recommended. Please use the [self-hosting instructions](./self-hosting.md) instead.**
+**⚠️ This method is no longer supported or recommended and may not work. Please use the [self-hosting instructions](./self-hosting.md) instead.**
 
 Render.com offers a free tier that includes 750 hours of compute time per month. This is enough to run a single proxy instance 24/7. Instances shut down after 15 minutes without traffic but start up again automatically when a request is received. You can use something like https://app.checklyhq.com/ to ping your proxy every 15 minutes to keep it alive.
```
@@ -0,0 +1,35 @@
# Configuring the proxy for Vertex AI (GCP)

The proxy supports GCP models via the `/proxy/gcp/claude` endpoint. There are a few extra steps necessary to use GCP compared to the other supported APIs.

- [Setting keys](#setting-keys)
- [Setup Vertex AI](#setup-vertex-ai)
- [Supported model IDs](#supported-model-ids)

## Setting keys

Use the `GCP_CREDENTIALS` environment variable to set the GCP API keys.

Like other APIs, you can provide multiple keys separated by commas. Each GCP key, however, is a set of credentials including the project id, client email, region and private key. These are separated by a colon (`:`).

For example:

```
GCP_CREDENTIALS=my-first-project:xxx@yyy.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----,my-first-project2:xxx2@yyy.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----
```
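Since each credential packs four colon-delimited fields, and a PEM private key body normally contains no colons, splitting such a string is straightforward. A minimal TypeScript sketch, not the proxy's actual parser:

```ts
interface GcpCredential {
  projectId: string;
  clientEmail: string;
  region: string;
  privateKey: string;
}

// Splits "project:email:region:key,project2:..." into credential objects.
function parseGcpCredentials(raw: string): GcpCredential[] {
  return raw.split(",").map((entry) => {
    const [projectId, clientEmail, region, ...rest] = entry.split(":");
    // Rejoin the remainder in case the key ever contains a colon.
    return { projectId, clientEmail, region, privateKey: rest.join(":") };
  });
}
```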
## Setup Vertex AI

1. Go to [https://cloud.google.com/vertex-ai](https://cloud.google.com/vertex-ai) and sign up for a GCP account. ($150 of free credits without a credit card, or $300 of free credits with a credit card; credits expire in 90 days.)
2. Go to [https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com](https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com) to enable the Vertex AI API.
3. Go to [https://console.cloud.google.com/vertex-ai](https://console.cloud.google.com/vertex-ai) and navigate to the Model Garden to apply for access to the Claude models.
4. Create a [Service Account](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts/create?walkthrough_id=iam--create-service-account#step_index=1) and make sure to grant it the "Vertex AI User" or "Vertex AI Administrator" role.
5. On the page of the service account you just created, create a new key and select "JSON". The JSON file will be downloaded automatically.
6. The required credentials are in the JSON file you just downloaded.

## Supported model IDs

Users can send these model IDs to the proxy to invoke the corresponding models. An example request follows the list.

- **Claude**
  - `claude-3-haiku@20240307`
  - `claude-3-sonnet@20240229`
  - `claude-3-opus@20240229`
  - `claude-3-5-sonnet@20240620`
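For illustration, assuming the proxy runs locally, a native completion request to the GCP endpoint might look like the following (host, port, and key are placeholders; the shape mirrors the `.http` request examples added elsewhere in this changeset):

```
POST http://localhost:7860/proxy/gcp/claude/v1/complete
Authorization: Bearer <proxy-key>
anthropic-version: 2023-01-01
Content-Type: application/json

{
  "model": "claude-3-5-sonnet@20240620",
  "max_tokens_to_sample": 64,
  "prompt": "\n\nHuman: Hello\n\nAssistant:"
}
```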
+1 -1
```diff
@@ -129,7 +129,7 @@ also significantly reduce hash rates on mobile devices.
 - Intel Core i9-13900K (Chrome, in VM limited to 4 cores): 12.2 - 13.0 H/s
 - iPad Pro (M2) (Safari, 6 workers): 8.0 - 10 H/s
   - Thermal throttles early. 8 cores is normal concurrency, but unstable.
-- iPhone 13 Pro (Safari): 4.0 - 4.6 H/s
+- iPhone 15 Pro Max (Safari): 4.0 - 4.6 H/s
 - Samsung Galaxy S10e (Chrome): 3.6 - 3.8 H/s
   - This is a 2019 phone almost matching an iPhone five years newer because of
     bad Safari performance.
```
+13 -1
```diff
@@ -12,6 +12,7 @@ Several of these features require you to set secrets in your environment. If usi
 - [Memory](#memory)
 - [Firebase Realtime Database](#firebase-realtime-database)
   - [Firebase setup instructions](#firebase-setup-instructions)
+- [SQLite Database](#sqlite-database)
 - [Whitelisting admin IP addresses](#whitelisting-admin-ip-addresses)
 
 ## No user management (`GATEKEEPER=none`)
@@ -63,6 +64,17 @@ To use Firebase Realtime Database to persist user data, set the following enviro
 
 The proxy server will attempt to connect to your Firebase Realtime Database at startup and will throw an error if it cannot connect. If you see this error, check that your `FIREBASE_RTDB_URL` and `FIREBASE_KEY` secrets are set correctly.
 
+### SQLite Database
+
+To use a local SQLite database file to persist user data, set the following environment variables:
+
+- `GATEKEEPER_STORE`: Set this to `sqlite`.
+- `SQLITE_USER_STORE_PATH` (Optional): Specifies the path to the SQLite database file.
+  - If not set, it defaults to `data/user-store.sqlite` within the project directory.
+  - Ensure that the directory where the SQLite file will be created (e.g., the `data/` directory) is writable by the application process.
+
+Using SQLite provides a simple way to persist user data locally without relying on external services. User data will be saved to the specified file and will be available across server restarts.
+
 ## Whitelisting admin IP addresses
 You can add your own IP ranges to the `ADMIN_WHITELIST` environment variable for additional security.
 
@@ -70,4 +82,4 @@ You can provide a comma-separated list containing individual IPv4 or IPv6 addres
 
 To whitelist an entire IP range, use CIDR notation. For example, `192.168.0.1/24` would whitelist all addresses from `192.168.0.0` to `192.168.0.255`.
 
-To disable the whitelist, set `ADMIN_WHITELIST=0.0.0.0/0`, which will allow access from any IP address. This is the default behavior.
+To disable the whitelist, set `ADMIN_WHITELIST=0.0.0.0/0,::0`, which will allow access from any IPv4 or IPv6 address. This is the default behavior.
```
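For instance, a minimal `.env` configuration enabling the new SQLite store might look like this (values illustrative):

```
GATEKEEPER=user_token
GATEKEEPER_STORE=sqlite
# Optional; defaults to data/user-store.sqlite
SQLITE_USER_STORE_PATH=data/user-store.sqlite
```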
Generated file: +1660 -575 (diff suppressed because it is too large)
+19 -12
```diff
@@ -5,10 +5,11 @@
   "scripts": {
     "build": "tsc && copyfiles -u 1 src/**/*.ejs build",
     "database:migrate": "ts-node scripts/migrate.ts",
+    "postinstall": "patch-package",
     "prepare": "husky install",
-    "start": "node build/server.js",
+    "start": "node --trace-deprecation --trace-warnings build/server.js",
     "start:dev": "nodemon --watch src --exec ts-node --transpile-only src/server.ts",
-    "start:replit": "tsc && node build/server.js",
+    "start:debug": "ts-node --inspect --transpile-only src/server.ts",
     "start:watch": "nodemon --require source-map-support/register build/server.js",
     "type-check": "tsc --noEmit"
   },
@@ -20,14 +21,14 @@
   "dependencies": {
     "@anthropic-ai/tokenizer": "^0.0.4",
     "@aws-crypto/sha256-js": "^5.2.0",
+    "@huggingface/jinja": "^0.3.0",
     "@node-rs/argon2": "^1.8.3",
     "@smithy/eventstream-codec": "^2.1.3",
     "@smithy/eventstream-serde-node": "^2.1.3",
     "@smithy/protocol-http": "^3.2.1",
     "@smithy/signature-v4": "^2.1.3",
-    "@smithy/types": "^2.10.1",
     "@smithy/util-utf8": "^2.1.1",
-    "axios": "^1.3.5",
+    "axios": "^1.7.4",
     "better-sqlite3": "^10.0.0",
     "check-disk-space": "^3.4.0",
     "cookie-parser": "^1.4.6",
@@ -36,30 +37,35 @@
     "csrf-csrf": "^2.3.0",
     "dotenv": "^16.3.1",
     "ejs": "^3.1.10",
-    "express": "^4.18.2",
+    "express": "^4.19.3",
     "express-session": "^1.17.3",
-    "firebase-admin": "^12.1.0",
+    "firebase-admin": "^12.5.0",
     "glob": "^10.3.12",
     "googleapis": "^122.0.0",
-    "http-proxy-middleware": "^3.0.0-beta.1",
+    "http-proxy": "1.18.1",
+    "http-proxy-middleware": "^3.0.2",
     "ipaddr.js": "^2.1.0",
     "memorystore": "^1.6.7",
     "multer": "^1.4.5-lts.1",
     "node-schedule": "^2.1.1",
+    "patch-package": "^8.0.0",
     "pino": "^8.11.0",
     "pino-http": "^8.3.3",
-    "sanitize-html": "2.12.1",
-    "sharp": "^0.32.6",
+    "proxy-agent": "^6.4.0",
+    "sanitize-html": "^2.13.0",
+    "sharp": "^0.34.2",
     "showdown": "^2.1.0",
     "source-map-support": "^0.5.21",
     "stream-json": "^1.8.0",
     "tiktoken": "^1.0.10",
+    "tinyws": "^0.1.0",
     "uuid": "^9.0.0",
     "zlib": "^1.0.5",
     "zod": "^3.22.3",
     "zod-error": "^1.5.0"
   },
   "devDependencies": {
+    "@smithy/types": "^3.3.0",
     "@types/better-sqlite3": "^7.6.10",
     "@types/cookie-parser": "^1.4.3",
     "@types/cors": "^2.8.13",
@@ -72,7 +78,7 @@
     "@types/stream-json": "^1.7.7",
     "@types/uuid": "^9.0.1",
     "concurrently": "^8.0.1",
-    "esbuild": "^0.17.16",
+    "esbuild": "^0.25.5",
     "esbuild-register": "^3.4.2",
     "husky": "^8.0.3",
     "nodemon": "^3.0.1",
@@ -83,7 +89,8 @@
     "typescript": "^5.4.2"
   },
   "overrides": {
-    "postcss": "^8.4.31",
-    "follow-redirects": "^1.15.4"
+    "node-fetch@2.x": {
+      "whatwg-url": "14.x"
+    }
   }
 }
```
@@ -0,0 +1,23 @@
# Patches

Contains monkey patches for certain packages, applied using `patch-package`.

## `http-proxy+1.18.1.patch`

Modifies the `http-proxy` package to work around an incompatibility with body-parser and SOCKS5 proxies due to some esoteric stream handling behavior when `socks-proxy-agent` is used instead of a generic http.Agent.

The modification adjusts the `buffer` property on ProxyServer's `options` object to be a function that returns a stream, instead of a stream itself. This allows us to give it a function which produces a new Readable from the already-parsed request body.

With the old implementation we would need to create an entirely new ProxyServer instance for each request, which is not ideal under heavy load.

`http-proxy` hasn't been updated in six years, so it's unlikely that this patch will be broken by future updates, but it's still pinned to 1.18.1 for now. A usage sketch follows; the actual patch is reproduced after it.

### See also

https://github.com/chimurai/http-proxy-middleware/issues/40
https://github.com/chimurai/http-proxy-middleware/issues/299
https://github.com/http-party/node-http-proxy/pull/1027
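To illustrate the patched contract, a hypothetical caller-side sketch; the names and body handling are illustrative, not the proxy's actual code:

```ts
import { Readable } from "stream";
import type { IncomingMessage } from "http";

// With the patch, `options.buffer` is called with the request, so a single
// long-lived ProxyServer can re-stream each already-parsed request body.
function bodyStream(req: IncomingMessage & { body?: unknown }): Readable {
  return Readable.from(Buffer.from(JSON.stringify(req.body ?? {})));
}

// Hypothetical usage:
// proxy.web(req, res, { target, buffer: bodyStream });
```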
@@ -0,0 +1,13 @@
```diff
diff --git a/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js b/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
index 7ae7355..c825c27 100644
--- a/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
+++ b/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
@@ -167,7 +167,7 @@ module.exports = {
     }
   }
 
-  (options.buffer || req).pipe(proxyReq);
+  (options.buffer(req) || req).pipe(proxyReq);
 
   proxyReq.on('response', function(proxyRes) {
     if(server) { server.emit('proxyRes', proxyRes, req, res); }
```
```diff
@@ -30,7 +30,6 @@ self.onmessage = async (event) => {
     nonce = data.nonce;
 
     const c = data.challenge;
-    // decode salt to Uint8Array
     const salt = new Uint8Array(c.s.length / 2);
     for (let i = 0; i < c.s.length; i += 2) {
       salt[i / 2] = parseInt(c.s.slice(i, i + 2), 16);
@@ -99,7 +98,7 @@ const solve = async () => {
       self.postMessage({ type: "solved", nonce: solution.nonce });
       active = false;
     } else {
-      if (Date.now() - lastNotify > 1000) {
+      if (Date.now() - lastNotify >= 500) {
        console.log("Last nonce", nonce, "Hashes", hashesSinceLastNotify);
        self.postMessage({ type: "progress", hashes: hashesSinceLastNotify });
        lastNotify = Date.now();
```
```diff
@@ -230,6 +230,39 @@ Content-Type: application/json
   ]
 }
 
+###
+# @name Proxy / GCP Claude -- Native Completion
+POST {{proxy-host}}/proxy/gcp/claude/v1/complete
+Authorization: Bearer {{proxy-key}}
+anthropic-version: 2023-01-01
+Content-Type: application/json
+
+{
+  "model": "claude-v2",
+  "max_tokens_to_sample": 10,
+  "temperature": 0,
+  "stream": true,
+  "prompt": "What is genshin impact\n\n:Assistant:"
+}
+
+###
+# @name Proxy / GCP Claude -- OpenAI-to-Anthropic API Translation
+POST {{proxy-host}}/proxy/gcp/claude/chat/completions
+Authorization: Bearer {{proxy-key}}
+Content-Type: application/json
+
+{
+  "model": "gpt-3.5-turbo",
+  "max_tokens": 50,
+  "stream": true,
+  "messages": [
+    {
+      "role": "user",
+      "content": "What is genshin impact?"
+    }
+  ]
+}
+
 ###
 # @name Proxy / Azure OpenAI -- Native Chat Completions
 POST {{proxy-host}}/proxy/azure/openai/chat/completions
```
```diff
@@ -51,6 +51,8 @@ function getRandomModelFamily() {
     "mistral-large",
     "aws-claude",
     "aws-claude-opus",
+    "gcp-claude",
+    "gcp-claude-opus",
     "azure-turbo",
     "azure-gpt4",
     "azure-gpt4-32k",
```
@@ -0,0 +1,118 @@
```ts
// uses the aws sdk to sign a request, then uses axios to send it to the bedrock REST API manually
import axios from "axios";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";

const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID!;
const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY!;

// Copied from amazon bedrock docs

// List models
// ListFoundationModels
// Service: Amazon Bedrock
// List of Bedrock foundation models that you can use. For more information, see Foundation models in the
// Bedrock User Guide.
// Request Syntax
// GET /foundation-models?
//   byCustomizationType=byCustomizationType&byInferenceType=byInferenceType&byOutputModality=byOutputModality&byProvider=byProvider
// HTTP/1.1
// URI Request Parameters
// The request uses the following URI parameters.
// byCustomizationType (p. 38)
//   List by customization type.
//   Valid Values: FINE_TUNING
// byInferenceType (p. 38)
//   List by inference type.
//   Valid Values: ON_DEMAND | PROVISIONED
// byOutputModality (p. 38)
//   List by output modality type.
//   Valid Values: TEXT | IMAGE | EMBEDDING
// byProvider (p. 38)
//   A Bedrock model provider.
//   Pattern: ^[a-z0-9-]{1,63}$
// Request Body
// The request does not have a request body

// Run inference on a text model
// Send an invoke request to run inference on a Titan Text G1 - Express model. We set the accept
// parameter to accept any content type in the response.
// POST https://bedrock.us-east-1.amazonaws.com/model/amazon.titan-text-express-v1/invoke
// -H accept: */*
// -H content-type: application/json
// Payload
// {"inputText": "Hello world"}
// Example response
// Response for the above request.
// -H content-type: application/json
// Payload
// <the model response>

const AMZ_REGION = "us-east-1";
const AMZ_HOST = "invoke-bedrock.us-east-1.amazonaws.com";

async function listModels() {
  const httpRequest = new HttpRequest({
    method: "GET",
    protocol: "https:",
    hostname: AMZ_HOST,
    path: "/foundation-models",
    headers: { ["Host"]: AMZ_HOST },
  });

  const signedRequest = await signRequest(httpRequest);
  const response = await axios.get(
    `https://${signedRequest.hostname}${signedRequest.path}`,
    { headers: signedRequest.headers }
  );
  console.log(response.data);
}

async function invokeModel() {
  const model = "anthropic.claude-v1";
  const httpRequest = new HttpRequest({
    method: "POST",
    protocol: "https:",
    hostname: AMZ_HOST,
    path: `/model/${model}/invoke`,
    headers: {
      ["Host"]: AMZ_HOST,
      ["accept"]: "*/*",
      ["content-type"]: "application/json",
    },
    body: JSON.stringify({
      temperature: 0.5,
      prompt: "\n\nHuman:Hello world\n\nAssistant:",
      max_tokens_to_sample: 10,
    }),
  });
  console.log("httpRequest", httpRequest);

  const signedRequest = await signRequest(httpRequest);
  const response = await axios.post(
    `https://${signedRequest.hostname}${signedRequest.path}`,
    signedRequest.body,
    { headers: signedRequest.headers }
  );
  console.log(response.status);
  console.log(response.headers);
  console.log(response.data);
  console.log("full url", response.request.res.responseUrl);
}

async function signRequest(request: HttpRequest) {
  const signer = new SignatureV4({
    sha256: Sha256,
    credentials: {
      accessKeyId: AWS_ACCESS_KEY_ID,
      secretAccessKey: AWS_SECRET_ACCESS_KEY,
    },
    region: AMZ_REGION,
    service: "bedrock",
  });
  return await signer.sign(request, { signingDate: new Date() });
}

// listModels();
// invokeModel();
```
@@ -0,0 +1,53 @@
```js
const axios = require("axios");

function randomInteger(max) {
  return Math.floor(Math.random() * max + 1);
}

async function testQueue() {
  const requests = Array(10).fill(undefined).map(async function () {
    const maxTokens = randomInteger(2000);

    const headers = {
      "Authorization": "Bearer test",
      "Content-Type": "application/json",
      "X-Forwarded-For": `${randomInteger(255)}.${randomInteger(255)}.${randomInteger(255)}.${randomInteger(255)}`,
    };

    const payload = {
      model: "gpt-4o-mini-2024-07-18",
      max_tokens: 20 + maxTokens,
      stream: false,
      messages: [{ role: "user", content: "You are being benchmarked regarding your reliability at outputting exact, machine-comprehensible data. Output the sentence \"The quick brown fox jumps over the lazy dog.\" Do not precede it with quotemarks or any form of preamble, and do not output anything after the sentence." }],
      temperature: 0,
    };

    try {
      const response = await axios.post(
        "http://localhost:7860/proxy/openai/v1/chat/completions",
        payload,
        { headers }
      );

      if (response.status !== 200) {
        // (fixed a template-literal typo here; the original read `{$maxTokens}`)
        console.error(`Request ${maxTokens} finished with status code ${response.status} and response`, response.data);
        return;
      }

      const content = response.data.choices[0].message.content;

      console.log(
        `Request ${maxTokens} `,
        content === "The quick brown fox jumps over the lazy dog." ? "OK" : `mangled: ${content}`
      );
    } catch (error) {
      const msg = error.response;
      console.error(`Error in req ${maxTokens}:`, error.message, msg || "");
    }
  });

  await Promise.all(requests);
  console.log("All requests finished");
}

testQueue();
```
```diff
@@ -1,7 +1,7 @@
 import { Router } from "express";
 import { z } from "zod";
 import { encodeCursor, decodeCursor } from "../../shared/utils";
-import { eventsRepo } from "../../shared/database/repos/events";
+import { eventsRepo } from "../../shared/database/repos/event";
 
 const router = Router();
```
@@ -2,7 +2,7 @@ import { Router } from "express";
|
|||||||
import { z } from "zod";
|
import { z } from "zod";
|
||||||
import * as userStore from "../../shared/users/user-store";
|
import * as userStore from "../../shared/users/user-store";
|
||||||
import { parseSort, sortBy } from "../../shared/utils";
|
import { parseSort, sortBy } from "../../shared/utils";
|
||||||
import { UserPartialSchema, UserSchema } from "../../shared/database/repos/users";
|
import { UserPartialSchema, UserSchema } from "../../shared/users/schema";
|
||||||
|
|
||||||
const router = Router();
|
const router = Router();
|
||||||
|
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ import { eventsApiRouter } from "./api/events";
|
|||||||
import { usersApiRouter } from "./api/users";
|
import { usersApiRouter } from "./api/users";
|
||||||
import { usersWebRouter as webRouter } from "./web/manage";
|
import { usersWebRouter as webRouter } from "./web/manage";
|
||||||
import { logger } from "../logger";
|
import { logger } from "../logger";
|
||||||
|
import { keyPool } from "../shared/key-management";
|
||||||
|
|
||||||
const adminRouter = Router();
|
const adminRouter = Router();
|
||||||
|
|
||||||
@@ -36,6 +37,43 @@ adminRouter.use(injectCsrfToken);
|
|||||||
adminRouter.use("/users", authorize({ via: "header" }), usersApiRouter);
|
adminRouter.use("/users", authorize({ via: "header" }), usersApiRouter);
|
||||||
adminRouter.use("/events", authorize({ via: "header" }), eventsApiRouter);
|
adminRouter.use("/events", authorize({ via: "header" }), eventsApiRouter);
|
||||||
|
|
||||||
|
// Special endpoint to validate organization verification status for all OpenAI keys
|
||||||
|
// This checks both gpt-image-1 and o3 streaming access which require verified organizations
|
||||||
|
adminRouter.post("/validate-gpt-image-keys", authorize({ via: "header" }), async (req, res) => {
|
||||||
|
try {
|
||||||
|
logger.info("Manual validation of organization verification status initiated");
|
||||||
|
|
||||||
|
// Use the specialized validation function that tests each key's organization verification
|
||||||
|
// status using o3 streaming and waits for the results
|
||||||
|
const results = await keyPool.validateGptImageAccess();
|
||||||
|
|
||||||
|
logger.info({
|
||||||
|
total: results.total,
|
||||||
|
verified: results.verified.length,
|
||||||
|
removed: results.removed.length,
|
||||||
|
errors: results.errors.length
|
||||||
|
}, "Manual organization verification check completed");
|
||||||
|
|
||||||
|
return res.json({
|
||||||
|
success: true,
|
||||||
|
message: "Organization verification check completed",
|
||||||
|
results: {
|
||||||
|
total: results.total,
|
||||||
|
verified: results.verified.length,
|
||||||
|
removed: results.removed.length,
|
||||||
|
errors: results.errors.length,
|
||||||
|
// Only include hashes, not full keys
|
||||||
|
verified_keys: results.verified,
|
||||||
|
removed_keys: results.removed,
|
||||||
|
error_details: results.errors
|
||||||
|
}
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logger.error({ error }, "Error validating organization verification status for OpenAI keys");
|
||||||
|
return res.status(500).json({ error: "Failed to validate keys", details: error.message });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
adminRouter.use(checkCsrfToken);
|
adminRouter.use(checkCsrfToken);
|
||||||
adminRouter.use(injectLocals);
|
adminRouter.use(injectLocals);
|
||||||
adminRouter.use("/", loginRouter);
|
adminRouter.use("/", loginRouter);
|
||||||
|
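For reference, a hypothetical invocation of the new endpoint; the `/admin` mount point, port, and bearer-style header are assumptions based on the surrounding router code, not documented behavior:

```ts
// Hypothetical admin-side call to trigger the verification check.
await fetch("http://localhost:7860/admin/validate-gpt-image-keys", {
  method: "POST",
  headers: { Authorization: "Bearer <ADMIN_KEY>" },
});
```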
+101 -24
```diff
@@ -9,10 +9,15 @@ import { parseSort, sortBy, paginate } from "../../shared/utils";
 import { keyPool } from "../../shared/key-management";
 import { LLMService, MODEL_FAMILIES } from "../../shared/models";
 import { getTokenCostUsd, prettyTokens } from "../../shared/stats";
+import {
+  User,
+  UserPartialSchema,
+  UserSchema,
+  UserTokenCounts,
+} from "../../shared/users/schema";
 import { getLastNImages } from "../../shared/file-storage/image-history";
 import { blacklists, parseCidrs, whitelists } from "../../shared/cidr";
-import { invalidatePowHmacKey } from "../../user/web/pow-captcha";
-import { User, UserPartialSchema, UserSchema, UserTokenCounts } from "../../shared/database/repos/users";
+import { invalidatePowChallenges } from "../../user/web/pow-captcha";
 
 const router = Router();
@@ -127,10 +132,11 @@ router.post("/create-user", (req, res) => {
     )
     .transform((data: any) => {
       const expiresAt = Date.now() + data.temporaryUserDuration * 60 * 1000;
-      const tokenLimits = MODEL_FAMILIES.reduce((limits, model) => {
-        limits[model] = data[`temporaryUserQuota_${model}`];
+      const tokenLimits = MODEL_FAMILIES.reduce((limits, modelFamily) => {
+        const quotaValue = data[`temporaryUserQuota_${modelFamily}`];
+        limits[modelFamily] = typeof quotaValue === 'number' ? quotaValue : 0;
         return limits;
-      }, {} as UserTokenCounts);
+      }, {} as any);
       return { ...data, expiresAt, tokenLimits };
     });
 
@@ -184,7 +190,70 @@ router.post("/import-users", upload.single("users"), (req, res) => {
   if (!req.file) throw new HttpError(400, "No file uploaded");
 
   const data = JSON.parse(req.file.buffer.toString());
-  const result = z.array(UserPartialSchema).safeParse(data.users);
+
+  // Transform old token count format to new format
+  const transformedUsers = data.users.map((user: any) => {
+    if (user.tokenCounts) {
+      const transformedTokenCounts: any = {};
+      for (const [family, value] of Object.entries(user.tokenCounts)) {
+        if (typeof value === 'number') {
+          // Old format: just a number (legacy_total)
+          transformedTokenCounts[family] = {
+            input: 0,
+            output: 0,
+            legacy_total: value
+          };
+        } else if (typeof value === 'object' && value !== null) {
+          // New format or partially new format
+          const transformedCounts: { input: number; output: number; legacy_total?: number } = {
+            input: (value as any).input || 0,
+            output: (value as any).output || 0
+          };
+          if ((value as any).legacy_total !== undefined) {
+            transformedCounts.legacy_total = (value as any).legacy_total;
+          }
+          transformedTokenCounts[family] = transformedCounts;
+        }
+      }
+      user.tokenCounts = transformedTokenCounts;
+    }
+
+    // Handle tokenLimits - should be flat numbers
+    if (user.tokenLimits) {
+      const transformedTokenLimits: any = {};
+      for (const [family, value] of Object.entries(user.tokenLimits)) {
+        if (typeof value === 'number') {
+          // Already in correct format
+          transformedTokenLimits[family] = value;
+        } else if (typeof value === 'object' && value !== null) {
+          // Old format with input/output/legacy_total - sum them up
+          const val = value as any;
+          transformedTokenLimits[family] = (val.input ?? 0) + (val.output ?? 0) + (val.legacy_total ?? 0);
+        }
+      }
+      user.tokenLimits = transformedTokenLimits;
+    }
+
+    // Handle tokenRefresh - should be flat numbers
+    if (user.tokenRefresh) {
+      const transformedTokenRefresh: any = {};
+      for (const [family, value] of Object.entries(user.tokenRefresh)) {
+        if (typeof value === 'number') {
+          // Already in correct format
+          transformedTokenRefresh[family] = value;
+        } else if (typeof value === 'object' && value !== null) {
+          // Old format with input/output/legacy_total - sum them up
+          const val = value as any;
+          transformedTokenRefresh[family] = (val.input ?? 0) + (val.output ?? 0) + (val.legacy_total ?? 0);
+        }
+      }
+      user.tokenRefresh = transformedTokenRefresh;
+    }
+
+    return user;
+  });
+
+  const result = z.array(UserPartialSchema).safeParse(transformedUsers);
   if (!result.success) throw new HttpError(400, result.error.toString());
 
   const upserts = result.data.map((user) => userStore.upsertUser(user));
@@ -263,7 +332,14 @@ router.post("/maintenance", (req, res) => {
   let flash = { type: "", message: "" };
   switch (action) {
     case "recheck": {
-      const checkable: LLMService[] = ["openai", "anthropic", "aws", "azure"];
+      const checkable: LLMService[] = [
+        "openai",
+        "anthropic",
+        "aws",
+        "gcp",
+        "azure",
+        "google-ai"
+      ];
       checkable.forEach((s) => keyPool.recheck(s));
       const keyCount = keyPool
         .list()
@@ -312,7 +388,7 @@ router.post("/maintenance", (req, res) => {
         user.disabledReason = "Admin forced expiration.";
         userStore.upsertUser(user);
       });
-      invalidatePowHmacKey();
+      invalidatePowChallenges();
       flash.type = "success";
       flash.message = `${temps.length} temporary users marked for expiration.`;
       break;
@@ -333,24 +409,20 @@ router.post("/maintenance", (req, res) => {
     case "setDifficulty": {
       const selected = req.body["pow-difficulty"];
       const valid = ["low", "medium", "high", "extreme"];
-      if (!selected || !valid.includes(selected)) {
-        throw new HttpError(400, "Invalid difficulty" + selected);
+      const isNumber = Number.isInteger(Number(selected));
+      if (!selected || !valid.includes(selected) && !isNumber) {
+        throw new HttpError(400, "Invalid difficulty " + selected);
       }
-      config.powDifficultyLevel = selected;
+      config.powDifficultyLevel = isNumber ? Number(selected) : selected;
+      invalidatePowChallenges();
       break;
     }
     case "generateTempIpReport": {
       const tempUsers = userStore
         .getUsers()
         .filter((u) => u.type === "temporary");
-      const ipv4RangeMap: Map<string, Set<string>> = new Map<
-        string,
-        Set<string>
-      >();
-      const ipv6RangeMap: Map<string, Set<string>> = new Map<
-        string,
-        Set<string>
-      >();
+      const ipv4RangeMap = new Map<string, Set<string>>();
+      const ipv6RangeMap = new Map<string, Set<string>>();
 
       tempUsers.forEach((u) => {
         u.ip.forEach((ip) => {
@@ -360,14 +432,14 @@ router.post("/maintenance", (req, res) => {
           const subnet =
             parsed.toNormalizedString().split(".").slice(0, 3).join(".") +
             ".0/24";
-          const userSet = ipv4RangeMap.get(subnet) || new Set<string>();
+          const userSet = ipv4RangeMap.get(subnet) || new Set();
           userSet.add(u.token);
           ipv4RangeMap.set(subnet, userSet);
         } else if (parsed.kind() === "ipv6") {
           const subnet =
             parsed.toNormalizedString().split(":").slice(0, 4).join(":") +
             "::/48";
-          const userSet = ipv6RangeMap.get(subnet) || new Set<string>();
+          const userSet = ipv6RangeMap.get(subnet) || new Set();
           userSet.add(u.token);
           ipv6RangeMap.set(subnet, userSet);
         }
@@ -539,9 +611,14 @@ router.post("/generate-stats", (req, res) => {
 function getSumsForUser(user: User) {
   const sums = MODEL_FAMILIES.reduce(
     (s, model) => {
-      const tokens = user.tokenCounts[model] ?? 0;
-      s.sumTokens += tokens;
-      s.sumCost += getTokenCostUsd(model, tokens);
+      const counts = user.tokenCounts[model] ?? { input: 0, output: 0 };
+      // Ensure inputTokens and outputTokens are numbers, defaulting to 0 if NaN or undefined
+      const inputTokens = Number(counts.input) || 0;
+      const outputTokens = Number(counts.output) || 0;
+      // We could also consider legacy_total here if input and output are 0
+      // For now, sumTokens and sumCost will be based on current input/output.
+      s.sumTokens += inputTokens + outputTokens;
+      s.sumCost += getTokenCostUsd(model, inputTokens, outputTokens);
       return s;
     },
     { sumTokens: 0, sumCost: 0, prettyUsage: "" }
```
```diff
@@ -38,15 +38,20 @@
 <h3>Difficulty Level</h3>
 <div>
   <label for="difficulty">Difficulty Level:</label>
-  <span id="currentDifficulty">Current: <%= difficulty %></span>
-  <select name="difficulty" id="difficulty">
+  <select name="difficulty" id="difficulty" onchange="difficultyChanged(event)">
     <option value="low">Low</option>
     <option value="medium">Medium</option>
     <option value="high">High</option>
     <option value="extreme">Extreme</option>
+    <option value="custom">Custom</option>
   </select>
+  <div id="custom-difficulty-container" style="display: none">
+    <label for="customDifficulty">Hashes required (average):</label>
+    <input type="number" id="customDifficulty" value="0" min="1" max="1000000000" />
+  </div>
   <button onclick='doAction("setDifficulty")'>Update Difficulty</button>
 </div>
+<div><span id="currentDifficulty">Current Difficulty: <%= difficulty %></span></div>
 <% } %>
 <form id="maintenanceForm" action="/admin/manage/maintenance" method="post">
   <input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
```
```diff
@@ -63,15 +68,15 @@
 <div>
   <h2>IP Whitelists and Blacklists</h2>
   <p>
-    You can specify IP ranges to whitelist or blacklist from accessing the proxy. Note that changes here are not
-    persisted across server restarts. If you want to make changes permanent, you can copy the values to your deployment
-    configuration.
-  </p>
-  <p>
-    Entries can be specified as single addresses or
+    You can specify IP ranges to whitelist or blacklist from accessing the proxy. Entries can be specified as single
+    addresses or
     <a href="https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation">CIDR notation</a>. IPv6 is
     supported but not recommended for use with the current version of the proxy.
   </p>
+  <p>
+    <strong>Note:</strong> Changes here are not persisted across server restarts. If you want to make changes permanent,
+    you can copy the values to your deployment configuration.
+  </p>
   <% for (let i = 0; i < whitelists.length; i++) { %>
   <%- include("partials/admin-cidr-widget", { list: whitelists[i] }) %>
   <% } %>
```
```diff
@@ -99,10 +104,25 @@
 </div>
 
 <script>
+  function difficultyChanged(event) {
+    const value = event.target.value;
+    if (value === "custom") {
+      document.getElementById("custom-difficulty-container").style.display = "block";
+    } else {
+      document.getElementById("custom-difficulty-container").style.display = "none";
+    }
+  }
+
   function doAction(action) {
     document.getElementById("hiddenAction").value = action;
     if (action === "setDifficulty") {
-      document.getElementById("hiddenDifficulty").value = document.getElementById("difficulty").value;
+      const selected = document.getElementById("difficulty").value;
+      const hiddenDifficulty = document.getElementById("hiddenDifficulty");
+      if (selected === "custom") {
+        hiddenDifficulty.value = document.getElementById("customDifficulty").value;
+      } else {
+        hiddenDifficulty.value = selected;
+      }
     }
     document.getElementById("maintenanceForm").submit();
   }
```
```diff
@@ -18,13 +18,19 @@
   </li>
   <li>
     <code>tokenCounts</code> (optional): the number of tokens the user has
-    consumed. This should be an object with keys <code>turbo</code>,
-    <code>gpt4</code>, and <code>claude</code>.
+    consumed. This should be an object with model family keys (e.g. <code>turbo</code>,
+    <code>gpt4</code>, <code>claude</code>), each containing an object with
+    <code>input</code> and <code>output</code> token counts.
   </li>
   <li>
-    <code>tokenLimits</code> (optional): the number of tokens the user can
-    consume. This should be an object with keys <code>turbo</code>,
-    <code>gpt4</code>, and <code>claude</code>.
+    <code>tokenLimits</code> (optional): the maximum number of tokens the user can
+    consume. This should be an object with model family keys (e.g. <code>turbo</code>,
+    <code>gpt4</code>, <code>claude</code>), each containing a single number
+    representing the total token quota.
+  </li>
+  <li>
+    <code>tokenRefresh</code> (optional): the amount of tokens to refresh when quotas
+    are reset. Same format as <code>tokenLimits</code>.
   </li>
   <li>
     <code>createdAt</code> (optional): the timestamp when the user was created
```
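For illustration, a single user record in the new import format might look like the following; the token field shapes match the description above, while the family names and numbers are examples only:

```json
{
  "token": "example-user-token",
  "type": "normal",
  "tokenCounts": { "turbo": { "input": 1200, "output": 3400 } },
  "tokenLimits": { "turbo": 100000 },
  "tokenRefresh": { "turbo": 50000 }
}
```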
```diff
@@ -43,7 +43,7 @@
 <legend>Bulk Quota Management</legend>
 <p>
   <button id="refresh-quotas" type="button" onclick="submitForm('resetQuotas')">Refresh All Quotas</button>
-  Resets all users' quotas to the values set in the <code>TOKEN_QUOTA_*</code> environment variables.
+  Immediately refreshes all users' quotas by the configured amounts.
 </p>
 <p>
   <button id="clear-token-counts" type="button" onclick="submitForm('resetCounts')">Clear All Token Counts</button>
```
```diff
@@ -101,6 +101,10 @@
 <% ["nickname", "type", "disabledAt", "disabledReason", "maxIps", "adminNote"].forEach(function (key) { %>
 <input type="hidden" name="<%- key %>" value="<%- user[key] %>" />
 <% }); %>
+<!-- tokenRefresh_ keys are dynamically generated -->
+<% Object.entries(quota).forEach(([family]) => { %>
+<input type="hidden" name="tokenRefresh_<%- family %>" value="<%- user.tokenRefresh[family] || quota[family] %>" />
+<% }); %>
 </form>
 
 <h3>Quota Information</h3>
@@ -111,7 +115,7 @@
   <button type="submit" class="btn btn-primary">Refresh Quotas for User</button>
 </form>
 <% } %>
-<%- include("partials/shared_quota-info", { quota, user }) %>
+<%- include("partials/shared_quota-info", { quota, user, showRefreshEdit: true }) %>
 
 <p><a href="/admin/manage/list-users">Back to User List</a></p>
```
```diff
@@ -122,18 +126,25 @@
   const token = a.dataset.token;
   const field = a.dataset.field;
   const existingValue = document.querySelector(`#current-values input[name=${field}]`).value;
-  let value = prompt(`Enter new value for '${field}'':`, existingValue);
+
+  let value = prompt(`Enter new value for '${field}':`, existingValue);
   if (value !== null) {
     if (value === "") {
       value = null;
     }
+
+    const payload = { _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") };
+    if (field.startsWith("tokenRefresh_")) {
+      const family = field.slice("tokenRefresh_".length);
+      payload.tokenRefresh = { [family]: Number(value) };
+    } else {
+      payload[field] = value;
+    }
+
     fetch(`/admin/manage/edit-user/${token}`, {
       method: "POST",
       credentials: "same-origin",
-      body: JSON.stringify({
-        [field]: value,
-        _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content"),
-      }),
+      body: JSON.stringify(payload),
       headers: { "Content-Type": "application/json", Accept: "application/json" },
     })
       .then((res) => Promise.all([res.ok, res.json()]))
@@ -141,9 +152,7 @@
       const url = new URL(window.location.href);
       const params = new URLSearchParams();
       if (!ok) {
-        params.set("flash", `error: ${json.error.message}`);
-      } else {
-        params.set("flash", `success: User's ${field} updated.`);
+        alert(`Failed to edit user: ${json.message}`);
       }
       url.search = params.toString();
       window.location.assign(url);
```
+205
-72
@@ -3,7 +3,7 @@ import dotenv from "dotenv";
|
|||||||
import type firebase from "firebase-admin";
|
import type firebase from "firebase-admin";
|
||||||
import path from "path";
|
import path from "path";
|
||||||
import pino from "pino";
|
import pino from "pino";
|
||||||
import type { ModelFamily } from "./shared/models";
|
import type { LLMService, ModelFamily } from "./shared/models";
|
||||||
import { MODEL_FAMILIES } from "./shared/models";
|
import { MODEL_FAMILIES } from "./shared/models";
|
||||||
|
|
||||||
dotenv.config();
|
dotenv.config();
|
||||||
@@ -29,10 +29,40 @@ type Config = {
|
|||||||
* same but the APIs are different. Vertex is the GCP product for enterprise.
|
* same but the APIs are different. Vertex is the GCP product for enterprise.
|
||||||
**/
|
**/
|
||||||
googleAIKey?: string;
|
googleAIKey?: string;
|
||||||
|
/**
|
||||||
|
* Comma-delimited list of Google AI experimental model names that are
|
||||||
|
* allowed to bypass the experimental model block. By default, all models
|
||||||
|
* containing "exp" are blocked, but specific models listed here will be
|
||||||
|
* permitted.
|
||||||
|
*
|
||||||
|
* @example "gemini-2.0-flash-exp,gemini-exp-1206"
|
||||||
|
*/
|
||||||
|
allowedExpModels?: string;
|
||||||
/**
|
/**
|
||||||
* Comma-delimited list of Mistral AI API keys.
|
* Comma-delimited list of Mistral AI API keys.
|
||||||
*/
|
*/
|
||||||
mistralAIKey?: string;
|
mistralAIKey?: string;
|
||||||
|
/**
|
||||||
|
* Comma-delimited list of Deepseek API keys.
|
||||||
|
*/
|
||||||
|
deepseekKey?: string;
|
||||||
|
/**
|
||||||
|
* Comma-delimited list of Xai (Grok) API keys.
|
||||||
|
*/
|
||||||
|
xaiKey?: string;
|
||||||
|
/**
|
||||||
|
* Comma-delimited list of Cohere API keys.
|
||||||
|
*/
|
||||||
|
cohereKey?: string;
|
||||||
|
/**
|
||||||
|
* Comma-delimited list of Qwen API keys.
|
||||||
|
*/
|
||||||
|
qwenKey?: string;
|
||||||
|
/**
|
||||||
|
* Comma-delimited list of Moonshot API keys.
|
||||||
|
*/
|
||||||
|
moonshotKey?: string;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Comma-delimited list of AWS credentials. Each credential item should be a
|
* Comma-delimited list of AWS credentials. Each credential item should be a
|
||||||
* colon-delimited list of access key, secret key, and AWS region.
|
* colon-delimited list of access key, secret key, and AWS region.
|
||||||
@@ -45,6 +75,13 @@ type Config = {
|
|||||||
* @example `AWS_CREDENTIALS=access_key_1:secret_key_1:us-east-1,access_key_2:secret_key_2:us-west-2`
|
* @example `AWS_CREDENTIALS=access_key_1:secret_key_1:us-east-1,access_key_2:secret_key_2:us-west-2`
|
||||||
*/
|
*/
|
||||||
awsCredentials?: string;
|
awsCredentials?: string;
|
||||||
|
/**
|
||||||
|
* Comma-delimited list of GCP credentials. Each credential item should be a
|
||||||
|
* colon-delimited list of access key, secret key, and GCP region.
|
||||||
|
*
|
||||||
|
* @example `GCP_CREDENTIALS=project1:1@1.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----,project2:2@2.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----`
|
||||||
|
*/
|
||||||
|
gcpCredentials?: string;
|
||||||
/**
|
/**
|
||||||
* Comma-delimited list of Azure OpenAI credentials. Each credential item
|
* Comma-delimited list of Azure OpenAI credentials. Each credential item
|
||||||
* should be a colon-delimited list of Azure resource name, deployment ID, and
|
* should be a colon-delimited list of Azure resource name, deployment ID, and
|
||||||
@@ -66,11 +103,6 @@ type Config = {
    * management mode is set to 'user_token'.
    */
   adminKey?: string;
-  /**
-   * The password required to view the service info/status page. If not set, the
-   * info page will be publicly accessible.
-   */
-  serviceInfoPassword?: string;
   /**
    * Which user management mode to use.
    * - `none`: No user management. Proxy is open to all requests with basic
@@ -87,10 +119,14 @@ type Config = {
    * - `memory`: Users are stored in memory and are lost on restart (default)
    * - `firebase_rtdb`: Users are stored in a Firebase Realtime Database;
    *   requires `firebaseKey` and `firebaseRtdbUrl` to be set.
+   * - `sqlite`: Users are stored in an SQLite database; requires
+   *   `sqliteUserStorePath` to be set.
    */
-  gatekeeperStore: "memory" | "firebase_rtdb";
+  gatekeeperStore: "memory" | "firebase_rtdb" | "sqlite";
   /** URL of the Firebase Realtime Database if using the Firebase RTDB store. */
   firebaseRtdbUrl?: string;
+  /** Path to the SQLite database file for storing user data. */
+  sqliteUserStorePath?: string;
   /**
    * Base64-encoded Firebase service account key if using the Firebase RTDB
    * store. Note that you should encode the *entire* JSON key file, not just the
@@ -340,13 +376,18 @@ type Config = {
    */
   allowOpenAIToolUsage?: boolean;
   /**
-   * Whether to allow prompts containing images, for use with multimodal models.
-   * Avoid giving this to untrusted users, as they can submit illegal content.
+   * Which services will accept prompts containing images, for use with
+   * multimodal models. Users with `special` role are exempt from this
+   * restriction.
    *
-   * Applies to GPT-4 Vision and Claude Vision. Users with `special` role are
-   * exempt from this restriction.
+   * Do not enable this feature for untrusted users, as malicious users could
+   * send images which violate your provider's terms of service or local laws.
+   *
+   * Defaults to no services, meaning image prompts are disabled. Use a comma-
+   * separated list. Available services are:
+   * openai,anthropic,google-ai,mistral-ai,aws,gcp,azure,xai
    */
-  allowImagePrompts?: boolean;
+  allowedVisionServices: LLMService[];
   /**
    * Allows overriding the default proxy endpoint route. Defaults to /proxy.
    * A leading slash is required.
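For instance, to enable image prompts only for OpenAI and Anthropic, the corresponding .env entry would presumably be `ALLOWED_VISION_SERVICES=openai,anthropic`, which the config loader parses into `["openai", "anthropic"]`.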
@@ -366,6 +407,51 @@ type Config = {
    * Takes precedence over the adminWhitelist.
    */
   ipBlacklist: string[];
+  /**
+   * If set, pushes requests further back into the queue according to their
+   * token costs by factor*tokens*milliseconds (or, more intuitively,
+   * factor*thousands_of_tokens*seconds).
+   * Accepts floats.
+   */
+  tokensPunishmentFactor: number;
+  /**
+   * Configuration for HTTP requests made by the proxy to other servers, such
+   * as when checking keys or forwarding users' requests to external services.
+   * If not set, all requests will be made using the default agent.
+   *
+   * If set, the proxy may make requests to other servers using the specified
+   * settings. This is useful if you wish to route users' requests through
+   * another proxy or VPN, or if you have multiple network interfaces and want
+   * to use a specific one for outgoing requests.
+   */
+  httpAgent?: {
+    /**
+     * The name of the network interface to use. The first external IPv4 address
+     * belonging to this interface will be used for outgoing requests.
+     */
+    interface?: string;
+    /**
+     * The URL of a proxy server to use. Supports SOCKS4, SOCKS5, HTTP, and
+     * HTTPS. If not set, requests will be made using the default agent.
+     * - SOCKS4: `socks4://some-socks-proxy.com:9050`
+     * - SOCKS5: `socks5://username:password@some-socks-proxy.com:9050`
+     * - HTTP:   `http://proxy-server-over-tcp.com:3128`
+     * - HTTPS:  `https://proxy-server-over-tls.com:3129`
+     *
+     * **Note:** If your proxy server issues a certificate, you may need to set
+     * `NODE_EXTRA_CA_CERTS` to the path to your certificate, otherwise this
+     * application will reject TLS connections.
+     */
+    proxyUrl?: string;
+  };
+  /** URL for the image on the login page. Defaults to empty string (no image). */
+  loginImageUrl?: string;
+  /** Whether to enable the token-based login page for the service info page. Defaults to true. */
+  enableInfoPageLogin?: boolean;
+  /** Authentication mode for the service info page. (token | password) */
+  serviceInfoAuthMode: "token" | "password";
+  /** Password for the service info page if serviceInfoAuthMode is 'password'. */
+  serviceInfoPassword?: string;
 };

 // To change configs, create a file called .env in the root directory.
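On the punishment units above: the two formulas agree because 1 thousand-token-second equals 10^6 token-milliseconds, so the factor carries the same meaning in either reading. Separately, a sketch of how the `proxyUrl` setting might translate into an actual agent, assuming the widely used socks-proxy-agent and https-proxy-agent packages (the package choice and wiring are illustrative; the consuming code is not shown in this diff):

import { SocksProxyAgent } from "socks-proxy-agent";
import { HttpsProxyAgent } from "https-proxy-agent";

function buildAgent(proxyUrl: string) {
  // SOCKS URLs get a SOCKS agent; HTTP(S) URLs get an HTTPS proxy agent.
  return proxyUrl.startsWith("socks")
    ? new SocksProxyAgent(proxyUrl)
    : new HttpsProxyAgent(proxyUrl);
}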
@@ -375,13 +461,19 @@ export const config: Config = {
   bindAddress: getEnvWithDefault("BIND_ADDRESS", "0.0.0.0"),
   openaiKey: getEnvWithDefault("OPENAI_KEY", ""),
   anthropicKey: getEnvWithDefault("ANTHROPIC_KEY", ""),
+  qwenKey: getEnvWithDefault("QWEN_KEY", ""),
   googleAIKey: getEnvWithDefault("GOOGLE_AI_KEY", ""),
+  allowedExpModels: getEnvWithDefault("ALLOWED_EXP_MODELS", ""),
   mistralAIKey: getEnvWithDefault("MISTRAL_AI_KEY", ""),
+  deepseekKey: getEnvWithDefault("DEEPSEEK_KEY", ""),
+  xaiKey: getEnvWithDefault("XAI_KEY", ""),
+  cohereKey: getEnvWithDefault("COHERE_KEY", ""),
+  moonshotKey: getEnvWithDefault("MOONSHOT_KEY", ""),
   awsCredentials: getEnvWithDefault("AWS_CREDENTIALS", ""),
+  gcpCredentials: getEnvWithDefault("GCP_CREDENTIALS", ""),
   azureCredentials: getEnvWithDefault("AZURE_CREDENTIALS", ""),
   proxyKey: getEnvWithDefault("PROXY_KEY", ""),
   adminKey: getEnvWithDefault("ADMIN_KEY", ""),
-  serviceInfoPassword: getEnvWithDefault("SERVICE_INFO_PASSWORD", ""),
   sqliteDataPath: getEnvWithDefault(
     "SQLITE_DATA_PATH",
     path.join(DATA_DIR, "database.sqlite")
@@ -389,7 +481,11 @@ export const config: Config = {
   eventLogging: getEnvWithDefault("EVENT_LOGGING", false),
   eventLoggingTrim: getEnvWithDefault("EVENT_LOGGING_TRIM", 5),
   gatekeeper: getEnvWithDefault("GATEKEEPER", "none"),
-  gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory"),
+  gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory") as Config["gatekeeperStore"],
+  sqliteUserStorePath: getEnvWithDefault(
+    "SQLITE_USER_STORE_PATH",
+    path.join(DATA_DIR, "user-store.sqlite")
+  ),
   maxIpsPerUser: getEnvWithDefault("MAX_IPS_PER_USER", 0),
   maxIpsAutoBan: getEnvWithDefault("MAX_IPS_AUTO_BAN", false),
   captchaMode: getEnvWithDefault("CAPTCHA_MODE", "none"),
@@ -402,40 +498,23 @@ export const config: Config = {
   firebaseKey: getEnvWithDefault("FIREBASE_KEY", undefined),
   textModelRateLimit: getEnvWithDefault("TEXT_MODEL_RATE_LIMIT", 4),
   imageModelRateLimit: getEnvWithDefault("IMAGE_MODEL_RATE_LIMIT", 4),
-  maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 16384),
+  maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 32768),
   maxContextTokensAnthropic: getEnvWithDefault(
     "MAX_CONTEXT_TOKENS_ANTHROPIC",
-    0
+    32768
   ),
   maxOutputTokensOpenAI: getEnvWithDefault(
     ["MAX_OUTPUT_TOKENS_OPENAI", "MAX_OUTPUT_TOKENS"],
-    400
+    1024
   ),
   maxOutputTokensAnthropic: getEnvWithDefault(
     ["MAX_OUTPUT_TOKENS_ANTHROPIC", "MAX_OUTPUT_TOKENS"],
-    400
+    1024
   ),
-  allowedModelFamilies: getEnvWithDefault("ALLOWED_MODEL_FAMILIES", [
-    "turbo",
-    "gpt4",
-    "gpt4-32k",
-    "gpt4-turbo",
-    "gpt4o",
-    "claude",
-    "claude-opus",
-    "gemini-pro",
-    "mistral-tiny",
-    "mistral-small",
-    "mistral-medium",
-    "mistral-large",
-    "aws-claude",
-    "aws-claude-opus",
-    "azure-turbo",
-    "azure-gpt4",
-    "azure-gpt4-32k",
-    "azure-gpt4-turbo",
-    "azure-gpt4o",
-  ]),
+  allowedModelFamilies: getEnvWithDefault(
+    "ALLOWED_MODEL_FAMILIES",
+    getDefaultModelFamilies()
+  ),
   rejectPhrases: parseCsv(getEnvWithDefault("REJECT_PHRASES", "")),
   rejectMessage: getEnvWithDefault(
     "REJECT_MESSAGE",
@@ -479,10 +558,23 @@ export const config: Config = {
   staticServiceInfo: getEnvWithDefault("STATIC_SERVICE_INFO", false),
   trustedProxies: getEnvWithDefault("TRUSTED_PROXIES", 1),
   allowOpenAIToolUsage: getEnvWithDefault("ALLOW_OPENAI_TOOL_USAGE", false),
-  allowImagePrompts: getEnvWithDefault("ALLOW_IMAGE_PROMPTS", false),
+  allowedVisionServices: parseCsv(
+    getEnvWithDefault("ALLOWED_VISION_SERVICES", "")
+  ) as LLMService[],
   proxyEndpointRoute: getEnvWithDefault("PROXY_ENDPOINT_ROUTE", "/proxy"),
-  adminWhitelist: parseCsv(getEnvWithDefault("ADMIN_WHITELIST", "0.0.0.0/0")),
+  adminWhitelist: parseCsv(
+    getEnvWithDefault("ADMIN_WHITELIST", "0.0.0.0/0,::/0")
+  ),
   ipBlacklist: parseCsv(getEnvWithDefault("IP_BLACKLIST", "")),
+  tokensPunishmentFactor: getEnvWithDefault("TOKENS_PUNISHMENT_FACTOR", 0.0),
+  httpAgent: {
+    interface: getEnvWithDefault("HTTP_AGENT_INTERFACE", undefined),
+    proxyUrl: getEnvWithDefault("HTTP_AGENT_PROXY_URL", undefined),
+  },
+  loginImageUrl: getEnvWithDefault("LOGIN_IMAGE_URL", ""),
+  enableInfoPageLogin: getEnvWithDefault("ENABLE_INFO_PAGE_LOGIN", true),
+  serviceInfoAuthMode: getEnvWithDefault("SERVICE_INFO_AUTH_MODE", "token") as Config["serviceInfoAuthMode"],
+  serviceInfoPassword: getEnvWithDefault("SERVICE_INFO_PASSWORD", undefined),
 } as const;

 function generateSigningKey() {
@@ -499,7 +591,10 @@ function generateSigningKey() {
     config.anthropicKey,
     config.googleAIKey,
     config.mistralAIKey,
+    config.deepseekKey,
+    config.xaiKey,
     config.awsCredentials,
+    config.gcpCredentials,
     config.azureCredentials,
   ];
   if (secrets.filter((s) => s).length === 0) {
@@ -518,7 +613,7 @@ function generateSigningKey() {
 }

 const signingKey = generateSigningKey();
-export const COOKIE_SECRET = signingKey;
+export const SECRET_SIGNING_KEY = signingKey;

 export async function assertConfigIsValid() {
   if (process.env.MODEL_RATE_LIMIT !== undefined) {
@@ -534,6 +629,17 @@ export async function assertConfigIsValid() {
     );
   }

+  if (process.env.ALLOW_IMAGE_PROMPTS === "true") {
+    const hasAllowedServices = config.allowedVisionServices.length > 0;
+    if (!hasAllowedServices) {
+      config.allowedVisionServices = ["openai", "anthropic"];
+      startupLogger.warn(
+        { allowedVisionServices: config.allowedVisionServices },
+        "ALLOW_IMAGE_PROMPTS is deprecated. Use ALLOWED_VISION_SERVICES instead."
+      );
+    }
+  }
+
   if (config.promptLogging && !config.promptLoggingBackend) {
     throw new Error(
       "Prompt logging is enabled but no backend is configured. Set PROMPT_LOGGING_BACKEND to 'google_sheets' or 'file'."
@@ -590,6 +696,41 @@ export async function assertConfigIsValid() {
     );
   }

+  if (config.gatekeeperStore === "sqlite" && !config.sqliteUserStorePath) {
+    throw new Error(
+      "SQLite user store requires `SQLITE_USER_STORE_PATH` to be set."
+    );
+  }
+
+  if (Object.values(config.httpAgent || {}).filter(Boolean).length === 0) {
+    delete config.httpAgent;
+  } else if (config.httpAgent) {
+    if (config.httpAgent.interface && config.httpAgent.proxyUrl) {
+      throw new Error(
+        "Cannot set both `HTTP_AGENT_INTERFACE` and `HTTP_AGENT_PROXY_URL`."
+      );
+    }
+  }
+
+  if (config.enableInfoPageLogin) {
+    if (!["token", "password"].includes(config.serviceInfoAuthMode)) {
+      throw new Error(
+        `Invalid SERVICE_INFO_AUTH_MODE: ${config.serviceInfoAuthMode}. Must be 'token' or 'password'.`
+      );
+    }
+    if (config.serviceInfoAuthMode === "password" && !config.serviceInfoPassword) {
+      throw new Error(
+        "SERVICE_INFO_AUTH_MODE is 'password' but SERVICE_INFO_PASSWORD is not set."
+      );
+    }
+    // If service info login is token-based, gatekeeper must be 'user_token' mode for getUser() to be effective.
+    if (config.serviceInfoAuthMode === "token" && config.gatekeeper !== "user_token") {
+      throw new Error(
+        "SERVICE_INFO_AUTH_MODE is 'token' for info page login, but GATEKEEPER is not 'user_token'. User token authentication will not work."
+      );
+    }
+  }
+
   // Ensure forks which add new secret-like config keys don't unwittingly expose
   // them to users.
   for (const key of getKeys(config)) {
@@ -603,15 +744,16 @@ export async function assertConfigIsValid() {
       `Config key "${key}" may be sensitive but is exposed. Add it to SENSITIVE_KEYS or OMITTED_KEYS.`
     );
   }

-  await maybeInitializeFirebase();
 }

 /**
  * Config keys that are masked on the info page, but not hidden as their
  * presence may be relevant to the user due to privacy implications.
  */
-export const SENSITIVE_KEYS: (keyof Config)[] = ["googleSheetsSpreadsheetId"];
+export const SENSITIVE_KEYS: (keyof Config)[] = [
+  "googleSheetsSpreadsheetId",
+  "httpAgent",
+];

 /**
  * Config keys that are not displayed on the info page at all, generally because
@@ -624,12 +766,17 @@ export const OMITTED_KEYS = [
   "openaiKey",
   "anthropicKey",
   "googleAIKey",
+  "deepseekKey",
+  "xaiKey",
+  "cohereKey",
+  "qwenKey",
+  "moonshotKey",
   "mistralAIKey",
   "awsCredentials",
+  "gcpCredentials",
   "azureCredentials",
   "proxyKey",
   "adminKey",
-  "serviceInfoPassword",
   "rejectPhrases",
   "rejectMessage",
   "showTokenCosts",
@@ -638,6 +785,7 @@ export const OMITTED_KEYS = [
   "firebaseKey",
   "firebaseRtdbUrl",
   "sqliteDataPath",
+  "sqliteUserStorePath",
   "eventLogging",
   "eventLoggingTrim",
   "gatekeeperStore",
@@ -656,6 +804,9 @@ export const OMITTED_KEYS = [
   "adminWhitelist",
   "ipBlacklist",
   "powTokenPurgeHours",
+  "loginImageUrl",
+  "enableInfoPageLogin",
+  "serviceInfoPassword",
 ] satisfies (keyof Config)[];
 type OmitKeys = (typeof OMITTED_KEYS)[number];

@@ -716,7 +867,9 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
       "ANTHROPIC_KEY",
       "GOOGLE_AI_KEY",
       "AWS_CREDENTIALS",
+      "GCP_CREDENTIALS",
       "AZURE_CREDENTIALS",
+      "QWEN_KEY",
     ].includes(String(env))
   ) {
     return value as unknown as T;
@@ -733,32 +886,6 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
   }
 }

-let firebaseApp: firebase.app.App | undefined;
-
-async function maybeInitializeFirebase() {
-  if (!config.gatekeeperStore.startsWith("firebase")) {
-    return;
-  }
-
-  const firebase = await import("firebase-admin");
-  const firebaseKey = Buffer.from(config.firebaseKey!, "base64").toString();
-  const app = firebase.initializeApp({
-    credential: firebase.credential.cert(JSON.parse(firebaseKey)),
-    databaseURL: config.firebaseRtdbUrl,
-  });
-
-  await app.database().ref("connection-test").set(Date.now());
-
-  firebaseApp = app;
-}
-
-export function getFirebaseApp(): firebase.app.App {
-  if (!firebaseApp) {
-    throw new Error("Firebase app not initialized.");
-  }
-  return firebaseApp;
-}
-
 function parseCsv(val: string): string[] {
   if (!val) return [];

@@ -766,3 +893,9 @@ function parseCsv(val: string): string[] {
   const matches = val.match(regex) || [];
   return matches.map((item) => item.replace(/^"|"$/g, "").trim());
 }
+
+function getDefaultModelFamilies(): ModelFamily[] {
+  return MODEL_FAMILIES.filter(
+    (f) => !f.includes("o1-pro") && !f.includes("o3-pro")
+  ) as ModelFamily[];
+}
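Judging from the quote-stripping map step above, parseCsv apparently accepts both bare and double-quoted items; the matching regex itself sits outside this hunk, so the exact behavior is inferred rather than shown:

// Inferred behavior, not an excerpt from the diff:
parseCsv('a, "b, with comma", c'); // => ["a", "b, with comma", "c"]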
+254 -130
@@ -1,4 +1,8 @@
-/** This whole module kinda sucks */
+/* ──────────────────────────────────────────────────────────────
+   Login-gated info page
+   drop-in replacement for src/info-page.ts
+   ──────────────────────────────────────────────────────────── */
+
 import fs from "fs";
 import express, { Router, Request, Response } from "express";
 import showdown from "showdown";
@@ -8,41 +12,166 @@ import { getLastNImages } from "./shared/file-storage/image-history";
 import { keyPool } from "./shared/key-management";
 import { MODEL_FAMILY_SERVICE, ModelFamily } from "./shared/models";
 import { withSession } from "./shared/with-session";
-import { checkCsrfToken, injectCsrfToken } from "./shared/inject-csrf";
+import { injectCsrfToken, checkCsrfToken } from "./shared/inject-csrf";
+import { getUser } from "./shared/users/user-store";
+
+/* ──────────────── TYPES: extend express-session ──────────── */
+declare module "express-session" {
+  interface Session {
+    infoPageAuthed?: boolean;
+  }
+}
+
+/* ──────────────── misc constants ─────────────────────────── */
+const INFO_PAGE_TTL = 2_000; // ms
+const LOGIN_ROUTE = "/";

-const INFO_PAGE_TTL = 2000;
 const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
-  turbo: "GPT-3.5 Turbo",
+  qwen: "Qwen",
+  cohere: "Cohere",
+  deepseek: "Deepseek",
+  xai: "Grok",
+  moonshot: "Moonshot",
+  turbo: "GPT-4o Mini / 3.5 Turbo",
   gpt4: "GPT-4",
   "gpt4-32k": "GPT-4 32k",
   "gpt4-turbo": "GPT-4 Turbo",
   gpt4o: "GPT-4o",
+  gpt41: "GPT-4.1",
+  "gpt41-mini": "GPT-4.1 Mini",
+  "gpt41-nano": "GPT-4.1 Nano",
+  gpt5: "GPT-5",
+  "gpt5-mini": "GPT-5 Mini",
+  "gpt5-nano": "GPT-5 Nano",
+  "gpt5-chat-latest": "GPT-5 Chat Latest",
+  gpt45: "GPT-4.5",
+  o1: "OpenAI o1",
+  "o1-mini": "OpenAI o1 mini",
+  "o1-pro": "OpenAI o1 pro",
+  "o3-pro": "OpenAI o3 pro",
+  "o3-mini": "OpenAI o3 mini",
+  "o3": "OpenAI o3",
+  "o4-mini": "OpenAI o4 mini",
+  "codex-mini": "OpenAI Codex Mini",
   "dall-e": "DALL-E",
+  "gpt-image": "GPT Image",
   claude: "Claude (Sonnet)",
   "claude-opus": "Claude (Opus)",
+  "gemini-flash": "Gemini Flash",
   "gemini-pro": "Gemini Pro",
+  "gemini-ultra": "Gemini Ultra",
   "mistral-tiny": "Mistral 7B",
-  "mistral-small": "Mixtral Small", // Originally 8x7B, but that now refers to the older open-weight version. Mixtral Small is a newer closed-weight update to the 8x7B model.
+  "mistral-small": "Mistral Nemo",
   "mistral-medium": "Mistral Medium",
   "mistral-large": "Mistral Large",
   "aws-claude": "AWS Claude (Sonnet)",
   "aws-claude-opus": "AWS Claude (Opus)",
+  "aws-mistral-tiny": "AWS Mistral 7B",
+  "aws-mistral-small": "AWS Mistral Nemo",
+  "aws-mistral-medium": "AWS Mistral Medium",
+  "aws-mistral-large": "AWS Mistral Large",
+  "gcp-claude": "GCP Claude (Sonnet)",
+  "gcp-claude-opus": "GCP Claude (Opus)",
   "azure-turbo": "Azure GPT-3.5 Turbo",
   "azure-gpt4": "Azure GPT-4",
   "azure-gpt4-32k": "Azure GPT-4 32k",
   "azure-gpt4-turbo": "Azure GPT-4 Turbo",
   "azure-gpt4o": "Azure GPT-4o",
+  "azure-gpt45": "Azure GPT-4.5",
+  "azure-gpt41": "Azure GPT-4.1",
+  "azure-gpt41-mini": "Azure GPT-4.1 Mini",
+  "azure-gpt41-nano": "Azure GPT-4.1 Nano",
+  "azure-gpt5": "Azure GPT-5",
+  "azure-gpt5-mini": "Azure GPT-5 Mini",
+  "azure-gpt5-nano": "Azure GPT-5 Nano",
+  "azure-gpt5-chat-latest": "Azure GPT-5 Chat Latest",
+  "azure-o1": "Azure o1",
+  "azure-o1-mini": "Azure o1 mini",
+  "azure-o1-pro": "Azure o1 pro",
+  "azure-o3-pro": "Azure o3 pro",
+  "azure-o3-mini": "Azure o3 mini",
+  "azure-o3": "Azure o3",
+  "azure-o4-mini": "Azure o4 mini",
+  "azure-codex-mini": "Azure Codex Mini",
   "azure-dall-e": "Azure DALL-E",
+  "azure-gpt-image": "Azure GPT Image",
 };

 const converter = new showdown.Converter();

+/* optional markdown greeting */
 const customGreeting = fs.existsSync("greeting.md")
-  ? `\n## Server Greeting\n${fs.readFileSync("greeting.md", "utf8")}`
+  ? `<div id="servergreeting">${fs.readFileSync("greeting.md", "utf8")}</div>`
   : "";

+/* ──────────────── Login page ──────────────────────── */
+function renderLoginPage(csrf: string, error?: string) {
+  const errBlock = error
+    ? `<div class="error-message">${escapeHtml(error)}</div>`
+    : "";
+  const pageTitle = getServerTitle();
+  return `<!DOCTYPE html>
+<html>
+<head>
+  <title>${pageTitle} – Login</title>
+  <style>
+    body{font-family:Arial, sans-serif;display:flex;justify-content:center;
+      align-items:center;height:100vh;margin:0;padding:20px;background:#f5f5f5;}
+    .login-container{background:#fff;border-radius:8px;box-shadow:0 4px 8px rgba(0,0,0,.1);
+      padding:30px;width:100%;max-width:400px;text-align:center;}
+    .logo-image{max-width:200px;margin-bottom:20px;}
+    .form-group{margin-bottom:20px;}
+    input[type=text], input[type=password]{width:100%;padding:10px;border:1px solid #ddd;border-radius:4px;
+      box-sizing:border-box;font-size:16px;}
+    button{background:#4caf50;color:#fff;border:none;padding:12px 20px;border-radius:4px;
+      cursor:pointer;font-size:16px;width:100%;}
+    button:hover{background:#45a049;}
+    .error-message{color:#f44336;margin-bottom:15px;}
+
+    @media (prefers-color-scheme: dark) {
+      body { background: #2c2c2c; color: #e0e0e0; }
+      .login-container { background: #383838; box-shadow: 0 4px 12px rgba(0,0,0,0.4); border: 1px solid #4a4a4a; }
+      input[type=text], input[type=password] { background: #4a4a4a; color: #e0e0e0; border: 1px solid #5a5a5a; }
+      input[type=text]::placeholder, input[type=password]::placeholder { color: #999; }
+      button { background: #007bff; } /* Using a blue for dark mode button */
+      button:hover { background: #0056b3; }
+      .error-message { color: #ff8a80; } /* Lighter red for errors in dark mode */
+    }
+  </style>
+</head>
+<body>
+  <div class="login-container">
+    ${config.loginImageUrl ? `<img src="${config.loginImageUrl}" alt="Logo" class="logo-image">` : ''}
+    ${errBlock}
+    <form method="POST" action="${LOGIN_ROUTE}">
+      <div class="form-group">
+        ${config.serviceInfoAuthMode === "password"
+          ? `<input type="password" id="password" name="password" required placeholder="Service Password">`
+          : `<input type="text" id="token" name="token" required placeholder="Your token">`}
+        <input type="hidden" name="_csrf" value="${csrf}">
+      </div>
+      <button type="submit">Access Dashboard</button>
+    </form>
+  </div>
+</body>
+</html>`;
+}
+
+/* ──────────────── login-required middleware ──────────────── */
+function requireLogin(
+  req: Request,
+  res: Response,
+  next: express.NextFunction
+) {
+  if (req.session?.infoPageAuthed) return next();
+  return res.send(renderLoginPage(res.locals.csrfToken));
+}
+
+/* ──────────────── INFO PAGE CACHING ──────────────────────── */
 let infoPageHtml: string | undefined;
 let infoPageLastUpdated = 0;

-export const handleInfoPage = (req: Request, res: Response) => {
+export function handleInfoPage(req: Request, res: Response) {
   if (infoPageLastUpdated + INFO_PAGE_TTL > Date.now()) {
     return res.send(infoPageHtml);
   }
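The `declare module "express-session"` augmentation above is what lets the middleware read and write `req.session.infoPageAuthed` without casts. A minimal illustration, assuming express-session's types are loaded (the route itself is hypothetical, not part of the diff):

// With the augmentation in scope, this type-checks cleanly:
app.get("/whoami", (req, res) => {
  res.json({ authed: req.session?.infoPageAuthed === true });
});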
@@ -57,60 +186,46 @@ export const handleInfoPage = (req: Request, res: Response) => {
   infoPageLastUpdated = Date.now();

   res.send(infoPageHtml);
-};
+}

+/* ──────────────── RENDER FULL INFO PAGE ──────────────────── */
 export function renderPage(info: ServiceInfo) {
   const title = getServerTitle();
   const headerHtml = buildInfoPageHeader(info);

   return `<!doctype html>
 <html lang="en">
   <head>
     <meta charset="utf-8" />
     <meta name="robots" content="noindex" />
     <title>${title}</title>
-    <link rel="stylesheet" href="/res/css/reset.css" media="screen" />
-    <link rel="stylesheet" href="/res/css/sakura.css" media="screen" />
-    <link rel="stylesheet" href="/res/css/sakura-dark.css" media="screen and (prefers-color-scheme: dark)" />
-    <style>
-      body {
-        font-family: sans-serif;
-        padding: 1em;
-        max-width: 900px;
-        margin: 0;
-      }
-
-      .self-service-links {
-        display: flex;
-        justify-content: center;
-        margin-bottom: 1em;
-        padding: 0.5em;
-        font-size: 0.8em;
-      }
-
-      .self-service-links a {
-        margin: 0 0.5em;
-      }
-    </style>
-  </head>
-  <body>
-    ${headerHtml}
-    <hr />
-    ${getSelfServiceLinks()}
-    <h2>Service Info</h2>
-    <pre>${JSON.stringify(info, null, 2)}</pre>
-  </body>
+    <link rel="stylesheet" href="/res/css/reset.css" />
+    <link rel="stylesheet" href="/res/css/sakura.css" />
+    <link rel="stylesheet" href="/res/css/sakura-dark.css"
+          media="screen and (prefers-color-scheme: dark)" />
+    <style>
+      body{font-family:sans-serif;padding:1em;max-width:900px;margin:0;}
+      .self-service-links{display:flex;justify-content:center;margin-bottom:1em;
+        padding:0.5em;font-size:0.8em;}
+      .self-service-links a{margin:0 0.5em;}
+    </style>
+  </head>
+  <body>
+    ${headerHtml}
+    <hr/>
+    ${getSelfServiceLinks()}
+    <h2>Service Info</h2>
+    <pre>${JSON.stringify(info, null, 2)}</pre>
+  </body>
 </html>`;
 }

-/**
- * If the server operator provides a `greeting.md` file, it will be included in
- * the rendered info page.
- **/
+/* ──────────────── header & helper functions ──────────────── */
+/* (all copied verbatim from original file) */
+
 function buildInfoPageHeader(info: ServiceInfo) {
   const title = getServerTitle();
-  // TODO: use some templating engine instead of this mess
   let infoBody = `# ${title}`;

   if (config.promptLogging) {
     infoBody += `\n## Prompt Logging Enabled
 This proxy keeps full logs of all prompts and AI responses. Prompt logs are anonymous and do not contain IP addresses or timestamps.
@@ -129,9 +244,9 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon
   for (const modelFamily of config.allowedModelFamilies) {
     const service = MODEL_FAMILY_SERVICE[modelFamily];

-    const hasKeys = keyPool.list().some((k) => {
-      return k.service === service && k.modelFamilies.includes(modelFamily);
-    });
+    const hasKeys = keyPool.list().some(
+      (k) => k.service === service && k.modelFamilies.includes(modelFamily)
+    );

     const wait = info[modelFamily]?.estimatedQueueTime;
     if (hasKeys && wait) {
@@ -142,9 +257,7 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon
   }

   infoBody += "\n\n" + waits.join(" / ");
-
   infoBody += customGreeting;
-
   infoBody += buildRecentImageSection();

   return converter.makeHtml(infoBody);
@@ -152,63 +265,60 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon

 function getSelfServiceLinks() {
   if (config.gatekeeper !== "user_token") return "";

   const links = [["Check your user token", "/user/lookup"]];
   if (config.captchaMode !== "none") {
     links.unshift(["Request a user token", "/user/captcha"]);
   }

   return `<div class="self-service-links">${links
-    .map(([text, link]) => `<a target="_blank" href="${link}">${text}</a>`)
+    .map(([t, l]) => `<a href="${l}">${t}</a>`)
     .join(" | ")}</div>`;
 }

 function getServerTitle() {
-  // Use manually set title if available
-  if (process.env.SERVER_TITLE) {
-    return process.env.SERVER_TITLE;
-  }
-
-  // Huggingface
-  if (process.env.SPACE_ID) {
-    return `${process.env.SPACE_AUTHOR_NAME} / ${process.env.SPACE_TITLE}`;
-  }
-
-  // Render
-  if (process.env.RENDER) {
-    return `Render / ${process.env.RENDER_SERVICE_NAME}`;
-  }
-
-  return "OAI Reverse Proxy";
+  if (process.env.SERVER_TITLE) return process.env.SERVER_TITLE;
+  if (process.env.SPACE_ID)
+    return `${process.env.SPACE_AUTHOR_NAME} / ${process.env.SPACE_TITLE}`;
+  if (process.env.RENDER)
+    return `Render / ${process.env.RENDER_SERVICE_NAME}`;
+  return "Tunnel";
 }

 function buildRecentImageSection() {
-  const dalleModels: ModelFamily[] = ["azure-dall-e", "dall-e"];
+  const imageModels: ModelFamily[] = [
+    "azure-dall-e",
+    "dall-e",
+    "gpt-image",
+    "azure-gpt-image",
+  ];
+  // Condition 1: Is the feature enabled via config?
+  // Condition 2: Is at least one relevant image model family allowed in config?
   if (
     !config.showRecentImages ||
-    dalleModels.every((f) => !config.allowedModelFamilies.includes(f))
+    imageModels.every((f) => !config.allowedModelFamilies.includes(f))
   ) {
-    return "";
+    return ""; // Exit if feature is disabled or no relevant models are allowed
   }

-  let html = `<h2>Recent DALL-E Generations</h2>`;
-  const recentImages = getLastNImages(12).reverse();
-  if (recentImages.length === 0) {
-    html += `<p>No images yet.</p>`;
-    return html;
-  }
-
-  html += `<div style="display: flex; flex-wrap: wrap;" id="recent-images">`;
+  // Condition 3: Are there any actual images to display?
+  const recentImages = getLastNImages(12).reverse();
+  if (recentImages.length === 0) {
+    // If the feature is enabled and models are allowed, but no images exist,
+    // do not render the section, including its title.
+    return "";
+  }
+
+  // If all conditions pass (feature enabled, models allowed, images exist), build and return the HTML
+  let html = `<h2>Recent Image Generations</h2>`;
+  html += `<div style="display:flex;flex-wrap:wrap;" id="recent-images">`;
   for (const { url, prompt } of recentImages) {
     const thumbUrl = url.replace(/\.png$/, "_t.jpg");
     const escapedPrompt = escapeHtml(prompt);
-    html += `<div style="margin: 0.5em;" class="recent-image">
-      <a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}" alt="${escapedPrompt}" style="max-width: 150px; max-height: 150px;" /></a>
-    </div>`;
+    html += `<div style="margin:0.5em" class="recent-image">
+      <a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}"
+         alt="${escapedPrompt}" style="max-width:150px;max-height:150px;"/></a></div>`;
   }
-  html += `</div>`;
-  html += `<p style="clear: both; text-align: center;"><a href="/user/image-history">View all recent images</a></p>`;
+  html += `</div><p style="clear:both;text-align:center;">
+    <a href="/user/image-history">View all recent images</a></p>`;

   return html;
 }
@@ -223,57 +333,71 @@ function escapeHtml(unsafe: string) {
     .replace(/]/g, "]");
 }

 function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
   try {
-    const [username, spacename] = spaceId.split("/");
-    return `https://${username}-${spacename.replace(/_/g, "-")}.hf.space`;
-  } catch (e) {
+    const [u, s] = spaceId.split("/");
+    return `https://${u}-${s.replace(/_/g, "-")}.hf.space`;
+  } catch {
     return "";
   }
 }

-function checkIfUnlocked(
-  req: Request,
-  res: Response,
-  next: express.NextFunction
-) {
-  if (config.serviceInfoPassword?.length && !req.session?.unlocked) {
-    return res.redirect("/unlock-info");
-  }
-  next();
-}
-
+/* ──────────────── ROUTER ─────────────────────────────────── */
 const infoPageRouter = Router();
-if (config.serviceInfoPassword?.length) {
-  infoPageRouter.use(
-    express.json({ limit: "1mb" }),
-    express.urlencoded({ extended: true, limit: "1mb" })
-  );
-  infoPageRouter.use(withSession);
-  infoPageRouter.use(injectCsrfToken, checkCsrfToken);
-  infoPageRouter.post("/unlock-info", (req, res) => {
-    if (req.body.password !== config.serviceInfoPassword) {
-      return res.status(403).send("Incorrect password");
-    }
-    req.session!.unlocked = true;
-    res.redirect("/");
-  });
-  infoPageRouter.get("/unlock-info", (_req, res) => {
-    if (_req.session?.unlocked) return res.redirect("/");
-
-    res.send(`
-      <form method="post" action="/unlock-info">
-        <h1>Unlock Service Info</h1>
-        <input type="hidden" name="_csrf" value="${res.locals.csrfToken}" />
-        <input type="password" name="password" placeholder="Password" />
-        <button type="submit">Unlock</button>
-      </form>
-    `);
-  });
-  infoPageRouter.use(checkIfUnlocked);
-}
-infoPageRouter.get("/", handleInfoPage);
-infoPageRouter.get("/status", (req, res) => {
-  res.json(buildInfo(req.protocol + "://" + req.get("host"), false));
+infoPageRouter.use(
+  express.json({ limit: "1mb" }),
+  express.urlencoded({ extended: true, limit: "1mb" }),
+  withSession,
+  injectCsrfToken,
+  checkCsrfToken
+);
+
+/* login attempt */
+infoPageRouter.post(LOGIN_ROUTE, (req, res) => {
+  if (config.serviceInfoAuthMode === "password") {
+    const password = (req.body.password || "").trim();
+    // Simple string comparison; for production, consider a timing-safe comparison library
+    if (config.serviceInfoPassword && password === config.serviceInfoPassword) {
+      req.session!.infoPageAuthed = true;
+      return res.redirect("/");
+    } else {
+      return res
+        .status(401)
+        .send(renderLoginPage(res.locals.csrfToken, "Invalid password. Please try again."));
+    }
+  } else {
+    // Token-based authentication (using any valid user token)
+    const token = (req.body.token || "").trim();
+    const user = getUser(token); // returns undefined if invalid
+
+    if (user && !user.disabledAt) {
+      // Only allow access if user exists AND is not disabled
+      req.session!.infoPageAuthed = true;
+      return res.redirect("/");
+    } else if (user && user.disabledAt) {
+      // User exists but is disabled
+      const reason = user.disabledReason || "Your account has been disabled";
+      return res
+        .status(401)
+        .send(renderLoginPage(res.locals.csrfToken, `Access denied: ${reason}`));
+    } else {
+      // User doesn't exist
+      return res
+        .status(401)
+        .send(renderLoginPage(res.locals.csrfToken, "Invalid token. Please try again."));
+    }
+  }
 });

+/* GET / – either login form or info page */
+if (config.enableInfoPageLogin) {
+  infoPageRouter.get(LOGIN_ROUTE, requireLogin, handleInfoPage);
+} else {
+  infoPageRouter.get(LOGIN_ROUTE, handleInfoPage);
+}
+
+/* ─── Removed the public /status route : simply not added ─── */
+
 export { infoPageRouter };
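The comment in the password branch suggests a timing-safe comparison. One way to do that with Node's built-in crypto, as a sketch rather than part of this diff (timingSafeEqual requires equal-length inputs, hence the hashing step):

import { createHash, timingSafeEqual } from "crypto";

function safeCompare(a: string, b: string): boolean {
  // Hashing both sides first normalizes length so timingSafeEqual can be used.
  const ha = createHash("sha256").update(a).digest();
  const hb = createHash("sha256").update(b).digest();
  return timingSafeEqual(ha, hb);
}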
@@ -0,0 +1,9 @@
+import { NextFunction, Request, Response } from "express";
+
+export function addV1(req: Request, res: Response, next: NextFunction) {
+  // Clients don't consistently use the /v1 prefix so we'll add it for them.
+  if (!req.path.startsWith("/v1/") && !req.path.match(/^\/(v1alpha|v1beta)\//)) {
+    req.url = `/v1${req.url}`;
+  }
+  next();
+}
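A quick illustration of what addV1 does when mounted ahead of a router (the mount point is hypothetical; the rewrites follow directly from the guard above):

// router.use(addV1) would rewrite, e.g.:
//   POST /chat/completions  -> POST /v1/chat/completions
//   POST /v1/messages       -> unchanged (already prefixed)
//   GET  /v1beta/models     -> unchanged (matches the v1alpha/v1beta guard)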
+199 -170
@@ -1,22 +1,16 @@
-import { Request, Response, RequestHandler, Router } from "express";
-import { createProxyMiddleware } from "http-proxy-middleware";
+import { Request, RequestHandler, Router } from "express";
 import { config } from "../config";
-import { logger } from "../logger";
-import { createQueueMiddleware } from "./queue";
 import { ipLimiter } from "./rate-limit";
-import { handleProxyError } from "./middleware/common";
 import {
   addKey,
-  addAnthropicPreamble,
   createPreprocessorMiddleware,
   finalizeBody,
-  createOnProxyReqHandler,
 } from "./middleware/request";
-import {
-  ProxyResHandlerWithBody,
-  createOnProxyResHandler,
-} from "./middleware/response";
-import { sendErrorToClient } from "./middleware/response/error-generator";
+import { ProxyResHandlerWithBody } from "./middleware/response";
+import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
+import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
+import { claudeModels } from "../shared/claude-models";
+import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";

 let modelsCache: any = null;
 let modelsCacheTime = 0;
@@ -26,40 +20,32 @@ const getModelsResponse = () => {
     return modelsCache;
   }

-  if (!config.anthropicKey) return { object: "list", data: [] };
+  if (!config.anthropicKey) return { object: "list", data: [], has_more: false, first_id: null, last_id: null };

-  const claudeVariants = [
-    "claude-v1",
-    "claude-v1-100k",
-    "claude-instant-v1",
-    "claude-instant-v1-100k",
-    "claude-v1.3",
-    "claude-v1.3-100k",
-    "claude-v1.2",
-    "claude-v1.0",
-    "claude-instant-v1.1",
-    "claude-instant-v1.1-100k",
-    "claude-instant-v1.0",
-    "claude-2",
-    "claude-2.0",
-    "claude-2.1",
-    "claude-3-haiku-20240307",
-    "claude-3-opus-20240229",
-    "claude-3-sonnet-20240229",
-  ];
-
-  const models = claudeVariants.map((id) => ({
-    id,
-    object: "model",
-    created: new Date().getTime(),
-    owned_by: "anthropic",
-    permission: [],
-    root: "claude",
-    parent: null,
-  }));
-
-  modelsCache = { object: "list", data: models };
-  modelsCacheTime = new Date().getTime();
+  const date = new Date();
+  const models = claudeModels.map(model => ({
+    // Common
+    id: model.anthropicId,
+    owned_by: "anthropic",
+    // Anthropic
+    type: "model",
+    display_name: model.displayName,
+    created_at: date.toISOString(),
+    // OpenAI
+    object: "model",
+    created: date.getTime(),
+  }));
+
+  modelsCache = {
+    // Common
+    object: "list",
+    data: models,
+    // Anthropic
+    has_more: false,
+    first_id: models[0]?.id,
+    last_id: models[models.length - 1]?.id,
+  };
+  modelsCacheTime = date.getTime();

   return modelsCache;
 };
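The map above emits one object satisfying both the Anthropic and OpenAI list shapes. An illustrative entry follows; the concrete id and display name come from claudeModels, which is not shown in this diff, so those two values are assumptions:

// One element of the "data" array, shape per the mapping above:
// {
//   "id": "claude-3-5-sonnet-latest",    // assumed example id
//   "owned_by": "anthropic",
//   "type": "model",
//   "display_name": "Claude 3.5 Sonnet", // assumed example name
//   "created_at": "2025-01-01T00:00:00.000Z",
//   "object": "model",
//   "created": 1735689600000
// }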
@@ -68,8 +54,7 @@ const handleModelRequest: RequestHandler = (_req, res) => {
   res.status(200).json(getModelsResponse());
 };

-/** Only used for non-streaming requests. */
-const anthropicResponseHandler: ProxyResHandlerWithBody = async (
+const anthropicBlockingResponseHandler: ProxyResHandlerWithBody = async (
   _proxyRes,
   req,
   res,
@@ -122,12 +107,6 @@ export function transformAnthropicChatResponseToAnthropicText(
   };
 }

-/**
- * Transforms a model response from the Anthropic API to match those from the
- * OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
- * is only used for non-streaming requests as streaming requests are handled
- * on-the-fly.
- */
 function transformAnthropicTextResponseToOpenAI(
   anthropicBody: Record<string, any>,
   req: Request
@@ -178,75 +157,187 @@ export function transformAnthropicChatResponseToOpenAI(
   };
 }

-const anthropicProxy = createQueueMiddleware({
-  proxyMiddleware: createProxyMiddleware({
-    target: "https://api.anthropic.com",
-    changeOrigin: true,
-    selfHandleResponse: true,
-    logger,
-    on: {
-      proxyReq: createOnProxyReqHandler({
-        pipeline: [addKey, addAnthropicPreamble, finalizeBody],
-      }),
-      proxyRes: createOnProxyResHandler([anthropicResponseHandler]),
-      error: handleProxyError,
-    },
-    // Abusing pathFilter to rewrite the paths dynamically.
-    pathFilter: (pathname, req) => {
-      const isText = req.outboundApi === "anthropic-text";
-      const isChat = req.outboundApi === "anthropic-chat";
-      if (isChat && pathname === "/v1/complete") {
-        req.url = "/v1/messages";
-      }
-      if (isText && pathname === "/v1/chat/completions") {
-        req.url = "/v1/complete";
-      }
-      if (isChat && pathname === "/v1/chat/completions") {
-        req.url = "/v1/messages";
-      }
-      if (isChat && ["sonnet", "opus"].includes(req.params.type)) {
-        req.url = "/v1/messages";
-      }
-      return true;
-    },
-  }),
+/**
+ * If a client using the OpenAI compatibility endpoint requests an actual OpenAI
+ * model, reassigns it to Sonnet.
+ */
+function maybeReassignModel(req: Request) {
+  const model = req.body.model;
+  if (model.includes("claude")) return; // use whatever model the user requested
+  req.body.model = "claude-3-5-sonnet-latest";
+}
+
+/**
+ * If client requests more than 4096 output tokens the request must have a
+ * particular version header.
+ * https://docs.anthropic.com/en/release-notes/api#july-15th-2024
+ *
+ * Also adds the required beta header for 1-hour cache duration if requested.
+ * Also validates Claude 4.1 Opus parameters (temperature/top_p).
+ */
+function setAnthropicBetaHeader(req: Request) {
+  // Validate Claude 4.1 Opus parameters before processing
+  validateClaude41OpusParameters(req);
+
+  const { max_tokens_to_sample } = req.body;
+
+  // Initialize beta headers array
+  const betaHeaders: string[] = [];
+
+  // Add max tokens beta header if needed
+  if (max_tokens_to_sample > 4096) {
+    betaHeaders.push("max-tokens-3-5-sonnet-2024-07-15");
+  }
+
+  // Add extended cache TTL beta header if 1h cache is requested
+  if (req.body.cache_control?.ttl === "1h") {
+    betaHeaders.push("extended-cache-ttl-2025-04-11");
+  }
+
+  // Set the combined beta headers if any were added
+  if (betaHeaders.length > 0) {
+    req.headers["anthropic-beta"] = betaHeaders.join(",");
+  }
+}
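For example, per the logic above, a text-completion request that both asks for a large output and requests the 1-hour cache TTL (field values illustrative) would reach the upstream with a combined beta header:

// Incoming body (illustrative):
//   { "model": "...", "max_tokens_to_sample": 8192, "cache_control": { "ttl": "1h" } }
// Resulting header set by setAnthropicBetaHeader:
//   anthropic-beta: max-tokens-3-5-sonnet-2024-07-15,extended-cache-ttl-2025-04-11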
+
+/**
+ * Adds the web search tool for Claude 3.5, 3.7, and 4 models when
+ * enable_web_search is true.
+ *
+ * Supports all optional parameters documented in the Claude API:
+ * - max_uses: Limit the number of searches per request
+ * - allowed_domains: Only include results from these domains
+ * - blocked_domains: Never include results from these domains
+ * - user_location: Localize search results
+ */
+function addWebSearchTool(req: Request) {
+  // Check if this is a Claude model that supports web search and if web search is enabled
+  const isClaude35 = req.body.model?.includes("claude-3-5") || req.body.model?.includes("claude-3.5");
+  const isClaude37 = req.body.model?.includes("claude-3-7") || req.body.model?.includes("claude-3.7");
+  const isClaude4 = req.body.model?.includes("claude-sonnet-4") || req.body.model?.includes("claude-opus-4");
+  const useWebSearch = (isClaude35 || isClaude37 || isClaude4) && Boolean(req.body.enable_web_search);
+
+  if (useWebSearch) {
+    // Create the base web search tool
+    const webSearchTool: any = {
+      'type': 'web_search_20250305',
+      'name': 'web_search',
+    };
+
+    // Add optional parameters if provided by the client
+
+    // max_uses: Limit the number of searches per request
+    if (typeof req.body.web_search_max_uses === 'number') {
+      webSearchTool.max_uses = req.body.web_search_max_uses;
+      delete req.body.web_search_max_uses;
+    }
+
+    // allowed_domains: Only include results from these domains
+    if (Array.isArray(req.body.web_search_allowed_domains)) {
+      webSearchTool.allowed_domains = req.body.web_search_allowed_domains;
+      delete req.body.web_search_allowed_domains;
+    }
+
+    // blocked_domains: Never include results from these domains
+    if (Array.isArray(req.body.web_search_blocked_domains)) {
+      webSearchTool.blocked_domains = req.body.web_search_blocked_domains;
+      delete req.body.web_search_blocked_domains;
+    }
+
+    // user_location: Localize search results
+    if (req.body.web_search_user_location) {
+      webSearchTool.user_location = req.body.web_search_user_location;
+      delete req.body.web_search_user_location;
+    }
+
+    // Add the web search tool to the tools array
+    req.body.tools = [...(req.body.tools || []), webSearchTool];
+  }

+  // Delete custom parameters as they're not standard Claude API parameters
+  delete req.body.enable_web_search;
+  delete req.body.reasoning_effort;
+}
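Putting it together, a client request like the following (values illustrative) reaches Anthropic with a proper tools entry and without the proxy's custom fields:

// Client sends:
//   { "model": "claude-3-7-sonnet-...", "enable_web_search": true,
//     "web_search_max_uses": 3, "web_search_allowed_domains": ["example.com"] }
// addWebSearchTool rewrites the body to include:
//   "tools": [{ "type": "web_search_20250305", "name": "web_search",
//               "max_uses": 3, "allowed_domains": ["example.com"] }]
// and strips enable_web_search, the web_search_* fields, and reasoning_effort.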
|
|
||||||
|
function selectUpstreamPath(manager: ProxyReqManager) {
|
||||||
|
const req = manager.request;
|
||||||
|
const pathname = req.url.split("?")[0];
|
||||||
|
req.log.debug({ pathname }, "Anthropic path filter");
|
||||||
|
const isText = req.outboundApi === "anthropic-text";
|
||||||
|
const isChat = req.outboundApi === "anthropic-chat";
|
||||||
|
if (isChat && pathname === "/v1/complete") {
|
||||||
|
manager.setPath("/v1/messages");
|
||||||
|
}
|
||||||
|
if (isText && pathname === "/v1/chat/completions") {
|
||||||
|
manager.setPath("/v1/complete");
|
||||||
|
}
|
||||||
|
if (isChat && pathname === "/v1/chat/completions") {
|
||||||
|
manager.setPath("/v1/messages");
|
||||||
|
}
|
||||||
|
if (isChat && ["sonnet", "opus"].includes(req.params.type)) {
|
||||||
|
manager.setPath("/v1/messages");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const anthropicProxy = createQueuedProxyMiddleware({
|
||||||
|
target: "https://api.anthropic.com",
|
||||||
|
mutations: [selectUpstreamPath, addKey, finalizeBody],
|
||||||
|
blockingResponseHandler: anthropicBlockingResponseHandler,
|
||||||
});
|
});
|
||||||
|
|
||||||

const nativeAnthropicChatPreprocessor = createPreprocessorMiddleware(
  { inApi: "anthropic-chat", outApi: "anthropic-chat", service: "anthropic" },
  { afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);

const nativeTextPreprocessor = createPreprocessorMiddleware(
  {
    inApi: "anthropic-text",
    outApi: "anthropic-text",
    service: "anthropic",
  },
  { afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);

const textToChatPreprocessor = createPreprocessorMiddleware(
  {
    inApi: "anthropic-text",
    outApi: "anthropic-chat",
    service: "anthropic",
  },
  { afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);

/**
 * Routes text completion prompts to anthropic-chat if they need translation
 * (claude-3 and claude-4 based models do not support the old text completion
 * endpoint).
 */
const preprocessAnthropicTextRequest: RequestHandler = (req, res, next) => {
  const model = req.body.model;
  const isClaude4Model = model?.includes("claude-sonnet-4") || model?.includes("claude-opus-4");
  if (model?.startsWith("claude-3") || isClaude4Model) {
    textToChatPreprocessor(req, res, next);
  } else {
    nativeTextPreprocessor(req, res, next);
  }
};
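
// Routing examples for preprocessAnthropicTextRequest (model names illustrative):
//   "claude-2.1"               -> nativeTextPreprocessor (stays anthropic-text)
//   "claude-3-opus-20240229"   -> textToChatPreprocessor (translated to anthropic-chat)
//   "claude-sonnet-4-20250514" -> textToChatPreprocessor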

const oaiToTextPreprocessor = createPreprocessorMiddleware(
  {
    inApi: "openai",
    outApi: "anthropic-text",
    service: "anthropic",
  },
  { afterTransform: [setAnthropicBetaHeader] }
);

const oaiToChatPreprocessor = createPreprocessorMiddleware(
  {
    inApi: "openai",
    outApi: "anthropic-chat",
    service: "anthropic",
  },
  { afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);

/**
 * Routes an OpenAI prompt to either the legacy Claude text completion endpoint
@@ -254,7 +345,9 @@ const oaiToChatPreprocessor = createPreprocessorMiddleware({
 */
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
  maybeReassignModel(req);
  const model = req.body.model;
  const isClaude4 = model?.includes("claude-sonnet-4") || model?.includes("claude-opus-4");
  if (model?.includes("claude-3") || isClaude4) {
    oaiToChatPreprocessor(req, res, next);
  } else {
    oaiToTextPreprocessor(req, res, next);
@@ -267,11 +360,7 @@ anthropicRouter.get("/v1/models", handleModelRequest);
anthropicRouter.post(
  "/v1/messages",
  ipLimiter,
  nativeAnthropicChatPreprocessor,
  anthropicProxy
);
// Anthropic text completion endpoint. Translates to Anthropic chat completion
@@ -291,65 +380,5 @@ anthropicRouter.post(
  preprocessOpenAICompatRequest,
  anthropicProxy
);

// Temporarily force Anthropic Text to Anthropic Chat for frontends which do not
// yet support the new model. Forces claude-3. Will be removed once common
// frontends have been updated.
anthropicRouter.post(
  "/v1/:type(sonnet|opus)/:action(complete|messages)",
  ipLimiter,
  handleAnthropicTextCompatRequest,
  createPreprocessorMiddleware({
    inApi: "anthropic-text",
    outApi: "anthropic-chat",
    service: "anthropic",
  }),
  anthropicProxy
);

function handleAnthropicTextCompatRequest(
  req: Request,
  res: Response,
  next: any
) {
  const type = req.params.type;
  const action = req.params.action;
  const alreadyInChatFormat = Boolean(req.body.messages);
  const compatModel = `claude-3-${type}-20240229`;
  req.log.info(
    { type, inputModel: req.body.model, compatModel, alreadyInChatFormat },
    "Handling Anthropic compatibility request"
  );

  if (action === "messages" || alreadyInChatFormat) {
    return sendErrorToClient({
      req,
      res,
      options: {
        title: "Unnecessary usage of compatibility endpoint",
        message: `Your client seems to already support the new Claude API format. This endpoint is intended for clients that do not yet support the new format.\nUse the normal \`/anthropic\` proxy endpoint instead.`,
        format: "unknown",
        statusCode: 400,
        reqId: req.id,
        obj: {
          requested_endpoint: "/anthropic/" + type,
          correct_endpoint: "/anthropic",
        },
      },
    });
  }

  req.body.model = compatModel;
  next();
}

/**
 * If a client using the OpenAI compatibility endpoint requests an actual OpenAI
 * model, reassigns it to Claude 3 Sonnet.
 */
function maybeReassignModel(req: Request) {
  const model = req.body.model;
  if (!model.startsWith("gpt-")) return;
  req.body.model = "claude-3-sonnet-20240229";
}

export const anthropic = anthropicRouter;
@@ -0,0 +1,341 @@
import { Request, RequestHandler, Router } from "express";
import { v4 } from "uuid";
import {
  transformAnthropicChatResponseToAnthropicText,
  transformAnthropicChatResponseToOpenAI,
} from "./anthropic";
import { ipLimiter } from "./rate-limit";
import {
  createPreprocessorMiddleware,
  finalizeSignedRequest,
  signAwsRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";

const awsBlockingResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  let newBody = body;
  switch (`${req.inboundApi}<-${req.outboundApi}`) {
    case "openai<-anthropic-text":
      req.log.info("Transforming Anthropic Text back to OpenAI format");
      newBody = transformAwsTextResponseToOpenAI(body, req);
      break;
    case "openai<-anthropic-chat":
      req.log.info("Transforming AWS Anthropic Chat back to OpenAI format");
      newBody = transformAnthropicChatResponseToOpenAI(body);
      break;
    case "anthropic-text<-anthropic-chat":
      req.log.info("Transforming AWS Anthropic Chat back to Text format");
      newBody = transformAnthropicChatResponseToAnthropicText(body);
      break;
  }

  // AWS does not always confirm the model in the response, so we have to add it
  if (!newBody.model && req.body.model) {
    newBody.model = req.body.model;
  }

  res.status(200).json({ ...newBody, proxy: body.proxy });
};

function transformAwsTextResponseToOpenAI(
  awsBody: Record<string, any>,
  req: Request
): Record<string, any> {
  const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
  return {
    id: "aws-" + v4(),
    object: "chat.completion",
    created: Date.now(),
    model: req.body.model,
    usage: {
      prompt_tokens: req.promptTokens,
      completion_tokens: req.outputTokens,
      total_tokens: totalTokens,
    },
    choices: [
      {
        message: {
          role: "assistant",
          content: awsBody.completion?.trim(),
        },
        finish_reason: awsBody.stop_reason,
        index: 0,
      },
    ],
  };
}
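
// Sketch of the body returned by transformAwsTextResponseToOpenAI
// (all values illustrative):
//   {
//     "id": "aws-<uuid>", "object": "chat.completion", "model": "<requested model>",
//     "usage": { "prompt_tokens": 512, "completion_tokens": 128, "total_tokens": 640 },
//     "choices": [{ "index": 0, "finish_reason": "stop_sequence",
//                   "message": { "role": "assistant", "content": "..." } }]
//   }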

const awsClaudeProxy = createQueuedProxyMiddleware({
  target: ({ signedRequest }) => {
    if (!signedRequest) throw new Error("Must sign request before proxying");
    return `${signedRequest.protocol}//${signedRequest.hostname}`;
  },
  mutations: [signAwsRequest, finalizeSignedRequest],
  blockingResponseHandler: awsBlockingResponseHandler,
});

const nativeTextPreprocessor = createPreprocessorMiddleware(
  { inApi: "anthropic-text", outApi: "anthropic-text", service: "aws" },
  { afterTransform: [maybeReassignModel] }
);

const textToChatPreprocessor = createPreprocessorMiddleware(
  { inApi: "anthropic-text", outApi: "anthropic-chat", service: "aws" },
  { afterTransform: [maybeReassignModel] }
);

/**
 * Routes text completion prompts to aws anthropic-chat if they need translation
 * (claude-3 based models do not support the old text completion endpoint).
 */
const preprocessAwsTextRequest: RequestHandler = (req, res, next) => {
  if (req.body.model?.includes("claude-3")) {
    textToChatPreprocessor(req, res, next);
  } else {
    nativeTextPreprocessor(req, res, next);
  }
};

const oaiToAwsTextPreprocessor = createPreprocessorMiddleware(
  { inApi: "openai", outApi: "anthropic-text", service: "aws" },
  { afterTransform: [maybeReassignModel] }
);

const oaiToAwsChatPreprocessor = createPreprocessorMiddleware(
  { inApi: "openai", outApi: "anthropic-chat", service: "aws" },
  { afterTransform: [maybeReassignModel] }
);

/**
 * Routes an OpenAI prompt to either the legacy Claude text completion endpoint
 * or the new Claude chat completion endpoint, based on the requested model.
 */
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
  if (req.body.model?.includes("claude-3")) {
    oaiToAwsChatPreprocessor(req, res, next);
  } else {
    oaiToAwsTextPreprocessor(req, res, next);
  }
};

const awsClaudeRouter = Router();
// Native(ish) Anthropic text completion endpoint.
awsClaudeRouter.post(
  "/v1/complete",
  ipLimiter,
  preprocessAwsTextRequest,
  awsClaudeProxy
);
// Native Anthropic chat completion endpoint.
awsClaudeRouter.post(
  "/v1/messages",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "anthropic-chat", outApi: "anthropic-chat", service: "aws" },
    { afterTransform: [maybeReassignModel] }
  ),
  awsClaudeProxy
);

// OpenAI-to-AWS Anthropic compatibility endpoint.
awsClaudeRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  preprocessOpenAICompatRequest,
  awsClaudeProxy
);

/**
 * Tries to deal with:
 * - frontends sending AWS model names even when they want to use the OpenAI-
 *   compatible endpoint
 * - frontends sending Anthropic model names that AWS doesn't recognize
 * - frontends sending OpenAI model names because they expect the proxy to
 *   translate them
 *
 * If client sends AWS model ID it will be used verbatim. Otherwise, various
 * strategies are used to try to map a non-AWS model name to AWS model ID.
 */
function maybeReassignModel(req: Request) {
  // Validate Claude 4.1 Opus parameters before processing
  validateClaude41OpusParameters(req);

  const model = req.body.model;

  // If it looks like an AWS model, use it as-is
  if (model.includes("anthropic.claude")) {
    return;
  }

  // Anthropic model names can look like:
  // - claude-v1
  // - claude-2.1
  // - claude-3-5-sonnet-20240620 (old format: number-model)
  // - claude-3-opus-latest (old format: number-model)
  // - claude-sonnet-4-20250514 (new format: model-number)
  // - claude-opus-4-latest (new format: model-number)
  // - anthropic.claude-3-sonnet-20240229-v1:0 (AWS format with old naming)
  // - anthropic.claude-sonnet-4-20250514-v1:0 (AWS format with new naming)
  const pattern =
    /^(?:anthropic\.)?claude-(?:(?:(instant-)?(v)?(\d+)([.-](\d))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(latest|\d*))|(?:(sonnet-|opus-|haiku-)(\d+)([.-](\d))?(-\d+k)?-(latest|\d+)))(?:-v\d+(?::\d+)?)?$/i;
  const match = model.match(pattern);

  if (!match) {
    throw new Error(
      `Provided model name (${model}) doesn't resemble a Claude model ID.`
    );
  }

  // Check which format matched (old or new)
  // New format: claude-sonnet-4-20250514 or anthropic.claude-sonnet-4-20250514-v1:0
  // Old format: claude-3-sonnet-20240229 or anthropic.claude-3-sonnet-20240229-v1:0
  const isNewFormat = !!match[9];

  let major, minor, name, rev;

  if (isNewFormat) {
    // New format: claude-sonnet-4-20250514
    // match[9]  = sonnet-/opus-/haiku-
    // match[10] = major version
    // match[12] = minor version (if any, from the [.-](\d) group)
    // match[14] = revision (latest or date)
    name = match[9]?.match(/([a-z]+)/)?.[1] || "";
    major = match[10];
    minor = match[12];
    rev = match[14];

    // Special case: if the revision is a single digit and there is no minor
    // version, treat the revision as the minor version
    // (e.g. claude-opus-4-1 -> version 4.1)
    if (!minor && rev && /^\d$/.test(rev)) {
      minor = rev;
      rev = undefined;
    }
  } else {
    // Old format: claude-3-sonnet-20240229
    // match[1] = instant- (if any)
    // match[3] = major version
    // match[5] = minor version (if any)
    // match[7] = -sonnet-/-opus-/-haiku- (if any)
    // match[8] = revision (latest or date)
    const instant = match[1];
    if (instant) {
      req.body.model = "anthropic.claude-instant-v1";
      return;
    }

    major = match[3];
    minor = match[5];
    name = match[7]?.match(/([a-z]+)/)?.[1] || "";
    rev = match[8];
  }

  const ver = minor ? `${major}.${minor}` : major;

  switch (ver) {
    case "1":
    case "1.0":
      req.body.model = "anthropic.claude-v1";
      return;
    case "2":
    case "2.0":
      req.body.model = "anthropic.claude-v2";
      return;
    case "2.1":
      req.body.model = "anthropic.claude-v2:1";
      return;
    case "3":
    case "3.0":
      // There is only one snapshot for all Claude 3 models, so there is no
      // need to check the revision.
      switch (name) {
        case "sonnet":
          req.body.model = "anthropic.claude-3-sonnet-20240229-v1:0";
          return;
        case "haiku":
          req.body.model = "anthropic.claude-3-haiku-20240307-v1:0";
          return;
        case "opus":
          req.body.model = "anthropic.claude-3-opus-20240229-v1:0";
          return;
      }
      break;
    case "3.5":
      switch (name) {
        case "sonnet":
          switch (rev) {
            case "20241022":
            case "latest":
              req.body.model = "anthropic.claude-3-5-sonnet-20241022-v2:0";
              return;
            case "20240620":
              req.body.model = "anthropic.claude-3-5-sonnet-20240620-v1:0";
              return;
          }
          break;
        case "haiku":
          switch (rev) {
            case "20241022":
            case "latest":
              req.body.model = "anthropic.claude-3-5-haiku-20241022-v1:0";
              return;
          }
          break;
        case "opus":
          // No 3.5 Opus model ID was ever announced.
          break;
      }
      break;
    case "3.7":
      switch (name) {
        case "sonnet":
          req.body.model = "anthropic.claude-3-7-sonnet-20250219-v1:0";
          return;
      }
      break;
    case "4":
    case "4.0":
      // Map "claude-4-..." variants to their actual AWS Bedrock IDs as
      // defined in src/shared/claude-models.ts.
      switch (name) {
        case "sonnet":
          req.body.model = "anthropic.claude-sonnet-4-20250514-v1:0";
          return;
        case "opus":
          req.body.model = "anthropic.claude-opus-4-20250514-v1:0";
          return;
        // No case for "haiku": "claude-4-haiku" is not defined in
        // claude-models.ts, so it falls through and throws below.
      }
      break;
    case "4.1":
      // Map "claude-4.1-..." variants to their actual AWS Bedrock IDs as
      // defined in src/shared/claude-models.ts.
      switch (name) {
        case "opus":
          req.body.model = "anthropic.claude-opus-4-1-20250805-v1:0";
          return;
        // No Sonnet or Haiku variants for 4.1 yet.
      }
      break;
  }

  throw new Error(
    `Provided model name (${model}) could not be mapped to a known AWS Claude model ID.`
  );
}
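
// Worked examples for maybeReassignModel (inputs illustrative):
//   "claude-2.1"               -> "anthropic.claude-v2:1"
//   "claude-3-haiku-20240307"  -> "anthropic.claude-3-haiku-20240307-v1:0"
//   "claude-3-5-sonnet-latest" -> "anthropic.claude-3-5-sonnet-20241022-v2:0"
//   "claude-opus-4-1"          -> "anthropic.claude-opus-4-1-20250805-v1:0"
//                                 (single-digit revision treated as minor version)
//   "anthropic.claude-sonnet-4-20250514-v1:0" -> unchanged (already an AWS ID)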

export const awsClaude = awsClaudeRouter;
@@ -0,0 +1,95 @@
import { Request, Router } from "express";
import {
  detectMistralInputApi,
  transformMistralTextToMistralChat,
} from "./mistral-ai";
import { ipLimiter } from "./rate-limit";
import { ProxyResHandlerWithBody } from "./middleware/response";
import {
  createPreprocessorMiddleware,
  finalizeSignedRequest,
  signAwsRequest,
} from "./middleware/request";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";

const awsMistralBlockingResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  let newBody = body;
  if (req.inboundApi === "mistral-ai" && req.outboundApi === "mistral-text") {
    newBody = transformMistralTextToMistralChat(body);
  }
  // AWS does not always confirm the model in the response, so we have to add it
  if (!newBody.model && req.body.model) {
    newBody.model = req.body.model;
  }

  res.status(200).json({ ...newBody, proxy: body.proxy });
};

const awsMistralProxy = createQueuedProxyMiddleware({
  target: ({ signedRequest }) => {
    if (!signedRequest) throw new Error("Must sign request before proxying");
    return `${signedRequest.protocol}//${signedRequest.hostname}`;
  },
  mutations: [signAwsRequest, finalizeSignedRequest],
  blockingResponseHandler: awsMistralBlockingResponseHandler,
});

function maybeReassignModel(req: Request) {
  const model = req.body.model;

  // If it looks like an AWS model ID, use it as-is
  if (model.startsWith("mistral.")) {
    return;
  }
  // Mixtral 8x7B Instruct (checked before 7B, since "8x7b" contains "7b")
  else if (model.includes("8x7b")) {
    req.body.model = "mistral.mixtral-8x7b-instruct-v0:1";
  }
  // Mistral 7B Instruct
  else if (model.includes("7b")) {
    req.body.model = "mistral.mistral-7b-instruct-v0:2";
  }
  // Mistral Large (Feb 2024)
  else if (model.includes("large-2402")) {
    req.body.model = "mistral.mistral-large-2402-v1:0";
  }
  // Mistral Large 2 (July 2024)
  else if (model.includes("large")) {
    req.body.model = "mistral.mistral-large-2407-v1:0";
  }
  // Mistral Small (Feb 2024)
  else if (model.includes("small")) {
    req.body.model = "mistral.mistral-small-2402-v1:0";
  } else {
    throw new Error(
      `Can't map '${model}' to a supported AWS model ID; make sure you are requesting a Mistral model supported by Amazon Bedrock`
    );
  }
}
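
// Examples of the substring mapping above (inputs illustrative):
//   "mistral-7b-instruct"   -> "mistral.mistral-7b-instruct-v0:2"
//   "mixtral-8x7b-instruct" -> "mistral.mixtral-8x7b-instruct-v0:1"
//   "mistral-large-latest"  -> "mistral.mistral-large-2407-v1:0"
//   "gpt-4"                 -> throws (no Mistral substring matches)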

const nativeMistralChatPreprocessor = createPreprocessorMiddleware(
  { inApi: "mistral-ai", outApi: "mistral-ai", service: "aws" },
  {
    beforeTransform: [detectMistralInputApi],
    afterTransform: [maybeReassignModel],
  }
);

const awsMistralRouter = Router();
awsMistralRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  nativeMistralChatPreprocessor,
  awsMistralProxy
);

export const awsMistral = awsMistralRouter;

+82 -319
@@ -1,335 +1,98 @@
/* Shared code between AWS Claude and AWS Mistral endpoints. */

import { Request, Response, Router } from "express";
import { config } from "../config";
import { addV1 } from "./add-v1";
import { awsClaude } from "./aws-claude";
import { awsMistral } from "./aws-mistral";
import { AwsBedrockKey, keyPool } from "../shared/key-management";
import { claudeModels, findByAwsId } from "../shared/claude-models";

const awsRouter = Router();
awsRouter.get(["/:vendor?/v1/models", "/:vendor?/models"], handleModelsRequest);
awsRouter.use("/claude", addV1, awsClaude);
awsRouter.use("/mistral", addV1, awsMistral);

const MODELS_CACHE_TTL = 10000;
let modelsCache: Record<string, any> = {};
let modelsCacheTime: Record<string, number> = {};

function handleModelsRequest(req: Request, res: Response) {
  if (!config.awsCredentials) {
    return res.json({ object: "list", data: [] });
  }

  const vendor = req.params.vendor?.length
    ? req.params.vendor === "claude"
      ? "anthropic"
      : req.params.vendor
    : "all";

  const cacheTime = modelsCacheTime[vendor] || 0;
  if (new Date().getTime() - cacheTime < MODELS_CACHE_TTL) {
    return res.json(modelsCache[vendor]);
  }

  const availableAwsModelIds = new Set<string>();
  for (const key of keyPool.list()) {
    if (key.isDisabled || key.service !== "aws") continue;
    (key as AwsBedrockKey).modelIds.forEach((id) => availableAwsModelIds.add(id));
  }

  const mistralMappings = new Map([
    ["mistral.mistral-7b-instruct-v0:2", "Mistral 7B Instruct"],
    ["mistral.mixtral-8x7b-instruct-v0:1", "Mixtral 8x7B Instruct"],
    ["mistral.mistral-large-2402-v1:0", "Mistral Large 2402"],
    ["mistral.mistral-large-2407-v1:0", "Mistral Large 2407"],
    ["mistral.mistral-small-2402-v1:0", "Mistral Small 2402"],
  ]);

  const date = new Date();

  const claudeModelsList = claudeModels
    .filter((model) => availableAwsModelIds.has(model.awsId))
    .map((model) => ({
      id: model.anthropicId,
      owned_by: "anthropic",
      type: "model",
      display_name: model.displayName,
      created_at: date.toISOString(),
      object: "model",
      created: date.getTime(),
      permission: [],
      root: "anthropic",
      parent: null,
    }));

  const mistralModelsList = Array.from(mistralMappings.keys())
    .filter((id) => availableAwsModelIds.has(id))
    .map((id) => ({
      id,
      owned_by: "mistral",
      type: "model",
      display_name: mistralMappings.get(id) || id.split(".")[1],
      created_at: date.toISOString(),
      object: "model",
      created: date.getTime(),
      permission: [],
      root: "mistral",
      parent: null,
    }));

  const allModels = [...claudeModelsList, ...mistralModelsList];
  const filteredModels = vendor === "all"
    ? allModels
    : allModels.filter((m) => m.root === vendor);

  modelsCache[vendor] = {
    object: "list",
    data: filteredModels,
    has_more: false,
    first_id: filteredModels[0]?.id,
    last_id: filteredModels[filteredModels.length - 1]?.id,
  };
  modelsCacheTime[vendor] = date.getTime();

  return res.json(modelsCache[vendor]);
}

export const aws = awsRouter;
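
// Sketch of a models listing from handleModelsRequest (ids depend on which
// Bedrock keys are configured; values illustrative):
//   GET /aws/claude/v1/models ->
//   { "object": "list", "has_more": false,
//     "data": [{ "id": "claude-3-5-sonnet-20241022", "owned_by": "anthropic",
//                "object": "model", "display_name": "Claude 3.5 Sonnet", ... }],
//     "first_id": "claude-3-5-sonnet-20241022", "last_id": "..." }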

+23 -75
@@ -1,73 +1,30 @@
import { RequestHandler, Router } from "express";
import { config } from "../config";
import { generateModelList } from "./openai";
import { ipLimiter } from "./rate-limit";
import {
  addAzureKey,
  createPreprocessorMiddleware,
  finalizeSignedRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";

let modelsCache: any = null;
let modelsCacheTime = 0;

const handleModelRequest: RequestHandler = (_req, res) => {
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return res.status(200).json(modelsCache);
  }

  if (!config.azureCredentials) {
    return res.status(200).json({ object: "list", data: [] });
  }

  const result = generateModelList("azure");

  modelsCache = { object: "list", data: result };
  modelsCacheTime = new Date().getTime();
  res.status(200).json(modelsCache);
};

const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
@@ -83,26 +40,17 @@ const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
  res.status(200).json({ ...body, proxy: body.proxy });
};

const azureOpenAIProxy = createQueuedProxyMiddleware({
  target: ({ signedRequest }) => {
    if (!signedRequest) throw new Error("Must sign request before proxying");
    const { hostname, protocol } = signedRequest;
    return `${protocol}//${hostname}`;
  },
  mutations: [addAzureKey, finalizeSignedRequest],
  blockingResponseHandler: azureOpenaiResponseHandler,
});

const azureOpenAIRouter = Router();
azureOpenAIRouter.get("/v1/models", handleModelRequest);
azureOpenAIRouter.post(
@@ -0,0 +1,222 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { CohereKey, keyPool } from "../shared/key-management";
import { isCohereModel, normalizeMessages } from "../shared/api-schemas/cohere";
import { logger } from "../logger";

const log = logger.child({ module: "proxy", service: "cohere" });
let modelsCache: any = null;
let modelsCacheTime = 0;

const cohereResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  res.status(200).json({ ...body, proxy: body.proxy });
};

const getModelsResponse = async () => {
  // Return the cache if it is less than 1 minute old
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return modelsCache;
  }

  try {
    // Get a Cohere key directly; the model name only drives key selection
    const modelToUse = "command";
    const cohereKey = keyPool.get(modelToUse, "cohere") as CohereKey;

    if (!cohereKey || !cohereKey.key) {
      log.warn("No valid Cohere key available for model listing");
      throw new Error("No valid Cohere API key available");
    }

    // Fetch models directly from the Cohere API
    const response = await axios.get("https://api.cohere.com/v1/models", {
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${cohereKey.key}`,
        "Cohere-Version": "2022-12-06",
      },
    });

    if (!response.data || !response.data.models) {
      throw new Error("Unexpected response format from Cohere API");
    }

    // Extract models, keeping only those that support the chat endpoint
    const filteredModels = response.data.models
      .filter((model: any) => model.endpoints && model.endpoints.includes("chat"))
      .map((model: any) => ({
        id: model.name,
        name: model.name,
        // Additional OpenAI-compatible fields
        context_window: model.context_window_size || 4096,
        max_tokens: model.max_tokens || 4096,
      }));

    log.debug(
      { modelCount: filteredModels.length, models: filteredModels.map((m: any) => m.id) },
      "Filtered models from Cohere API"
    );

    // Format the response for OpenAI compatibility
    const models = {
      object: "list",
      data: filteredModels.map((model: any) => ({
        id: model.id,
        object: "model",
        created: Math.floor(Date.now() / 1000),
        owned_by: "cohere",
        permission: [],
        root: model.id,
        parent: null,
        context_length: model.context_window,
      })),
    };

    log.debug({ modelCount: filteredModels.length }, "Retrieved models from Cohere API");

    // Cache the response
    modelsCache = models;
    modelsCacheTime = new Date().getTime();
    return models;
  } catch (error) {
    // Provide detailed logging for easier troubleshooting
    if (error instanceof Error) {
      log.error(
        { errorMessage: error.message, stack: error.stack },
        "Error fetching Cohere models"
      );
    } else {
      log.error({ error }, "Unknown error fetching Cohere models");
    }

    // Return an empty list as a fallback
    return { object: "list", data: [] };
  }
};

const handleModelRequest: RequestHandler = async (_req, res) => {
  try {
    const models = await getModelsResponse();
    res.status(200).json(models);
  } catch (error) {
    if (error instanceof Error) {
      log.error(
        { errorMessage: error.message, stack: error.stack },
        "Error handling model request"
      );
    } else {
      log.error({ error }, "Unknown error handling model request");
    }
    res.status(500).json({ error: "Failed to fetch models" });
  }
};

// Prepare messages for the Cohere API
function prepareMessages(req: Request) {
  if (req.body.messages && Array.isArray(req.body.messages)) {
    req.body.messages = normalizeMessages(req.body.messages);
  }
}

// Remove parameters not supported by Cohere models
function removeUnsupportedParameters(req: Request) {
  if (req.body.logit_bias !== undefined) {
    delete req.body.logit_bias;
  }
  if (req.body.top_logprobs !== undefined) {
    delete req.body.top_logprobs;
  }
  if (req.body.max_completion_tokens !== undefined) {
    delete req.body.max_completion_tokens;
  }

  // Transform structured output requests to Cohere's format if needed
  if (req.body.response_format && req.body.response_format.schema) {
    const jsonSchema = req.body.response_format.schema;
    req.body.response_format = {
      type: "json_object",
      schema: jsonSchema,
    };
  }

  // Logging for debugging
  if (process.env.NODE_ENV !== "production") {
    log.debug({ body: req.body }, "Request after parameter cleanup");
  }
}
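
// Example of the response_format rewrite above (schema illustrative):
//   before: { "response_format": { "schema": { "type": "object" } } }
//   after:  { "response_format": { "type": "json_object", "schema": { "type": "object" } } }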

// Token-count logging for Cohere models
function countCohereTokens(req: Request) {
  const model = req.body.model;

  if (isCohereModel(model)) {
    // Count tokens using prompt tokens (simplified)
    if (req.promptTokens) {
      req.log.debug(
        { tokens: req.promptTokens },
        "Estimated token count for Cohere prompt"
      );
    }
  }
}

const cohereProxy = createQueuedProxyMiddleware({
  mutations: [
    addKey,
    // Add the Cohere-Version header to every request
    (manager) => {
      manager.setHeader("Cohere-Version", "2022-12-06");
    },
    finalizeBody,
  ],
  target: "https://api.cohere.ai/compatibility",
  blockingResponseHandler: cohereResponseHandler,
});

const cohereRouter = Router();

cohereRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "cohere" },
    { afterTransform: [prepareMessages, removeUnsupportedParameters, countCohereTokens] }
  ),
  cohereProxy
);

cohereRouter.post(
  "/v1/embeddings",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "cohere" },
    { afterTransform: [] }
  ),
  cohereProxy
);

cohereRouter.get("/v1/models", handleModelRequest);

export const cohere = cohereRouter;
@@ -0,0 +1,135 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { DeepseekKey, keyPool } from "../shared/key-management";

let modelsCache: any = null;
let modelsCacheTime = 0;

const deepseekResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  res.status(200).json({ ...body, proxy: body.proxy });
};

const getModelsResponse = async () => {
  // Return the cache if it is less than 1 minute old
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return modelsCache;
  }

  try {
    // Get a Deepseek key via keyPool.get(); the model name only drives key selection
    const modelToUse = "deepseek-chat";
    const deepseekKey = keyPool.get(modelToUse, "deepseek") as DeepseekKey;

    if (!deepseekKey || !deepseekKey.key) {
      throw new Error("Failed to get valid Deepseek key");
    }

    // Fetch models from the Deepseek API with authorization
    const response = await axios.get("https://api.deepseek.com/models", {
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${deepseekKey.key}`,
      },
    });

    // If successful, update the cache
    if (response.data && response.data.data) {
      modelsCache = {
        object: "list",
        data: response.data.data.map((model: any) => ({
          id: model.id,
          object: "model",
          owned_by: "deepseek",
        })),
      };
    } else {
      throw new Error("Unexpected response format from Deepseek API");
    }
  } catch (error) {
    console.error("Error fetching Deepseek models:", error);
    throw error; // No fallback - the error is passed to the caller
  }

  modelsCacheTime = new Date().getTime();
  return modelsCache;
};

const handleModelRequest: RequestHandler = async (_req, res) => {
  try {
    const modelsResponse = await getModelsResponse();
    res.status(200).json(modelsResponse);
  } catch (error) {
    console.error("Error in handleModelRequest:", error);
    res.status(500).json({ error: "Failed to fetch models" });
  }
};

const deepseekProxy = createQueuedProxyMiddleware({
  mutations: [addKey, finalizeBody],
  target: "https://api.deepseek.com/beta",
  blockingResponseHandler: deepseekResponseHandler,
});

const deepseekRouter = Router();

// Combines all the assistant messages at the end of the context and adds the
// beta 'prefix' option; this makes prefills work the same way they do for Claude.
function enablePrefill(req: Request) {
  // Opt out by setting the NO_DEEPSEEK_PREFILL environment variable
  if (process.env.NO_DEEPSEEK_PREFILL) return;

  const msgs = req.body.messages;
  if (msgs.at(-1)?.role !== "assistant") return;

  let i = msgs.length - 1;
  let content = "";

  while (i >= 0 && msgs[i].role === "assistant") {
    // maybe we should also add a newline between messages? no for now.
    content = msgs[i--].content + content;
  }

  msgs.splice(i + 1, msgs.length, { role: "assistant", content, prefix: true });
}

function removeReasonerStuff(req: Request) {
  if (req.body.model === "deepseek-reasoner") {
    // https://api-docs.deepseek.com/guides/reasoning_model
    delete req.body.presence_penalty;
    delete req.body.frequency_penalty;
    delete req.body.temperature;
    delete req.body.top_p;
    delete req.body.logprobs;
    delete req.body.top_logprobs;
  }
}

deepseekRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "deepseek" },
    { afterTransform: [ enablePrefill, removeReasonerStuff ] }
  ),
  deepseekProxy
);

deepseekRouter.get("/v1/models", handleModelRequest);

export const deepseek = deepseekRouter;
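The prefill transform is easiest to see against a concrete request body. A minimal sketch of its effect, with hypothetical message content:

// Before enablePrefill (illustrative body):
// messages: [
//   { role: "user", content: "Write a haiku." },
//   { role: "assistant", content: "Cherry blossoms " },
//   { role: "assistant", content: "fall softly" },
// ]
// After enablePrefill, the trailing assistant run is merged into a single
// message carrying Deepseek's beta prefix flag:
// messages: [
//   { role: "user", content: "Write a haiku." },
//   { role: "assistant", content: "Cherry blossoms fall softly", prefix: true },
// ]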
+18 -2
@@ -12,6 +12,7 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
   // pass the _proxy_ key in this header too, instead of providing it as a
   // Bearer token in the Authorization header. So we need to check both.
   // Prefer the Authorization header if both are present.
+  // Google AI uses a key querystring parameter.
 
   if (req.headers.authorization) {
     const token = req.headers.authorization?.slice("Bearer ".length);
@@ -25,6 +26,18 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
     return token;
   }
 
+  if (req.headers["x-goog-api-key"]) {
+    const token = req.headers["x-goog-api-key"]?.toString();
+    delete req.headers["x-goog-api-key"];
+    return token;
+  }
+
+  if (req.query.key) {
+    const token = req.query.key?.toString();
+    delete req.query.key;
+    return token;
+  }
+
   return undefined;
 }
@@ -66,7 +79,8 @@ export const gatekeeper: RequestHandler = (req, res, next) => {
         req,
         res,
         403,
-        "Forbidden: no more IPs can authenticate with this user token"
+        `Forbidden: no more IP addresses allowed for this user token`,
+        { currentIp: ip, maxIps: user?.maxIps }
       );
     case "disabled":
       const bannedUser = getUser(token);
@@ -84,7 +98,8 @@ function sendError(
   req: Request,
   res: Response,
   status: number,
-  message: string
+  message: string,
+  data: any = {}
 ) {
   const isPost = req.method === "POST";
   const hasBody = isPost && req.body;
@@ -103,6 +118,7 @@ function sendError(
       format: "unknown",
       statusCode: status,
       reqId: req.id,
+      obj: data,
     },
   });
 }
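For clients, the upshot of the new extraction paths is that Google AI SDK-style auth now works against the proxy. A sketch, with placeholder hostname, path prefix, and token:

async function listModelsViaBothAuthStyles() {
  // Querystring auth, as Gemini SDKs send it; the gatekeeper strips `key`.
  const viaQuery = await fetch(
    "https://proxy.example.com/google-ai/v1beta/models?key=user-token"
  );
  // Header auth, also accepted and stripped before proxying upstream.
  const viaHeader = await fetch(
    "https://proxy.example.com/google-ai/v1beta/models",
    { headers: { "x-goog-api-key": "user-token" } }
  );
  return [await viaQuery.json(), await viaHeader.json()];
}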
@@ -0,0 +1,257 @@
import { Request, RequestHandler, Router } from "express";
import { config } from "../config";
import { transformAnthropicChatResponseToOpenAI } from "./anthropic";
import { ipLimiter } from "./rate-limit";
import {
  createPreprocessorMiddleware,
  finalizeSignedRequest,
  signGcpRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";

const LATEST_GCP_SONNET_MINOR_VERSION = "20240229";

let modelsCache: any = null;
let modelsCacheTime = 0;

const getModelsResponse = () => {
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return modelsCache;
  }

  if (!config.gcpCredentials) return { object: "list", data: [] };

  // https://docs.anthropic.com/en/docs/about-claude/models
  const variants = [
    "claude-3-haiku@20240307",
    "claude-3-5-haiku@20241022",
    "claude-3-5-sonnet@20240620",
    "claude-3-5-sonnet-v2@20241022",
    "claude-3-7-sonnet@20250219",
    "claude-sonnet-4@20250514",
    "claude-opus-4@20250514",
    "claude-opus-4-1@20250805",
  ];

  const models = variants.map((id) => ({
    id,
    object: "model",
    created: new Date().getTime(),
    owned_by: "anthropic",
    permission: [],
    root: "claude",
    parent: null,
  }));

  modelsCache = { object: "list", data: models };
  modelsCacheTime = new Date().getTime();

  return modelsCache;
};

const handleModelRequest: RequestHandler = (_req, res) => {
  res.status(200).json(getModelsResponse());
};

const gcpBlockingResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  let newBody = body;
  switch (`${req.inboundApi}<-${req.outboundApi}`) {
    case "openai<-anthropic-chat":
      req.log.info("Transforming Anthropic Chat back to OpenAI format");
      newBody = transformAnthropicChatResponseToOpenAI(body);
      break;
  }

  res.status(200).json({ ...newBody, proxy: body.proxy });
};

const gcpProxy = createQueuedProxyMiddleware({
  target: ({ signedRequest }) => {
    if (!signedRequest) throw new Error("Must sign request before proxying");
    return `${signedRequest.protocol}//${signedRequest.hostname}`;
  },
  mutations: [signGcpRequest, finalizeSignedRequest],
  blockingResponseHandler: gcpBlockingResponseHandler,
});

const oaiToChatPreprocessor = createPreprocessorMiddleware(
  { inApi: "openai", outApi: "anthropic-chat", service: "gcp" },
  { afterTransform: [maybeReassignModel] }
);

/**
 * Routes an OpenAI-compatible prompt to the Claude chat completion pipeline.
 * There is no legacy Claude text completion path for GCP; all requests go
 * through the chat preprocessor above.
 */
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
  oaiToChatPreprocessor(req, res, next);
};

const gcpRouter = Router();
gcpRouter.get("/v1/models", handleModelRequest);
// Native Anthropic chat completion endpoint.
gcpRouter.post(
  "/v1/messages",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "anthropic-chat", outApi: "anthropic-chat", service: "gcp" },
    { afterTransform: [maybeReassignModel] }
  ),
  gcpProxy
);

// OpenAI-to-GCP Anthropic compatibility endpoint.
gcpRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  preprocessOpenAICompatRequest,
  gcpProxy
);

/**
 * Tries to deal with:
 * - frontends sending GCP model names even when they want to use the OpenAI-
 *   compatible endpoint
 * - frontends sending Anthropic model names that GCP doesn't recognize
 * - frontends sending OpenAI model names because they expect the proxy to
 *   translate them
 *
 * If client sends GCP model ID it will be used verbatim. Otherwise, various
 * strategies are used to try to map a non-GCP model name to GCP model ID.
 */
function maybeReassignModel(req: Request) {
  // Validate Claude 4.1 Opus parameters before processing
  validateClaude41OpusParameters(req);

  const model = req.body.model;
  const DEFAULT_MODEL = "claude-3-5-sonnet-v2@20241022";

  // If it looks like a GCP model, use it as-is
  if (model.startsWith("claude-") && model.includes("@")) {
    return;
  }

  // Anthropic model names can look like:
  // - claude-3-sonnet
  // - claude-3.5-sonnet
  // - claude-3-5-haiku
  // - claude-3-5-haiku-latest
  // - claude-3-5-sonnet-20240620
  // - claude-opus-4-1 (new format)
  // - claude-4.1-opus (alternative format)
  const pattern = /^claude-(?:(\d+)[.-]?(\d)?-(sonnet|opus|haiku)(?:-(latest|\d+))?|(opus|sonnet|haiku)-(\d+)[.-]?(\d)?(?:-(latest|\d+))?)/i;
  const match = model.match(pattern);
  if (!match) {
    req.body.model = DEFAULT_MODEL;
    return;
  }

  // Handle both formats: claude-3-5-sonnet and claude-opus-4-1
  const [_, major1, minor1, flavor1, rev1, flavor2, major2, minor2, rev2] = match;

  let major, minor, flavor, rev;
  if (major1) {
    // Old format: claude-3-5-sonnet
    major = major1;
    minor = minor1;
    flavor = flavor1;
    rev = rev1;
  } else {
    // New format: claude-opus-4-1
    major = major2;
    minor = minor2;
    flavor = flavor2;
    rev = rev2;
  }

  const ver = minor ? `${major}.${minor}` : major;

  switch (ver) {
    case "3":
    case "3.0":
      switch (flavor) {
        case "haiku":
          req.body.model = "claude-3-haiku@20240307";
          break;
        case "opus":
          req.body.model = "claude-3-opus@20240229";
          break;
        case "sonnet":
          req.body.model = "claude-3-sonnet@20240229";
          break;
        default:
          req.body.model = "claude-3-sonnet@20240229";
      }
      return;

    case "3.5":
      switch (flavor) {
        case "haiku":
          req.body.model = "claude-3-5-haiku@20241022";
          return;
        case "opus":
          // no 3.5 opus yet
          req.body.model = DEFAULT_MODEL;
          return;
        case "sonnet":
          if (rev === "20240620") {
            req.body.model = "claude-3-5-sonnet@20240620";
          } else {
            // includes -latest, edit if anthropic actually releases 3.5 sonnet v3
            req.body.model = DEFAULT_MODEL;
          }
          return;
        default:
          req.body.model = DEFAULT_MODEL;
      }
      return;

    case "3.7":
      switch (flavor) {
        case "sonnet":
          req.body.model = "claude-3-7-sonnet@20250219";
          return;
      }
      break;

    case "4":
    case "4.0":
      switch (flavor) {
        case "opus":
          req.body.model = "claude-opus-4@20250514";
          return;
        case "sonnet":
          req.body.model = "claude-sonnet-4@20250514";
          return;
        default:
          req.body.model = DEFAULT_MODEL;
      }
      break;

    case "4.1":
      switch (flavor) {
        case "opus":
          req.body.model = "claude-opus-4-1@20250805";
          return;
        default:
          req.body.model = DEFAULT_MODEL;
      }
      break;

    default:
      req.body.model = DEFAULT_MODEL;
  }
}

export const gcp = gcpRouter;
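A few concrete mappings help sanity-check the reassignment switch. Informal expectations, derived from the code above:

// "claude-3-5-sonnet-v2@20241022" -> unchanged (already a GCP model ID)
// "claude-3.5-sonnet"             -> DEFAULT_MODEL ("claude-3-5-sonnet-v2@20241022")
// "claude-3-5-sonnet-20240620"    -> "claude-3-5-sonnet@20240620"
// "claude-opus-4-1"               -> "claude-opus-4-1@20250805"
// "gpt-4o" (no regex match)       -> DEFAULT_MODEL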
+203 -42
@@ -1,25 +1,24 @@
-import { Request, RequestHandler, Router } from "express";
+import { Request, RequestHandler, Router, Response, NextFunction } from "express";
-import { createProxyMiddleware } from "http-proxy-middleware";
 import { v4 } from "uuid";
+import { GoogleAIKey, keyPool } from "../shared/key-management";
 import { config } from "../config";
-import { logger } from "../logger";
-import { createQueueMiddleware } from "./queue";
 import { ipLimiter } from "./rate-limit";
-import { handleProxyError } from "./middleware/common";
 import {
-  createOnProxyReqHandler,
   createPreprocessorMiddleware,
   finalizeSignedRequest,
 } from "./middleware/request";
-import {
+import { ProxyResHandlerWithBody } from "./middleware/response";
-  createOnProxyResHandler,
+import { addGoogleAIKey } from "./middleware/request/mutators/add-google-ai-key";
-  ProxyResHandlerWithBody,
+import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
-} from "./middleware/response";
+import axios from "axios";
-import { addGoogleAIKey } from "./middleware/request/preprocessors/add-google-ai-key";
 
 let modelsCache: any = null;
 let modelsCacheTime = 0;
+
+// Cache for native Google AI models
+let nativeModelsCache: any = null;
+let nativeModelsCacheTime = 0;
+
 // https://ai.google.dev/models/gemini
 // TODO: list models https://ai.google.dev/tutorials/rest_quickstart#list_models
 
@@ -30,10 +29,24 @@ const getModelsResponse = () => {
 
   if (!config.googleAIKey) return { object: "list", data: [] };
 
-  const googleAIVariants = ["gemini-pro", "gemini-1.0-pro", "gemini-1.5-pro"];
+  const keys = keyPool
+    .list()
+    .filter((k) => k.service === "google-ai") as GoogleAIKey[];
+  if (keys.length === 0) {
+    modelsCache = { object: "list", data: [] };
+    modelsCacheTime = new Date().getTime();
+    return modelsCache;
+  }
 
-  const models = googleAIVariants.map((id) => ({
+  // Get all model IDs from keys, excluding any with "bard" in the name
-    id,
+  const modelIds = Array.from(
+    new Set(keys.map((k) => k.modelIds).flat())
+  ).filter((id) => id.startsWith("models/") && !id.includes("bard"));
+
+  // Strip "models/" prefix from IDs before creating model objects
+  const models = modelIds.map((id) => ({
+    // Strip "models/" prefix from ID for consistency with request processing
+    id: id.startsWith("models/") ? id.slice("models/".length) : id,
     object: "model",
     created: new Date().getTime(),
     owned_by: "google",
@@ -48,12 +61,51 @@ const getModelsResponse = () => {
   return modelsCache;
 };
 
-const handleModelRequest: RequestHandler = (_req, res) => {
+// Function to fetch native models from Google AI API
+const getNativeModelsResponse = async () => {
+  // Return cached value if it was refreshed in the last minute
+  if (new Date().getTime() - nativeModelsCacheTime < 1000 * 60) {
+    return nativeModelsCache;
+  }
+
+  /*
+   * The official Google API requires an API key. However SillyTavern only needs
+   * a list of model IDs and does not care about any other model metadata. We
+   * can therefore generate a **synthetic** response from the keys already
+   * loaded into the proxy (same source we use for the OpenAI-compatible
+   * endpoint) and completely avoid the outbound request. This removes the
+   * need for the frontend to supply the proxy password as an API key and
+   * prevents 4xx/5xx errors when the real Google API is unreachable or the key
+   * is missing.
+   */
+  const openaiStyle = getModelsResponse();
+  const models = (openaiStyle.data || []).map((m: any) => ({
+    // Google AI Studio returns names in the format "models/<id>"
+    name: `models/${m.id}`,
+    supportedGenerationMethods: ["generateContent"],
+  }));
+
+  nativeModelsCache = { models };
+  nativeModelsCacheTime = new Date().getTime();
+  return nativeModelsCache;
+};
+
+const handleModelRequest: RequestHandler = (_req: Request, res: any) => {
   res.status(200).json(getModelsResponse());
 };
 
-/** Only used for non-streaming requests. */
+// Native Gemini API model list request
-const googleAIResponseHandler: ProxyResHandlerWithBody = async (
+const handleNativeModelRequest: RequestHandler = async (_req: Request, res: any) => {
+  try {
+    const modelsResponse = await getNativeModelsResponse();
+    res.status(200).json(modelsResponse);
+  } catch (error) {
+    console.error("Error in handleNativeModelRequest:", error);
+    res.status(500).json({ error: "Failed to fetch models" });
+  }
+};
+
+const googleAIBlockingResponseHandler: ProxyResHandlerWithBody = async (
   _proxyRes,
   req,
   res,
@@ -77,8 +129,30 @@ function transformGoogleAIResponse(
   req: Request
 ): Record<string, any> {
   const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
-  const parts = resBody.candidates[0].content?.parts ?? [{ text: "" }];
-  const content = parts[0].text.replace(/^(.{0,50}?): /, () => "");
+  // Handle the case where content might have different structures
+  let content = "";
+
+  // Check if the response has the expected structure
+  if (resBody.candidates && resBody.candidates[0]) {
+    const candidate = resBody.candidates[0];
+
+    // Extract content text with multiple fallbacks
+    if (candidate.content?.parts && candidate.content.parts[0]?.text) {
+      // Regular format with parts array containing text
+      content = candidate.content.parts[0].text;
+    } else if (candidate.content?.text) {
+      // Alternate format with direct text property
+      content = candidate.content.text;
+    } else if (typeof candidate.content?.parts?.[0] === 'string') {
+      // Some formats might have string parts
+      content = candidate.content.parts[0];
+    }
+
+    // Apply cleanup to the content if needed
+    content = content.replace(/^(.{0,50}?): /, () => "");
+  }
+
   return {
     id: "goo-" + v4(),
     object: "chat.completion",
@@ -92,43 +166,130 @@ function transformGoogleAIResponse(
     choices: [
       {
         message: { role: "assistant", content },
-        finish_reason: resBody.candidates[0].finishReason,
+        finish_reason: resBody.candidates?.[0]?.finishReason || "STOP",
         index: 0,
       },
     ],
   };
 }
 
-const googleAIProxy = createQueueMiddleware({
+const googleAIProxy = createQueuedProxyMiddleware({
-  beforeProxy: addGoogleAIKey,
+  target: ({ signedRequest }: { signedRequest: any }) => {
-  proxyMiddleware: createProxyMiddleware({
+    if (!signedRequest) throw new Error("Must sign request before proxying");
-    target: "bad-target-will-be-rewritten",
+    const { protocol, hostname} = signedRequest;
-    router: ({ signedRequest }) => {
+    return `${protocol}//${hostname}`;
-      const { protocol, hostname, path } = signedRequest;
+  },
-      return `${protocol}//${hostname}${path}`;
+  mutations: [addGoogleAIKey, finalizeSignedRequest],
-    },
+  blockingResponseHandler: googleAIBlockingResponseHandler,
-    changeOrigin: true,
-    selfHandleResponse: true,
-    logger,
-    on: {
-      proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
-      proxyRes: createOnProxyResHandler([googleAIResponseHandler]),
-      error: handleProxyError,
-    },
-  }),
 });
 
 const googleAIRouter = Router();
 googleAIRouter.get("/v1/models", handleModelRequest);
+googleAIRouter.get("/:apiVersion(v1alpha|v1beta)/models", handleNativeModelRequest);
+
+/**
+ * Processes the thinking budget for Gemini 2.5 Flash model.
+ * Validation has been disabled - budget is passed through without limits.
+ */
+function processThinkingBudget(req: Request) {
+  // Validation disabled - budget is passed through without any range limits
+  // Previously enforced 0-24576 token limit
+}
+
+function setStreamFlag(req: Request) {
+  const isStreaming = req.url.includes("streamGenerateContent");
+  if (isStreaming) {
+    req.body.stream = true;
+    req.isStreaming = true;
+  } else {
+    req.body.stream = false;
+    req.isStreaming = false;
+  }
+}
+
+/**
+ * Strips 'models/' prefix from the beginning of model IDs if present.
+ * No longer forces redirection to gemini-1.5-pro-latest for non-Gemini models.
+ **/
+function maybeReassignModel(req: Request) {
+  // Ensure model is on body as a lot of middleware will expect it.
+  const model = req.body.model || req.url.split("/").pop()?.split(":").shift();
+  if (!model) {
+    throw new Error("You must specify a model with your request.");
+  }
+  req.body.model = model;
+
+  // Only strip the 'models/' prefix if present
+  if (model.startsWith("models/")) {
+    req.body.model = model.slice("models/".length);
+    req.log.info({ originalModel: model, updatedModel: req.body.model }, "Stripped 'models/' prefix from model ID");
+  }
+
+  // No longer redirecting non-Gemini models to gemini-1.5-pro-latest
+  // This allows the original model to be passed through to the API
+  // If it's an invalid model, the Google AI API will return the appropriate error
+}
+
+/**
+ * Middleware to check for and block requests to experimental models.
+ * This function is intended to be used as a RequestPreprocessor.
+ * It throws an error if an experimental model is detected, which should be
+ * caught by the proxy's onError handler.
+ *
+ * Models can be allowed through the ALLOWED_EXP_MODELS environment variable.
+ */
+function checkAndBlockExperimentalModels(req: Request) { // Changed signature
+  const modelId = req.body.model as string | undefined;
+
+  // Check if the model ID contains "exp" (case-insensitive)
+  if (modelId && modelId.toLowerCase().includes("exp")) {
+    // Check if this specific model is in the allowlist
+    const allowedModels = config.allowedExpModels
+      ?.split(",")
+      .map(model => model.trim())
+      .filter(model => model.length > 0) || [];
+
+    const isAllowed = allowedModels.some(allowedModel =>
+      modelId.toLowerCase() === allowedModel.toLowerCase()
+    );
+
+    if (isAllowed) {
+      req.log.info({ modelId }, "Allowing experimental Google AI model via allowlist.");
+      return; // Allow the request to proceed
+    }
+
+    req.log.warn({ modelId }, "Blocking request to experimental Google AI model.");
+    const err: any = new Error("Experimental models are too unstable to be supported in proxy code. Please use preview models instead.");
+    err.statusCode = 400;
+    throw err;
+  }
+  // If no experimental model, do nothing, allowing request to proceed.
+}
+
+// Native Google AI chat completion endpoint
+googleAIRouter.post(
+  "/:apiVersion(v1alpha|v1beta)/models/:modelId:(generateContent|streamGenerateContent)",
+  ipLimiter,
+  createPreprocessorMiddleware(
+    { inApi: "google-ai", outApi: "google-ai", service: "google-ai" },
+    {
+      beforeTransform: [maybeReassignModel],
+      afterTransform: [checkAndBlockExperimentalModels, setStreamFlag, processThinkingBudget]
+    }
+  ),
+  googleAIProxy
+);
+
 // OpenAI-to-Google AI compatibility endpoint.
 googleAIRouter.post(
   "/v1/chat/completions",
   ipLimiter,
-  createPreprocessorMiddleware({
+  createPreprocessorMiddleware(
-    inApi: "openai",
+    { inApi: "openai", outApi: "google-ai", service: "google-ai" },
-    outApi: "google-ai",
+    {
-    service: "google-ai",
+      afterTransform: [maybeReassignModel, checkAndBlockExperimentalModels, processThinkingBudget]
-  }),
+    }
+  ),
   googleAIProxy
 );
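For reference, the synthetic native model list is assembled entirely from keys the proxy has already checked, so no outbound call is made. A sketch of the response shape (model names illustrative):

const exampleNativeModelsResponse = {
  models: [
    { name: "models/gemini-1.5-pro", supportedGenerationMethods: ["generateContent"] },
    { name: "models/gemini-1.5-flash", supportedGenerationMethods: ["generateContent"] },
  ],
};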
@@ -1,6 +1,6 @@
 import { Request, Response } from "express";
 import http from "http";
-import httpProxy from "http-proxy";
+import { Socket } from "net";
 import { ZodError } from "zod";
 import { generateErrorMessage } from "zod-error";
 import { HttpError } from "../../shared/errors";
@@ -12,10 +12,13 @@ const OPENAI_CHAT_COMPLETION_ENDPOINT = "/v1/chat/completions";
 const OPENAI_TEXT_COMPLETION_ENDPOINT = "/v1/completions";
 const OPENAI_EMBEDDINGS_ENDPOINT = "/v1/embeddings";
 const OPENAI_IMAGE_COMPLETION_ENDPOINT = "/v1/images/generations";
+const OPENAI_RESPONSES_ENDPOINT = "/v1/responses";
 const ANTHROPIC_COMPLETION_ENDPOINT = "/v1/complete";
 const ANTHROPIC_MESSAGES_ENDPOINT = "/v1/messages";
 const ANTHROPIC_SONNET_COMPAT_ENDPOINT = "/v1/sonnet";
 const ANTHROPIC_OPUS_COMPAT_ENDPOINT = "/v1/opus";
+const GOOGLE_AI_ALPHA_COMPLETION_ENDPOINT = "/v1alpha/models";
+const GOOGLE_AI_BETA_COMPLETION_ENDPOINT = "/v1beta/models";
 
 export function isTextGenerationRequest(req: Request) {
   return (
@@ -23,10 +26,13 @@ export function isTextGenerationRequest(req: Request) {
     [
       OPENAI_CHAT_COMPLETION_ENDPOINT,
       OPENAI_TEXT_COMPLETION_ENDPOINT,
+      OPENAI_RESPONSES_ENDPOINT,
      ANTHROPIC_COMPLETION_ENDPOINT,
      ANTHROPIC_MESSAGES_ENDPOINT,
      ANTHROPIC_SONNET_COMPAT_ENDPOINT,
      ANTHROPIC_OPUS_COMPAT_ENDPOINT,
+      GOOGLE_AI_ALPHA_COMPLETION_ENDPOINT,
+      GOOGLE_AI_BETA_COMPLETION_ENDPOINT,
     ].some((endpoint) => req.path.startsWith(endpoint))
   );
 }
@@ -70,16 +76,23 @@ export function sendProxyError(
   });
 }
 
-export const handleProxyError: httpProxy.ErrorCallback = (err, req, res) => {
-  req.log.error(err, `Error during http-proxy-middleware request`);
-  classifyErrorAndSend(err, req as Request, res as Response);
-};
+/**
+ * Handles errors thrown during preparation of a proxy request (before it is
+ * sent to the upstream API), typically due to validation, quota, or other
+ * pre-flight checks. Depending on the error class, this function will send an
+ * appropriate error response to the client, streaming it if necessary.
+ */
 export const classifyErrorAndSend = (
   err: Error,
   req: Request,
-  res: Response
+  res: Response | Socket
 ) => {
+  if (res instanceof Socket) {
+    // We should always have an Express response object here, but http-proxy's
+    // ErrorCallback type says it could be just a Socket.
+    req.log.error(err, "Caught error while proxying request to target but cannot send error response to client.");
+    return res.destroy();
+  }
   try {
     const { statusCode, statusMessage, userMessage, ...errorDetails } =
       classifyError(err);
@@ -221,9 +234,28 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
   switch (format) {
     case "openai":
     case "mistral-ai":
-      // Can be null if the model wants to invoke tools rather than return a
-      // completion.
-      return body.choices[0].message.content || "";
+      // Few possible values:
+      // - choices[0].message.content
+      // - choices[0].message with no content if model is invoking a tool
+      return body.choices?.[0]?.message?.content || "";
+    case "openai-responses":
+      // Handle the original Responses API format
+      if (body.output && Array.isArray(body.output)) {
+        // Look for a message type in the output array
+        for (const item of body.output) {
+          if (item.type === "message" && item.content && Array.isArray(item.content)) {
+            // Extract text content from each content item
+            return item.content
+              .filter((contentItem: any) => contentItem.type === "output_text")
+              .map((contentItem: any) => contentItem.text)
+              .join("");
+          }
+        }
+      }
+      // If we've been transformed to chat completion format already
+      return body.choices?.[0]?.message?.content || "";
+    case "mistral-text":
+      return body.outputs?.[0]?.text || "";
     case "openai-text":
       return body.choices[0].text;
     case "anthropic-chat":
@@ -252,7 +284,15 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
       if ("choices" in body) {
         return body.choices[0].message.content;
       }
-      return body.candidates[0].content.parts[0].text;
+      const text = body.candidates[0].content?.parts?.[0]?.text;
+      if (!text) {
+        req.log.warn(
+          { body: JSON.stringify(body) },
+          "Received empty Google AI text completion"
+        );
+        return "";
+      }
+      return text;
     case "openai-image":
       return body.data?.map((item: any) => item.url).join("\n");
     default:
@@ -260,22 +300,23 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
   }
 }
 
-export function getModelFromBody(req: Request, body: Record<string, any>) {
+export function getModelFromBody(req: Request, resBody: Record<string, any>) {
   const format = req.outboundApi;
   switch (format) {
     case "openai":
     case "openai-text":
+    case "openai-responses":
+      return resBody.model;
     case "mistral-ai":
-      return body.model;
+    case "mistral-text":
     case "openai-image":
+    case "google-ai":
+      // These formats don't have a model in the response body.
       return req.body.model;
     case "anthropic-chat":
    case "anthropic-text":
       // Anthropic confirms the model in the response, but AWS Claude doesn't.
-      return body.model || req.body.model;
+      return resBody.model || req.body.model;
-    case "google-ai":
-      // Google doesn't confirm the model in the response.
-      return req.body.model;
     default:
       assertNever(format);
   }
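The new openai-responses branch is clearer with a sample body. A sketch of a hypothetical Responses API result and what the extraction yields:

const responsesBody = {
  model: "gpt-5",
  output: [
    { type: "reasoning", summary: [] },
    {
      type: "message",
      content: [
        { type: "output_text", text: "Hello" },
        { type: "output_text", text: ", world." },
      ],
    },
  ],
};
// The branch skips non-message items, filters for output_text content parts,
// and joins their text, yielding "Hello, world."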
@@ -1,43 +1,38 @@
 import type { Request } from "express";
-import type { ClientRequest } from "http";
-import type { ProxyReqCallback } from "http-proxy";
 
-export { createOnProxyReqHandler } from "./onproxyreq-factory";
+import { ProxyReqManager } from "./proxy-req-manager";
 export {
   createPreprocessorMiddleware,
   createEmbeddingsPreprocessorMiddleware,
 } from "./preprocessor-factory";
 
-// Express middleware (runs before http-proxy-middleware, can be async)
+// Preprocessors (runs before request is queued, usually body transformation/validation)
-export { addAzureKey } from "./preprocessors/add-azure-key";
 export { applyQuotaLimits } from "./preprocessors/apply-quota-limits";
+export { blockZoomerOrigins } from "./preprocessors/block-zoomer-origins";
 export { countPromptTokens } from "./preprocessors/count-prompt-tokens";
 export { languageFilter } from "./preprocessors/language-filter";
 export { setApiFormat } from "./preprocessors/set-api-format";
-export { signAwsRequest } from "./preprocessors/sign-aws-request";
 export { transformOutboundPayload } from "./preprocessors/transform-outbound-payload";
 export { validateContextSize } from "./preprocessors/validate-context-size";
+export { validateModelFamily } from "./preprocessors/validate-model-family";
 export { validateVision } from "./preprocessors/validate-vision";
 
-// http-proxy-middleware callbacks (runs on onProxyReq, cannot be async)
+// Proxy request mutators (runs every time request is dequeued, before proxying, usually for auth/signing)
-export { addAnthropicPreamble } from "./onproxyreq/add-anthropic-preamble";
+export { addKey, addKeyForEmbeddingsRequest } from "./mutators/add-key";
-export { addKey, addKeyForEmbeddingsRequest } from "./onproxyreq/add-key";
+export { addAzureKey } from "./mutators/add-azure-key";
-export { blockZoomerOrigins } from "./onproxyreq/block-zoomer-origins";
+export { finalizeBody } from "./mutators/finalize-body";
-export { checkModelFamily } from "./onproxyreq/check-model-family";
+export { finalizeSignedRequest } from "./mutators/finalize-signed-request";
-export { finalizeBody } from "./onproxyreq/finalize-body";
+export { signAwsRequest } from "./mutators/sign-aws-request";
-export { finalizeSignedRequest } from "./onproxyreq/finalize-signed-request";
+export { signGcpRequest } from "./mutators/sign-vertex-ai-request";
-export { stripHeaders } from "./onproxyreq/strip-headers";
+export { stripHeaders } from "./mutators/strip-headers";
 
 /**
- * Middleware that runs prior to the request being handled by http-proxy-
- * middleware.
+ * Middleware that runs prior to the request being queued or handled by
+ * http-proxy-middleware. You will not have access to the proxied
+ * request/response objects since they have not yet been sent to the API.
  *
- * Async functions can be used here, but you will not have access to the proxied
- * request/response objects, nor the data set by ProxyRequestMiddleware
- * functions as they have not yet been run.
- *
- * User will have been authenticated by the time this middleware runs, but your
- * request won't have been assigned an API key yet.
+ * User will have been authenticated by the proxy's gatekeeper, but the request
+ * won't have been assigned an upstream API key yet.
  *
 * Note that these functions only run once ever per request, even if the request
 * is automatically retried by the request queue middleware.
@@ -45,17 +40,14 @@ export { stripHeaders } from "./onproxyreq/strip-headers";
 export type RequestPreprocessor = (req: Request) => void | Promise<void>;
 
 /**
- * Callbacks that run immediately before the request is sent to the API in
- * response to http-proxy-middleware's `proxyReq` event.
+ * Middleware that runs immediately before the request is proxied to the
+ * upstream API, after dequeueing the request from the request queue.
  *
- * Async functions cannot be used here as HPM's event emitter is not async and
- * will not wait for the promise to resolve before sending the request.
- *
- * Note that these functions may be run multiple times per request if the
- * first attempt is rate limited and the request is automatically retried by the
- * request queue middleware.
+ * Because these middleware may be run multiple times per request if a retryable
+ * error occurs and the request put back in the queue, they must be idempotent.
+ * A change manager is provided to allow the middleware to make changes to the
+ * request which can be automatically reverted.
 */
-export type HPMRequestCallback = ProxyReqCallback<ClientRequest, Request>;
-
-export const forceModel = (model: string) => (req: Request) =>
-  void (req.body.model = model);
+export type ProxyReqMutator = (
+  changeManager: ProxyReqManager
+) => void | Promise<void>;
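The new mutator contract is small enough to show in full. A minimal sketch of a custom mutator (the header name is illustrative, and the import path assumes the proxy source root):

import type { ProxyReqMutator } from "./middleware/request";

// Stamps a request ID header each time the request is dequeued. Header
// changes made through the manager can be reverted if the request is
// requeued, which keeps the mutator idempotent.
const addRequestIdHeader: ProxyReqMutator = (manager) => {
  manager.setHeader("X-Proxy-Request-Id", String(manager.request.id));
};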
+13 -7
@@ -3,14 +3,16 @@ import {
   AzureOpenAIKey,
   keyPool,
 } from "../../../../shared/key-management";
-import { RequestPreprocessor } from "../index";
+import { ProxyReqMutator } from "../index";
 
-export const addAzureKey: RequestPreprocessor = (req) => {
+export const addAzureKey: ProxyReqMutator = async (manager) => {
+  const req = manager.request;
   const validAPIs: APIFormat[] = ["openai", "openai-image"];
   const apisValid = [req.outboundApi, req.inboundApi].every((api) =>
     validAPIs.includes(api)
   );
   const serviceValid = req.service === "azure";
 
   if (!apisValid || !serviceValid) {
     throw new Error("addAzureKey called on invalid request");
   }
@@ -22,11 +24,15 @@ export const addAzureKey: RequestPreprocessor = (req) => {
   const model = req.body.model.startsWith("azure-")
     ? req.body.model
     : `azure-${req.body.model}`;
+  // TODO: untracked mutation to body, I think this should just be a
-  req.key = keyPool.get(model, "azure");
+  // RequestPreprocessor because we don't need to do it every dequeue.
   req.body.model = model;
+
+  const key = keyPool.get(model, "azure");
+  manager.setKey(key);
+
   // Handles the sole Azure API deviation from the OpenAI spec (that I know of)
+  // TODO: this should also probably be a RequestPreprocessor
   const notNullOrUndefined = (x: any) => x !== null && x !== undefined;
   if ([req.body.logprobs, req.body.top_logprobs].some(notNullOrUndefined)) {
     // OpenAI wants logprobs: true/false and top_logprobs: number
@@ -43,7 +49,7 @@ export const addAzureKey: RequestPreprocessor = (req) => {
   }
 
   req.log.info(
-    { key: req.key.hash, model },
+    { key: key.hash, model },
     "Assigned Azure OpenAI key to request"
   );
 
@@ -55,7 +61,7 @@ export const addAzureKey: RequestPreprocessor = (req) => {
   const apiVersion =
     req.outboundApi === "openai" ? "2023-09-01-preview" : "2024-02-15-preview";
 
-  req.signedRequest = {
+  manager.setSignedRequest({
     method: "POST",
     protocol: "https:",
     hostname: `${resourceName}.openai.azure.com`,
@@ -66,7 +72,7 @@ export const addAzureKey: RequestPreprocessor = (req) => {
       ["api-key"]: apiKey,
     },
     body: JSON.stringify(req.body),
-  };
+  });
 };
 
 function getCredentialsFromKey(key: AzureOpenAIKey) {
@@ -0,0 +1,47 @@
import { keyPool } from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";

export const addGoogleAIKey: ProxyReqMutator = (manager) => {
  const req = manager.request;
  const inboundValid =
    req.inboundApi === "openai" || req.inboundApi === "google-ai";
  const outboundValid = req.outboundApi === "google-ai";

  const serviceValid = req.service === "google-ai";
  if (!inboundValid || !outboundValid || !serviceValid) {
    throw new Error("addGoogleAIKey called on invalid request");
  }

  const model = req.body.model;
  const key = keyPool.get(model, "google-ai");
  manager.setKey(key);

  req.log.info(
    { key: key.hash, model, stream: req.isStreaming },
    "Assigned Google AI API key to request"
  );

  // https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:generateContent?key=$API_KEY
  // https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}
  const payload = { ...req.body, stream: undefined, model: undefined };

  // For OpenAI -> Google conversion we don't actually have the API version
  const apiVersion = req.params.apiVersion || "v1beta";

  // TODO: this isn't actually signed, so the manager api is a little unclear.
  // With the ProxyReqManager refactor, it's probably no longer necessary to
  // do this because we can modify the path using Manager.setPath.
  manager.setSignedRequest({
    method: "POST",
    protocol: "https:",
    hostname: "generativelanguage.googleapis.com",
    path: `/${apiVersion}/models/${model}:${
      req.isStreaming ? "streamGenerateContent?alt=sse&" : "generateContent?"
    }key=${key.key}`,
    headers: {
      ["host"]: `generativelanguage.googleapis.com`,
      ["content-type"]: "application/json",
    },
    body: JSON.stringify(payload),
  });
};
+50
-23
@@ -2,10 +2,12 @@ import { AnthropicChatMessage } from "../../../../shared/api-schemas";
|
|||||||
import { containsImageContent } from "../../../../shared/api-schemas/anthropic";
|
import { containsImageContent } from "../../../../shared/api-schemas/anthropic";
|
||||||
import { Key, OpenAIKey, keyPool } from "../../../../shared/key-management";
|
import { Key, OpenAIKey, keyPool } from "../../../../shared/key-management";
|
||||||
import { isEmbeddingsRequest } from "../../common";
|
import { isEmbeddingsRequest } from "../../common";
|
||||||
import { HPMRequestCallback } from "../index";
|
|
||||||
import { assertNever } from "../../../../shared/utils";
|
import { assertNever } from "../../../../shared/utils";
|
||||||
|
import { ProxyReqMutator } from "../index";
|
||||||
|
|
||||||
|
export const addKey: ProxyReqMutator = (manager) => {
|
||||||
|
const req = manager.request;
|
||||||
|
|
||||||
export const addKey: HPMRequestCallback = (proxyReq, req) => {
|
|
||||||
let assignedKey: Key;
|
let assignedKey: Key;
|
||||||
const { service, inboundApi, outboundApi, body } = req;
|
const { service, inboundApi, outboundApi, body } = req;
|
||||||
|
|
||||||
@@ -29,7 +31,9 @@ export const addKey: HPMRequestCallback = (proxyReq, req) => {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (inboundApi === outboundApi) {
|
if (inboundApi === outboundApi) {
|
||||||
assignedKey = keyPool.get(body.model, service, needsMultimodal);
|
// Pass streaming information for GPT-5 models that require verified keys for streaming
|
||||||
|
const isStreaming = body.stream === true;
|
||||||
|
assignedKey = keyPool.get(body.model, service, needsMultimodal, isStreaming);
|
||||||
} else {
|
} else {
|
||||||
switch (outboundApi) {
|
switch (outboundApi) {
|
||||||
// If we are translating between API formats we may need to select a model
|
// If we are translating between API formats we may need to select a model
|
||||||
@@ -38,26 +42,32 @@ export const addKey: HPMRequestCallback = (proxyReq, req) => {
|
|||||||
// translation now reassigns the model earlier in the request pipeline.
|
// translation now reassigns the model earlier in the request pipeline.
|
||||||
case "anthropic-text":
|
case "anthropic-text":
|
||||||
case "anthropic-chat":
|
case "anthropic-chat":
|
||||||
assignedKey = keyPool.get("claude-v1", service, needsMultimodal);
|
case "mistral-ai":
|
||||||
|
case "mistral-text":
|
||||||
|
case "google-ai":
|
||||||
|
assignedKey = keyPool.get(body.model, service);
|
||||||
break;
|
break;
|
||||||
case "openai-text":
|
case "openai-text":
|
||||||
assignedKey = keyPool.get("gpt-3.5-turbo-instruct", service);
|
assignedKey = keyPool.get("gpt-3.5-turbo-instruct", service);
|
||||||
break;
|
break;
|
||||||
case "openai-image":
|
case "openai-image":
|
||||||
assignedKey = keyPool.get("dall-e-3", service);
|
// Use the actual model from the request body instead of defaulting to dall-e-3
|
||||||
|
// This ensures that gpt-image-1 requests get keys that are verified for gpt-image-1
|
||||||
|
assignedKey = keyPool.get(body.model, service);
|
||||||
|
break;
|
||||||
|
case "openai-responses":
|
||||||
|
assignedKey = keyPool.get(body.model, service);
|
||||||
break;
|
break;
|
||||||
case "openai":
|
case "openai":
|
||||||
case "google-ai":
|
|
||||||
case "mistral-ai":
|
|
||||||
throw new Error(
|
throw new Error(
|
||||||
`add-key should not be called for outbound API ${outboundApi}`
|
`Outbound API ${outboundApi} is not supported for ${inboundApi}`
|
||||||
);
|
);
|
||||||
default:
|
default:
|
||||||
assertNever(outboundApi);
|
assertNever(outboundApi);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
req.key = assignedKey;
|
manager.setKey(assignedKey);
|
||||||
req.log.info(
|
req.log.info(
|
||||||
{ key: assignedKey.hash, model: body.model, inboundApi, outboundApi },
|
{ key: assignedKey.hash, model: body.model, inboundApi, outboundApi },
|
||||||
"Assigned key to request"
|
"Assigned key to request"
|
||||||
@@ -66,23 +76,42 @@ export const addKey: HPMRequestCallback = (proxyReq, req) => {
|
|||||||
// TODO: KeyProvider should assemble all necessary headers
|
// TODO: KeyProvider should assemble all necessary headers
|
||||||
switch (assignedKey.service) {
|
switch (assignedKey.service) {
|
||||||
case "anthropic":
|
case "anthropic":
|
||||||
proxyReq.setHeader("X-API-Key", assignedKey.key);
|
manager.setHeader("X-API-Key", assignedKey.key);
|
||||||
|
if (!manager.request.headers["anthropic-version"]) {
|
||||||
|
manager.setHeader("anthropic-version", "2023-06-01");
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
case "openai":
|
case "openai":
|
||||||
const key: OpenAIKey = assignedKey as OpenAIKey;
|
const key: OpenAIKey = assignedKey as OpenAIKey;
|
||||||
if (key.organizationId) {
|
if (key.organizationId && !key.key.includes("svcacct")) {
|
||||||
proxyReq.setHeader("OpenAI-Organization", key.organizationId);
|
manager.setHeader("OpenAI-Organization", key.organizationId);
|
||||||
}
|
}
|
||||||
proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
|
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
|
||||||
break;
|
break;
|
||||||
case "mistral-ai":
|
case "mistral-ai":
|
||||||
proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
|
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
|
||||||
break;
|
break;
|
||||||
case "azure":
|
case "azure":
|
||||||
const azureKey = assignedKey.key;
|
const azureKey = assignedKey.key;
|
||||||
proxyReq.setHeader("api-key", azureKey);
|
manager.setHeader("api-key", azureKey);
|
||||||
|
break;
|
||||||
|
case "deepseek":
|
||||||
|
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
|
||||||
|
break;
|
||||||
|
case "xai":
|
||||||
|
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
|
||||||
|
break;
|
||||||
|
case "cohere":
|
||||||
|
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
|
||||||
|
break;
|
||||||
|
case "qwen":
|
||||||
|
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
|
||||||
|
break;
|
||||||
|
case "moonshot":
|
||||||
|
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
|
||||||
break;
|
break;
|
||||||
case "aws":
|
case "aws":
|
||||||
|
case "gcp":
|
||||||
case "google-ai":
|
case "google-ai":
|
||||||
throw new Error("add-key should not be used for this service.");
|
throw new Error("add-key should not be used for this service.");
|
||||||
default:
|
default:
|
||||||
@@ -94,10 +123,8 @@ export const addKey: HPMRequestCallback = (proxyReq, req) => {
|
|||||||
  * Special case for embeddings requests which don't go through the normal
  * request pipeline.
  */
-export const addKeyForEmbeddingsRequest: HPMRequestCallback = (
-  proxyReq,
-  req
-) => {
+export const addKeyForEmbeddingsRequest: ProxyReqMutator = (manager) => {
+  const req = manager.request;
   if (!isEmbeddingsRequest(req)) {
     throw new Error(
       "addKeyForEmbeddingsRequest called on non-embeddings request"

@@ -108,18 +135,18 @@ export const addKeyForEmbeddingsRequest: HPMRequestCallback = (
     throw new Error("Embeddings requests must be from OpenAI");
   }

-  req.body = { input: req.body.input, model: "text-embedding-ada-002" };
+  manager.setBody({ input: req.body.input, model: "text-embedding-ada-002" });

   const key = keyPool.get("text-embedding-ada-002", "openai") as OpenAIKey;

-  req.key = key;
+  manager.setKey(key);
   req.log.info(
     { key: key.hash, toApi: req.outboundApi },
     "Assigned Turbo key to embeddings request"
   );

-  proxyReq.setHeader("Authorization", `Bearer ${key.key}`);
+  manager.setHeader("Authorization", `Bearer ${key.key}`);
   if (key.organizationId) {
-    proxyReq.setHeader("OpenAI-Organization", key.organizationId);
+    manager.setHeader("OpenAI-Organization", key.organizationId);
   }
 };
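The changes above replace raw http-proxy-middleware (HPM) onProxyReq callbacks with ProxyReqMutator functions that receive a manager object instead of the socket-level proxyReq. A minimal sketch of that contract, with the method list inferred purely from the call sites in this diff (the real ProxyReqManager type lives in "../index" and may differ):

// Sketch only; Request is express's type, Key comes from shared/key-management.
interface ProxyReqManager {
  request: Request;                               // the inbound express request
  setHeader(name: string, value: string): void;   // stage an outbound header
  removeHeader(name: string): void;
  setBody(body: string | object): void;           // replace the outbound body
  setKey(key: Key): void;                         // record the assigned API key
  setPath(path: string): void;
  setSignedRequest(signed: unknown): void;        // presigned AWS/GCP request
}
type ProxyReqMutator = (manager: ProxyReqManager) => void | Promise<void>;

Because mutations go through the manager rather than being written directly to the outgoing request, they can be re-applied on each proxy attempt, which is what the queue-retry behavior described later in this diff relies on.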
@@ -0,0 +1,67 @@
+import type { ProxyReqMutator } from "../index";
+
+/** Finalize the rewritten request body. Must be the last mutator. */
+export const finalizeBody: ProxyReqMutator = (manager) => {
+  const req = manager.request;
+
+  if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
+    // For image generation requests, remove stream flag.
+    if (req.outboundApi === "openai-image") {
+      delete req.body.stream;
+    }
+    // For anthropic text to chat requests, remove undefined prompt.
+    if (req.outboundApi === "anthropic-chat") {
+      delete req.body.prompt;
+    }
+    // For OpenAI Responses API, ensure messages is in the correct format
+    if (req.outboundApi === "openai-responses") {
+      // Format messages for the Responses API
+      if (req.body.messages) {
+        req.log.info("Formatting messages for Responses API in finalizeBody");
+        // The Responses API expects input to be an array, not an object
+        req.body.input = req.body.messages;
+        delete req.body.messages;
+      } else if (req.body.input && req.body.input.messages) {
+        req.log.info("Reformatting input.messages for Responses API in finalizeBody");
+        // If input already exists but contains a messages object, replace input with the messages array
+        req.body.input = req.body.input.messages;
+      }
+
+      // Final check to ensure max_completion_tokens is converted to max_output_tokens
+      if (req.body.max_completion_tokens) {
+        req.log.info("Converting max_completion_tokens to max_output_tokens in finalizeBody");
+        if (!req.body.max_output_tokens) {
+          req.body.max_output_tokens = req.body.max_completion_tokens;
+        }
+        delete req.body.max_completion_tokens;
+      }
+
+      // Final check to ensure max_tokens is converted to max_output_tokens
+      if (req.body.max_tokens) {
+        req.log.info("Converting max_tokens to max_output_tokens in finalizeBody");
+        if (!req.body.max_output_tokens) {
+          req.body.max_output_tokens = req.body.max_tokens;
+        }
+        delete req.body.max_tokens;
+      }
+
+      // Remove all parameters not supported by Responses API
+      const unsupportedParams = [
+        'frequency_penalty',
+        'presence_penalty',
+      ];
+
+      for (const param of unsupportedParams) {
+        if (req.body[param] !== undefined) {
+          req.log.info(`Removing unsupported parameter for Responses API: ${param}`);
+          delete req.body[param];
+        }
+      }
+    }
+
+    const serialized =
+      typeof req.body === "string" ? req.body : JSON.stringify(req.body);
+    manager.setHeader("Content-Length", String(Buffer.byteLength(serialized)));
+    manager.setBody(serialized);
+  }
+};
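For illustration, the net effect of the openai-responses branch of finalizeBody above on a chat-completions-shaped body (only the field names come from the code; the sample values are invented):

// Before the mutator runs:
const before = {
  model: "gpt-4o",
  messages: [{ role: "user", content: "Hi" }],
  max_tokens: 256,
  frequency_penalty: 0.5,
};
// After: serialized with a matching Content-Length header, equivalent to:
const after = {
  model: "gpt-4o",
  input: [{ role: "user", content: "Hi" }], // messages -> input (array form)
  max_output_tokens: 256,                   // max_tokens -> max_output_tokens
};                                          // frequency_penalty stripped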
@@ -0,0 +1,32 @@
+import { ProxyReqMutator } from "../index";
+
+/**
+ * For AWS/GCP/Azure/Google requests, the body is signed earlier in the request
+ * pipeline, before the proxy middleware. This function just assigns the path
+ * and headers to the proxy request.
+ */
+export const finalizeSignedRequest: ProxyReqMutator = (manager) => {
+  const req = manager.request;
+  if (!req.signedRequest) {
+    throw new Error("Expected req.signedRequest to be set");
+  }
+
+  // The path depends on the selected model and the assigned key's region.
+  manager.setPath(req.signedRequest.path);
+
+  // Amazon doesn't want extra headers, so we need to remove all of them and
+  // reassign only the ones specified in the signed request.
+  const headers = req.signedRequest.headers;
+  Object.keys(headers).forEach((key) => {
+    manager.removeHeader(key);
+  });
+  Object.entries(req.signedRequest.headers).forEach(([key, value]) => {
+    manager.setHeader(key, value);
+  });
+  const serialized =
+    typeof req.signedRequest.body === "string"
+      ? req.signedRequest.body
+      : JSON.stringify(req.signedRequest.body);
+  manager.setHeader("Content-Length", String(Buffer.byteLength(serialized)));
+  manager.setBody(serialized);
+};
@@ -0,0 +1,159 @@
+import express, { Request } from "express";
+import { Sha256 } from "@aws-crypto/sha256-js";
+import { SignatureV4 } from "@smithy/signature-v4";
+import { HttpRequest } from "@smithy/protocol-http";
+import {
+  AnthropicV1TextSchema,
+  AnthropicV1MessagesSchema,
+} from "../../../../shared/api-schemas";
+import { AwsBedrockKey, keyPool } from "../../../../shared/key-management";
+import {
+  AWSMistralV1ChatCompletionsSchema,
+  AWSMistralV1TextCompletionsSchema,
+} from "../../../../shared/api-schemas/mistral-ai";
+import { ProxyReqMutator } from "../index";
+
+const AMZ_HOST =
+  process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";
+
+/**
+ * Signs an outgoing AWS request with the appropriate headers and modifies the
+ * request object in place to fix the path.
+ * This happens AFTER request transformation.
+ */
+export const signAwsRequest: ProxyReqMutator = async (manager) => {
+  const req = manager.request;
+  const { model, stream } = req.body;
+  const key = keyPool.get(model, "aws") as AwsBedrockKey;
+  manager.setKey(key);
+
+  let system = req.body.system ?? "";
+  if (Array.isArray(system)) {
+    system = system
+      .map((m: { type: string; text: string }) => m.text)
+      .join("\n");
+    req.body.system = system;
+  }
+
+  const credential = getCredentialParts(req);
+  const host = AMZ_HOST.replace("%REGION%", credential.region);
+
+  // AWS only uses 2023-06-01 and does not actually check this header, but we
+  // set it so that the stream adapter always selects the correct transformer.
+  manager.setHeader("anthropic-version", "2023-06-01");
+
+  // If our key has an inference profile compatible with the requested model,
+  // we want to use the inference profile instead of the model ID when calling
+  // InvokeModel as that will give us higher rate limits.
+  const profile =
+    key.inferenceProfileIds.find((p) => p.includes(model)) || model;
+
+  // Uses the AWS SDK to sign a request, then modifies our HPM proxy request
+  // with the headers generated by the SDK.
+  const newRequest = new HttpRequest({
+    method: "POST",
+    protocol: "https:",
+    hostname: host,
+    path: `/model/${profile}/invoke${stream ? "-with-response-stream" : ""}`,
+    headers: {
+      ["Host"]: host,
+      ["content-type"]: "application/json",
+    },
+    body: JSON.stringify(getStrictlyValidatedBodyForAws(req)),
+  });
+
+  if (stream) {
+    newRequest.headers["x-amzn-bedrock-accept"] = "application/json";
+  } else {
+    newRequest.headers["accept"] = "*/*";
+  }
+
+  const { body, inboundApi, outboundApi } = req;
+  req.log.info(
+    { key: key.hash, model: body.model, profile, inboundApi, outboundApi },
+    "Assigned AWS credentials to request"
+  );
+
+  manager.setSignedRequest(await sign(newRequest, getCredentialParts(req)));
+};
+
+type Credential = {
+  accessKeyId: string;
+  secretAccessKey: string;
+  region: string;
+};
+
+function getCredentialParts(req: express.Request): Credential {
+  const [accessKeyId, secretAccessKey, region] = req.key!.key.split(":");
+
+  if (!accessKeyId || !secretAccessKey || !region) {
+    req.log.error(
+      { key: req.key!.hash },
+      "AWS_CREDENTIALS isn't correctly formatted; refer to the docs"
+    );
+    throw new Error("The key assigned to this request is invalid.");
+  }
+
+  return { accessKeyId, secretAccessKey, region };
+}
+
+async function sign(request: HttpRequest, credential: Credential) {
+  const { accessKeyId, secretAccessKey, region } = credential;
+
+  const signer = new SignatureV4({
+    sha256: Sha256,
+    credentials: { accessKeyId, secretAccessKey },
+    region,
+    service: "bedrock",
+  });
+
+  return signer.sign(request);
+}
+
+function getStrictlyValidatedBodyForAws(req: Readonly<Request>): unknown {
+  // AWS uses vendor API formats but imposes additional (more strict) validation
+  // rules, namely that extraneous parameters are not allowed. We will validate
+  // using the vendor's zod schema but apply `.strip` to ensure that any
+  // extraneous parameters are removed.
+  let strippedParams: Record<string, unknown> = {};
+  switch (req.outboundApi) {
+    case "anthropic-text":
+      strippedParams = AnthropicV1TextSchema.pick({
+        prompt: true,
+        max_tokens_to_sample: true,
+        stop_sequences: true,
+        temperature: true,
+        top_k: true,
+        top_p: true,
+      })
+        .strip()
+        .parse(req.body);
+      break;
+    case "anthropic-chat":
+      strippedParams = AnthropicV1MessagesSchema.pick({
+        messages: true,
+        system: true,
+        max_tokens: true,
+        stop_sequences: true,
+        temperature: true,
+        top_k: true,
+        top_p: true,
+        tools: true,
+        tool_choice: true,
+        thinking: true
+      })
+        .strip()
+        .parse(req.body);
+      strippedParams.anthropic_version = "bedrock-2023-05-31";
+      break;
+    case "mistral-ai":
+      strippedParams = AWSMistralV1ChatCompletionsSchema.parse(req.body);
+      break;
+    case "mistral-text":
+      strippedParams = AWSMistralV1TextCompletionsSchema.parse(req.body);
+      break;
+    default:
+      throw new Error("Unexpected outbound API for AWS.");
+  }
+  return strippedParams;
+}
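A self-contained sketch of the SigV4 flow the file above wraps, using the same @smithy APIs it imports; the credentials, region, and model ID here are placeholders, while real values are parsed from the assigned key's "accessKeyId:secretAccessKey:region" string:

import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";

async function signExample() {
  const signer = new SignatureV4({
    sha256: Sha256,
    credentials: { accessKeyId: "AKIA...", secretAccessKey: "..." }, // placeholders
    region: "us-east-1",                                             // placeholder
    service: "bedrock",
  });
  const host = "bedrock-runtime.us-east-1.amazonaws.com";
  const request = new HttpRequest({
    method: "POST",
    protocol: "https:",
    hostname: host,
    path: "/model/example-model-id/invoke", // placeholder model ID
    headers: { Host: host, "content-type": "application/json" },
    body: JSON.stringify({ anthropic_version: "bedrock-2023-05-31", max_tokens: 256, messages: [] }),
  });
  // sign() resolves to a copy of the request with Authorization and
  // X-Amz-Date headers attached.
  return signer.sign(request);
}

The body participates in the signature, which is why finalizeSignedRequest forwards it byte-for-byte instead of re-serializing it or letting middleware add headers afterward.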
@@ -0,0 +1,78 @@
+import { AnthropicV1MessagesSchema } from "../../../../shared/api-schemas";
+import { GcpKey, keyPool } from "../../../../shared/key-management";
+import { ProxyReqMutator } from "../index";
+import {
+  getCredentialsFromGcpKey,
+  refreshGcpAccessToken,
+} from "../../../../shared/key-management/gcp/oauth";
+
+const GCP_HOST = process.env.GCP_HOST || "%REGION%-aiplatform.googleapis.com";
+
+export const signGcpRequest: ProxyReqMutator = async (manager) => {
+  const req = manager.request;
+  const serviceValid = req.service === "gcp";
+  if (!serviceValid) {
+    throw new Error("addVertexAIKey called on invalid request");
+  }
+
+  if (!req.body?.model) {
+    throw new Error("You must specify a model with your request.");
+  }
+
+  const { model } = req.body;
+  const key: GcpKey = keyPool.get(model, "gcp") as GcpKey;
+
+  if (!key.accessToken || Date.now() > key.accessTokenExpiresAt) {
+    const [token, durationSec] = await refreshGcpAccessToken(key);
+    keyPool.update(key, {
+      accessToken: token,
+      accessTokenExpiresAt: Date.now() + durationSec * 1000 * 0.95,
+    } as GcpKey);
+    // nb: key received by `get` is a clone and will not have the new access
+    // token we just set, so it must be manually updated.
+    key.accessToken = token;
+  }
+
+  manager.setKey(key);
+  req.log.info({ key: key.hash, model }, "Assigned GCP key to request");
+
+  // TODO: This should happen in transform-outbound-payload.ts
+  // TODO: Support tools
+  let strippedParams: Record<string, unknown>;
+  strippedParams = AnthropicV1MessagesSchema.pick({
+    messages: true,
+    system: true,
+    max_tokens: true,
+    stop_sequences: true,
+    temperature: true,
+    top_k: true,
+    top_p: true,
+    stream: true,
+    tools: true,
+    tool_choice: true,
+    thinking: true
+  })
+    .strip()
+    .parse(req.body);
+  strippedParams.anthropic_version = "vertex-2023-10-16";
+
+  const credential = await getCredentialsFromGcpKey(key);
+
+  const host = GCP_HOST.replace("%REGION%", credential.region);
+  // GCP doesn't use the anthropic-version header, but we set it to ensure the
+  // stream adapter selects the correct transformer.
+  manager.setHeader("anthropic-version", "2023-06-01");
+
+  manager.setSignedRequest({
+    method: "POST",
+    protocol: "https:",
+    hostname: host,
+    path: `/v1/projects/${credential.projectId}/locations/${credential.region}/publishers/anthropic/models/${model}:streamRawPredict`,
+    headers: {
+      ["host"]: host,
+      ["content-type"]: "application/json",
+      ["authorization"]: `Bearer ${key.accessToken}`,
+    },
+    body: JSON.stringify(strippedParams),
+  });
+};
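The `* 0.95` above gives the cached GCP access token a refresh-ahead margin: it is treated as expired at 95% of its reported lifetime, so a refresh never races the real expiry. A small illustration with an invented token lifetime:

const durationSec = 3600; // e.g. a one-hour OAuth token (illustrative)
const expiresAt = Date.now() + durationSec * 1000 * 0.95; // treated as expired ~3 min early
const needsRefresh = !key.accessToken || Date.now() > expiresAt; // mirrors the check above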
@@ -0,0 +1,33 @@
+import { ProxyReqMutator } from "../index";
+
+/**
+ * Removes origin and referer headers before sending the request to the API for
+ * privacy reasons.
+ */
+export const stripHeaders: ProxyReqMutator = (manager) => {
+  manager.removeHeader("origin");
+  manager.removeHeader("referer");
+
+  // Some APIs refuse requests coming from browsers to discourage embedding
+  // API keys in client-side code, so we must remove all CORS/fetch headers.
+  Object.keys(manager.request.headers).forEach((key) => {
+    if (key.startsWith("sec-")) {
+      manager.removeHeader(key);
+    }
+  });
+
+  manager.removeHeader("tailscale-user-login");
+  manager.removeHeader("tailscale-user-name");
+  manager.removeHeader("tailscale-headers-info");
+  manager.removeHeader("tailscale-user-profile-pic");
+  manager.removeHeader("cf-connecting-ip");
+  manager.removeHeader("cf-ray");
+  manager.removeHeader("cf-visitor");
+  manager.removeHeader("cf-warp-tag-id");
+  manager.removeHeader("forwarded");
+  manager.removeHeader("true-client-ip");
+  manager.removeHeader("x-forwarded-for");
+  manager.removeHeader("x-forwarded-host");
+  manager.removeHeader("x-forwarded-proto");
+  manager.removeHeader("x-real-ip");
+};
@@ -1,45 +0,0 @@
-import {
-  applyQuotaLimits,
-  blockZoomerOrigins,
-  checkModelFamily,
-  HPMRequestCallback,
-  stripHeaders,
-} from "./index";
-
-type ProxyReqHandlerFactoryOptions = { pipeline: HPMRequestCallback[] };
-
-/**
- * Returns an http-proxy-middleware request handler that runs the given set of
- * onProxyReq callback functions in sequence.
- *
- * These will run each time a request is proxied, including on automatic retries
- * by the queue after encountering a rate limit.
- */
-export const createOnProxyReqHandler = ({
-  pipeline,
-}: ProxyReqHandlerFactoryOptions): HPMRequestCallback => {
-  const callbackPipeline = [
-    checkModelFamily,
-    applyQuotaLimits,
-    blockZoomerOrigins,
-    stripHeaders,
-    ...pipeline,
-  ];
-  return (proxyReq, req, res, options) => {
-    // The streaming flag must be set before any other onProxyReq handler runs,
-    // as it may influence the behavior of subsequent handlers.
-    // Image generation requests can't be streamed.
-    // TODO: this flag is set in too many places
-    req.isStreaming =
-      req.isStreaming || req.body.stream === true || req.body.stream === "true";
-    req.body.stream = req.isStreaming;
-
-    try {
-      for (const fn of callbackPipeline) {
-        fn(proxyReq, req, res, options);
-      }
-    } catch (error) {
-      proxyReq.destroy(error);
-    }
-  };
-};
@@ -1,33 +0,0 @@
-import { AnthropicKey, Key } from "../../../../shared/key-management";
-import { isTextGenerationRequest } from "../../common";
-import { HPMRequestCallback } from "../index";
-
-/**
- * Some keys require the prompt to start with `\n\nHuman:`. There is no way to
- * know this without trying to send the request and seeing if it fails. If a
- * key is marked as requiring a preamble, it will be added here.
- */
-export const addAnthropicPreamble: HPMRequestCallback = (_proxyReq, req) => {
-  if (
-    !isTextGenerationRequest(req) ||
-    req.key?.service !== "anthropic" ||
-    req.outboundApi !== "anthropic-text"
-  ) {
-    return;
-  }
-
-  let preamble = "";
-  let prompt = req.body.prompt;
-  assertAnthropicKey(req.key);
-  if (req.key.requiresPreamble && prompt) {
-    preamble = prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
-    req.log.debug({ key: req.key.hash, preamble }, "Adding preamble to prompt");
-  }
-  req.body.prompt = preamble + prompt;
-};
-
-function assertAnthropicKey(key: Key): asserts key is AnthropicKey {
-  if (key.service !== "anthropic") {
-    throw new Error(`Expected an Anthropic key, got '${key.service}'`);
-  }
-}
@@ -1,23 +0,0 @@
-import { fixRequestBody } from "http-proxy-middleware";
-import type { HPMRequestCallback } from "../index";
-
-/** Finalize the rewritten request body. Must be the last rewriter. */
-export const finalizeBody: HPMRequestCallback = (proxyReq, req) => {
-  if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
-    // For image generation requests, remove stream flag.
-    if (req.outboundApi === "openai-image") {
-      delete req.body.stream;
-    }
-    // For anthropic text to chat requests, remove undefined prompt.
-    if (req.outboundApi === "anthropic-chat") {
-      delete req.body.prompt;
-    }
-
-    const updatedBody = JSON.stringify(req.body);
-    proxyReq.setHeader("Content-Length", Buffer.byteLength(updatedBody));
-    (req as any).rawBody = Buffer.from(updatedBody);
-
-    // body-parser and http-proxy-middleware don't play nice together
-    fixRequestBody(proxyReq, req);
-  }
-};
@@ -1,26 +0,0 @@
-import type { HPMRequestCallback } from "../index";
-
-/**
- * For AWS/Azure/Google requests, the body is signed earlier in the request
- * pipeline, before the proxy middleware. This function just assigns the path
- * and headers to the proxy request.
- */
-export const finalizeSignedRequest: HPMRequestCallback = (proxyReq, req) => {
-  if (!req.signedRequest) {
-    throw new Error("Expected req.signedRequest to be set");
-  }
-
-  // The path depends on the selected model and the assigned key's region.
-  proxyReq.path = req.signedRequest.path;
-
-  // Amazon doesn't want extra headers, so we need to remove all of them and
-  // reassign only the ones specified in the signed request.
-  proxyReq.getRawHeaderNames().forEach(proxyReq.removeHeader.bind(proxyReq));
-  Object.entries(req.signedRequest.headers).forEach(([key, value]) => {
-    proxyReq.setHeader(key, value);
-  });
-
-  // Don't use fixRequestBody here because it adds a content-length header.
-  // Amazon doesn't want that and it breaks the signature.
-  proxyReq.write(req.signedRequest.body);
-};
@@ -1,21 +0,0 @@
-import { HPMRequestCallback } from "../index";
-
-/**
- * Removes origin and referer headers before sending the request to the API for
- * privacy reasons.
- **/
-export const stripHeaders: HPMRequestCallback = (proxyReq) => {
-  proxyReq.setHeader("origin", "");
-  proxyReq.setHeader("referer", "");
-  proxyReq.removeHeader("tailscale-user-login");
-  proxyReq.removeHeader("tailscale-user-name");
-  proxyReq.removeHeader("tailscale-headers-info");
-  proxyReq.removeHeader("tailscale-user-profile-pic")
-  proxyReq.removeHeader("cf-connecting-ip");
-  proxyReq.removeHeader("forwarded");
-  proxyReq.removeHeader("true-client-ip");
-  proxyReq.removeHeader("x-forwarded-for");
-  proxyReq.removeHeader("x-forwarded-host");
-  proxyReq.removeHeader("x-forwarded-proto");
-  proxyReq.removeHeader("x-real-ip");
-};
@@ -4,12 +4,15 @@ import { initializeSseStream } from "../../../shared/streaming";
 import { classifyErrorAndSend } from "../common";
 import {
   RequestPreprocessor,
+  blockZoomerOrigins,
   countPromptTokens,
   languageFilter,
   setApiFormat,
   transformOutboundPayload,
   validateContextSize,
+  validateModelFamily,
   validateVision,
+  applyQuotaLimits,
 } from ".";

 type RequestPreprocessorOptions = {

@@ -30,14 +33,15 @@ type RequestPreprocessorOptions = {
 /**
  * Returns a middleware function that processes the request body into the given
  * API format, and then sequentially runs the given additional preprocessors.
+ * These should be used for validation and transformations that only need to
+ * happen once per request.
  *
  * These run first in the request lifecycle, a single time per request before it
  * is added to the request queue. They aren't run again if the request is
  * re-attempted after a rate limit.
  *
- * To run a preprocessor on every re-attempt, pass it to createQueueMiddleware.
- * It will run after these preprocessors, but before the request is sent to
- * http-proxy-middleware.
+ * To run functions against requests every time they are re-attempted, write a
+ * ProxyReqMutator and pass it to createQueuedProxyMiddleware instead.
  */
 export const createPreprocessorMiddleware = (
   apiFormat: Parameters<typeof setApiFormat>[0],

@@ -45,6 +49,7 @@ export const createPreprocessorMiddleware = (
 ): RequestHandler => {
   const preprocessors: RequestPreprocessor[] = [
     setApiFormat(apiFormat),
+    blockZoomerOrigins,
     ...(beforeTransform ?? []),
     transformOutboundPayload,
     countPromptTokens,

@@ -52,6 +57,8 @@ export const createPreprocessorMiddleware = (
     ...(afterTransform ?? []),
     validateContextSize,
     validateVision,
+    validateModelFamily,
+    applyQuotaLimits,
   ];
   return async (...args) => executePreprocessors(preprocessors, args);
 };
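Hypothetical wiring for the factory above, to show where the new options land; the field names (inApi/outApi/service, beforeTransform/afterTransform) appear in this diff, but the concrete values are illustrative:

const middleware = createPreprocessorMiddleware(
  { inApi: "openai", outApi: "anthropic-chat", service: "aws" },
  { afterTransform: [] } // extra RequestPreprocessors, run after transformOutboundPayload
);

Per the revised doc comment, everything in this list runs once per request before queueing; work that must repeat on every retry belongs in a ProxyReqMutator passed to createQueuedProxyMiddleware instead.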
@@ -83,10 +90,10 @@ async function executePreprocessors(
     next();
   } catch (error) {
     if (error.constructor.name === "ZodError") {
-      const msg = error?.issues
-        ?.map((issue: ZodIssue) => issue.message)
+      const issues = error?.issues
+        ?.map((issue: ZodIssue) => `${issue.path.join(".")}: ${issue.message}`)
         .join("; ");
-      req.log.info(msg, "Prompt validation failed.");
+      req.log.warn({ issues }, "Prompt failed preprocessor validation.");
     } else {
       req.log.error(error, "Error while executing request preprocessor");
     }

@@ -136,14 +143,21 @@ const handleTestMessage: RequestHandler = (req, res) => {
       completion: "Hello!",
       // anthropic chat
       content: [{ type: "text", text: "Hello!" }],
+      // gemini
+      candidates: [
+        {
+          content: { parts: [{ text: "Hello!" }] },
+          finishReason: "stop",
+        },
+      ],
       proxy_note:
-        "This response was generated by the proxy's test message handler and did not go to the API.",
+        "SillyTavern connection test detected. Your prompt was not sent to the actual model and this response was generated by the proxy.",
     });
   }
 };

 function isTestMessage(body: any) {
-  const { messages, prompt } = body;
+  const { messages, prompt, contents } = body;

   if (messages) {
     return (

@@ -151,6 +165,8 @@ function isTestMessage(body: any) {
       messages[0].role === "user" &&
       messages[0].content === "Hi"
     );
+  } else if (contents) {
+    return contents.length === 1 && contents[0].parts[0]?.text === "Hi";
   } else {
     return (
       prompt?.trim() === "Human: Hi\n\nAssistant:" ||
@@ -1,40 +0,0 @@
-import { keyPool } from "../../../../shared/key-management";
-import { RequestPreprocessor } from "../index";
-
-export const addGoogleAIKey: RequestPreprocessor = (req) => {
-  const apisValid = req.inboundApi === "openai" && req.outboundApi === "google-ai";
-  const serviceValid = req.service === "google-ai";
-  if (!apisValid || !serviceValid) {
-    throw new Error("addGoogleAIKey called on invalid request");
-  }
-
-  if (!req.body?.model) {
-    throw new Error("You must specify a model with your request.");
-  }
-
-  const model = req.body.model;
-  req.key = keyPool.get(model, "google-ai");
-
-  req.log.info(
-    { key: req.key.hash, model },
-    "Assigned Google AI API key to request"
-  );
-
-  // https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:generateContent?key=$API_KEY
-  // https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}
-
-  req.isStreaming = req.isStreaming || req.body.stream;
-  delete req.body.stream;
-
-  req.signedRequest = {
-    method: "POST",
-    protocol: "https:",
-    hostname: "generativelanguage.googleapis.com",
-    path: `/v1beta/models/${model}:${req.isStreaming ? "streamGenerateContent" : "generateContent"}?key=${req.key.key}`,
-    headers: {
-      ["host"]: `generativelanguage.googleapis.com`,
-      ["content-type"]: "application/json",
-    },
-    body: JSON.stringify(req.body),
-  };
-};
@@ -1,6 +1,6 @@
 import { hasAvailableQuota } from "../../../../shared/users/user-store";
 import { isImageGenerationRequest, isTextGenerationRequest } from "../../common";
-import { HPMRequestCallback } from "../index";
+import { RequestPreprocessor } from "../index";

 export class QuotaExceededError extends Error {
   public quotaInfo: any;

@@ -11,7 +11,7 @@ export class QuotaExceededError extends Error {
   }
 }

-export const applyQuotaLimits: HPMRequestCallback = (_proxyReq, req) => {
+export const applyQuotaLimits: RequestPreprocessor = (req) => {
   const subjectToQuota =
     isTextGenerationRequest(req) || isImageGenerationRequest(req);
   if (!subjectToQuota || !req.user) return;

@@ -34,4 +34,4 @@ export const applyQuotaLimits: HPMRequestCallback = (_proxyReq, req) => {
       }
     );
   }
 };
@@ -1,6 +1,6 @@
-import { HPMRequestCallback } from "../index";
+import { RequestPreprocessor } from "../index";

-const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai".split(",");
+const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai,vip.jewproxy.tech,jewproxy.tech".split(",");

 class ZoomerForbiddenError extends Error {
   constructor(message: string) {

@@ -13,8 +13,8 @@ class ZoomerForbiddenError extends Error {
  * Blocks requests from Janitor AI users with a fake, scary error message so I
  * stop getting emails asking for tech support.
  */
-export const blockZoomerOrigins: HPMRequestCallback = (_proxyReq, req) => {
-  const origin = req.headers.origin || req.headers.referer;
+export const blockZoomerOrigins: RequestPreprocessor = (req) => {
+  const origin = req.headers.origin || req.headers.referer || req.headers.host;
   if (origin && DISALLOWED_ORIGIN_SUBSTRINGS.some((s) => origin.includes(s))) {
     // Venus-derivatives send a test prompt to check if the proxy is working.
     // We don't want to block that just yet.
@@ -1,12 +1,18 @@
 import { RequestPreprocessor } from "../index";
 import { countTokens } from "../../../../shared/tokenization";
 import { assertNever } from "../../../../shared/utils";
+import { OpenAIChatMessage } from "../../../../shared/api-schemas";
+import { GoogleAIChatMessage } from "../../../../shared/api-schemas/google-ai";
 import {
   AnthropicChatMessage,
-  GoogleAIChatMessage,
-  MistralAIChatMessage,
-  OpenAIChatMessage,
-} from "../../../../shared/api-schemas";
+  flattenAnthropicMessages,
+} from "../../../../shared/api-schemas/anthropic";
+import {
+  MistralAIChatMessage,
+  ContentItem,
+  isMistralVisionModel
+} from "../../../../shared/api-schemas/mistral-ai";
+import { isGrokVisionModel } from "../../../../shared/api-schemas/xai";

 /**
  * Given a request with an already-transformed body, counts the number of

@@ -18,7 +24,13 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {

   switch (service) {
     case "openai": {
-      req.outputTokens = req.body.max_tokens;
+      req.outputTokens = req.body.max_completion_tokens || req.body.max_tokens;
+      const prompt: OpenAIChatMessage[] = req.body.messages;
+      result = await countTokens({ req, prompt, service });
+      break;
+    }
+    case "openai-responses": {
+      req.outputTokens = req.body.max_completion_tokens || req.body.max_tokens;
       const prompt: OpenAIChatMessage[] = req.body.messages;
       result = await countTokens({ req, prompt, service });
       break;

@@ -31,10 +43,13 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
     }
     case "anthropic-chat": {
       req.outputTokens = req.body.max_tokens;
-      const prompt = {
-        system: req.body.system ?? "",
-        messages: req.body.messages,
-      };
+      let system = req.body.system ?? "";
+      if (Array.isArray(system)) {
+        system = system
+          .map((m: { type: string; text: string }) => m.text)
+          .join("\n");
+      }
+      const prompt = { system, messages: req.body.messages };
       result = await countTokens({ req, prompt, service });
       break;
     }

@@ -50,10 +65,50 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
       result = await countTokens({ req, prompt, service });
       break;
     }
-    case "mistral-ai": {
+    case "mistral-ai":
+    case "mistral-text": {
       req.outputTokens = req.body.max_tokens;
-      const prompt: MistralAIChatMessage[] = req.body.messages;
+
+      // Handle multimodal content (vision) in Mistral models
+      const isVisionModel = isMistralVisionModel(req.body.model);
+      const messages = req.body.messages;
+
+      // Check if this is a vision request with images
+      const hasImageContent = Array.isArray(messages) && messages.some(
+        (msg: MistralAIChatMessage) => Array.isArray(msg.content) &&
+          msg.content.some((item: ContentItem) => item.type === "image_url")
+      );
+
+      // For vision content, we add a fixed token count per image
+      // This is an estimate as the actual token count depends on image size and complexity
+      const TOKENS_PER_IMAGE = 1200; // Conservative estimate
+      let imageTokens = 0;
+
+      if (hasImageContent && Array.isArray(messages)) {
+        // Count images in the request
+        for (const msg of messages) {
+          if (Array.isArray(msg.content)) {
+            const imageCount = msg.content.filter(
+              (item: ContentItem) => item.type === "image_url"
+            ).length;
+            imageTokens += imageCount * TOKENS_PER_IMAGE;
+          }
+        }
+
+        req.log.debug(
+          { imageCount: imageTokens / TOKENS_PER_IMAGE, tokenEstimate: imageTokens },
+          "Estimated token count for Mistral vision images"
+        );
+      }
+
+      const prompt: string | MistralAIChatMessage[] = messages ?? req.body.prompt;
       result = await countTokens({ req, prompt, service });
+
+      // Add the image tokens to the total count
+      if (imageTokens > 0) {
+        result.token_count += imageTokens;
+      }
+
       break;
     }
     case "openai-image": {

@@ -61,6 +116,10 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
       result = await countTokens({ req, service });
       break;
     }
+
+    // Handle XAI (Grok) vision models
+    // Since it uses the OpenAI API format, it's caught in the "openai" case,
+    // but we need to add additional handling for image tokens after that
     default:
       assertNever(service);
   }
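Worked example of the Mistral vision estimate above: each image costs a flat 1200 tokens, added on top of the counted text tokens (sample numbers invented):

const TOKENS_PER_IMAGE = 1200;
const imagesInPrompt = 2;
const textTokens = 850;            // whatever countTokens() returned
const estimatedTotal = textTokens + imagesInPrompt * TOKENS_PER_IMAGE; // 3250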
@@ -1,4 +1,5 @@
 import { Request } from "express";
+import { z } from "zod";
 import { config } from "../../../../config";
 import { assertNever } from "../../../../shared/utils";
 import { RequestPreprocessor } from "../index";

@@ -8,6 +9,7 @@ import {
   OpenAIChatMessage,
   flattenAnthropicMessages,
 } from "../../../../shared/api-schemas";
+import { GoogleAIV1GenerateContentSchema } from "../../../../shared/api-schemas/google-ai";

 const rejectedClients = new Map<string, number>();

@@ -50,14 +52,16 @@ export const languageFilter: RequestPreprocessor = async (req) => {
   }
 };

+/*
+  TODO: this is not type safe and does not raise errors if request body zod schema
+  is changed.
+*/
 function getPromptFromRequest(req: Request) {
   const service = req.outboundApi;
   const body = req.body;
   switch (service) {
     case "anthropic-chat":
       return flattenAnthropicMessages(body.messages);
-    case "anthropic-text":
-      return body.prompt;
     case "openai":
     case "mistral-ai":
       return body.messages

@@ -72,11 +76,19 @@ function getPromptFromRequest(req: Request) {
         return `${msg.role}: ${text}`;
       })
       .join("\n\n");
+    case "anthropic-text":
     case "openai-text":
+    case "openai-responses":
     case "openai-image":
+    case "mistral-text":
       return body.prompt;
-    case "google-ai":
-      return body.prompt.text;
+    case "google-ai": {
+      const b = body as z.infer<typeof GoogleAIV1GenerateContentSchema>;
+      return [
+        b.systemInstruction?.parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text),
+        ...b.contents.flatMap((c) => c.parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text)),
+      ].join("\n");
+    }
     default:
       assertNever(service);
   }
@@ -4,8 +4,22 @@ import { LLMService } from "../../../../shared/models";
 import { RequestPreprocessor } from "../index";

 export const setApiFormat = (api: {
+  /**
+   * The API format the user made the request in and expects the response to be
+   * in.
+   */
   inApi: Request["inboundApi"];
+  /**
+   * The API format the proxy will make the request in and expects the response
+   * to be in. If different from `inApi`, the proxy will transform the user's
+   * request body to this format, and will transform the response body or stream
+   * events from this format.
+   */
   outApi: APIFormat;
+  /**
+   * The service the request will be sent to, which determines authentication
+   * and possibly the streaming transport.
+   */
   service: LLMService;
 }): RequestPreprocessor => {
   return function configureRequestApiFormat(req) {
@@ -1,130 +0,0 @@
-import express from "express";
-import { Sha256 } from "@aws-crypto/sha256-js";
-import { SignatureV4 } from "@smithy/signature-v4";
-import { HttpRequest } from "@smithy/protocol-http";
-import {
-  AnthropicV1TextSchema,
-  AnthropicV1MessagesSchema,
-} from "../../../../shared/api-schemas";
-import { keyPool } from "../../../../shared/key-management";
-import { RequestPreprocessor } from "../index";
-
-const AMZ_HOST =
-  process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";
-
-/**
- * Signs an outgoing AWS request with the appropriate headers modifies the
- * request object in place to fix the path.
- * This happens AFTER request transformation.
- */
-export const signAwsRequest: RequestPreprocessor = async (req) => {
-  const { model, stream } = req.body;
-  req.key = keyPool.get(model, "aws");
-
-  req.isStreaming = stream === true || stream === "true";
-
-  // same as addAnthropicPreamble for non-AWS requests, but has to happen here
-  if (req.outboundApi === "anthropic-text") {
-    let preamble = req.body.prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
-    req.body.prompt = preamble + req.body.prompt;
-  }
-
-  // AWS uses mostly the same parameters as Anthropic, with a few removed params
-  // and much stricter validation on unused parameters. Rather than treating it
-  // as a separate schema we will use the anthropic ones and strip the unused
-  // parameters.
-  // TODO: This should happen in transform-outbound-payload.ts
-  let strippedParams: Record<string, unknown>;
-  if (req.outboundApi === "anthropic-chat") {
-    strippedParams = AnthropicV1MessagesSchema.pick({
-      messages: true,
-      system: true,
-      max_tokens: true,
-      stop_sequences: true,
-      temperature: true,
-      top_k: true,
-      top_p: true,
-    })
-      .strip()
-      .parse(req.body);
-    strippedParams.anthropic_version = "bedrock-2023-05-31";
-  } else {
-    strippedParams = AnthropicV1TextSchema.pick({
-      prompt: true,
-      max_tokens_to_sample: true,
-      stop_sequences: true,
-      temperature: true,
-      top_k: true,
-      top_p: true,
-    })
-      .strip()
-      .parse(req.body);
-  }
-
-  const credential = getCredentialParts(req);
-  const host = AMZ_HOST.replace("%REGION%", credential.region);
-  // AWS only uses 2023-06-01 and does not actually check this header, but we
-  // set it so that the stream adapter always selects the correct transformer.
-  req.headers["anthropic-version"] = "2023-06-01";
-
-  // Uses the AWS SDK to sign a request, then modifies our HPM proxy request
-  // with the headers generated by the SDK.
-  const newRequest = new HttpRequest({
-    method: "POST",
-    protocol: "https:",
-    hostname: host,
-    path: `/model/${model}/invoke${stream ? "-with-response-stream" : ""}`,
-    headers: {
-      ["Host"]: host,
-      ["content-type"]: "application/json",
-    },
-    body: JSON.stringify(strippedParams),
-  });
-
-  if (stream) {
-    newRequest.headers["x-amzn-bedrock-accept"] = "application/json";
-  } else {
-    newRequest.headers["accept"] = "*/*";
-  }
-
-  const { key, body, inboundApi, outboundApi } = req;
-  req.log.info(
-    { key: key.hash, model: body.model, inboundApi, outboundApi },
-    "Assigned AWS credentials to request"
-  );
-
-  req.signedRequest = await sign(newRequest, getCredentialParts(req));
-};
-
-type Credential = {
-  accessKeyId: string;
-  secretAccessKey: string;
-  region: string;
-};
-
-function getCredentialParts(req: express.Request): Credential {
-  const [accessKeyId, secretAccessKey, region] = req.key!.key.split(":");
-
-  if (!accessKeyId || !secretAccessKey || !region) {
-    req.log.error(
-      { key: req.key!.hash },
-      "AWS_CREDENTIALS isn't correctly formatted; refer to the docs"
-    );
-    throw new Error("The key assigned to this request is invalid.");
-  }
-
-  return { accessKeyId, secretAccessKey, region };
-}
-
-async function sign(request: HttpRequest, credential: Credential) {
-  const { accessKeyId, secretAccessKey, region } = credential;
-
-  const signer = new SignatureV4({
-    sha256: Sha256,
-    credentials: { accessKeyId, secretAccessKey },
-    region,
-    service: "bedrock",
-  });
-
-  return signer.sign(request);
-}
@@ -1,9 +1,10 @@
+import { Request } from "express";
 import {
   API_REQUEST_VALIDATORS,
   API_REQUEST_TRANSFORMERS,
 } from "../../../../shared/api-schemas";
 import { BadRequestError } from "../../../../shared/errors";
-import { fixMistralPrompt } from "../../../../shared/api-schemas/mistral-ai";
+import { fixMistralPrompt, isMistralVisionModel } from "../../../../shared/api-schemas/mistral-ai";
 import {
   isImageGenerationRequest,
   isTextGenerationRequest,

@@ -12,41 +13,41 @@ import { RequestPreprocessor } from "../index";

 /** Transforms an incoming request body to one that matches the target API. */
 export const transformOutboundPayload: RequestPreprocessor = async (req) => {
-  const sameService = req.inboundApi === req.outboundApi;
   const alreadyTransformed = req.retryCount > 0;
   const notTransformable =
     !isTextGenerationRequest(req) && !isImageGenerationRequest(req);

-  if (alreadyTransformed || notTransformable) return;
-
-  // TODO: this should be an APIFormatTransformer
-  if (req.inboundApi === "mistral-ai") {
-    const messages = req.body.messages;
-    req.body.messages = fixMistralPrompt(messages);
-    req.log.info(
-      { old: messages.length, new: req.body.messages.length },
-      "Fixed Mistral prompt"
-    );
-  }
-
-  if (sameService) {
-    const result = API_REQUEST_VALIDATORS[req.inboundApi].safeParse(req.body);
-    if (!result.success) {
-      req.log.warn(
-        { issues: result.error.issues, body: req.body },
-        "Request validation failed"
-      );
-      throw result.error;
-    }
-    req.body = result.data;
+  if (alreadyTransformed) {
+    return;
+  } else if (notTransformable) {
+    // This is probably an indication of a bug in the proxy.
+    const { inboundApi, outboundApi, method, path } = req;
+    req.log.warn(
+      { inboundApi, outboundApi, method, path },
+      "`transformOutboundPayload` called on a non-transformable request."
+    );
     return;
   }

+  applyMistralPromptFixes(req);
+  applyGoogleAIKeyTransforms(req);
+  applyOpenAIResponsesTransform(req);
+
+  // Native prompts are those which were already provided by the client in the
+  // target API format. We don't need to transform them.
+  const isNativePrompt = req.inboundApi === req.outboundApi;
+  if (isNativePrompt) {
+    const result = API_REQUEST_VALIDATORS[req.inboundApi].parse(req.body);
+    req.body = result;
+    return;
+  }
+
+  // Prompt requires translation from one API format to another.
   const transformation = `${req.inboundApi}->${req.outboundApi}` as const;
   const transFn = API_REQUEST_TRANSFORMERS[transformation];

   if (transFn) {
-    req.log.info({ transformation }, "Transforming request");
+    req.log.info({ transformation }, "Transforming request...");
     req.body = await transFn(req);
     return;
   }
@@ -55,3 +56,182 @@ export const transformOutboundPayload: RequestPreprocessor = async (req) => {
|
|||||||
`${transformation} proxying is not supported. Make sure your client is configured to send requests in the correct format and to the correct endpoint.`
|
`${transformation} proxying is not supported. Make sure your client is configured to send requests in the correct format and to the correct endpoint.`
|
||||||
);
|
);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Handle OpenAI Responses API transformation
|
||||||
|
function applyOpenAIResponsesTransform(req: Request): void {
|
||||||
|
if (req.outboundApi === "openai-responses") {
|
||||||
|
req.log.info("Transforming request to OpenAI Responses API format");
|
||||||
|
|
||||||
|
// Store the original body for reference if needed
|
||||||
|
const originalBody = { ...req.body };
|
||||||
|
|
||||||
|
// Map standard OpenAI chat completions format to Responses API format
|
||||||
|
// The main differences are:
|
||||||
|
// 1. Endpoint is /v1/responses instead of /v1/chat/completions
|
||||||
    // 2. 'messages' field moves to 'input.messages'

    // Move messages to input.messages
    if (req.body.messages && !req.body.input) {
      req.body.input = {
        messages: req.body.messages
      };
      delete req.body.messages;
    }

    // Keep all the original properties of the request but ensure compatibility
    // with Responses API specifics
    if (!req.body.previousResponseId && req.body.conversation_id) {
      req.body.previousResponseId = req.body.conversation_id;
      delete req.body.conversation_id;
    }

    // Convert max_tokens to max_output_tokens if present and not already set
    if (req.body.max_tokens && !req.body.max_output_tokens) {
      req.body.max_output_tokens = req.body.max_tokens;
      delete req.body.max_tokens;
    }

    // Set the correct tools format if needed
    if (req.body.tools) {
      // Tools structure is maintained but might need conversion if non-standard
      if (!req.body.tools.some((tool: any) => tool.type === "function" || tool.type === "web_search")) {
        req.body.tools = req.body.tools.map((tool: any) => ({
          ...tool,
          type: tool.type || "function"
        }));
      }
    }

    req.log.info({
      originalModel: originalBody.model,
      newFormat: "openai-responses"
    }, "Successfully transformed request to Responses API format");
  }
}

// Handles weird cases that don't fit into our abstractions.
function applyMistralPromptFixes(req: Request): void {
  if (req.inboundApi === "mistral-ai") {
    // Mistral Chat is very similar to OpenAI but not identical, and many
    // clients don't properly handle the differences. We validate the Mistral
    // prompt and try to fix it if validation fails. It will be re-validated
    // after this function returns.
    const result = API_REQUEST_VALIDATORS["mistral-ai"].parse(req.body);

    // Check if this is a vision model request
    const isVisionModel = isMistralVisionModel(req.body.model);

    // Check if the request contains image content
    const hasImageContent = result.messages?.some(
      (msg: { content: string | any[] }) =>
        Array.isArray(msg.content) &&
        msg.content.some((item: any) => item.type === "image_url")
    );

    // For vision requests, normalize the image_url format
    if (hasImageContent && Array.isArray(result.messages)) {
      // Process each message with image content
      result.messages.forEach((msg: any) => {
        if (Array.isArray(msg.content)) {
          // Process each content item
          msg.content.forEach((item: any) => {
            if (item.type === "image_url") {
              // Normalize the image_url field to the string format that
              // Mistral expects
              if (typeof item.image_url === "object") {
                // If it's an object, extract the URL or base64 data
                if (item.image_url.url) {
                  item.image_url = item.image_url.url;
                } else if (item.image_url.data) {
                  item.image_url = item.image_url.data;
                }

                req.log.info(
                  { model: req.body.model },
                  "Normalized object-format image_url to string format"
                );
              }
            }
          });
        }
      });
    }

    // Apply Mistral prompt fixes while preserving multimodal content
    req.body.messages = fixMistralPrompt(result.messages);
    req.log.info(
      {
        n: req.body.messages.length,
        prev: result.messages.length,
        isVisionModel,
        hasImageContent
      },
      "Applied Mistral chat prompt fixes."
    );

    // If this is a vision model request with image content, it MUST use the
    // chat API and cannot be converted to text completions.
    if (hasImageContent) {
      req.log.info(
        { model: req.body.model },
        "Detected Mistral vision request with image content. Keeping as chat format."
      );
      return;
    }

    // If the prompt relies on `prefix: true` for the last message, we need to
    // convert it to a text completions request because AWS Mistral support for
    // this feature is broken. On Mistral La Plateforme we can't do this,
    // because they don't expose a text completions endpoint.
    const { messages } = req.body;
    const lastMessage = messages && messages[messages.length - 1];
    if (lastMessage?.role === "assistant" && req.service === "aws") {
      // Enable prefix if the client forgot; otherwise the template will insert
      // an eos token, which is very unlikely to be what the client wants.
      lastMessage.prefix = true;
      req.outboundApi = "mistral-text";
      req.log.info(
        "Native Mistral chat prompt relies on assistant message prefix. Converting to text completions request."
      );
    }
  }
}

function toCamelCase(str: string): string {
  return str.replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
}

function transformKeysToCamelCase(obj: any, hasTransformed = { value: false }): any {
  if (Array.isArray(obj)) {
    return obj.map((item) => transformKeysToCamelCase(item, hasTransformed));
  }

  if (obj !== null && typeof obj === "object") {
    return Object.fromEntries(
      Object.entries(obj).map(([key, value]) => {
        const camelKey = toCamelCase(key);
        if (camelKey !== key) {
          hasTransformed.value = true;
        }
        return [camelKey, transformKeysToCamelCase(value, hasTransformed)];
      })
    );
  }

  return obj;
}
function applyGoogleAIKeyTransforms(req: Request): void {
  // The Google (Gemini) API, in its infinite wisdom, accepts both snake_case
  // and camelCase for some params even though the docs use snake_case. Some
  // frontends (e.g. ST) mix snake_case and camelCase, so we normalize all
  // keys to camelCase.
  if (req.outboundApi === "google-ai") {
    const hasTransformed = { value: false };
    req.body = transformKeysToCamelCase(req.body, hasTransformed);
    if (hasTransformed.value) {
      req.log.info("Applied Gemini snake_case -> camelCase transform");
    }
  }
}
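For illustration, a minimal sketch of how the key normalization above behaves on a typical Gemini request body (the request shape in this example is assumed, not taken from the repo's tests):

```ts
// Hypothetical Gemini request body mixing snake_case and camelCase keys.
const body = {
  contents: [{ role: "user", parts: [{ text: "hi" }] }],
  generation_config: { max_output_tokens: 256, topP: 0.9 },
  safety_settings: [],
};

const hasTransformed = { value: false };
const normalized = transformKeysToCamelCase(body, hasTransformed);
// normalized.generationConfig.maxOutputTokens === 256
// normalized.generationConfig.topP === 0.9 (already camelCase, untouched)
// hasTransformed.value === true, so the transform gets logged for this request
```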
@@ -6,8 +6,9 @@ import { RequestPreprocessor } from "../index";
 const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
 const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
-const GOOGLE_AI_MAX_CONTEXT = 32000;
-const MISTRAL_AI_MAX_CONTENT = 32768;
+// todo: make configurable
+const GOOGLE_AI_MAX_CONTEXT = 2048000;
+const MISTRAL_AI_MAX_CONTENT = 131072;
 
 /**
  * Assigns `req.promptTokens` and `req.outputTokens` based on the request body
@@ -27,6 +28,7 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
   switch (req.outboundApi) {
     case "openai":
     case "openai-text":
+    case "openai-responses":
       proxyMax = OPENAI_MAX_CONTEXT;
       break;
     case "anthropic-chat":
@@ -37,6 +39,7 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
       proxyMax = GOOGLE_AI_MAX_CONTEXT;
       break;
     case "mistral-ai":
+    case "mistral-text":
       proxyMax = MISTRAL_AI_MAX_CONTENT;
       break;
     case "openai-image":
@@ -56,6 +59,24 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
     modelMax = 16384;
   } else if (model.match(/^gpt-4o/)) {
     modelMax = 128000;
+  } else if (model.match(/^gpt-4.5/)) {
+    modelMax = 128000;
+  } else if (model.match(/^gpt-4\.1(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 1000000;
+  } else if (model.match(/^gpt-4\.1-mini(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 1000000;
+  } else if (model.match(/^gpt-4\.1-nano(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 1000000;
+  } else if (model.match(/^gpt-5(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 400000;
+  } else if (model.match(/^gpt-5-mini(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 400000;
+  } else if (model.match(/^gpt-5-nano(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 400000;
+  } else if (model.match(/^gpt-5-chat-latest$/)) {
+    modelMax = 400000;
+  } else if (model.match(/^chatgpt-4o/)) {
+    modelMax = 128000;
   } else if (model.match(/gpt-4-turbo(-\d{4}-\d{2}-\d{2})?$/)) {
     modelMax = 131072;
   } else if (model.match(/gpt-4-turbo(-preview)?$/)) {
@@ -64,6 +85,24 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
     modelMax = 131072;
   } else if (model.match(/^gpt-4(-\d{4})?-vision(-preview)?$/)) {
     modelMax = 131072;
+  } else if (model.match(/^o3-mini(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 200000;
+  } else if (model.match(/^o3(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 200000;
+  } else if (model.match(/^o4-mini(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 200000;
+  } else if (model.match(/^codex-mini(-latest|-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 200000; // 200k context window for codex-mini-latest
+  } else if (model.match(/^o1(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 200000;
+  } else if (model.match(/^o1-mini(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 128000;
+  } else if (model.match(/^o1-pro(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 200000;
+  } else if (model.match(/^o3-pro(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 200000;
+  } else if (model.match(/^o1-preview(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 128000;
   } else if (model.match(/gpt-3.5-turbo/)) {
     modelMax = 16384;
   } else if (model.match(/gpt-4-32k/)) {
@@ -80,17 +119,43 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
     modelMax = 200000;
   } else if (model.match(/^claude-3/)) {
     modelMax = 200000;
-  } else if (model.match(/^gemini-\d{3}$/)) {
-    modelMax = GOOGLE_AI_MAX_CONTEXT;
-  } else if (model.match(/^mistral-(tiny|small|medium)$/)) {
-    modelMax = MISTRAL_AI_MAX_CONTENT;
+  } else if (model.match(/^claude-(?:sonnet|opus)-4/)) {
+    modelMax = 200000;
+  } else if (model.match(/^gemini-/)) {
+    modelMax = 1024000;
   } else if (model.match(/^anthropic\.claude-3/)) {
     modelMax = 200000;
+  } else if (model.match(/^anthropic\.claude-(?:sonnet|opus)-4/)) {
+    modelMax = 200000;
   } else if (model.match(/^anthropic\.claude-v2:\d/)) {
     modelMax = 200000;
   } else if (model.match(/^anthropic\.claude/)) {
-    // Not sure if AWS Claude has the same context limit as Anthropic Claude.
     modelMax = 100000;
+  } else if (model.match(/^deepseek/)) {
+    modelMax = 64000;
+  } else if (model.match(/^kimi-k2/)) {
+    // Kimi K2 models have 131k context window
+    modelMax = 131000;
+  } else if (model.match(/moonshot/)) {
+    // Moonshot models typically have 200k context window
+    modelMax = 200000;
+  } else if (model.match(/command[\w-]*-03-202[0-9]/)) {
+    // Cohere's command-a-03 models have 256k context window
+    modelMax = 256000;
+  } else if (model.match(/command/) || model.match(/cohere/)) {
+    // Default for all other Cohere models
+    modelMax = 128000;
+  } else if (model.match(/^grok-4/)) {
+    modelMax = 256000;
+  } else if (model.match(/^grok/)) {
+    modelMax = 128000;
+  } else if (model.match(/^magistral/)) {
+    modelMax = 40000;
+  } else if (model.match(/tral/)) {
+    // catches mistral, mixtral, codestral, mathstral, etc. mistral models have
+    // no name convention and wildly different context windows so this is a
+    // catch-all
+    modelMax = MISTRAL_AI_MAX_CONTENT;
   } else {
     req.log.warn({ model }, "Unknown model, using 200k token limit.");
     modelMax = 200000;
@@ -126,4 +191,4 @@ function assertRequestHasTokenCounts(
   })
     .nonstrict()
     .parse({ promptTokens: req.promptTokens, outputTokens: req.outputTokens });
 }
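Aside: the per-model limits above form a long first-match-wins regex chain. The same logic can be sketched as a data-driven table; this is illustrative only (a small subset of the limits above, not how the repo structures it):

```ts
// Sketch only: first match wins, so ordering matters (e.g. o1-mini must be
// checked before the broader o1/o3/o4 pattern).
const MODEL_CONTEXT_LIMITS: Array<[RegExp, number]> = [
  [/^gpt-4\.1/, 1_000_000],
  [/^gpt-5/, 400_000],
  [/^o1-mini/, 128_000],
  [/^o[134]/, 200_000],
  [/^gemini-/, 1_024_000],
];

function lookupModelMax(model: string, fallback = 200_000): number {
  const hit = MODEL_CONTEXT_LIMITS.find(([pattern]) => pattern.test(model));
  return hit ? hit[1] : fallback;
}

lookupModelMax("gpt-5-mini-2025-08-07"); // => 400000
lookupModelMax("o1-mini");               // => 128000
```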
+6 -4
@@ -1,14 +1,16 @@
-import { HPMRequestCallback } from "../index";
 import { config } from "../../../../config";
 import { ForbiddenError } from "../../../../shared/errors";
 import { getModelFamilyForRequest } from "../../../../shared/models";
+import { RequestPreprocessor } from "../index";
 
 /**
  * Ensures the selected model family is enabled by the proxy configuration.
- **/
-export const checkModelFamily: HPMRequestCallback = (_proxyReq, req, res) => {
+ */
+export const validateModelFamily: RequestPreprocessor = (req) => {
   const family = getModelFamilyForRequest(req);
   if (!config.allowedModelFamilies.includes(family)) {
-    throw new ForbiddenError(`Model family '${family}' is not enabled on this proxy`);
+    throw new ForbiddenError(
+      `Model family '${family}' is not enabled on this proxy`
+    );
   }
 };
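For context, `validateModelFamily` now runs as a request preprocessor rather than an http-proxy-middleware callback. A hedged sketch of how such a chain might be driven (the wiring below is illustrative, not the repo's actual queue code):

```ts
// Illustrative wiring: preprocessors run in order; a thrown ForbiddenError
// short-circuits the chain and becomes a 403 in the error middleware.
const preprocessors: RequestPreprocessor[] = [
  validateModelFamily,
  validateContextSize,
];

async function runPreprocessors(req: Request): Promise<void> {
  for (const preprocess of preprocessors) {
    await preprocess(req); // works for sync and async preprocessors alike
  }
}
```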
@@ -3,26 +3,38 @@ import { assertNever } from "../../../../shared/utils";
 import { RequestPreprocessor } from "../index";
 import { containsImageContent as containsImageContentOpenAI } from "../../../../shared/api-schemas/openai";
 import { containsImageContent as containsImageContentAnthropic } from "../../../../shared/api-schemas/anthropic";
+import { containsImageContent as containsImageContentGoogleAI } from "../../../../shared/api-schemas/google-ai";
 import { ForbiddenError } from "../../../../shared/errors";
 
 /**
  * Rejects prompts containing images if multimodal prompts are disabled.
  */
 export const validateVision: RequestPreprocessor = async (req) => {
-  if (config.allowImagePrompts) return;
-  if (req.user?.type === "special") return;
+  if (req.service === undefined) {
+    throw new Error("Request service must be set before validateVision");
+  }
+
+  if (req.user?.type === "special") return;
+  if (config.allowedVisionServices.includes(req.service)) return;
+
+  // vision not allowed for req's service, block prompts with images
   let hasImage = false;
   switch (req.outboundApi) {
     case "openai":
       hasImage = containsImageContentOpenAI(req.body.messages);
       break;
+    case "openai-responses":
+      hasImage = containsImageContentOpenAI(req.body.messages);
+      break;
     case "anthropic-chat":
       hasImage = containsImageContentAnthropic(req.body.messages);
       break;
-    case "anthropic-text":
     case "google-ai":
+      hasImage = containsImageContentGoogleAI(req.body.contents);
+      break;
+    case "anthropic-text":
     case "mistral-ai":
+    case "mistral-text":
     case "openai-image":
     case "openai-text":
       return;
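The new `containsImageContentGoogleAI` helper is imported but not shown in this diff. A plausible shape, assuming Gemini-style `contents` whose image parts carry `inlineData` (an assumption; the project's real schema helper may differ):

```ts
// Assumed shapes; not the repo's actual google-ai schema module.
type GoogleAIPart = {
  text?: string;
  inlineData?: { mimeType: string; data: string };
};
type GoogleAIContent = { role: string; parts: GoogleAIPart[] };

function containsImageContent(contents: GoogleAIContent[] = []): boolean {
  return contents.some((content) =>
    content.parts?.some((part) =>
      part.inlineData?.mimeType?.startsWith("image/")
    )
  );
}
```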
@@ -0,0 +1,135 @@
import { Request, Response } from "express";
import http from "http";
import ProxyServer from "http-proxy";
import { Readable } from "stream";
import {
  createProxyMiddleware,
  Options,
  debugProxyErrorsPlugin,
  proxyEventsPlugin,
} from "http-proxy-middleware";
import { ProxyReqMutator, stripHeaders } from "./index";
import { createOnProxyResHandler, ProxyResHandlerWithBody } from "../response";
import { createQueueMiddleware } from "../../queue";
import { getHttpAgents } from "../../../shared/network";
import { classifyErrorAndSend } from "../common";

/**
 * Options for the `createQueuedProxyMiddleware` factory function.
 */
type ProxyMiddlewareFactoryOptions = {
  /**
   * Functions which receive a ProxyReqManager and can modify the request before
   * it is proxied. The modifications will be automatically reverted if the
   * request needs to be returned to the queue.
   */
  mutations?: ProxyReqMutator[];
  /**
   * The target URL to proxy requests to. This can be a string or a function
   * which accepts the request and returns a string.
   */
  target: string | Options<Request>["router"];
  /**
   * A function which receives the proxy response and the JSON-decoded request
   * body. Only fired for non-streaming responses; streaming responses are
   * handled in `handle-streaming-response.ts`.
   */
  blockingResponseHandler?: ProxyResHandlerWithBody;
};

/**
 * Returns a middleware function that accepts incoming requests and places them
 * into the request queue. When the request is dequeued, it is proxied to the
 * target URL using the given options and middleware. Non-streaming responses
 * are handled by the given `blockingResponseHandler`.
 */
export function createQueuedProxyMiddleware({
  target,
  mutations,
  blockingResponseHandler,
}: ProxyMiddlewareFactoryOptions) {
  const hpmTarget = typeof target === "string" ? target : "https://setbyrouter";
  const hpmRouter = typeof target === "function" ? target : undefined;

  const [httpAgent, httpsAgent] = getHttpAgents();
  const agent = hpmTarget.startsWith("http:") ? httpAgent : httpsAgent;

  const proxyMiddleware = createProxyMiddleware<Request, Response>({
    target: hpmTarget,
    router: hpmRouter,
    agent,
    changeOrigin: true,
    toProxy: true,
    selfHandleResponse: typeof blockingResponseHandler === "function",
    // Disable HPM logger plugin (requires re-adding the other default plugins).
    // Contrary to name, debugProxyErrorsPlugin is not just for debugging and
    // fixes several error handling/connection close issues in http-proxy core.
    ejectPlugins: true,
    // Inferred (via Options<express.Request>) as Plugin<express.Request>, but
    // the default plugins only allow http.IncomingMessage for TReq. They are
    // compatible with express.Request, so we can use them. `Plugin` type is not
    // exported for some reason.
    plugins: [
      debugProxyErrorsPlugin,
      pinoLoggerPlugin,
      proxyEventsPlugin,
    ] as any,
    on: {
      proxyRes: createOnProxyResHandler(
        blockingResponseHandler ? [blockingResponseHandler] : []
      ),
      error: classifyErrorAndSend,
    },
    buffer: ((req: Request) => {
      // This is a hack/monkey patch and is not part of the official
      // http-proxy-middleware package. See patches/http-proxy+1.18.1.patch.
      let payload = req.body;
      if (typeof payload === "string") {
        payload = Buffer.from(payload);
      }
      const stream = new Readable();
      stream.push(payload);
      stream.push(null);
      return stream;
    }) as any,
  });

  return createQueueMiddleware({
    mutations: [stripHeaders, ...(mutations ?? [])],
    proxyMiddleware,
  });
}

type ProxiedResponse = http.IncomingMessage & Response & any;
function pinoLoggerPlugin(proxyServer: ProxyServer<Request>) {
  proxyServer.on("error", (err, req, res, target) => {
    req.log.error(
      { originalUrl: req.originalUrl, targetUrl: String(target), err },
      "Error occurred while proxying request to target"
    );
  });
  proxyServer.on("proxyReq", (proxyReq, req) => {
    const { protocol, host, path } = proxyReq;
    req.log.info(
      {
        from: req.originalUrl,
        to: `${protocol}//${host}${path}`,
      },
      "Sending request to upstream API..."
    );
  });
  proxyServer.on("proxyRes", (proxyRes: ProxiedResponse, req, _res) => {
    const { protocol, host, path } = proxyRes.req;
    req.log.info(
      {
        target: `${protocol}//${host}${path}`,
        status: proxyRes.statusCode,
        contentType: proxyRes.headers["content-type"],
        contentEncoding: proxyRes.headers["content-encoding"],
        contentLength: proxyRes.headers["content-length"],
        transferEncoding: proxyRes.headers["transfer-encoding"],
      },
      "Got response from upstream API."
    );
  });
}
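A hedged usage sketch of the factory above; the target URL, mutator names, and handler body are placeholders for illustration, not code from this repo:

```ts
// Placeholder target and mutators; the repo's real service routes differ.
const openaiProxy = createQueuedProxyMiddleware({
  target: "https://api.openai.com",
  mutations: [addKeyHeader, finalizeBody], // hypothetical ProxyReqMutators
  blockingResponseHandler: async (_proxyRes, _req, res, body) => {
    // For non-streaming requests, forward the decoded upstream body as-is.
    res.status(200).json(body);
  },
});

// app.use("/proxy/openai", openaiProxy);
```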
@@ -0,0 +1,112 @@
import { Request } from "express";
import { Key } from "../../../shared/key-management";
import { assertNever } from "../../../shared/utils";

/**
 * Represents a change to the request that will be reverted if the request
 * fails.
 */
interface ProxyReqMutation {
  target: "header" | "path" | "body" | "api-key" | "signed-request";
  key?: string;
  originalValue: any | undefined;
}

/**
 * Manages a request's headers, body, and path, allowing them to be modified
 * before the request is proxied and automatically reverted if the request
 * needs to be retried.
 */
export class ProxyReqManager {
  private req: Request;
  private mutations: ProxyReqMutation[] = [];

  /**
   * A read-only proxy of the request object. Avoid changing any properties
   * here as they will persist across retries.
   */
  public readonly request: Readonly<Request>;

  constructor(req: Request) {
    this.req = req;

    this.request = new Proxy(req, {
      get: (target, prop) => {
        if (typeof prop === "string") return target[prop as keyof Request];
        return undefined;
      },
    });
  }

  setHeader(name: string, newValue: string): void {
    const originalValue = this.req.get(name);
    this.mutations.push({ target: "header", key: name, originalValue });
    this.req.headers[name.toLowerCase()] = newValue;
  }

  removeHeader(name: string): void {
    const originalValue = this.req.get(name);
    this.mutations.push({ target: "header", key: name, originalValue });
    delete this.req.headers[name.toLowerCase()];
  }

  setBody(newBody: any): void {
    const originalValue = this.req.body;
    this.mutations.push({ target: "body", key: "body", originalValue });
    this.req.body = newBody;
  }

  setKey(newKey: Key): void {
    const originalValue = this.req.key;
    this.mutations.push({ target: "api-key", key: "key", originalValue });
    this.req.key = newKey;
  }

  setPath(newPath: string): void {
    const originalValue = this.req.path;
    this.mutations.push({ target: "path", key: "path", originalValue });
    this.req.url = newPath;
  }

  setSignedRequest(newSignedRequest: typeof this.req.signedRequest): void {
    const originalValue = this.req.signedRequest;
    this.mutations.push({ target: "signed-request", key: "signedRequest", originalValue });
    this.req.signedRequest = newSignedRequest;
  }

  hasChanged(): boolean {
    return this.mutations.length > 0;
  }

  revert(): void {
    for (const mutation of this.mutations.reverse()) {
      switch (mutation.target) {
        case "header":
          if (mutation.originalValue === undefined) {
            delete this.req.headers[mutation.key!.toLowerCase()];
            continue;
          } else {
            this.req.headers[mutation.key!.toLowerCase()] =
              mutation.originalValue;
          }
          break;
        case "path":
          this.req.url = mutation.originalValue;
          break;
        case "body":
          this.req.body = mutation.originalValue;
          break;
        case "api-key":
          // We don't reset the key here because it's not a property of the
          // inbound request, so we'd only ever be reverting it to null.
          break;
        case "signed-request":
          this.req.signedRequest = mutation.originalValue;
          break;
        default:
          assertNever(mutation.target);
      }
    }
    this.mutations = [];
  }
}
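A sketch of the intended mutate-then-revert lifecycle; the `sendToUpstream` step and key object are stand-ins for illustration:

```ts
// Illustration only; `sendToUpstream` and `newKey` are hypothetical.
const manager = new ProxyReqManager(req);
manager.setHeader("Authorization", `Bearer ${newKey.key}`);
manager.setPath("/v1/chat/completions");

try {
  await sendToUpstream(manager.request); // read-only view of the request
} catch (err) {
  // Put the request back the way we found it before it re-enters the queue.
  if (manager.hasChanged()) manager.revert();
  throw err;
}
```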
@@ -0,0 +1,36 @@
import util from "util";
import zlib from "zlib";
import { PassThrough } from "stream";

const BUFFER_DECODER_MAP = {
  gzip: util.promisify(zlib.gunzip),
  deflate: util.promisify(zlib.inflate),
  br: util.promisify(zlib.brotliDecompress),
  text: (data: Buffer) => data,
};

const STREAM_DECODER_MAP = {
  gzip: zlib.createGunzip,
  deflate: zlib.createInflate,
  br: zlib.createBrotliDecompress,
  text: () => new PassThrough(),
};

type SupportedContentEncoding = keyof typeof BUFFER_DECODER_MAP;
const isSupportedContentEncoding = (
  encoding: string
): encoding is SupportedContentEncoding => encoding in BUFFER_DECODER_MAP;

export async function decompressBuffer(buf: Buffer, encoding: string = "text") {
  if (isSupportedContentEncoding(encoding)) {
    return (await BUFFER_DECODER_MAP[encoding](buf)).toString();
  }
  throw new Error(`Unsupported content-encoding: ${encoding}`);
}

export function getStreamDecompressor(encoding: string = "text") {
  if (isSupportedContentEncoding(encoding)) {
    return STREAM_DECODER_MAP[encoding]();
  }
  throw new Error(`Unsupported content-encoding: ${encoding}`);
}
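Because the helpers are self-contained, their behavior is easy to demonstrate with a small round-trip using Node's zlib (illustrative only):

```ts
import { gzipSync } from "zlib";

async function demo() {
  const compressed = gzipSync(Buffer.from(JSON.stringify({ ok: true })));

  // Buffer variant: resolves to the decoded string.
  const text = await decompressBuffer(compressed, "gzip");
  console.log(text); // {"ok":true}

  // Missing content-encoding falls back to "text" (identity).
  console.log(await decompressBuffer(Buffer.from("plain"))); // plain

  // Unknown encodings reject instead of returning garbage.
  await decompressBuffer(compressed, "zstd").catch((e) => console.log(e.message));
}
```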
@@ -2,36 +2,33 @@ import express from "express";
 import { APIFormat } from "../../../shared/key-management";
 import { assertNever } from "../../../shared/utils";
 import { initializeSseStream } from "../../../shared/streaming";
+import http from "http";
 
-function getMessageContent({
-  title,
-  message,
-  obj,
-}: {
+/**
+ * Returns a Markdown-formatted message that renders semi-nicely in most chat
+ * frontends. For example:
+ *
+ * **Proxy error (HTTP 404 Not Found)**
+ * The proxy encountered an error while trying to send your prompt to the upstream service. Further technical details are provided below.
+ * ***
+ * *The requested Claude model might not exist, or the key might not be provisioned for it.*
+ * ```
+ * {
+ *   "type": "error",
+ *   "error": {
+ *     "type": "not_found_error",
+ *     "message": "model: some-invalid-model-id",
+ *   },
+ *   "proxy_note": "The requested Claude model might not exist, or the key might not be provisioned for it."
+ * }
+ * ```
+ */
+function getMessageContent(params: {
   title: string;
   message: string;
   obj?: Record<string, any>;
 }) {
-  /*
-  Constructs a Markdown-formatted message that renders semi-nicely in most chat
-  frontends. For example:
-
-  **Proxy error (HTTP 404 Not Found)**
-  The proxy encountered an error while trying to send your prompt to the upstream service. Further technical details are provided below.
-  ***
-  *The requested Claude model might not exist, or the key might not be provisioned for it.*
-  ```
-  {
-    "type": "error",
-    "error": {
-      "type": "not_found_error",
-      "message": "model: some-invalid-model-id",
-    },
-    "proxy_note": "The requested Claude model might not exist, or the key might not be provisioned for it."
-  }
-  ```
-  */
+  const { title, message, obj } = params;
 
   const note = obj?.proxy_note || obj?.error?.message || "";
   const header = `### **${title}**`;
   const friendlyMessage = note ? `${message}\n\n----\n\n*${note}*` : message;
@@ -52,20 +49,30 @@ function getMessageContent({
     delete obj.stack;
   }
 
-  return [header, friendlyMessage, serializedObj, prettyTrace].join("\n\n");
+  return [
+    header,
+    friendlyMessage,
+    serializedObj,
+    prettyTrace,
+    "<!-- oai-proxy-error -->",
+  ].join("\n\n");
 }
 
 type ErrorGeneratorOptions = {
   format: APIFormat | "unknown";
   title: string;
   message: string;
-  obj?: object;
+  obj?: Record<string, any>;
   reqId: string | number | object;
   model?: string;
   statusCode?: number;
 };
 
-export function tryInferFormat(body: any): APIFormat | "unknown" {
+/**
+ * Very crude inference of the request format based on the request body. Don't
+ * rely on this to be very accurate.
+ */
+function tryInferFormat(body: any): APIFormat | "unknown" {
   if (typeof body !== "object" || !body.model) {
     return "unknown";
   }
@@ -89,42 +96,82 @@ export function tryInferFormat(body: any): APIFormat | "unknown" {
   return "unknown";
 }
 
-export function sendErrorToClient({
-  options,
-  req,
-  res,
-}: {
+/**
+ * Redacts the hostname from the error message if it contains a DNS resolution
+ * error. This is to avoid leaking upstream hostnames on DNS resolution errors,
+ * as those may contain sensitive information about the proxy's configuration.
+ */
+function redactHostname(options: ErrorGeneratorOptions): ErrorGeneratorOptions {
+  if (!options.message.includes("getaddrinfo")) return options;
+
+  const redacted = { ...options };
+  redacted.message = "Could not resolve hostname";
+
+  if (typeof redacted.obj?.error === "object") {
+    redacted.obj = {
+      ...redacted.obj,
+      error: { message: "Could not resolve hostname" },
+    };
+  }
+
+  return redacted;
+}
+
+/**
+ * Generates an appropriately-formatted error response and sends it to the
+ * client over their requested transport (blocking or SSE stream).
+ */
+export function sendErrorToClient(params: {
   options: ErrorGeneratorOptions;
   req: express.Request;
   res: express.Response;
 }) {
-  const { format: inputFormat } = options;
+  const { req, res } = params;
+  const options = redactHostname(params.options);
+  const { statusCode, message, title, obj: details } = options;
 
-  // This is an error thrown before we know the format of the request, so we
-  // can't send a response in the format the client expects.
+  // Since we want to send the error in a format the client understands, we
+  // need to know the request format. `setApiFormat` might not have been called
+  // yet, so we'll try to infer it from the request body.
   const format =
-    inputFormat === "unknown" ? tryInferFormat(req.body) : inputFormat;
+    options.format === "unknown" ? tryInferFormat(req.body) : options.format;
   if (format === "unknown") {
-    return res.status(options.statusCode || 400).json({
-      error: options.message,
-      details: options.obj,
+    // Early middleware error (auth, rate limit) so we can only send something
+    // generic.
+    const code = statusCode || 400;
+    const hasDetails = details && Object.keys(details).length > 0;
+    return res.status(code).json({
+      error: {
+        message,
+        type: http.STATUS_CODES[code]!.replace(/\s+/g, "_").toLowerCase(),
+      },
+      ...(hasDetails ? { details } : {}),
     });
   }
 
-  const completion = buildSpoofedCompletion({ ...options, format });
-  const event = buildSpoofedSSE({ ...options, format });
-  const isStreaming =
-    req.isStreaming || req.body.stream === true || req.body.stream === "true";
+  // Cannot modify headers if client opted into streaming and made it into the
+  // proxy request queue, because that immediately starts an SSE stream.
+  if (!res.headersSent) {
+    res.setHeader("x-oai-proxy-error", title);
+    res.setHeader("x-oai-proxy-error-status", statusCode || 500);
+  }
+
+  // By this point, we know the request format. To get the error to display in
+  // chat clients' UIs, we'll send it as a 200 response as a spoofed completion
+  // from the language model. Depending on whether the client is streaming, we
+  // will either send an SSE event or a JSON response.
+  const isStreaming = req.isStreaming || String(req.body.stream) === "true";
   if (isStreaming) {
+    // User can have opted into streaming but not made it into the queue yet,
+    // in which case the stream must be started first.
     if (!res.headersSent) {
       initializeSseStream(res);
     }
-    res.write(event);
+    res.write(buildSpoofedSSE({ ...options, format }));
     res.write(`data: [DONE]\n\n`);
     res.end();
   } else {
-    res.status(200).json(completion);
+    res.status(200).json(buildSpoofedCompletion({ ...options, format }));
  }
 }
@@ -147,6 +194,21 @@ export function buildSpoofedCompletion({
 
   switch (format) {
     case "openai":
+    case "openai-responses":
+      return {
+        id: "error-" + id,
+        object: "chat.completion",
+        created: Date.now(),
+        model,
+        usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
+        choices: [
+          {
+            message: { role: "assistant", content },
+            finish_reason: title,
+            index: 0,
+          },
+        ],
+      };
     case "mistral-ai":
       return {
         id: "error-" + id,
@@ -162,6 +224,11 @@ export function buildSpoofedCompletion({
           },
         ],
       };
+    case "mistral-text":
+      return {
+        outputs: [{ text: content, stop_reason: title }],
+        model,
+      };
     case "openai-text":
       return {
         id: "error-" + id,
@@ -193,13 +260,7 @@ export function buildSpoofedCompletion({
         stop_sequence: null,
       };
     case "google-ai":
-      // TODO: Native Google AI non-streaming responses are not supported, this
-      // is an untested guess at what the response should look like.
       return {
-        id: "error-" + id,
-        object: "chat.completion",
-        created: Date.now(),
-        model,
         candidates: [
           {
             content: { parts: [{ text: content }], role: "model" },
@@ -237,6 +298,15 @@ export function buildSpoofedSSE({
 
   switch (format) {
     case "openai":
+    case "openai-responses":
+      event = {
+        id: "chatcmpl-" + id,
+        object: "chat.completion.chunk",
+        created: Date.now(),
+        model,
+        choices: [{ delta: { content }, index: 0, finish_reason: title }],
+      };
+      break;
     case "mistral-ai":
       event = {
         id: "chatcmpl-" + id,
@@ -246,6 +316,11 @@ export function buildSpoofedSSE({
         choices: [{ delta: { content }, index: 0, finish_reason: title }],
       };
       break;
+    case "mistral-text":
+      event = {
+        outputs: [{ text: content, stop_reason: title }],
+      };
+      break;
     case "openai-text":
       event = {
         id: "cmpl-" + id,
@@ -275,7 +350,10 @@ export function buildSpoofedSSE({
       };
       break;
     case "google-ai":
-      return JSON.stringify({
+      // TODO: google ai supports two streaming transports, SSE and JSON.
+      // we currently only support SSE.
+      // return JSON.stringify({
+      event = {
         candidates: [
           {
             content: { parts: [{ text: content }], role: "model" },
@@ -285,7 +363,8 @@ export function buildSpoofedSSE({
             safetyRatings: [],
           },
         ],
-      });
+      };
+      break;
     case "openai-image":
       return JSON.stringify(obj);
     default:
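To make the "spoofed completion" behavior concrete: for an OpenAI-format streaming client, the generated error is delivered as an ordinary chat-completion chunk followed by `[DONE]`. A hedged sketch of a call site and the approximate wire output (field values are illustrative):

```ts
// Illustrative call; statusCode/title/message values are made up.
sendErrorToClient({
  options: {
    format: "openai",
    title: "Proxy error (HTTP 502 Bad Gateway)",
    message: "The proxy could not reach the upstream service.",
    reqId: req.id,
    statusCode: 502,
  },
  req,
  res,
});

// For a streaming OpenAI client the wire output is roughly:
//   data: {"id":"chatcmpl-...","object":"chat.completion.chunk",
//     "created":...,"model":"...","choices":[{"delta":{"content":
//     "### **Proxy error (HTTP 502 Bad Gateway)** ..."},"index":0,
//     "finish_reason":"Proxy error (HTTP 502 Bad Gateway)"}]}
//   data: [DONE]
```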
@@ -1,19 +1,6 @@
-import util from "util";
-import zlib from "zlib";
 import { sendProxyError } from "../common";
 import type { RawResponseBodyHandler } from "./index";
+import { decompressBuffer } from "./compression";
-
-const DECODER_MAP = {
-  gzip: util.promisify(zlib.gunzip),
-  deflate: util.promisify(zlib.inflate),
-  br: util.promisify(zlib.brotliDecompress),
-};
-
-const isSupportedContentEncoding = (
-  contentEncoding: string
-): contentEncoding is keyof typeof DECODER_MAP => {
-  return contentEncoding in DECODER_MAP;
-};
 
 /**
  * Handles the response from the upstream service and decodes the body if
@@ -35,42 +22,49 @@ export const handleBlockingResponse: RawResponseBodyHandler = async (
     throw err;
   }
 
-  return new Promise<string>((resolve, reject) => {
+  return new Promise((resolve, reject) => {
     let chunks: Buffer[] = [];
     proxyRes.on("data", (chunk) => chunks.push(chunk));
     proxyRes.on("end", async () => {
-      let body = Buffer.concat(chunks);
-
       const contentEncoding = proxyRes.headers["content-encoding"];
-      if (contentEncoding) {
-        if (isSupportedContentEncoding(contentEncoding)) {
-          const decoder = DECODER_MAP[contentEncoding];
-          // @ts-ignore - started failing after upgrading TypeScript, don't care
-          // as it was never a problem.
-          body = await decoder(body);
-        } else {
-          const error = `Proxy received response with unsupported content-encoding: ${contentEncoding}`;
-          req.log.warn({ contentEncoding, key: req.key?.hash }, error);
-          sendProxyError(req, res, 500, "Internal Server Error", {
-            error,
-            contentEncoding,
-          });
-          return reject(error);
-        }
+      const contentType = proxyRes.headers["content-type"];
+      let body: string | Buffer = Buffer.concat(chunks);
+      const rejectWithMessage = function (msg: string, err: Error) {
+        const error = `${msg} (${err.message})`;
+        req.log.warn(
+          { msg: error, stack: err.stack },
+          "Error in blocking response handler"
+        );
+        sendProxyError(req, res, 500, "Internal Server Error", { error });
+        return reject(error);
+      };
+
+      try {
+        body = await decompressBuffer(body, contentEncoding);
+      } catch (e) {
+        return rejectWithMessage(`Could not decode response body`, e);
       }
 
       try {
-        if (proxyRes.headers["content-type"]?.includes("application/json")) {
-          const json = JSON.parse(body.toString());
-          return resolve(json);
-        }
-        return resolve(body.toString());
+        return resolve(tryParseAsJson(body, contentType));
       } catch (e) {
-        const msg = `Proxy received response with invalid JSON: ${e.message}`;
-        req.log.warn({ error: e.stack, key: req.key?.hash }, msg);
-        sendProxyError(req, res, 500, "Internal Server Error", { error: msg });
-        return reject(msg);
+        return rejectWithMessage("API responded with invalid JSON", e);
      }
    });
  });
 };
+
+function tryParseAsJson(body: string, contentType?: string) {
+  // If the response is declared as JSON, it must parse or we will throw.
+  if (contentType?.includes("application/json")) {
+    return JSON.parse(body);
+  }
+  // If it's not declared as JSON, we'll try to parse it as JSON anyway, since
+  // some APIs return the wrong content-type header in some cases. If it fails
+  // to parse, we'll just return the raw body without throwing.
+  try {
+    return JSON.parse(body);
+  } catch (e) {
+    return body;
+  }
+}
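The tolerant parse above is easiest to understand by example:

```ts
tryParseAsJson('{"a":1}', "application/json"); // => { a: 1 }
tryParseAsJson('{"a":1}', "text/plain");       // => { a: 1 } (best effort)
tryParseAsJson("hello", "text/plain");         // => "hello" (raw passthrough)
tryParseAsJson("hello", "application/json");   // throws SyntaxError
```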
@@ -1,6 +1,5 @@
 import express from "express";
 import { pipeline, Readable, Transform } from "stream";
-import StreamArray from "stream-json/streamers/StreamArray";
 import { StringDecoder } from "string_decoder";
 import { promisify } from "util";
 import type { logger } from "../../../logger";
@@ -18,43 +17,45 @@ import { getAwsEventStreamDecoder } from "./streaming/aws-event-stream-decoder";
 import { EventAggregator } from "./streaming/event-aggregator";
 import { SSEMessageTransformer } from "./streaming/sse-message-transformer";
 import { SSEStreamAdapter } from "./streaming/sse-stream-adapter";
+import { getStreamDecompressor } from "./compression";
 
 const pipelineAsync = promisify(pipeline);
 
 /**
- * `handleStreamedResponse` consumes and transforms a streamed response from the
- * upstream service, forwarding events to the client in their requested format.
+ * `handleStreamedResponse` consumes a streamed response from the upstream API,
+ * decodes chunk-by-chunk into a stream of events, transforms those events into
+ * the client's requested format, and forwards the result to the client.
+ *
  * After the entire stream has been consumed, it resolves with the full response
 * body so that subsequent middleware in the chain can process it as if it were
- * a non-streaming response.
+ * a non-streaming response (to count output tokens, track usage, etc).
 *
- * In the event of an error, the request's streaming flag is unset and the non-
- * streaming response handler is called instead.
- *
- * If the error is retryable, that handler will re-enqueue the request and also
- * reset the streaming flag. Unfortunately the streaming flag is set and unset
- * in multiple places, so it's hard to keep track of.
+ * In the event of an error, the request's streaming flag is unset and the
+ * request is bounced back to the non-streaming response handler. If the error
+ * is retryable, that handler will re-enqueue the request and also reset the
+ * streaming flag. Unfortunately the streaming flag is set and unset in multiple
+ * places, so it's hard to keep track of.
 */
 export const handleStreamedResponse: RawResponseBodyHandler = async (
   proxyRes,
   req,
   res
 ) => {
-  const { hash } = req.key!;
+  const { headers, statusCode } = proxyRes;
   if (!req.isStreaming) {
     throw new Error("handleStreamedResponse called for non-streaming request.");
   }
 
-  if (proxyRes.statusCode! > 201) {
+  if (statusCode! > 201) {
     req.isStreaming = false;
     req.log.warn(
-      { statusCode: proxyRes.statusCode, key: hash },
+      { statusCode },
       `Streaming request returned error status code. Falling back to non-streaming response handler.`
     );
     return handleBlockingResponse(proxyRes, req, res);
   }
 
-  req.log.debug({ headers: proxyRes.headers }, `Starting to proxy SSE stream.`);
+  req.log.debug({ headers }, `Starting to proxy SSE stream.`);
 
   // Typically, streaming will have already been initialized by the request
   // queue to send heartbeat pings.
@@ -65,18 +66,25 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
 
   const prefersNativeEvents = req.inboundApi === req.outboundApi;
   const streamOptions = {
-    contentType: proxyRes.headers["content-type"],
+    contentType: headers["content-type"],
     api: req.outboundApi,
     logger: req.log,
   };
 
-  // Decoder turns the raw response stream into a stream of events in some
-  // format (text/event-stream, vnd.amazon.event-stream, streaming JSON, etc).
+  // While the request is streaming, aggregator collects all events so that we
+  // can compile them into a single response object and publish that to the
+  // remaining middleware. Because we have an OpenAI transformer for every
+  // supported format, EventAggregator always consumes OpenAI events so that we
+  // only have to write one aggregator (OpenAI input) for each output format.
+  const aggregator = new EventAggregator(req);
+
+  const decompressor = getStreamDecompressor(headers["content-encoding"]);
+  // Decoder reads from the response bytes to produce a stream of plaintext.
   const decoder = getDecoder({ ...streamOptions, input: proxyRes });
-  // Adapter transforms the decoded events into server-sent events.
+  // Adapter consumes the decoded text and produces server-sent events so we
+  // have a standard event format for the client and to translate between API
+  // message formats.
   const adapter = new SSEStreamAdapter(streamOptions);
-  // Aggregator compiles all events into a single response object.
-  const aggregator = new EventAggregator({ format: req.outboundApi });
   // Transformer converts server-sent events from one vendor's API message
   // format to another.
   const transformer = new SSEMessageTransformer({
@@ -98,7 +106,7 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
   try {
     await Promise.race([
       handleAbortedStream(req, res),
-      pipelineAsync(proxyRes, decoder, adapter, transformer),
+      pipelineAsync(proxyRes, decompressor, decoder, adapter, transformer),
     ]);
     req.log.debug(`Finished proxying SSE stream.`);
     res.end();
@@ -165,14 +173,13 @@ function getDecoder(options: {
   logger: typeof logger;
   contentType?: string;
 }) {
-  const { api, contentType, input, logger } = options;
+  const { contentType, input, logger } = options;
   if (contentType?.includes("application/vnd.amazon.eventstream")) {
     return getAwsEventStreamDecoder({ input, logger });
-  } else if (api === "google-ai") {
-    return StreamArray.withParser();
+  } else if (contentType?.includes("application/json")) {
+    throw new Error("JSON streaming not supported, request SSE instead");
   } else {
-    // Passthrough stream, but ensures split chunks across multi-byte characters
-    // are handled correctly.
+    // Ensures split chunks across multi-byte characters are handled correctly.
     const stringDecoder = new StringDecoder("utf8");
     return new Transform({
       readableObjectMode: true,
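The streaming pipeline now has five stages (decompressor, decoder, adapter, transformer, with the aggregator tapping the transformed events). A standalone sketch of the same staged pattern using only Node built-ins, not the proxy's actual classes:

```ts
import { pipeline, PassThrough, Transform } from "stream";
import { promisify } from "util";
import { createGunzip } from "zlib";

const pipelineAsync = promisify(pipeline);

// bytes -> decompress -> decode to utf8 text -> consume
const decodeToText = new Transform({
  readableObjectMode: true,
  transform(chunk, _enc, cb) {
    cb(null, chunk.toString("utf8"));
  },
});

async function demo(source: NodeJS.ReadableStream, gzipped: boolean) {
  const decompressor = gzipped ? createGunzip() : new PassThrough();
  const sink = new PassThrough({ objectMode: true });
  sink.on("data", (text) => console.log("decoded:", text));
  await pipelineAsync(source, decompressor, decodeToText, sink);
}
```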
|||||||
@@ -1,10 +1,12 @@
|
|||||||
/* This file is fucking horrendous, sorry */
|
/* This file is fucking horrendous, sorry */
|
||||||
|
// TODO: extract all per-service error response handling into its own modules
|
||||||
import { Request, Response } from "express";
|
import { Request, Response } from "express";
|
||||||
import * as http from "http";
|
import * as http from "http";
|
||||||
import { config } from "../../../config";
|
import { config } from "../../../config";
|
||||||
import { HttpError, RetryableError } from "../../../shared/errors";
|
import { HttpError, RetryableError } from "../../../shared/errors";
|
||||||
import { keyPool } from "../../../shared/key-management";
|
import { keyPool, GoogleAIKey } from "../../../shared/key-management";
|
||||||
import { getOpenAIModelFamily } from "../../../shared/models";
|
import { logger } from "../../../logger";
|
||||||
|
import { getOpenAIModelFamily, GoogleAIModelFamily } from "../../../shared/models";
|
||||||
import { countTokens } from "../../../shared/tokenization";
|
import { countTokens } from "../../../shared/tokenization";
|
||||||
import {
|
import {
|
||||||
incrementPromptCount,
|
incrementPromptCount,
|
||||||
@@ -46,7 +48,7 @@ export type ProxyResHandlerWithBody = (
|
|||||||
*/
|
*/
|
||||||
body: string | Record<string, any>
|
body: string | Record<string, any>
|
||||||
) => Promise<void>;
|
) => Promise<void>;
|
||||||
export type ProxyResMiddleware = ProxyResHandlerWithBody[];
|
export type ProxyResMiddleware = ProxyResHandlerWithBody[] | undefined;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns a on.proxyRes handler that executes the given middleware stack after
|
* Returns a on.proxyRes handler that executes the given middleware stack after
|
||||||
@@ -70,11 +72,22 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
|
|||||||
req: Request,
|
req: Request,
|
||||||
res: Response
|
res: Response
|
||||||
) => {
|
) => {
|
||||||
const initialHandler: RawResponseBodyHandler = req.isStreaming
|
// Proxied request has by now been sent to the upstream API, so we revert
|
||||||
|
// tracked mutations that were only needed to send the request.
|
||||||
|
// This generally means path adjustment, headers, and body serialization.
|
||||||
|
if (req.changeManager) {
|
||||||
|
req.changeManager.revert();
|
||||||
|
}
|
||||||
|
|
||||||
|
const initialHandler = req.isStreaming
|
||||||
? handleStreamedResponse
|
? handleStreamedResponse
|
||||||
: handleBlockingResponse;
|
: handleBlockingResponse;
|
||||||
let lastMiddleware = initialHandler.name;
|
let lastMiddleware = initialHandler.name;
|
||||||
|
|
||||||
|
if (Buffer.isBuffer(req.body)) {
|
||||||
|
req.body = JSON.parse(req.body.toString());
|
||||||
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
const body = await initialHandler(proxyRes, req, res);
|
const body = await initialHandler(proxyRes, req, res);
|
||||||
const middlewareStack: ProxyResMiddleware = [];
|
const middlewareStack: ProxyResMiddleware = [];
|
||||||
@@ -99,7 +112,7 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
         saveImage,
         logPrompt,
         logEvent,
-        ...apiMiddleware
+        ...(apiMiddleware ?? [])
       );
     }

@@ -123,15 +136,15 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
       }

       const { stack, message } = error;
-      const info = { stack, lastMiddleware, key: req.key?.hash };
+      const details = { stack, message, lastMiddleware, key: req.key?.hash };
       const description = `Error while executing proxy response middleware: ${lastMiddleware} (${message})`;

       if (res.headersSent) {
-        req.log.error(info, description);
+        req.log.error(details, description);
         if (!res.writableEnded) res.end();
         return;
       } else {
-        req.log.error(info, description);
+        req.log.error(details, description);
         res
           .status(500)
           .json({ error: "Internal server error", proxy_note: description });
@@ -162,53 +175,64 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
 ) => {
   const statusCode = proxyRes.statusCode || 500;
   const statusMessage = proxyRes.statusMessage || "Internal Server Error";
-  let errorPayload: ProxiedErrorPayload;
+  const service = req.key!.service;
+
+  // Not an error, continue to next response handler
   if (statusCode < 400) return;

+  // Parse the error response body
+  let errorPayload: ProxiedErrorPayload;
   try {
     assertJsonResponse(body);
     errorPayload = body;
   } catch (parseError) {
-    // Likely Bad Gateway or Gateway Timeout from upstream's reverse proxy
-    const hash = req.key?.hash;
-    req.log.warn({ statusCode, statusMessage, key: hash }, parseError.message);
+    const strBody = String(body).slice(0, 128);
+    req.log.error({ statusCode, strBody }, "Error body is not JSON");

-    const errorObject = {
+    const details = {
       error: parseError.message,
       status: statusCode,
       statusMessage,
-      proxy_note: `Proxy got back an error, but it was not in JSON format. This is likely a temporary problem with the upstream service.`,
+      proxy_note: `Proxy got back an error, but it was not in JSON format. This is likely a temporary problem with the upstream service. Response body: ${strBody}`,
     };

-    sendProxyError(req, res, statusCode, statusMessage, errorObject);
+    sendProxyError(req, res, statusCode, statusMessage, details);
     throw new HttpError(statusCode, parseError.message);
   }

+  // Extract the error type from the response body depending on the service
+  if (service === "gcp") {
+    if (Array.isArray(errorPayload)) {
+      errorPayload = errorPayload[0];
+    }
+  }
   const errorType =
     errorPayload.error?.code ||
     errorPayload.error?.type ||
     getAwsErrorType(proxyRes.headers["x-amzn-errortype"]);

   req.log.warn(
-    { statusCode, type: errorType, errorPayload, key: req.key?.hash },
-    `Received error response from upstream. (${proxyRes.statusMessage})`
+    { statusCode, statusMessage, errorType, errorPayload, key: req.key?.hash },
+    `API returned an error.`
   );

-  // TODO: split upstream error handling into separate modules for each service,
-  // this is out of control.

-  const service = req.key!.service;
+  // Try to convert response body to a ProxiedErrorPayload with message/type
   if (service === "aws") {
-    // Try to standardize the error format for AWS
     errorPayload.error = { message: errorPayload.message, type: errorType };
     delete errorPayload.message;
+  } else if (service === "gcp") {
+    if (errorPayload.error?.code) {
+      errorPayload.error = {
+        message: errorPayload.error.message,
+        type: errorPayload.error.status || errorPayload.error.code,
+      };
+    }
   }

+  // Figure out what to do with the error
+  // TODO: separate error handling for each service
   if (statusCode === 400) {
     switch (service) {
       case "openai":
-      case "google-ai":
       case "mistral-ai":
       case "azure":
         const filteredCodes = ["content_policy_violation", "content_filter"];
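To make the normalization above concrete: GCP can return an array wrapping a coded error object, which the handler unwraps and reshapes into the common `{ error: { message, type } }` form the rest of the function expects. A sketch with an illustrative payload (not taken from the diff):

```ts
// Illustrative GCP-style error body: an array wrapping a coded error object.
const raw: any = [
  { error: { code: 429, message: "Quota exceeded", status: "RESOURCE_EXHAUSTED" } },
];

// Same unwrap-then-normalize steps as the handler above.
let payload = Array.isArray(raw) ? raw[0] : raw;
if (payload.error?.code) {
  payload.error = {
    message: payload.error.message,
    type: payload.error.status || payload.error.code,
  };
}
// payload -> { error: { message: "Quota exceeded", type: "RESOURCE_EXHAUSTED" } }
```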
@@ -220,20 +244,54 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
           // same 429 billing error that other models return.
           await handleOpenAIRateLimitError(req, errorPayload);
         } else {
-          errorPayload.proxy_note = `The upstream API rejected the request. Your prompt may be too long for ${req.body?.model}.`;
+          errorPayload.proxy_note = `The upstream API rejected the request. Check the error message for details.`;
         }
         break;
+      case "deepseek":
+        await handleDeepseekBadRequestError(req, errorPayload);
+        break;
+      case "xai":
+        await handleXaiBadRequestError(req, errorPayload);
+        break;
       case "anthropic":
       case "aws":
-        await handleAnthropicBadRequestError(req, errorPayload);
+      case "gcp":
+        await handleAnthropicAwsBadRequestError(req, errorPayload);
+        break;
+      case "google-ai":
+        await handleGoogleAIBadRequestError(req, errorPayload);
+        break;
+      case "cohere":
+        errorPayload.proxy_note = `The upstream Cohere API rejected the request. Check the error message for details.`;
+        break;
+      case "qwen":
+        // No special handling yet
+        break;
+      case "moonshot":
+        errorPayload.proxy_note = `The Moonshot API rejected the request. Check the error message for details.`;
         break;
       default:
         assertNever(service);
     }
   } else if (statusCode === 401) {
-    // Key is invalid or was revoked
+    // Universal 401 handling - authentication failed, retry with different key
     keyPool.disable(req.key!, "revoked");
-    errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
+    await reenqueueRequest(req);
+    throw new RetryableError(`${service} key authentication failed, retrying with different key.`);
+  } else if (statusCode === 402) {
+    // Deepseek specific - insufficient balance
+    if (service === "deepseek") {
+      keyPool.disable(req.key!, "quota");
+      await reenqueueRequest(req);
+      throw new RetryableError("Deepseek key has insufficient balance, retrying with different key.");
+    }
+  } else if (statusCode === 405) {
+    // Xai specific - insufficient balance
+    if (service === "xai") {
+      keyPool.disable(req.key!, "quota");
+      await reenqueueRequest(req);
+      throw new RetryableError("XAI key has insufficient balance, retrying with different key.");
+    }
   } else if (statusCode === 403) {
     switch (service) {
       case "anthropic":
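The pattern this hunk repeats is: disable or rate-limit the key, put the request back on the queue via `reenqueueRequest`, then throw `RetryableError` so the response pipeline stops without writing anything to the client. Neither class is defined in this diff; a minimal sketch of the contract they imply, under that assumption:

```ts
// Assumed shape: a marker error that the proxyRes handler catches and
// swallows, because the request has already been handed back to the queue.
class RetryableError extends Error {}

async function withKeyRetry(handle: () => Promise<void>) {
  try {
    await handle();
  } catch (e) {
    if (e instanceof RetryableError) {
      // Request was re-enqueued with a different key; do not respond here.
      return;
    }
    throw e; // real errors still propagate to the error middleware
  }
}
```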
@@ -241,13 +299,11 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
           errorType === "permission_error" &&
           errorPayload.error?.message?.toLowerCase().includes("multimodal")
         ) {
-          req.log.warn(
-            { key: req.key?.hash },
-            "This Anthropic key does not support multimodal prompts."
-          );
           keyPool.update(req.key!, { allowsMultimodality: false });
           await reenqueueRequest(req);
-          throw new RetryableError("Claude request re-enqueued because key does not support multimodality.");
+          throw new RetryableError(
+            "Claude request re-enqueued because key does not support multimodality."
+          );
         } else {
           keyPool.disable(req.key!, "revoked");
           errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
@@ -258,7 +314,8 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
           case "UnrecognizedClientException":
             // Key is invalid.
             keyPool.disable(req.key!, "revoked");
-            errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
+            await reenqueueRequest(req);
+            throw new RetryableError("AWS key is invalid, retrying with different key.");
             break;
           case "AccessDeniedException":
             const isModelAccessError =
@@ -275,6 +332,16 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
           default:
             errorPayload.proxy_note = `Received 403 error. Key may be invalid.`;
         }
+        return;
+      case "mistral-ai":
+      case "gcp":
+        keyPool.disable(req.key!, "revoked");
+        await reenqueueRequest(req);
+        throw new RetryableError("GCP key is invalid, retrying with different key.");
+      case "moonshot":
+        keyPool.disable(req.key!, "revoked");
+        await reenqueueRequest(req);
+        throw new RetryableError("Moonshot key is invalid, retrying with different key.");
     }
   } else if (statusCode === 429) {
     switch (service) {
@@ -287,6 +354,9 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
       case "aws":
         await handleAwsRateLimitError(req, errorPayload);
         break;
+      case "gcp":
+        await handleGcpRateLimitError(req, errorPayload);
+        break;
       case "azure":
       case "mistral-ai":
         await handleAzureRateLimitError(req, errorPayload);
@@ -294,14 +364,30 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
       case "google-ai":
         await handleGoogleAIRateLimitError(req, errorPayload);
         break;
+      case "deepseek":
+        await handleDeepseekRateLimitError(req, errorPayload);
+        break;
+      case "xai":
+        await handleXaiRateLimitError(req, errorPayload);
+        break;
+      case "cohere":
+        await handleCohereRateLimitError(req, errorPayload);
+        break;
+      case "qwen":
+        // Similar handling to OpenAI for rate limits
+        await handleOpenAIRateLimitError(req, errorPayload);
+        break;
+      case "moonshot":
+        await handleMoonshotRateLimitError(req, errorPayload);
+        break;
       default:
-        assertNever(service);
+        assertNever(service as never);
     }
   } else if (statusCode === 404) {
     // Most likely model not found
     switch (service) {
       case "openai":
-        if (errorPayload.error?.code === "model_not_found") {
+        if (errorType === "model_not_found") {
           const requestedModel = req.body.model;
           const modelFamily = getOpenAIModelFamily(requestedModel);
           errorPayload.proxy_note = `The key assigned to your prompt does not support the requested model (${requestedModel}, family: ${modelFamily}).`;
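Note that `assertNever(service as never)` trades away the exhaustiveness guarantee `assertNever` exists to provide: with the cast, the compiler no longer flags a newly added service that has no `case`. A small self-contained illustration of the difference:

```ts
// Typical assertNever helper: accepting `never` makes missed cases a type error.
function assertNever(x: never): never {
  throw new Error(`Unhandled case: ${JSON.stringify(x)}`);
}

type Service = "openai" | "anthropic" | "cohere";

function label(s: Service): string {
  switch (s) {
    case "openai":
      return "OpenAI";
    case "anthropic":
      return "Anthropic";
    case "cohere":
      return "Cohere";
    default:
      // All cases are handled, so `s` narrows to `never` and this type-checks.
      // If a case above were deleted, this call would fail to compile -
      // unless silenced with `assertNever(s as never)`, as in the diff.
      return assertNever(s);
  }
}
```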
@@ -312,28 +398,41 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
         }
         break;
       case "anthropic":
-        errorPayload.proxy_note = `The requested Claude model might not exist, or the key might not be provisioned for it.`;
-        break;
       case "google-ai":
-        errorPayload.proxy_note = `The requested Google AI model might not exist, or the key might not be provisioned for it.`;
-        break;
       case "mistral-ai":
-        errorPayload.proxy_note = `The requested Mistral AI model might not exist, or the key might not be provisioned for it.`;
-        break;
       case "aws":
-        errorPayload.proxy_note = `The requested AWS resource might not exist, or the key might not have access to it.`;
-        break;
+      case "gcp":
       case "azure":
-        errorPayload.proxy_note = `The assigned Azure deployment does not support the requested model.`;
+      case "deepseek":
+      case "xai":
+      case "cohere":
+      case "qwen":
+        errorPayload.proxy_note = `The key assigned to your prompt does not support the requested model.`;
         break;
       default:
-        assertNever(service);
+        assertNever(service as never);
+    }
+  } else if (statusCode === 503) {
+    switch (service) {
+      case "aws":
+        // Re-enqueue on any 503 from AWS Bedrock
+        req.log.warn(
+          { key: req.key?.hash, errorType, errorPayload },
+          `AWS Bedrock service unavailable (503). Re-enqueueing request.`
+        );
+        await reenqueueRequest(req);
+        throw new RetryableError(
+          "AWS Bedrock service unavailable (503), re-enqueued request."
+        );
+      default:
+        errorPayload.proxy_note = `Upstream service unavailable. Try again later.`;
+        break;
     }
   } else {
     errorPayload.proxy_note = `Unrecognized error from upstream service.`;
   }

-  // Some OAI errors contain the organization ID, which we don't want to reveal.
+  // Redact the OpenAI org id from the error message
   if (errorPayload.error?.message) {
     errorPayload.error.message = errorPayload.error.message.replace(
       /org-.{24}/gm,
@@ -341,13 +440,14 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
     );
   }

+  // Send the error to the client
   sendProxyError(req, res, statusCode, statusMessage, errorPayload);
-  // This is bubbled up to onProxyRes's handler for logging but will not trigger
-  // a write to the response as `sendProxyError` has just done that.
+  // Re-throw the error to bubble up to onProxyRes's handler for logging
   throw new HttpError(statusCode, errorPayload.error?.message);
 };

-async function handleAnthropicBadRequestError(
+async function handleAnthropicAwsBadRequestError(
   req: Request,
   errorPayload: ProxiedErrorPayload
 ) {
@@ -371,25 +471,32 @@ async function handleAnthropicBadRequestError(
   // {"type":"error","error":{"type":"invalid_request_error","message":"Your credit balance is too low to access the Claude API. Please go to Plans & Billing to upgrade or purchase credits."}}
   const isOverQuota =
     error?.message?.match(/usage blocked until/i) ||
-    error?.message?.match(/credit balance is too low/i);
+    error?.message?.match(/credit balance is too low/i) ||
+    error?.message?.match(/You will regain access on/i) ||
+    error?.message?.match(/reached your specified API usage limits/i);
   if (isOverQuota) {
     req.log.warn(
       { key: req.key?.hash, message: error?.message },
       "Anthropic key has hit spending limit and will be disabled."
     );
     keyPool.disable(req.key!, "quota");
-    errorPayload.proxy_note = `Assigned key has hit its spending limit. ${error?.message}`;
+    await reenqueueRequest(req);
+    throw new RetryableError("Claude key hit spending limit, retrying with different key.");
     return;
   }

-  const isDisabled = error?.message?.match(/organization has been disabled/i);
+  const isDisabled =
+    error?.message?.match(/organization has been disabled/i) ||
+    error?.message?.match(/^operation not allowed/i) ||
+    error?.message?.match(/credential is only authorized for use with Claude Code/i);
   if (isDisabled) {
     req.log.warn(
       { key: req.key?.hash, message: error?.message },
-      "Anthropic key has been disabled."
+      "Anthropic/AWS key has been disabled."
     );
     keyPool.disable(req.key!, "revoked");
-    errorPayload.proxy_note = `Assigned key has been disabled. (${error?.message})`;
+    await reenqueueRequest(req);
+    throw new RetryableError("Claude key has been disabled, retrying with different key.");
     return;
   }

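Both branches above classify a key by testing its error message against a list of regexes, and the same shape reappears in the Google AI handlers later in this diff. A generic sketch of that idiom, with illustrative names and patterns:

```ts
// Illustrative helper: map an upstream error message to a key disposition.
type Disposition = "quota" | "revoked" | "unknown";

const QUOTA_PATTERNS = [/usage blocked until/i, /credit balance is too low/i];
const REVOKED_PATTERNS = [/organization has been disabled/i, /^operation not allowed/i];

function classifyKeyError(message: string | undefined): Disposition {
  if (!message) return "unknown";
  if (QUOTA_PATTERNS.some((p) => p.test(message))) return "quota";
  if (REVOKED_PATTERNS.some((p) => p.test(message))) return "revoked";
  return "unknown";
}

// classifyKeyError("Your credit balance is too low ...") === "quota"
```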
@@ -427,6 +534,119 @@ async function handleAwsRateLimitError(
   }
 }

+async function handleGcpRateLimitError(
+  req: Request,
+  errorPayload: ProxiedErrorPayload
+) {
+  if (errorPayload.error?.type === "RESOURCE_EXHAUSTED") {
+    keyPool.markRateLimited(req.key!);
+    await reenqueueRequest(req);
+    throw new RetryableError("GCP rate-limited request re-enqueued.");
+  } else {
+    errorPayload.proxy_note = `Unrecognized 429 Too Many Requests error from GCP.`;
+  }
+}
+
+async function handleDeepseekRateLimitError(
+  req: Request,
+  errorPayload: ProxiedErrorPayload
+) {
+  keyPool.markRateLimited(req.key!);
+  await reenqueueRequest(req);
+  throw new RetryableError("Deepseek rate-limited request re-enqueued.");
+}
+
+async function handleDeepseekBadRequestError(
+  req: Request,
+  errorPayload: ProxiedErrorPayload
+) {
+  // Based on the checker code, a 400 response means the key is valid but there was some other error
+  errorPayload.proxy_note = `The API rejected the request. Check the error message for details.`;
+}
+
+async function handleXaiRateLimitError(
+  req: Request,
+  errorPayload: ProxiedErrorPayload
+) {
+  keyPool.markRateLimited(req.key!);
+  await reenqueueRequest(req);
+  throw new RetryableError("Xai rate-limited request re-enqueued.");
+}
+
+async function handleXaiBadRequestError(
+  req: Request,
+  errorPayload: ProxiedErrorPayload
+) {
+  // Based on the checker code, a 400 response means the key is valid but there was some other error
+  errorPayload.proxy_note = `The API rejected the request. Check the error message for details.`;
+}
+
+async function handleCohereRateLimitError(
+  req: Request,
+  errorPayload: ProxiedErrorPayload
+) {
+  // Mark the current key as rate limited
+  keyPool.markRateLimited(req.key!);
+
+  // Store the original request attempt count or initialize it
+  req.retryCount = (req.retryCount || 0) + 1;
+
+  // Only retry up to 3 times
+  if (req.retryCount <= 3) {
+    try {
+      // Add a small delay before retrying (1-5 seconds)
+      const delayMs = 1000 + Math.floor(Math.random() * 4000);
+      await new Promise(resolve => setTimeout(resolve, delayMs));
+
+      // Re-enqueue the request to try with a different key
+      await reenqueueRequest(req);
+      req.log.info({ attempt: req.retryCount }, "Cohere rate-limited request re-enqueued");
+      throw new RetryableError(`Cohere rate-limited request re-enqueued (attempt ${req.retryCount}/3).`);
+    } catch (error) {
+      if (error instanceof RetryableError) {
+        throw error; // Rethrow RetryableError to continue the flow
+      }
+      req.log.error({ error }, "Failed to re-enqueue rate-limited Cohere request");
+    }
+  }
+
+  // If we've already retried 3 times, show the error to the user
+  errorPayload.proxy_note = "Too many requests to the Cohere API. Please try again later.";
+}
+
+async function handleMoonshotRateLimitError(
+  req: Request,
+  errorPayload: ProxiedErrorPayload
+) {
+  // Mark the current key as rate limited
+  keyPool.markRateLimited(req.key!);
+
+  // Store the original request attempt count or initialize it
+  req.retryCount = (req.retryCount || 0) + 1;
+
+  // Only retry up to 3 times with different keys
+  if (req.retryCount <= 3) {
+    try {
+      // Add a small delay before retrying (2-6 seconds for Moonshot)
+      const delayMs = 2000 + Math.floor(Math.random() * 4000);
+      await new Promise(resolve => setTimeout(resolve, delayMs));
+
+      // Re-enqueue the request to try with a different key
+      await reenqueueRequest(req);
+      req.log.info({ attempt: req.retryCount }, "Moonshot rate-limited request re-enqueued");
+      throw new RetryableError(`Moonshot rate-limited request re-enqueued (attempt ${req.retryCount}/3).`);
+    } catch (error) {
+      if (error instanceof RetryableError) {
+        throw error; // Rethrow RetryableError to continue the flow
+      }
+      req.log.error({ error }, "Failed to re-enqueue rate-limited Moonshot request");
+    }
+  }
+
+  // If we've already retried 3 times, show the error to the user
+  errorPayload.proxy_note = "Too many requests to the Moonshot API. Please try again later.";
+}
+
 async function handleOpenAIRateLimitError(
   req: Request,
   errorPayload: ProxiedErrorPayload
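The Cohere and Moonshot handlers above duplicate the same jittered-delay logic with different base delays. A small factored-out sketch of that logic (a hypothetical refactor, not part of the diff):

```ts
// Hypothetical helper: wait a random delay in [baseMs, baseMs + jitterMs)
// before retrying, so concurrent retries don't stampede the upstream API.
async function jitteredDelay(baseMs: number, jitterMs = 4000): Promise<void> {
  const delayMs = baseMs + Math.floor(Math.random() * jitterMs);
  await new Promise((resolve) => setTimeout(resolve, delayMs));
}

// Usage matching the handlers above:
//   await jitteredDelay(1000); // Cohere: 1-5 seconds
//   await jitteredDelay(2000); // Moonshot: 2-6 seconds
```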
@@ -437,17 +657,20 @@ async function handleOpenAIRateLimitError(
     case "invalid_request_error": // this is the billing_hard_limit_reached error seen in some cases
       // Billing quota exceeded (key is dead, disable it)
      keyPool.disable(req.key!, "quota");
-      errorPayload.proxy_note = `Assigned key's quota has been exceeded. Please try again.`;
+      await reenqueueRequest(req);
+      throw new RetryableError("OpenAI key quota exceeded, retrying with different key.");
       break;
     case "access_terminated":
       // Account banned (key is dead, disable it)
       keyPool.disable(req.key!, "revoked");
-      errorPayload.proxy_note = `Assigned key has been banned by OpenAI for policy violations. Please try again.`;
+      await reenqueueRequest(req);
+      throw new RetryableError("OpenAI key banned for policy violations, retrying with different key.");
       break;
     case "billing_not_active":
       // Key valid but account billing is delinquent
       keyPool.disable(req.key!, "quota");
-      errorPayload.proxy_note = `Assigned key has been disabled due to delinquent billing. Please try again.`;
+      await reenqueueRequest(req);
+      throw new RetryableError("OpenAI key billing not active, retrying with different key.");
       break;
     case "requests":
     case "tokens":
@@ -461,58 +684,8 @@ async function handleOpenAIRateLimitError(
       // Per-minute request or token rate limit is exceeded, which we can retry
       await reenqueueRequest(req);
       throw new RetryableError("Rate-limited request re-enqueued.");
-    // WIP/nonfunctional
-    // case "tokens_usage_based":
-    //   // Weird new rate limit type that seems limited to preview models.
-    //   // Distinct from `tokens` type. Can be per-minute or per-day.
-    //
-    //   // I've seen reports of this error for 500k tokens/day and 10k tokens/min.
-    //   // 10k tokens per minute is problematic, because this is much less than
-    //   // GPT4-Turbo's max context size for a single prompt and is effectively a
-    //   // cap on the max context size for just that key+model, which the app is
-    //   // not able to deal with.
-    //
-    //   // Similarly if there is a 500k tokens per day limit and 450k tokens have
-    //   // been used today, the max context for that key becomes 50k tokens until
-    //   // the next day and becomes progressively smaller as more tokens are used.
-    //
-    //   // To work around these keys we will first retry the request a few times.
-    //   // After that we will reject the request, and if it's a per-day limit we
-    //   // will also disable the key.
-    //
-    //   // "Rate limit reached for gpt-4-1106-preview in organization org-xxxxxxxxxxxxxxxxxxx on tokens_usage_based per day: Limit 500000, Used 460000, Requested 50000"
-    //   // "Rate limit reached for gpt-4-1106-preview in organization org-xxxxxxxxxxxxxxxxxxx on tokens_usage_based per min: Limit 10000, Requested 40000"
-    //
-    //   const regex =
-    //     /Rate limit reached for .+ in organization .+ on \w+ per (day|min): Limit (\d+)(?:, Used (\d+))?, Requested (\d+)/;
-    //   const [, period, limit, used, requested] =
-    //     errorPayload.error?.message?.match(regex) || [];
-    //
-    //   req.log.warn(
-    //     { key: req.key?.hash, period, limit, used, requested },
-    //     "Received `tokens_usage_based` rate limit error from OpenAI."
-    //   );
-    //
-    //   if (!period || !limit || !requested) {
-    //     errorPayload.proxy_note = `Unrecognized rate limit error from OpenAI. (${errorPayload.error?.message})`;
-    //     break;
-    //   }
-    //
-    //   if (req.retryCount < 2) {
-    //     await reenqueueRequest(req);
-    //     throw new RetryableError("Rate-limited request re-enqueued.");
-    //   }
-    //
-    //   if (period === "min") {
-    //     errorPayload.proxy_note = `Assigned key can't be used for prompts longer than ${limit} tokens, and no other keys are available right now. Reduce the length of your prompt or try again in a few minutes.`;
-    //   } else {
-    //     errorPayload.proxy_note = `Assigned key has reached its per-day request limit for this model. Try another model.`;
-    //   }
-    //
-    //   keyPool.markRateLimited(req.key!);
-    //   break;
     default:
-      errorPayload.proxy_note = `This is likely a temporary error with OpenAI. Try again in a few seconds.`;
+      errorPayload.proxy_note = `This is likely a temporary error with the API. Try again in a few seconds.`;
       break;
   }
   return errorPayload;
@@ -534,17 +707,193 @@ async function handleAzureRateLimitError(
   }
 }

+//{"error":{"code":400,"message":"API Key not found. Please pass a valid API key.","status":"INVALID_ARGUMENT","details":[{"@type":"type.googleapis.com/google.rpc.ErrorInfo","reason":"API_KEY_INVALID","domain":"googleapis.com","metadata":{"service":"generativelanguage.googleapis.com"}}]}}
+//{"error":{"code":400,"message":"Gemini API free tier is not available in your country. Please enable billing on your project in Google AI Studio.","status":"FAILED_PRECONDITION"}}
+async function handleGoogleAIBadRequestError(
+  req: Request,
+  errorPayload: ProxiedErrorPayload
+) {
+  const error = errorPayload.error || {};
+  // google changes this shit every few months
+  // i don't want to deal with it
+  const keyDeadMsgs = [
+    /please enable billing/i,
+    /API key not valid/i,
+    /API key expired/i,
+    /pass a valid API/i,
+  ];
+  const text = JSON.stringify(error);
+  if (keyDeadMsgs.some((msg) => text.match(msg))) {
+    req.log.warn(
+      { key: req.key?.hash, error: text },
+      "Google API key appears to be inoperative."
+    );
+    keyPool.disable(req.key!, "revoked");
+    await reenqueueRequest(req);
+    throw new RetryableError("Google API key inoperative, retrying with different key.");
+  } else {
+    req.log.warn(
+      { key: req.key?.hash, error: text },
+      "Unknown Google API error."
+    );
+    errorPayload.proxy_note = `Unrecognized error from Google AI.`;
+  }
+
+  // const { message, status, details } = error;
+  //
+  // if (status === "INVALID_ARGUMENT") {
+  //   const reason = details?.[0]?.reason;
+  //   if (reason === "API_KEY_INVALID") {
+  //     req.log.warn(
+  //       { key: req.key?.hash, status, reason, msg: error.message },
+  //       "Received `API_KEY_INVALID` error from Google AI. Check the configured API key."
+  //     );
+  //     keyPool.disable(req.key!, "revoked");
+  //     errorPayload.proxy_note = `Assigned API key is invalid.`;
+  //   }
+  // } else if (status === "FAILED_PRECONDITION") {
+  //   if (message.match(/please enable billing/i)) {
+  //     req.log.warn(
+  //       { key: req.key?.hash, status, msg: error.message },
+  //       "Cannot use key due to billing restrictions."
+  //     );
+  //     keyPool.disable(req.key!, "revoked");
+  //     errorPayload.proxy_note = `Assigned API key cannot be used.`;
+  //   }
+  // } else {
+  //   req.log.warn(
+  //     { key: req.key?.hash, status, msg: error.message },
+  //     "Received unexpected 400 error from Google AI."
+  //   );
+  // }
+}
+
 //{"error":{"code":429,"message":"Resource has been exhausted (e.g. check quota).","status":"RESOURCE_EXHAUSTED"}
+//
 async function handleGoogleAIRateLimitError(
   req: Request,
   errorPayload: ProxiedErrorPayload
 ) {
   const status = errorPayload.error?.status;
+  const text = JSON.stringify(errorPayload.error);
+  const errorMessage = errorPayload.error?.message?.toLowerCase() || '';
+
+  // sometimes they block keys by rate limiting them to 0 requests per minute
+  // for some indefinite period of time
+  const keyDeadMsgs = [
+    /GenerateContentRequestsPerMinutePerProjectPerRegion/i,
+    /"quota_limit_value":"0"/i,
+  ];
+
+  // Quota exhaustion indicators in error messages
+  const quotaExhaustedMsgs = [
+    /quota exceeded/i,
+    /free tier|free_tier/i,
+    /quota limit/i
+  ];
+
+  // If we don't have a key in the request, we can't process rate limits
+  if (!req.key) {
+    errorPayload.proxy_note = `Rate limit error but no key was found in the request.`;
+    return;
+  }
+
   switch (status) {
-    case "RESOURCE_EXHAUSTED":
-      keyPool.markRateLimited(req.key!);
+    case "RESOURCE_EXHAUSTED": {
+      // Hard disabled keys - these are completely blocked
+      if (keyDeadMsgs.some((msg) => msg.test(text))) {
+        req.log.warn(
+          { key: req.key.hash, error: text },
+          "Google API key appears to be completely disabled and will be removed from rotation."
+        );
+        keyPool.disable(req.key, "revoked");
+        errorPayload.proxy_note = `Assigned API key cannot be used.`;
+        return;
+      }
+
+      // Check if this is a quota exhaustion error rather than just a rate limit
+      const isQuotaExhausted = quotaExhaustedMsgs.some(pattern => pattern.test(text) || pattern.test(errorMessage));
+
+      if (isQuotaExhausted && req.body?.model) {
+        // Get model family for the current request
+        const modelName = req.body.model;
+        const isPro = modelName.includes('pro');
+        const isFlash = modelName.includes('flash');
+        const isUltra = modelName.includes('ultra');
+
+        req.log.warn(
+          { key: req.key.hash, model: modelName, error: text },
+          "Google API key has exhausted its quota for this model family and will be marked as overquota."
+        );
+
+        // Create a filtered list of model families that excludes the over-quota family
+        let familyToRemove: GoogleAIModelFamily | null = null;
+        if (isPro) {
+          familyToRemove = 'gemini-pro';
+          errorPayload.proxy_note = `Assigned API key has exhausted quota for Gemini Pro models.`;
+        } else if (isFlash) {
+          familyToRemove = 'gemini-flash';
+          errorPayload.proxy_note = `Assigned API key has exhausted quota for Gemini Flash models.`;
+        } else if (isUltra) {
+          familyToRemove = 'gemini-ultra';
+          errorPayload.proxy_note = `Assigned API key has exhausted quota for Gemini Ultra models.`;
+        } else {
+          // If model family can't be determined, just mark as rate limited
+          keyPool.markRateLimited(req.key);
+          errorPayload.proxy_note = `Assigned API key has exhausted quota but model family couldn't be determined.`;
+        }
+
+        // Update the modelFamilies in the key if we identified a family to remove
+        if (familyToRemove) {
+          // Get current model families, filter out the one that's over quota
+          const updatedFamilies = [...req.key.modelFamilies].filter(f => f !== familyToRemove);
+
+          // Cast the key to GoogleAIKey type to access its specific properties
+          const googleKey = req.key as GoogleAIKey;
+
+          // Track which families are over quota for future rechecking
+          const overQuotaFamilies = googleKey.overQuotaFamilies || [];
+          if (!overQuotaFamilies.includes(familyToRemove)) {
+            overQuotaFamilies.push(familyToRemove);
+          }
+
+          // Mark the key as over quota but still usable for other model families
+          req.log.info(
+            { key: req.key.hash, family: familyToRemove },
+            "Marking Google AI key as over quota for specific model family"
+          );
+
+          // First make a typed update object that includes only the properties we want to update
+          interface GoogleAIPartialUpdate {
+            modelFamilies: GoogleAIModelFamily[];
+            isOverQuota: boolean;
+            overQuotaFamilies: GoogleAIModelFamily[];
+          }
+
+          // Create a properly typed update
+          const update: GoogleAIPartialUpdate = {
+            modelFamilies: updatedFamilies as GoogleAIModelFamily[],
+            isOverQuota: true,
+            overQuotaFamilies
+          };
+
+          // Use the standard KeyPool interface
+          // This gets around the TypeScript issues by letting KeyPool handle routing
+          const clonedKey = { ...req.key }; // Make a clone since we'll be modifying it
+          keyPool.update(clonedKey, update as any);
+        }
+
+        // Re-enqueue with a different key
+        await reenqueueRequest(req);
+        throw new RetryableError("Quota-exhausted request re-enqueued with a different key.");
+      }
+
+      // Standard rate limiting - just mark as rate limited temporarily
+      req.log.debug({ key: req.key.hash, error: text }, "Google API request rate limited, will retry.");
+      keyPool.markRateLimited(req.key);
       await reenqueueRequest(req);
       throw new RetryableError("Rate-limited request re-enqueued.");
+    }
     default:
       errorPayload.proxy_note = `Unrecognized rate limit error from Google AI (${status}). Please report this.`;
       break;
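The family inference above keys off substrings of the model name, checking `pro` before `flash` and `ultra`. A compact sketch of that same mapping as a standalone function (illustrative; the diff inlines this logic):

```ts
type GoogleAIModelFamily = "gemini-pro" | "gemini-flash" | "gemini-ultra";

// Mirror of the substring checks above: map a model name to its family,
// or null when the family cannot be determined.
function inferGoogleAIFamily(modelName: string): GoogleAIModelFamily | null {
  if (modelName.includes("pro")) return "gemini-pro";
  if (modelName.includes("flash")) return "gemini-flash";
  if (modelName.includes("ultra")) return "gemini-ultra";
  return null;
}

// inferGoogleAIFamily("gemini-1.5-flash-latest") === "gemini-flash"
```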
@@ -564,10 +913,12 @@ const incrementUsage: ProxyResHandlerWithBody = async (_proxyRes, req) => {
       },
       `Incrementing usage for model`
     );
-    keyPool.incrementUsage(req.key!, model, tokensUsed);
+    // Get modelFamily for the key usage log
+    const modelFamilyForKeyPool = req.modelFamily!; // Should be set by getModelFamilyForRequest earlier
+    keyPool.incrementUsage(req.key!, modelFamilyForKeyPool, { input: req.promptTokens!, output: req.outputTokens! });
     if (req.user) {
       incrementPromptCount(req.user.token);
-      incrementTokenCount(req.user.token, model, req.outboundApi, tokensUsed);
+      incrementTokenCount(req.user.token, model, req.outboundApi, { input: req.promptTokens!, output: req.outputTokens! });
     }
   }
 };
@@ -593,16 +944,24 @@ const countResponseTokens: ProxyResHandlerWithBody = async (
     const service = req.outboundApi;
     const completion = getCompletionFromBody(req, body);
     const tokens = await countTokens({ req, completion, service });
+
+    if (req.service === "openai" || req.service === "azure" || req.service === "deepseek" || req.service === "cohere" || req.service === "qwen") {
+      // O1 consumes (a significant amount of) invisible tokens for the chain-
+      // of-thought reasoning. We have no way to count these other than to check
+      // the response body.
+      tokens.reasoning_tokens =
+        body.usage?.completion_tokens_details?.reasoning_tokens;
+    }
+
     req.log.debug(
-      { service, tokens, prevOutputTokens: req.outputTokens },
+      { service, prevOutputTokens: req.outputTokens, tokens },
       `Counted tokens for completion`
     );
     if (req.tokenizerInfo) {
       req.tokenizerInfo.completion_tokens = tokens;
     }

-    req.outputTokens = tokens.token_count;
+    req.outputTokens = tokens.token_count + (tokens.reasoning_tokens ?? 0);
   } catch (error) {
     req.log.warn(
       error,
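The shape being read here is the OpenAI-style usage block, where reasoning tokens are reported separately from the visible completion text. A small illustration of the accounting above, with an illustrative payload:

```ts
// Illustrative usage block from a reasoning-model response.
const body = {
  usage: {
    completion_tokens: 250,
    completion_tokens_details: { reasoning_tokens: 200 },
  },
};

const tokenCount = 50; // visible completion tokens counted from the body text
const reasoning = body.usage?.completion_tokens_details?.reasoning_tokens;

// Billed output = visible tokens + hidden chain-of-thought tokens.
const outputTokens = tokenCount + (reasoning ?? 0); // 250
```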
@@ -617,22 +976,30 @@ const trackKeyRateLimit: ProxyResHandlerWithBody = async (proxyRes, req) => {
   keyPool.updateRateLimits(req.key!, proxyRes.headers);
 };

+const omittedHeaders = new Set<string>([
+  // Omit content-encoding because we will always decode the response body
+  "content-encoding",
+  // Omit transfer-encoding because we are using response.json which will
+  // set a content-length header, which is not valid for chunked responses.
+  "transfer-encoding",
+  // Don't set cookies from upstream APIs because proxied requests are stateless
+  "set-cookie",
+  "openai-organization",
+  "x-request-id",
+  "x-ds-request-id",
+  "x-ds-trace-id",
+  "cf-ray",
+]);
 const copyHttpHeaders: ProxyResHandlerWithBody = async (
   proxyRes,
   _req,
   res
 ) => {
+  // Hack: we don't copy headers since with chunked transfer we've already sent them.
+  if (_req.isChunkedTransfer) return;
+
   Object.keys(proxyRes.headers).forEach((key) => {
-    // Omit content-encoding because we will always decode the response body
-    if (key === "content-encoding") {
-      return;
-    }
-    // We're usually using res.json() to send the response, which causes express
-    // to set content-length. That's not valid for chunked responses and some
-    // clients will reject it so we need to omit it.
-    if (key === "transfer-encoding") {
-      return;
-    }
+    if (omittedHeaders.has(key)) return;
     res.setHeader(key, proxyRes.headers[key] as string);
   });
 };
@@ -676,6 +1043,6 @@ function getAwsErrorType(header: string | string[] | undefined) {

 function assertJsonResponse(body: any): asserts body is Record<string, any> {
   if (typeof body !== "object") {
-    throw new Error("Expected response to be an object");
+    throw new Error(`Expected response to be an object, got ${typeof body}`);
   }
 }

@@ -11,7 +11,8 @@ import { ProxyResHandlerWithBody } from ".";
 import { assertNever } from "../../../shared/utils";
 import {
   AnthropicChatMessage,
-  flattenAnthropicMessages, GoogleAIChatMessage,
+  flattenAnthropicMessages,
+  GoogleAIChatMessage,
   MistralAIChatMessage,
   OpenAIChatMessage,
 } from "../../../shared/api-schemas";
@@ -71,11 +72,21 @@ const getPromptForRequest = (
   // format.
   switch (req.outboundApi) {
     case "openai":
+    case "openai-responses":
+      return req.body.messages;
     case "mistral-ai":
       return req.body.messages;
     case "anthropic-chat":
-      return { system: req.body.system, messages: req.body.messages };
+      let system = req.body.system;
+      if (Array.isArray(system)) {
+        system = system
+          .map((m: { type: string; text: string }) => m.text)
+          .join("\n");
+      }
+      return { system, messages: req.body.messages };
     case "openai-text":
+    case "anthropic-text":
+    case "mistral-text":
       return req.body.prompt;
     case "openai-image":
       return {
@@ -85,8 +96,6 @@ const getPromptForRequest = (
         quality: req.body.quality,
         revisedPrompt: responseBody.data[0].revised_prompt,
       };
-    case "anthropic-text":
-      return req.body.prompt;
     case "google-ai":
       return { contents: req.body.contents };
     default:
@@ -113,9 +122,7 @@ const flattenMessages = (
   if (isGoogleAIChatPrompt(val)) {
     return val.contents
       .map(({ parts, role }) => {
-        const text = parts
-          .map((p) => p.text)
-          .join("\n");
+        const text = parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text).join("\n");
         return `${role}: ${text}`;
       })
       .join("\n");
@@ -143,11 +150,7 @@ const flattenMessages = (
 function isGoogleAIChatPrompt(
   val: unknown
 ): val is { contents: GoogleAIChatMessage[] } {
-  return (
-    typeof val === "object" &&
-    val !== null &&
-    "contents" in val
-  );
+  return typeof val === "object" && val !== null && "contents" in val;
 }

 function isAnthropicChatPrompt(

@@ -0,0 +1,39 @@
+import { OpenAIChatCompletionStreamEvent } from "../index";
+
+export type MistralChatCompletionResponse = {
+  choices: {
+    index: number;
+    message: { role: string; content: string };
+    finish_reason: string | null;
+  }[];
+};
+
+/**
+ * Given a list of OpenAI chat completion events, compiles them into a single
+ * finalized Mistral chat completion response so that non-streaming middleware
+ * can operate on it as if it were a blocking response.
+ */
+export function mergeEventsForMistralChat(
+  events: OpenAIChatCompletionStreamEvent[]
+): MistralChatCompletionResponse {
+  let merged: MistralChatCompletionResponse = {
+    choices: [
+      { index: 0, message: { role: "", content: "" }, finish_reason: "" },
+    ],
+  };
+  merged = events.reduce((acc, event, i) => {
+    // The first event will only contain role assignment and response metadata
+    if (i === 0) {
+      acc.choices[0].message.role = event.choices[0].delta.role ?? "assistant";
+      return acc;
+    }
+
+    acc.choices[0].finish_reason = event.choices[0].finish_reason ?? "";
+    if (event.choices[0].delta.content) {
+      acc.choices[0].message.content += event.choices[0].delta.content;
+    }
+
+    return acc;
+  }, merged);
+  return merged;
+}
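A quick usage sketch of the merge above, with hand-built stream events reduced to the fields the function actually reads (the event shapes are simplified for illustration):

```ts
// Minimal stand-ins for OpenAIChatCompletionStreamEvent.
const events: any[] = [
  { choices: [{ delta: { role: "assistant" }, finish_reason: null }] },
  { choices: [{ delta: { content: "Hello" }, finish_reason: null }] },
  { choices: [{ delta: { content: ", world." }, finish_reason: "stop" }] },
];

const response = mergeEventsForMistralChat(events);
// response.choices[0].message -> { role: "assistant", content: "Hello, world." }
// response.choices[0].finish_reason -> "stop"
```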
@@ -0,0 +1,33 @@
+import { OpenAIChatCompletionStreamEvent } from "../index";
+
+export type MistralTextCompletionResponse = {
+  outputs: {
+    text: string;
+    stop_reason: string | null;
+  }[];
+};
+
+/**
+ * Given a list of OpenAI chat completion events, compiles them into a single
+ * finalized Mistral text completion response so that non-streaming middleware
+ * can operate on it as if it were a blocking response.
+ */
+export function mergeEventsForMistralText(
+  events: OpenAIChatCompletionStreamEvent[]
+): MistralTextCompletionResponse {
+  let merged: MistralTextCompletionResponse = {
+    outputs: [{ text: "", stop_reason: "" }],
+  };
+  merged = events.reduce((acc, event, i) => {
+    // The first event will only contain role assignment and response metadata
+    if (i === 0) {
+      return acc;
+    }
+
+    acc.outputs[0].text += event.choices[0].delta.content ?? "";
+    acc.outputs[0].stop_reason = event.choices[0].finish_reason ?? "";
+
+    return acc;
+  }, merged);
+  return merged;
+}

@@ -24,7 +24,7 @@ export function getAwsEventStreamDecoder(params: {
     if (eventType === "chunk") {
       result = input[eventType];
     } else {
-      // AWS unmarshaller treats non-chunk (errors and exceptions) oddly.
+      // AWS unmarshaller treats non-chunk events (errors and exceptions) oddly.
       result = { [eventType]: input[eventType] } as any;
     }
     return result;

@@ -1,3 +1,4 @@
+import express from "express";
 import { APIFormat } from "../../../../shared/key-management";
 import { assertNever } from "../../../../shared/utils";
 import {
@@ -6,8 +7,13 @@ import {
   mergeEventsForAnthropicText,
   mergeEventsForOpenAIChat,
   mergeEventsForOpenAIText,
+  mergeEventsForMistralChat,
+  mergeEventsForMistralText,
   AnthropicV2StreamEvent,
   OpenAIChatCompletionStreamEvent,
+  mistralAIToOpenAI,
+  MistralAIStreamEvent,
+  MistralChatCompletionEvent,
 } from "./index";

 /**
@@ -15,45 +21,71 @@ import {
  * compiles them into a single finalized response for downstream middleware.
  */
 export class EventAggregator {
-  private readonly format: APIFormat;
+  private readonly model: string;
+  private readonly requestFormat: APIFormat;
+  private readonly responseFormat: APIFormat;
   private readonly events: OpenAIChatCompletionStreamEvent[];

-  constructor({ format }: { format: APIFormat }) {
+  constructor({ body, inboundApi, outboundApi }: express.Request) {
     this.events = [];
-    this.format = format;
+    this.requestFormat = inboundApi;
+    this.responseFormat = outboundApi;
+    this.model = body.model;
   }

-  addEvent(event: OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent) {
+  addEvent(
+    event:
+      | OpenAIChatCompletionStreamEvent
+      | AnthropicV2StreamEvent
+      | MistralAIStreamEvent
+  ) {
     if (eventIsOpenAIEvent(event)) {
       this.events.push(event);
     } else {
       // horrible special case. previously all transformers' target format was
       // openai, so the event aggregator could conveniently assume all incoming
       // events were in openai format.
-      // now we have added anthropic-chat-to-text, so aggregator needs to know
-      // how to collapse events from two formats.
-      // because that is annoying, we will simply transform anthropic events to
-      // openai (even if the client didn't ask for openai) so we don't have to
-      // write aggregation logic for anthropic chat (which is also a troublesome
-      // stateful format).
-      const openAIEvent = anthropicV2ToOpenAI({
-        data: `event: completion\ndata: ${JSON.stringify(event)}\n\n`,
-        lastPosition: -1,
-        index: 0,
-        fallbackId: event.log_id || "event-aggregator-fallback",
-        fallbackModel: event.model || "claude-3-fallback",
-      });
-      if (openAIEvent.event) {
-        this.events.push(openAIEvent.event);
+      // now we have added some transformers that convert between non-openai
+      // formats, so aggregator needs to know how to collapse for more than
+      // just openai.
+      // because writing aggregation logic for every possible output format is
+      // annoying, we will just transform any non-openai output events to openai
+      // format (even if the client did not request openai at all) so that we
+      // still only need to write aggregators for openai SSEs.
+      let openAIEvent: OpenAIChatCompletionStreamEvent | undefined;
+      switch (this.requestFormat) {
+        case "anthropic-text":
+          assertIsAnthropicV2Event(event);
+          openAIEvent = anthropicV2ToOpenAI({
+            data: `event: completion\ndata: ${JSON.stringify(event)}\n\n`,
+            lastPosition: -1,
+            index: 0,
+            fallbackId: event.log_id || "fallback-" + Date.now(),
+            fallbackModel: event.model || this.model || "fallback-claude-3",
+          })?.event;
+          break;
+        case "mistral-ai":
+          assertIsMistralChatEvent(event);
+          openAIEvent = mistralAIToOpenAI({
+            data: `data: ${JSON.stringify(event)}\n\n`,
+            lastPosition: -1,
+            index: 0,
+            fallbackId: "fallback-" + Date.now(),
+            fallbackModel: this.model || "fallback-mistral",
+          })?.event;
+          break;
+      }
+      if (openAIEvent) {
+        this.events.push(openAIEvent);
       }
     }
   }

   getFinalResponse() {
-    switch (this.format) {
+    switch (this.responseFormat) {
       case "openai":
+      case "openai-responses":
       case "google-ai":
-      case "mistral-ai":
         return mergeEventsForOpenAIChat(this.events);
       case "openai-text":
         return mergeEventsForOpenAIText(this.events);
@@ -61,10 +93,16 @@ export class EventAggregator {
|
|||||||
return mergeEventsForAnthropicText(this.events);
|
return mergeEventsForAnthropicText(this.events);
|
||||||
case "anthropic-chat":
|
case "anthropic-chat":
|
||||||
return mergeEventsForAnthropicChat(this.events);
|
return mergeEventsForAnthropicChat(this.events);
|
||||||
|
case "mistral-ai":
|
||||||
|
return mergeEventsForMistralChat(this.events);
|
||||||
|
case "mistral-text":
|
||||||
|
return mergeEventsForMistralText(this.events);
|
||||||
case "openai-image":
|
case "openai-image":
|
||||||
throw new Error(`SSE aggregation not supported for ${this.format}`);
|
throw new Error(
|
||||||
|
`SSE aggregation not supported for ${this.responseFormat}`
|
||||||
|
);
|
||||||
default:
|
default:
|
||||||
assertNever(this.format);
|
assertNever(this.responseFormat);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -78,3 +116,17 @@ function eventIsOpenAIEvent(
|
|||||||
): event is OpenAIChatCompletionStreamEvent {
|
): event is OpenAIChatCompletionStreamEvent {
|
||||||
return event?.object === "chat.completion.chunk";
|
return event?.object === "chat.completion.chunk";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function assertIsAnthropicV2Event(event: any): asserts event is AnthropicV2StreamEvent {
|
||||||
|
if (!event?.completion) {
|
||||||
|
throw new Error(`Bad event for Anthropic V2 SSE aggregation`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function assertIsMistralChatEvent(
|
||||||
|
event: any
|
||||||
|
): asserts event is MistralChatCompletionEvent {
|
||||||
|
if (!event?.choices) {
|
||||||
|
throw new Error(`Bad event for Mistral SSE aggregation`);
|
||||||
|
}
|
||||||
|
}
|
||||||
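A note on the aggregator change above: rather than maintaining a `mergeEventsFor*` aggregator for every output format, non-OpenAI stream events are converted to OpenAI chunks as they arrive, so only the OpenAI merge logic has to exist. A minimal sketch of that idea (the `OpenAIChunk` shape is abbreviated and `collapse` is a hypothetical stand-in for `mergeEventsForOpenAIChat`, not the actual implementation):

```typescript
// Sketch: normalize heterogeneous stream events to one shape, then merge once.
type OpenAIChunk = { choices: { delta: { content?: string } }[] };

function collapse(events: OpenAIChunk[]): string {
  // Equivalent in spirit to mergeEventsForOpenAIChat: concatenate deltas.
  return events.map((e) => e.choices[0]?.delta.content ?? "").join("");
}

const events: OpenAIChunk[] = [
  { choices: [{ delta: { content: "Hel" } }] },
  { choices: [{ delta: { content: "lo" } }] },
];
console.log(collapse(events)); // "Hello"
```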
```diff
@@ -7,6 +7,25 @@ export type SSEResponseTransformArgs<S = Record<string, any>> = {
   state?: S;
 };

+export type MistralChatCompletionEvent = {
+  choices: {
+    index: number;
+    message: { role: string; content: string };
+    stop_reason: string | null;
+  }[];
+};
+export type MistralTextCompletionEvent = {
+  outputs: { text: string; stop_reason: string | null }[];
+};
+export type MistralAIStreamEvent = {
+  "amazon-bedrock-invocationMetrics"?: {
+    inputTokenCount: number;
+    outputTokenCount: number;
+    invocationLatency: number;
+    firstByteLatency: number;
+  };
+} & (MistralChatCompletionEvent | MistralTextCompletionEvent);
+
 export type AnthropicV2StreamEvent = {
   log_id?: string;
   model?: string;
@@ -41,8 +60,12 @@ export { anthropicV2ToOpenAI } from "./transformers/anthropic-v2-to-openai";
 export { anthropicChatToAnthropicV2 } from "./transformers/anthropic-chat-to-anthropic-v2";
 export { anthropicChatToOpenAI } from "./transformers/anthropic-chat-to-openai";
 export { googleAIToOpenAI } from "./transformers/google-ai-to-openai";
+export { mistralAIToOpenAI } from "./transformers/mistral-ai-to-openai";
+export { mistralTextToMistralChat } from "./transformers/mistral-text-to-mistral-chat";
 export { passthroughToOpenAI } from "./transformers/passthrough-to-openai";
 export { mergeEventsForOpenAIChat } from "./aggregators/openai-chat";
 export { mergeEventsForOpenAIText } from "./aggregators/openai-text";
 export { mergeEventsForAnthropicText } from "./aggregators/anthropic-text";
 export { mergeEventsForAnthropicChat } from "./aggregators/anthropic-chat";
+export { mergeEventsForMistralChat } from "./aggregators/mistral-chat";
+export { mergeEventsForMistralText } from "./aggregators/mistral-text";
```
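The `MistralAIStreamEvent` union added above has no discriminant tag; chat events carry `choices` and text events carry `outputs`, so consumers narrow it structurally. A quick sketch of how that narrowing works (types copied from the diff, Bedrock metrics field omitted):

```typescript
type MistralChatCompletionEvent = {
  choices: {
    index: number;
    message: { role: string; content: string };
    stop_reason: string | null;
  }[];
};
type MistralTextCompletionEvent = {
  outputs: { text: string; stop_reason: string | null }[];
};
type MistralAIStreamEvent = MistralChatCompletionEvent | MistralTextCompletionEvent;

function textOf(event: MistralAIStreamEvent): string {
  // The `in` operator narrows the union without a discriminant field.
  return "choices" in event
    ? event.choices[0].message.content
    : event.outputs[0].text;
}
```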
```diff
@@ -11,8 +11,11 @@ import {
   googleAIToOpenAI,
   OpenAIChatCompletionStreamEvent,
   openAITextToOpenAIChat,
+  mistralAIToOpenAI,
+  mistralTextToMistralChat,
   passthroughToOpenAI,
   StreamingCompletionTransformer,
+  MistralChatCompletionEvent,
 } from "./index";

 type SSEMessageTransformerOptions = TransformOptions & {
@@ -35,7 +38,9 @@ export class SSEMessageTransformer extends Transform {
   private readonly inputFormat: APIFormat;
   private readonly transformFn: StreamingCompletionTransformer<
     // TODO: Refactor transformers to not assume only OpenAI events as output
-    OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent
+    | OpenAIChatCompletionStreamEvent
+    | AnthropicV2StreamEvent
+    | MistralChatCompletionEvent
   >;
   private readonly log;
   private readonly fallbackId: string;
@@ -121,16 +126,17 @@ function eventIsOpenAIEvent(
 function getTransformer(
   responseApi: APIFormat,
   version?: string,
-  // There's only one case where we're not transforming back to OpenAI, which is
-  // Anthropic Chat response -> Anthropic Text request. This parameter is only
-  // used for that case.
+  // In most cases, we are transforming back to OpenAI. Some responses can be
+  // translated between two non-OpenAI formats, eg Anthropic Chat -> Anthropic
+  // Text, or Mistral Text -> Mistral Chat.
   requestApi: APIFormat = "openai"
 ): StreamingCompletionTransformer<
-  OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent
+  | OpenAIChatCompletionStreamEvent
+  | AnthropicV2StreamEvent
+  | MistralChatCompletionEvent
 > {
   switch (responseApi) {
     case "openai":
-    case "mistral-ai":
       return passthroughToOpenAI;
     case "openai-text":
       return openAITextToOpenAIChat;
@@ -140,12 +146,20 @@ function getTransformer(
         : anthropicV2ToOpenAI;
     case "anthropic-chat":
       return requestApi === "anthropic-text"
-        ? anthropicChatToAnthropicV2
+        ? anthropicChatToAnthropicV2 // User's legacy text prompt was converted to chat, and response must be converted back to text
         : anthropicChatToOpenAI;
     case "google-ai":
       return googleAIToOpenAI;
+    case "mistral-ai":
+      return mistralAIToOpenAI;
+    case "mistral-text":
+      return requestApi === "mistral-ai"
+        ? mistralTextToMistralChat // User's chat request was converted to text, and response must be converted back to chat
+        : mistralAIToOpenAI;
     case "openai-image":
       throw new Error(`SSE transformation not supported for ${responseApi}`);
+    case "openai-responses":
+      return passthroughToOpenAI;
     default:
       assertNever(responseApi);
   }
```
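The `getTransformer` change amounts to a small dispatch table: the upstream response format picks the transformer, and the original request format decides whether to round-trip back to a non-OpenAI format. A condensed sketch of the same decision (format names mirror the diff; `pickTarget` is illustrative, not part of the module):

```typescript
type APIFormat =
  | "openai"
  | "anthropic-text"
  | "anthropic-chat"
  | "mistral-ai"
  | "mistral-text";

function pickTarget(responseApi: APIFormat, requestApi: APIFormat): APIFormat {
  // Round-trip cases: the client asked in one format but we called the API in another.
  if (responseApi === "anthropic-chat" && requestApi === "anthropic-text") {
    return "anthropic-text";
  }
  if (responseApi === "mistral-text" && requestApi === "mistral-ai") {
    return "mistral-ai";
  }
  // Everything else is normalized to OpenAI chat events.
  return "openai";
}

console.log(pickTarget("mistral-text", "mistral-ai")); // "mistral-ai"
console.log(pickTarget("mistral-text", "openai")); // "openai"
```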
```diff
@@ -2,7 +2,6 @@ import pino from "pino";
 import { Transform, TransformOptions } from "stream";
 import { Message } from "@smithy/eventstream-codec";
 import { APIFormat } from "../../../../shared/key-management";
-import { buildSpoofedSSE } from "../error-generator";
 import { BadRequestError, RetryableError } from "../../../../shared/errors";

 type SSEStreamAdapterOptions = TransformOptions & {
@@ -20,7 +19,6 @@ type SSEStreamAdapterOptions = TransformOptions & {
  */
 export class SSEStreamAdapter extends Transform {
   private readonly isAwsStream;
-  private readonly isGoogleStream;
   private api: APIFormat;
   private partialMessage = "";
   private textDecoder = new TextDecoder("utf8");
@@ -30,7 +28,6 @@ export class SSEStreamAdapter extends Transform {
     super({ ...options, objectMode: true });
     this.isAwsStream =
       options?.contentType === "application/vnd.amazon.eventstream";
-    this.isGoogleStream = options?.api === "google-ai";
     this.api = options.api;
     this.log = options.logger.child({ module: "sse-stream-adapter" });
   }
@@ -55,8 +52,10 @@ export class SSEStreamAdapter extends Transform {

       if ("completion" in eventObj) {
         return ["event: completion", `data: ${event}`].join(`\n`);
-      } else {
+      } else if (eventObj.type) {
         return [`event: ${eventObj.type}`, `data: ${event}`].join(`\n`);
+      } else {
+        return `data: ${event}`;
       }
     }
   }
   // noinspection FallThroughInSwitchStatementJS -- non-JSON data is unexpected
@@ -108,44 +107,12 @@ export class SSEStreamAdapter extends Transform {
     }
   }

-  /** Processes an incoming array element from the Google AI JSON stream. */
-  protected processGoogleObject(data: any): string | null {
-    // Sometimes data has fields key and value, sometimes it's just the
-    // candidates array.
-    const candidates = data.value?.candidates ?? data.candidates ?? [{}];
-    try {
-      const hasParts = candidates[0].content?.parts?.length > 0;
-      if (hasParts) {
-        return `data: ${JSON.stringify(data.value ?? data)}\n`;
-      } else {
-        this.log.error({ event: data }, "Received bad Google AI event");
-        return `data: ${buildSpoofedSSE({
-          format: "google-ai",
-          title: "Proxy stream error",
-          message:
-            "The proxy received malformed or unexpected data from Google AI while streaming.",
-          obj: data,
-          reqId: "proxy-sse-adapter-message",
-          model: "",
-        })}`;
-      }
-    } catch (error) {
-      error.lastEvent = data;
-      this.emit("error", error);
-    }
-    return null;
-  }
-
   _transform(data: any, _enc: string, callback: (err?: Error | null) => void) {
     try {
       if (this.isAwsStream) {
         // `data` is a Message object
         const message = this.processAwsMessage(data);
         if (message) this.push(message + "\n\n");
-      } else if (this.isGoogleStream) {
-        // `data` is an element from the Google AI JSON stream
-        const message = this.processGoogleObject(data);
-        if (message) this.push(message + "\n\n");
       } else {
         // `data` is a string, but possibly only a partial message
         const fullMessages = (this.partialMessage + data).split(
```
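The adapter tweak above makes event labeling defensive: messages with a `completion` field keep the legacy `event: completion` label, typed messages use their own `type`, and anything else is emitted as a bare `data:` line. A sketch of that branch in isolation (`toSSE` is a hypothetical name for the enclosing method, which is not shown in the hunk):

```typescript
function toSSE(eventObj: Record<string, any>, event: string): string {
  if ("completion" in eventObj) {
    return ["event: completion", `data: ${event}`].join("\n");
  } else if (eventObj.type) {
    return [`event: ${eventObj.type}`, `data: ${event}`].join("\n");
  } else {
    // New fallback: untyped payloads no longer produce "event: undefined".
    return `data: ${event}`;
  }
}

console.log(toSSE({ type: "chunk" }, '{"type":"chunk"}'));
// event: chunk
// data: {"type":"chunk"}
```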
```diff
@@ -34,7 +34,7 @@ export const anthropicChatToOpenAI: StreamingCompletionTransformer = (
     model: params.fallbackModel,
     choices: [
       {
-        index: params.index,
+        index: 0,
         delta: { content: deltaEvent.delta.text },
         finish_reason: null,
       },
```
```diff
@@ -9,7 +9,7 @@ const log = logger.child({

 type GoogleAIStreamEvent = {
   candidates: {
-    content: { parts: { text: string }[]; role: string };
+    content?: { parts?: { text: string }[]; role: string };
     finishReason?: "STOP" | "MAX_TOKENS" | "SAFETY" | "RECITATION" | "OTHER";
     index: number;
     tokenCount?: number;
@@ -34,9 +34,15 @@ export const googleAIToOpenAI: StreamingCompletionTransformer = (params) => {
     return { position: -1 };
   }

-  const parts = completionEvent.candidates[0].content.parts;
+  const parts = completionEvent.candidates[0].content?.parts || [];
   let content = parts[0]?.text ?? "";

+  if (isSafetyStop(completionEvent)) {
+    content = `[Proxy Warning] Gemini safety filter triggered: ${JSON.stringify(
+      completionEvent.candidates[0].safetyRatings
+    )}`;
+  }
+
   // If this is the first chunk, try stripping speaker names from the response
   // e.g. "John: Hello" -> "Hello"
   if (index === 0) {
@@ -60,6 +66,14 @@ export const googleAIToOpenAI: StreamingCompletionTransformer = (params) => {
   return { position: -1, event: newEvent };
 };

+function isSafetyStop(completion: GoogleAIStreamEvent) {
+  const isSafetyStop = ["SAFETY", "OTHER"].includes(
+    completion.candidates[0].finishReason ?? ""
+  );
+  const hasNoContent = completion.candidates[0].content?.parts?.length === 0;
+  return isSafetyStop && hasNoContent;
+}
+
 function asCompletion(event: ServerSentEvent): GoogleAIStreamEvent | null {
   try {
     const parsed = JSON.parse(event.data) as GoogleAIStreamEvent;
```
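Worth noting about the Gemini change: a chunk only counts as a safety stop when the finish reason is `SAFETY`/`OTHER` *and* the candidate carries no content parts, which is why both checks appear in `isSafetyStop`. For instance (sample events are illustrative):

```typescript
const blocked = {
  candidates: [{ content: { parts: [], role: "model" }, finishReason: "SAFETY", index: 0 }],
};
const normal = {
  candidates: [{ content: { parts: [{ text: "Hi" }], role: "model" }, finishReason: "STOP", index: 0 }],
};

// Same predicate as isSafetyStop in the diff, inlined for the example:
const isSafetyStop = (c: any) =>
  ["SAFETY", "OTHER"].includes(c.candidates[0].finishReason ?? "") &&
  c.candidates[0].content?.parts?.length === 0;

console.log(isSafetyStop(blocked)); // true
console.log(isSafetyStop(normal)); // false
```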
```diff
@@ -0,0 +1,76 @@
+import { logger } from "../../../../../logger";
+import { MistralAIStreamEvent, SSEResponseTransformArgs } from "../index";
+import { parseEvent, ServerSentEvent } from "../parse-sse";
+
+const log = logger.child({
+  module: "sse-transformer",
+  transformer: "mistral-ai-to-openai",
+});
+
+export const mistralAIToOpenAI = (params: SSEResponseTransformArgs) => {
+  const { data } = params;
+
+  const rawEvent = parseEvent(data);
+  if (!rawEvent.data || rawEvent.data === "[DONE]") {
+    return { position: -1 };
+  }
+
+  const completionEvent = asCompletion(rawEvent);
+  if (!completionEvent) {
+    return { position: -1 };
+  }
+
+  if ("choices" in completionEvent) {
+    const newChatEvent = {
+      id: params.fallbackId,
+      object: "chat.completion.chunk" as const,
+      created: Date.now(),
+      model: params.fallbackModel,
+      choices: [
+        {
+          index: completionEvent.choices[0].index,
+          delta: { content: completionEvent.choices[0].message.content },
+          finish_reason: completionEvent.choices[0].stop_reason,
+        },
+      ],
+    };
+    return { position: -1, event: newChatEvent };
+  } else if ("outputs" in completionEvent) {
+    const newTextEvent = {
+      id: params.fallbackId,
+      object: "chat.completion.chunk" as const,
+      created: Date.now(),
+      model: params.fallbackModel,
+      choices: [
+        {
+          index: 0,
+          delta: { content: completionEvent.outputs[0].text },
+          finish_reason: completionEvent.outputs[0].stop_reason,
+        },
+      ],
+    };
+    return { position: -1, event: newTextEvent };
+  }
+
+  // should never happen
+  return { position: -1 };
+};
+
+function asCompletion(event: ServerSentEvent): MistralAIStreamEvent | null {
+  try {
+    const parsed = JSON.parse(event.data);
+    if (
+      (Array.isArray(parsed.choices) &&
+        parsed.choices[0].message !== undefined) ||
+      (Array.isArray(parsed.outputs) && parsed.outputs[0].text !== undefined)
+    ) {
+      return parsed;
+    } else {
+      // noinspection ExceptionCaughtLocallyJS
+      throw new Error("Missing required fields");
+    }
+  } catch (error) {
+    log.warn({ error: error.stack, event }, "Received invalid data event");
+  }
+  return null;
+}
```
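To make the new transformer concrete, here is roughly what one Mistral chat stream event looks like before and after conversion (all values illustrative; `id` and `model` come from `fallbackId`/`fallbackModel` because Mistral events carry neither):

```typescript
// Incoming Mistral chat SSE payload:
const mistralEvent = {
  choices: [
    { index: 0, message: { role: "assistant", content: "Hello" }, stop_reason: null },
  ],
};

// Approximate shape mistralAIToOpenAI produces from it:
const openAIChunk = {
  id: "fallback-1722200000000",
  object: "chat.completion.chunk" as const,
  created: Date.now(),
  model: "fallback-mistral",
  choices: [{ index: 0, delta: { content: "Hello" }, finish_reason: null }],
};
```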
```diff
@@ -0,0 +1,63 @@
+import {
+  MistralChatCompletionEvent,
+  MistralTextCompletionEvent,
+  StreamingCompletionTransformer,
+} from "../index";
+import { parseEvent, ServerSentEvent } from "../parse-sse";
+import { logger } from "../../../../../logger";
+
+const log = logger.child({
+  module: "sse-transformer",
+  transformer: "mistral-text-to-mistral-chat",
+});
+
+/**
+ * Transforms an incoming Mistral Text SSE to an equivalent Mistral Chat SSE.
+ * This is generally used when a client sends a Mistral Chat prompt, but we
+ * convert it to Mistral Text before sending it to the API to work around
+ * some bugs in Mistral/AWS prompt templating. In these cases we need to convert
+ * the response back to Mistral Chat.
+ */
+export const mistralTextToMistralChat: StreamingCompletionTransformer<
+  MistralChatCompletionEvent
+> = (params) => {
+  const { data } = params;
+
+  const rawEvent = parseEvent(data);
+  if (!rawEvent.data) {
+    return { position: -1 };
+  }
+
+  const textCompletion = asTextCompletion(rawEvent);
+  if (!textCompletion) {
+    return { position: -1 };
+  }
+
+  const chatEvent: MistralChatCompletionEvent = {
+    choices: [
+      {
+        index: 0,
+        message: { role: "assistant", content: textCompletion.outputs[0].text },
+        stop_reason: textCompletion.outputs[0].stop_reason,
+      },
+    ],
+  };
+  return { position: -1, event: chatEvent };
+};
+
+function asTextCompletion(
+  event: ServerSentEvent
+): MistralTextCompletionEvent | null {
+  try {
+    const parsed = JSON.parse(event.data);
+    if (Array.isArray(parsed.outputs) && parsed.outputs[0].text !== undefined) {
+      return parsed;
+    } else {
+      // noinspection ExceptionCaughtLocallyJS
+      throw new Error("Missing required fields");
+    }
+  } catch (error: any) {
+    log.warn({ error: error.stack, event }, "Received invalid data event");
+  }
+  return null;
+}
```
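The text-to-chat conversion is a pure reshaping of `outputs` into a single assistant `choices` entry, e.g. (sample values illustrative):

```typescript
const textEvent = { outputs: [{ text: " world", stop_reason: null }] };

// Shape produced by mistralTextToMistralChat for the event above:
const chatEvent = {
  choices: [
    { index: 0, message: { role: "assistant", content: " world" }, stop_reason: null },
  ],
};
```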
+115 -46

```diff
@@ -1,48 +1,87 @@
-import { RequestHandler, Router } from "express";
-import { createProxyMiddleware } from "http-proxy-middleware";
-import { config } from "../config";
+import { Request, RequestHandler, Router } from "express";
+import { BadRequestError } from "../shared/errors";
 import { keyPool } from "../shared/key-management";
 import {
   getMistralAIModelFamily,
   MistralAIModelFamily,
   ModelFamily,
 } from "../shared/models";
-import { logger } from "../logger";
-import { createQueueMiddleware } from "./queue";
+import { config } from "../config";
 import { ipLimiter } from "./rate-limit";
-import { handleProxyError } from "./middleware/common";
 import {
   addKey,
-  createOnProxyReqHandler,
   createPreprocessorMiddleware,
   finalizeBody,
 } from "./middleware/request";
-import {
-  createOnProxyResHandler,
-  ProxyResHandlerWithBody,
-} from "./middleware/response";
+import { ProxyResHandlerWithBody } from "./middleware/response";
+import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";

+// Mistral can't settle on a single naming scheme and deprecates models within
+// months of releasing them so this list is hard to keep up to date. 2024-07-28
 // https://docs.mistral.ai/platform/endpoints
 export const KNOWN_MISTRAL_AI_MODELS = [
-  // Mistral 7b (open weight, legacy)
-  "open-mistral-7b",
-  "mistral-tiny-2312",
-  // Mixtral 8x7b (open weight, legacy)
-  "open-mixtral-8x7b",
-  "mistral-small-2312",
-  // Mixtral Small (newer 8x7b, closed weight)
+  /* Premier models */
+  // Mistral Large (top-tier reasoning model)
+  "mistral-large-latest",
+  "mistral-large-2411",
+  "mistral-large-2407",
+  "mistral-large-2402", // older version
+
+  // Pixtral Large (multimodal/vision model)
+  "pixtral-large-latest",
+  "pixtral-large-2411",
+
+  // Mistral Saba (language-specialized model)
+  "mistral-saba-latest",
+  "mistral-saba-2502",
+
+  // Codestral (code model)
+  "codestral-latest",
+  "codestral-2501",
+  "codestral-2405",
+
+  // Ministral models (edge models)
+  "ministral-8b-latest",
+  "ministral-8b-2410",
+  "ministral-3b-latest",
+  "ministral-3b-2410",
+
+  // Embedding & Moderation
+  "mistral-embed",
+  "mistral-embed-2312",
+  "mistral-moderation-latest",
+  "mistral-moderation-2411",
+
+  /* Free models */
+  // Mistral Small (with vision in latest version)
   "mistral-small-latest",
-  "mistral-small-2402",
-  // Mistral Medium
+  "mistral-small-2503", // v3.1 with vision
+  "mistral-small-2402", // older version
+  "magistral-small-latest",
+
+  // Pixtral 12B (vision model)
+  "pixtral-12b-latest",
+  "pixtral-12b-2409",
+
+  /* Research & Open Models */
+  // Mistral Nemo
+  "open-mistral-nemo",
+  "open-mistral-nemo-2407",
+
+  // Earlier Mixtral & Mistral models
+  "open-mistral-7b",
+  "open-mixtral-8x7b",
+  "open-mixtral-8x22b",
+  "open-codestral-mamba",
+  "mathstral",
+
+  /* Other, too lazy to do it properly now */
   "mistral-medium-latest",
   "mistral-medium-2312",
-  // Mistral Large
-  "mistral-large-latest",
-  "mistral-large-2402",
-  // Deprecated identifiers (2024-05-01)
+  "mistral-medium-2505",
+  "magistral-medium-latest",
   "mistral-tiny",
-  "mistral-small",
-  "mistral-medium",
+  "mistral-tiny-2312",
 ];

 let modelsCache: any = null;
@@ -89,23 +128,28 @@ const mistralAIResponseHandler: ProxyResHandlerWithBody = async (
     throw new Error("Expected body to be an object");
   }

-  res.status(200).json({ ...body, proxy: body.proxy });
+  let newBody = body;
+  if (req.inboundApi === "mistral-text" && req.outboundApi === "mistral-ai") {
+    newBody = transformMistralTextToMistralChat(body);
+  }
+
+  res.status(200).json({ ...newBody, proxy: body.proxy });
 };

-const mistralAIProxy = createQueueMiddleware({
-  proxyMiddleware: createProxyMiddleware({
-    target: "https://api.mistral.ai",
-    changeOrigin: true,
-    selfHandleResponse: true,
-    logger,
-    on: {
-      proxyReq: createOnProxyReqHandler({
-        pipeline: [addKey, finalizeBody],
-      }),
-      proxyRes: createOnProxyResHandler([mistralAIResponseHandler]),
-      error: handleProxyError,
-    },
-  }),
+export function transformMistralTextToMistralChat(textBody: any) {
+  return {
+    ...textBody,
+    choices: [
+      { message: { content: textBody.outputs[0].text, role: "assistant" } },
+    ],
+    outputs: undefined,
+  };
+}
+
+const mistralAIProxy = createQueuedProxyMiddleware({
+  target: "https://api.mistral.ai",
+  mutations: [addKey, finalizeBody],
+  blockingResponseHandler: mistralAIResponseHandler,
 });

 const mistralAIRouter = Router();
@@ -114,12 +158,37 @@ mistralAIRouter.get("/v1/models", handleModelRequest);
 mistralAIRouter.post(
   "/v1/chat/completions",
   ipLimiter,
-  createPreprocessorMiddleware({
-    inApi: "mistral-ai",
-    outApi: "mistral-ai",
-    service: "mistral-ai",
-  }),
+  createPreprocessorMiddleware(
+    {
+      inApi: "mistral-ai",
+      outApi: "mistral-ai",
+      service: "mistral-ai",
+    },
+    { beforeTransform: [detectMistralInputApi] }
+  ),
   mistralAIProxy
 );

+/**
+ * We can't determine if a request is Mistral text or chat just from the path
+ * because they both use the same endpoint. We need to check the request body
+ * for either `messages` or `prompt`.
+ * @param req
+ */
+export function detectMistralInputApi(req: Request) {
+  const { messages, prompt } = req.body;
+  if (messages) {
+    req.inboundApi = "mistral-ai";
+    req.outboundApi = "mistral-ai";
+  } else if (prompt && req.service === "mistral-ai") {
+    // Mistral La Plateforme doesn't expose a text completions endpoint.
+    throw new BadRequestError(
+      "Mistral (via La Plateforme API) does not support text completions. This format is only supported on Mistral via the AWS API."
+    );
+  } else if (prompt && req.service === "aws") {
+    req.inboundApi = "mistral-text";
+    req.outboundApi = "mistral-text";
+  }
+}
+
 export const mistralAI = mistralAIRouter;
```
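Since both Mistral request styles share `/v1/chat/completions`, detection is purely body-shape-based. A quick illustration of how `detectMistralInputApi` classifies requests (bodies are illustrative):

```typescript
// A body with `messages` is a chat request on any service:
const chatBody = {
  model: "mistral-large-latest",
  messages: [{ role: "user", content: "Hi" }],
};
// -> inboundApi/outboundApi = "mistral-ai"

// A body with `prompt` is a text request, but only AWS supports it:
const textBody = { model: "mistral-large-latest", prompt: "[INST] Hi [/INST]" };
// -> on service "aws": inboundApi/outboundApi = "mistral-text"
// -> on service "mistral-ai": BadRequestError (La Plateforme has no text endpoint)
```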
```diff
@@ -0,0 +1,219 @@
+import { Request, RequestHandler, Router } from "express";
+import { createPreprocessorMiddleware } from "./middleware/request";
+import { ipLimiter } from "./rate-limit";
+import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
+import { addKey, finalizeBody } from "./middleware/request";
+import { ProxyResHandlerWithBody } from "./middleware/response";
+import axios from "axios";
+import { MoonshotKey, keyPool } from "../shared/key-management";
+import { isMoonshotModel, isMoonshotVisionModel } from "../shared/api-schemas/moonshot";
+import { logger } from "../logger";
+
+const log = logger.child({ module: "proxy", service: "moonshot" });
+let modelsCache: any = null;
+let modelsCacheTime = 0;
+
+const moonshotResponseHandler: ProxyResHandlerWithBody = async (
+  _proxyRes,
+  req,
+  res,
+  body
+) => {
+  if (typeof body !== "object") {
+    throw new Error("Expected body to be an object");
+  }
+
+  res.status(200).json({ ...body, proxy: body.proxy });
+};
+
+const getModelsResponse = async () => {
+  // Return cache if less than 1 minute old
+  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
+    return modelsCache;
+  }
+
+  try {
+    const modelToUse = "moonshot-v1-8k";
+    const moonshotKey = keyPool.get(modelToUse, "moonshot") as MoonshotKey;
+
+    if (!moonshotKey || !moonshotKey.key) {
+      log.warn("No valid Moonshot key available for model listing");
+      throw new Error("No valid Moonshot API key available");
+    }
+
+    // Fetch models from Moonshot API
+    const response = await axios.get("https://api.moonshot.cn/v1/models", {
+      headers: {
+        "Content-Type": "application/json",
+        "Authorization": `Bearer ${moonshotKey.key}`
+      },
+    });
+
+    if (!response.data || !response.data.data) {
+      throw new Error("Unexpected response format from Moonshot API");
+    }
+
+    // Format response to ensure OpenAI compatibility
+    const models = {
+      object: "list",
+      data: response.data.data.map((model: any) => ({
+        id: model.id,
+        object: "model",
+        created: model.created || Math.floor(Date.now() / 1000),
+        owned_by: model.owned_by || "moonshot",
+        permission: model.permission || [],
+        root: model.root || model.id,
+        parent: model.parent || null,
+      })),
+    };
+
+    log.debug({ modelCount: models.data.length }, "Retrieved models from Moonshot API");
+
+    // Cache the response
+    modelsCache = models;
+    modelsCacheTime = new Date().getTime();
+    return models;
+  } catch (error) {
+    if (error instanceof Error) {
+      log.error(
+        { errorMessage: error.message, stack: error.stack },
+        "Error fetching Moonshot models"
+      );
+    } else {
+      log.error({ error }, "Unknown error fetching Moonshot models");
+    }
+
+    // Return a default list of known Moonshot models as fallback
+    return {
+      object: "list",
+      data: [
+        { id: "moonshot-v1-8k", object: "model", created: 1678888000, owned_by: "moonshot" },
+        { id: "moonshot-v1-32k", object: "model", created: 1678888000, owned_by: "moonshot" },
+        { id: "moonshot-v1-128k", object: "model", created: 1678888000, owned_by: "moonshot" },
+      ],
+    };
+  }
+};
+
+const handleModelRequest: RequestHandler = async (_req, res) => {
+  try {
+    const models = await getModelsResponse();
+    res.status(200).json(models);
+  } catch (error) {
+    if (error instanceof Error) {
+      log.error(
+        { errorMessage: error.message, stack: error.stack },
+        "Error handling model request"
+      );
+    } else {
+      log.error({ error }, "Unknown error handling model request");
+    }
+    res.status(500).json({ error: "Failed to fetch models" });
+  }
+};
+
+// Function to handle partial mode for Moonshot
+function handlePartialMode(req: Request) {
+  if (!process.env.NO_MOONSHOT_PARTIAL && req.body.messages && Array.isArray(req.body.messages)) {
+    const msgs = req.body.messages;
+    if (msgs.at(-1)?.role !== 'assistant') return;
+
+    let i = msgs.length - 1;
+    let content = '';
+
+    while (i >= 0 && msgs[i].role === 'assistant') {
+      // Consolidate consecutive assistant messages
+      content = msgs[i--].content + content;
+    }
+
+    // Replace consecutive assistant messages with single message with partial: true
+    msgs.splice(i + 1, msgs.length, { role: 'assistant', content, partial: true });
+    log.debug("Consolidated assistant messages and enabled partial mode for Moonshot request");
+  }
+}
+
+// Function to handle vision model content transformation
+function handleVisionContent(req: Request) {
+  const model = req.body.model;
+
+  if (isMoonshotVisionModel(model) && req.body.messages) {
+    // Ensure vision content is properly formatted
+    req.body.messages = req.body.messages.map((msg: any) => {
+      if (msg.content && typeof msg.content === 'string') {
+        // Keep string content as is for non-vision requests
+        return msg;
+      }
+      return msg;
+    });
+  }
+}
+
+// Function to count tokens for Moonshot models
+function countMoonshotTokens(req: Request) {
+  const model = req.body.model;
+
+  if (isMoonshotModel(model)) {
+    if (req.promptTokens) {
+      log.debug(
+        { tokens: req.promptTokens, model },
+        "Estimated token count for Moonshot prompt"
+      );
+    }
+  }
+}
+
+// Handle rate limit errors for Moonshot
+async function handleMoonshotRateLimitError(req: Request, error: any) {
+  if (error.response?.status === 429) {
+    log.warn({ model: req.body.model }, "Moonshot rate limit hit, rotating key");
+
+    const currentKey = req.key as MoonshotKey;
+    keyPool.markRateLimited(currentKey);
+
+    // Try to get a new key
+    const newKey = keyPool.get(req.body.model, "moonshot") as MoonshotKey;
+    if (newKey.hash !== currentKey.hash) {
+      req.key = newKey;
+      return true; // Retry with new key
+    }
+  }
+  return false;
+}
+
+const moonshotProxy = createQueuedProxyMiddleware({
+  mutations: [
+    addKey,
+    finalizeBody
+  ],
+  target: "https://api.moonshot.cn",
+  blockingResponseHandler: moonshotResponseHandler,
+});
+
+const moonshotRouter = Router();
+
+// Chat completions endpoint
+moonshotRouter.post(
+  "/v1/chat/completions",
+  ipLimiter,
+  createPreprocessorMiddleware(
+    { inApi: "openai", outApi: "openai", service: "moonshot" },
+    { afterTransform: [ handlePartialMode, handleVisionContent, countMoonshotTokens ] }
+  ),
+  moonshotProxy
+);
+
+// Embeddings endpoint
+moonshotRouter.post(
+  "/v1/embeddings",
+  ipLimiter,
+  createPreprocessorMiddleware(
+    { inApi: "openai", outApi: "openai", service: "moonshot" },
+    { afterTransform: [ countMoonshotTokens ] }
+  ),
+  moonshotProxy
+);
+
+// Models endpoint
+moonshotRouter.get("/v1/models", handleModelRequest);
+
+export const moonshot = moonshotRouter;
```
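The new file's partial-mode preprocessor is easiest to see with a concrete message list (illustrative):

```typescript
const before = [
  { role: "user", content: "Write a poem." },
  { role: "assistant", content: "Roses are red," },
  { role: "assistant", content: " violets are blue" },
];

// After handlePartialMode (unless NO_MOONSHOT_PARTIAL is set), the trailing
// run of assistant messages is merged into one prefill message with partial: true:
const after = [
  { role: "user", content: "Write a poem." },
  { role: "assistant", content: "Roses are red, violets are blue", partial: true },
];
```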
+123 -37

```diff
@@ -1,24 +1,17 @@
-import { RequestHandler, Router, Request } from "express";
-import { createProxyMiddleware } from "http-proxy-middleware";
-import { config } from "../config";
-import { logger } from "../logger";
-import { createQueueMiddleware } from "./queue";
+import { Request, RequestHandler, Router } from "express";
+import { OpenAIImageGenerationResult } from "../shared/file-storage/mirror-generated-image";
+import { generateModelList } from "./openai";
 import { ipLimiter } from "./rate-limit";
-import { handleProxyError } from "./middleware/common";
 import {
   addKey,
   createPreprocessorMiddleware,
   finalizeBody,
-  createOnProxyReqHandler,
 } from "./middleware/request";
-import {
-  createOnProxyResHandler,
-  ProxyResHandlerWithBody,
-} from "./middleware/response";
-import { generateModelList } from "./openai";
-import { OpenAIImageGenerationResult } from "../shared/file-storage/mirror-generated-image";
+import { ProxyResHandlerWithBody } from "./middleware/response";
+import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
+import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";

-const KNOWN_MODELS = ["dall-e-2", "dall-e-3"];
+const KNOWN_MODELS = ["dall-e-2", "dall-e-3", "gpt-image-1"];

 let modelListCache: any = null;
 let modelListValid = 0;
@@ -26,7 +19,9 @@ const handleModelRequest: RequestHandler = (_req, res) => {
   if (new Date().getTime() - modelListValid < 1000 * 60) {
     return res.status(200).json(modelListCache);
   }
-  const result = generateModelList(KNOWN_MODELS);
+  const result = generateModelList("openai").filter((m: { id: string }) =>
+    KNOWN_MODELS.includes(m.id)
+  );
   modelListCache = { object: "list", data: result };
   modelListValid = new Date().getTime();
   res.status(200).json(modelListCache);
@@ -63,27 +58,46 @@ function transformResponseForChat(
   req: Request
 ): Record<string, any> {
   const prompt = imageBody.data[0].revised_prompt ?? req.body.prompt;
+  const isGptImage = req.body.model?.includes("gpt-image") || false;
+
   const content = imageBody.data
     .map((item) => {
       const { url, b64_json } = item;
+      // The gpt-image-1 model always returns b64_json
+      // Format will depend on output_format parameter (defaults to png)
+      // For simplicity, we'll assume png if not specified
+      const format = req.body.output_format || "png";
+
       if (b64_json) {
         return ``;
       } else {
         return ``;
       }
     })
     .join("\n\n");
+
+  // Prepare the usage information - gpt-image-1 includes detailed token usage
+  let usage = {
+    prompt_tokens: 0,
+    completion_tokens: req.outputTokens,
+    total_tokens: req.outputTokens,
+  };
+
+  // If this is a gpt-image-1 response, it includes detailed usage info
+  if (imageBody.usage) {
+    usage = {
+      prompt_tokens: imageBody.usage.input_tokens || 0,
+      completion_tokens: imageBody.usage.output_tokens || 0,
+      total_tokens: imageBody.usage.total_tokens || 0,
+    };
+  }
+
   return {
-    id: "dalle-" + req.id,
+    id: req.body.model?.includes("gpt-image") ? "gptimage-" + req.id : "dalle-" + req.id,
     object: "chat.completion",
     created: Date.now(),
     model: req.body.model,
-    usage: {
-      prompt_tokens: 0,
-      completion_tokens: req.outputTokens,
-      total_tokens: req.outputTokens,
-    },
+    usage,
     choices: [
       {
         message: { role: "assistant", content },
@@ -94,21 +108,82 @@ function transformResponseForChat(
   };
 }

-const openaiImagesProxy = createQueueMiddleware({
-  proxyMiddleware: createProxyMiddleware({
-    target: "https://api.openai.com",
-    changeOrigin: true,
-    selfHandleResponse: true,
-    logger,
-    pathRewrite: {
-      "^/v1/chat/completions": "/v1/images/generations",
-    },
-    on: {
-      proxyReq: createOnProxyReqHandler({ pipeline: [addKey, finalizeBody] }),
-      proxyRes: createOnProxyResHandler([openaiImagesResponseHandler]),
-      error: handleProxyError,
-    },
-  }),
+// Filter parameters based on the model being used to avoid sending unsupported parameters
+function filterModelParameters(manager: ProxyReqManager) {
+  const req = manager.request;
+  const originalBody = req.body;
+  const modelName = originalBody?.model || "";
+
+  // Skip if no body or it's not an object
+  if (!originalBody || typeof originalBody !== 'object') return;
+
+  // Create a deep copy of the body to filter
+  const filteredBody = { ...originalBody };
+
+  // Define allowed parameters for each model
+  if (modelName.includes('dall-e-2')) {
+    // DALL-E 2 parameters
+    const allowedParams = [
+      'model', 'prompt', 'n', 'size', 'response_format', 'user'
+    ];
+
+    // Remove any parameter not in the allowed list
+    Object.keys(filteredBody).forEach(key => {
+      if (!allowedParams.includes(key)) {
+        delete filteredBody[key];
+      }
+    });
+
+    req.log.info({ model: 'dall-e-2', params: Object.keys(filteredBody) }, "Filtered parameters for DALL-E 2");
+  } else if (modelName.includes('dall-e-3')) {
+    // DALL-E 3 parameters
+    const allowedParams = [
+      'model', 'prompt', 'n', 'quality', 'size', 'style', 'response_format', 'user'
+    ];

+    // Remove any parameter not in the allowed list
+    Object.keys(filteredBody).forEach(key => {
+      if (!allowedParams.includes(key)) {
+        delete filteredBody[key];
+      }
+    });
+
+    req.log.info({ model: 'dall-e-3', params: Object.keys(filteredBody) }, "Filtered parameters for DALL-E 3");
+  } else if (modelName.includes('gpt-image')) {
+    // Define allowed parameters for gpt-image-1
+    const allowedParams = [
+      'model', 'prompt', 'background', 'moderation', 'n', 'output_compression',
+      'output_format', 'quality', 'size', 'user', 'image', 'mask'
+    ];
+
+    // Remove any parameter not in the allowed list, especially 'style' which is only for DALL-E 3
+    Object.keys(filteredBody).forEach(key => {
+      if (!allowedParams.includes(key)) {
+        req.log.info({ model: 'gpt-image-1', removedParam: key }, "Removing unsupported parameter for GPT Image");
+        delete filteredBody[key];
+      }
+    });
+
+    req.log.info({ model: 'gpt-image-1', params: Object.keys(filteredBody) }, "Filtered parameters for GPT Image");
+  }
+
+  // Use the proper method to update the body
+  manager.setBody(filteredBody);
+}
+
+function replacePath(manager: ProxyReqManager) {
+  const req = manager.request;
+  const pathname = req.url.split("?")[0];
+  req.log.debug({ pathname }, "OpenAI image path filter");
+  if (req.path.startsWith("/v1/chat/completions")) {
+    manager.setPath("/v1/images/generations");
+  }
+}
+
+const openaiImagesProxy = createQueuedProxyMiddleware({
+  target: "https://api.openai.com",
+  mutations: [replacePath, filterModelParameters, addKey, finalizeBody],
+  blockingResponseHandler: openaiImagesResponseHandler,
 });

 const openaiImagesRouter = Router();
@@ -123,6 +198,17 @@ openaiImagesRouter.post(
   }),
   openaiImagesProxy
 );
+// Add support for the /v1/images/edits endpoint (used by gpt-image-1 for image editing)
+openaiImagesRouter.post(
+  "/v1/images/edits",
+  ipLimiter,
+  createPreprocessorMiddleware({
+    inApi: "openai-image",
+    outApi: "openai-image",
+    service: "openai",
+  }),
+  openaiImagesProxy
+);
 openaiImagesRouter.post(
   "/v1/chat/completions",
   ipLimiter,
```
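One consequence of `filterModelParameters` worth noting: a request that includes DALL-E 3's `style` parameter will have it silently dropped when the model is gpt-image-1, since `style` is not in that model's allow-list. Illustrative before/after:

```typescript
const body = { model: "gpt-image-1", prompt: "a corgi", style: "vivid", n: 1 };

// 'style' is not in the gpt-image-1 allow-list, so the proxied body becomes:
const proxied = { model: "gpt-image-1", prompt: "a corgi", n: 1 };
```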
+367
-113
@@ -1,113 +1,81 @@
|
|||||||
import { RequestHandler, Router } from "express";
|
import { Request, RequestHandler, Router } from "express";
|
||||||
import { createProxyMiddleware } from "http-proxy-middleware";
|
|
||||||
import { config } from "../config";
|
import { config } from "../config";
|
||||||
import { keyPool, OpenAIKey } from "../shared/key-management";
|
import { BadRequestError } from "../shared/errors";
|
||||||
import {
|
import { AzureOpenAIKey, keyPool, OpenAIKey } from "../shared/key-management";
|
||||||
getOpenAIModelFamily,
|
import { getOpenAIModelFamily } from "../shared/models";
|
||||||
ModelFamily,
|
|
||||||
OpenAIModelFamily,
|
|
||||||
} from "../shared/models";
|
|
||||||
import { logger } from "../logger";
|
|
||||||
import { createQueueMiddleware } from "./queue";
|
|
||||||
import { ipLimiter } from "./rate-limit";
|
import { ipLimiter } from "./rate-limit";
|
||||||
import { handleProxyError } from "./middleware/common";
|
|
||||||
import {
|
import {
|
||||||
addKey,
|
addKey,
|
||||||
addKeyForEmbeddingsRequest,
|
addKeyForEmbeddingsRequest,
|
||||||
createEmbeddingsPreprocessorMiddleware,
|
createEmbeddingsPreprocessorMiddleware,
|
||||||
createOnProxyReqHandler,
|
|
||||||
createPreprocessorMiddleware,
|
createPreprocessorMiddleware,
|
||||||
finalizeBody,
|
finalizeBody,
|
||||||
forceModel,
|
|
||||||
RequestPreprocessor,
|
RequestPreprocessor,
|
||||||
} from "./middleware/request";
|
} from "./middleware/request";
|
||||||
import {
|
import { ProxyResHandlerWithBody } from "./middleware/response";
|
||||||
createOnProxyResHandler,
|
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
|
||||||
ProxyResHandlerWithBody,
|
|
||||||
} from "./middleware/response";
|
|
||||||
|
|
||||||
// https://platform.openai.com/docs/models/overview
|
// https://platform.openai.com/docs/models/overview
|
||||||
export const KNOWN_OPENAI_MODELS = [
|
|
||||||
"gpt-4o",
|
|
||||||
"gpt-4o-2024-05-13",
|
|
||||||
"gpt-4-turbo", // alias for latest gpt4-turbo stable
|
|
||||||
"gpt-4-turbo-2024-04-09", // gpt4-turbo stable, with vision
|
|
||||||
"gpt-4-turbo-preview", // alias for latest turbo preview
|
|
||||||
"gpt-4-0125-preview", // gpt4-turbo preview 2
|
|
||||||
"gpt-4-1106-preview", // gpt4-turbo preview 1
|
|
||||||
"gpt-4-vision-preview", // gpt4-turbo preview 1 with vision
|
|
||||||
"gpt-4",
|
|
||||||
"gpt-4-0613",
|
|
||||||
"gpt-4-0314", // EOL 2024-06-13
|
|
||||||
"gpt-4-32k",
|
|
||||||
"gpt-4-32k-0314", // EOL 2024-06-13
|
|
||||||
"gpt-4-32k-0613",
|
|
||||||
"gpt-3.5-turbo",
|
|
||||||
"gpt-3.5-turbo-0301", // EOL 2024-06-13
|
|
||||||
"gpt-3.5-turbo-0613",
|
|
||||||
"gpt-3.5-turbo-16k",
|
|
||||||
"gpt-3.5-turbo-16k-0613",
|
|
||||||
"gpt-3.5-turbo-instruct",
|
|
||||||
"gpt-3.5-turbo-instruct-0914",
|
|
||||||
"text-embedding-ada-002",
|
|
||||||
];
|
|
||||||
|
|
||||||
let modelsCache: any = null;
|
let modelsCache: any = null;
|
||||||
let modelsCacheTime = 0;
|
let modelsCacheTime = 0;
|
||||||
|
|
||||||
export function generateModelList(models = KNOWN_OPENAI_MODELS) {
|
export function generateModelList(service: "openai" | "azure") {
|
||||||
// Get available families and snapshots
|
const keys = keyPool
|
||||||
let availableFamilies = new Set<OpenAIModelFamily>();
|
.list()
|
||||||
const availableSnapshots = new Set<string>();
|
.filter((k) => k.service === service && !k.isDisabled) as
|
||||||
for (const key of keyPool.list()) {
|
| OpenAIKey[]
|
||||||
if (key.isDisabled || key.service !== "openai") continue;
|
| AzureOpenAIKey[];
|
||||||
const asOpenAIKey = key as OpenAIKey;
|
if (keys.length === 0) return [];
|
||||||
asOpenAIKey.modelFamilies.forEach((f) => availableFamilies.add(f));
|
|
||||||
asOpenAIKey.modelSnapshots.forEach((s) => availableSnapshots.add(s));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove disabled families
|
const allowedModelFamilies = new Set(config.allowedModelFamilies);
|
||||||
const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
|
const modelFamilies = new Set(
|
||||||
availableFamilies = new Set(
|
keys
|
||||||
[...availableFamilies].filter((x) => allowed.has(x))
|
.flatMap((k) => k.modelFamilies)
|
||||||
|
.filter((f) => allowedModelFamilies.has(f))
|
||||||
);
|
);
|
||||||
|
|
||||||
return models
|
const modelIds = new Set(
|
||||||
.map((id) => ({
|
keys
|
||||||
id,
|
.flatMap((k) => k.modelIds)
|
||||||
object: "model",
|
.filter((id) => {
|
||||||
created: new Date().getTime(),
|
const allowed = modelFamilies.has(getOpenAIModelFamily(id));
|
||||||
owned_by: "openai",
|
const known = ["gpt", "o", "dall-e", "chatgpt", "text-embedding", "codex"].some(
|
||||||
permission: [
|
(prefix) => id.startsWith(prefix)
|
||||||
{
|
);
|
||||||
id: "modelperm-" + id,
|
const isFinetune = id.includes("ft");
|
||||||
object: "model_permission",
|
return allowed && known && !isFinetune;
|
||||||
created: new Date().getTime(),
|
})
|
||||||
organization: "*",
|
);
|
||||||
group: null,
|
|
||||||
is_blocking: false,
|
|
||||||
},
|
|
||||||
],
|
|
||||||
root: id,
|
|
||||||
parent: null,
|
|
||||||
}))
|
|
||||||
.filter((model) => {
|
|
||||||
// First check if the family is available
|
|
||||||
const hasFamily = availableFamilies.has(getOpenAIModelFamily(model.id));
|
|
||||||
if (!hasFamily) return false;
|
|
||||||
|
|
||||||
// Then for snapshots, ensure the specific snapshot is available
|
return Array.from(modelIds).map((id) => ({
|
||||||
const isSnapshot = model.id.match(/-\d{4}(-preview)?$/);
|
id,
|
||||||
if (!isSnapshot) return true;
|
object: "model",
|
||||||
return availableSnapshots.has(model.id);
|
created: new Date().getTime(),
|
||||||
});
|
owned_by: service,
|
||||||
|
permission: [
|
||||||
|
{
|
||||||
|
id: "modelperm-" + id,
|
||||||
|
object: "model_permission",
|
||||||
|
created: new Date().getTime(),
|
||||||
|
organization: "*",
|
||||||
|
group: null,
|
||||||
|
is_blocking: false,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
root: id,
|
||||||
|
parent: null,
|
||||||
|
}));
|
||||||
}
|
}
|
||||||
|
|
||||||
const handleModelRequest: RequestHandler = (_req, res) => {
|
const handleModelRequest: RequestHandler = (_req, res) => {
|
||||||
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
|
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
|
||||||
return res.status(200).json(modelsCache);
|
return res.status(200).json(modelsCache);
|
||||||
}
|
}
|
||||||
const result = generateModelList();
|
|
||||||
|
if (!config.openaiKey) return { object: "list", data: [] };
|
||||||
|
|
||||||
|
const result = generateModelList("openai");
|
||||||
|
|
||||||
modelsCache = { object: "list", data: result };
|
modelsCache = { object: "list", data: result };
|
||||||
modelsCacheTime = new Date().getTime();
|
modelsCacheTime = new Date().getTime();
|
||||||
res.status(200).json(modelsCache);
|
res.status(200).json(modelsCache);
|
||||||
@@ -142,16 +110,26 @@ const openaiResponseHandler: ProxyResHandlerWithBody = async (
     throw new Error("Expected body to be an object");
   }

+  const interval = (req as any)._keepAliveInterval
+  if (interval) {
+    clearInterval(interval);
+    res.write(JSON.stringify(body));
+    res.end();
+    return;
+  }
+
   let newBody = body;
   if (req.outboundApi === "openai-text" && req.inboundApi === "openai") {
     req.log.info("Transforming Turbo-Instruct response to Chat format");
     newBody = transformTurboInstructResponse(body);
+  } else if (req.outboundApi === "openai-responses" && req.inboundApi === "openai") {
+    req.log.info("Transforming Responses API response to Chat format");
+    newBody = transformResponsesApiResponse(body);
   }

   res.status(200).json({ ...newBody, proxy: body.proxy });
 };

-/** Only used for non-streaming responses. */
 function transformTurboInstructResponse(
   turboInstructBody: Record<string, any>
 ): Record<string, any> {
@@ -169,31 +147,151 @@ function transformTurboInstructResponse(
   return transformed;
 }

-const openaiProxy = createQueueMiddleware({
-  proxyMiddleware: createProxyMiddleware({
-    target: "https://api.openai.com",
-    changeOrigin: true,
-    selfHandleResponse: true,
-    logger,
-    on: {
-      proxyReq: createOnProxyReqHandler({ pipeline: [addKey, finalizeBody] }),
-      proxyRes: createOnProxyResHandler([openaiResponseHandler]),
-      error: handleProxyError,
-    },
-  }),
+function transformResponsesApiResponse(
+  responsesBody: Record<string, any>
+): Record<string, any> {
+  // If the response is already in chat completion format, return it as is
+  if (responsesBody.choices && responsesBody.choices[0]?.message) {
+    return responsesBody;
+  }
+
+  // Create a compatible format for clients expecting chat completions format
+  const transformed: Record<string, any> = {
+    id: responsesBody.id || `chatcmpl-${Date.now()}`,
+    object: "chat.completion",
+    created: responsesBody.created_at || Math.floor(Date.now() / 1000),
+    model: responsesBody.model || "o1-pro",
+    choices: [],
+    usage: responsesBody.usage || {
+      prompt_tokens: 0,
+      completion_tokens: 0,
+      total_tokens: 0
+    }
+  };
+
+  // Extract content from the Responses API format - multiple possible structures
+
+  // Structure 1: output array with message objects
+  if (responsesBody.output && Array.isArray(responsesBody.output)) {
+    // Look for a message type in the output array
+    let messageOutput = null;
+    for (const output of responsesBody.output) {
+      if (output.type === "message") {
+        messageOutput = output;
+        break;
+      }
+    }
+
+    if (messageOutput) {
+      if (messageOutput.content && Array.isArray(messageOutput.content) && messageOutput.content.length > 0) {
+        // Handle text content
+        let content = "";
+        const toolCalls: any[] = [];
+
+        for (const contentItem of messageOutput.content) {
+          if (contentItem.type === "output_text") {
+            content += contentItem.text;
+          } else if (contentItem.type === "tool_calls" && Array.isArray(contentItem.tool_calls)) {
+            toolCalls.push(...contentItem.tool_calls);
+          }
+        }
+
+        const message: Record<string, any> = {
+          role: messageOutput.role || "assistant",
+          content: content
+        };
+
+        if (toolCalls.length > 0) {
+          message.tool_calls = toolCalls;
+        }
+
+        transformed.choices.push({
+          index: 0,
+          message,
+          finish_reason: "stop"
+        });
+      } else if (typeof messageOutput.content === 'string') {
+        // Simple string content
+        transformed.choices.push({
+          index: 0,
+          message: {
+            role: messageOutput.role || "assistant",
+            content: messageOutput.content
+          },
+          finish_reason: "stop"
+        });
+      }
+    }
+  }
+
+  // Structure 2: response object with content
+  else if (responsesBody.response && responsesBody.response.content) {
+    transformed.choices.push({
+      index: 0,
+      message: {
+        role: "assistant",
+        content: typeof responsesBody.response.content === 'string'
+          ? responsesBody.response.content
+          : JSON.stringify(responsesBody.response.content)
+      },
+      finish_reason: responsesBody.response.finish_reason || "stop"
+    });
+  }
+
+  // Structure 3: look for 'content' field directly
+  else if (responsesBody.content) {
+    transformed.choices.push({
+      index: 0,
+      message: {
+        role: "assistant",
+        content: typeof responsesBody.content === 'string'
+          ? responsesBody.content
+          : JSON.stringify(responsesBody.content)
+      },
+      finish_reason: "stop"
+    });
+  }
+
+  // If we couldn't extract content, create a basic response
+  if (transformed.choices.length === 0) {
+    transformed.choices.push({
+      index: 0,
+      message: {
+        role: "assistant",
+        content: ""
+      },
+      finish_reason: "stop"
+    });
+  }
+
+  // Copy usage information if available
+  if (responsesBody.usage) {
+    transformed.usage = {
+      prompt_tokens: responsesBody.usage.input_tokens || 0,
+      completion_tokens: responsesBody.usage.output_tokens || 0,
+      total_tokens: responsesBody.usage.total_tokens || 0
+    };
+  }
+
+  return transformed;
+}
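For reference, a minimal sketch (not from the patch) of what the transform above produces for a typical Responses API payload; the payload shape here is simplified, not an exhaustive schema:

  const sample = {
    id: "resp_123",
    model: "o1-pro",
    created_at: 1700000000,
    output: [
      {
        type: "message",
        role: "assistant",
        content: [{ type: "output_text", text: "Hello!" }],
      },
    ],
    usage: { input_tokens: 10, output_tokens: 3, total_tokens: 13 },
  };

  const chat = transformResponsesApiResponse(sample);
  // chat.object === "chat.completion"
  // chat.choices[0].message.content === "Hello!"
  // chat.usage -> { prompt_tokens: 10, completion_tokens: 3, total_tokens: 13 }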
+
+const openaiProxy = createQueuedProxyMiddleware({
+  mutations: [addKey, finalizeBody],
+  target: "https://api.openai.com",
+  blockingResponseHandler: openaiResponseHandler,
 });

-const openaiEmbeddingsProxy = createProxyMiddleware({
+const openaiEmbeddingsProxy = createQueuedProxyMiddleware({
+  mutations: [addKeyForEmbeddingsRequest, finalizeBody],
   target: "https://api.openai.com",
-  changeOrigin: true,
-  selfHandleResponse: false,
-  logger,
-  on: {
-    proxyReq: createOnProxyReqHandler({
-      pipeline: [addKeyForEmbeddingsRequest, finalizeBody],
-    }),
-    error: handleProxyError,
-  },
+});
+
+// New proxy middleware for the Responses API
+const openaiResponsesProxy = createQueuedProxyMiddleware({
+  mutations: [addKey, finalizeBody],
+  target: "https://api.openai.com",
+  blockingResponseHandler: openaiResponseHandler,
 });

 const openaiRouter = Router();
@@ -222,17 +320,120 @@ openaiRouter.post(
   ),
   openaiProxy
 );
+
+const setupChunkedTransfer: RequestHandler = (req, res, next) => {
+  req.log.info("Setting chunked transfer for o1 to prevent Cloudflare timeouts")
+
+  // Check if user is trying to use streaming with codex-mini models
+  if (req.body.model?.startsWith("codex-mini") && req.body.stream === true) {
+    return res.status(400).json({
+      error: {
+        message: "The codex-mini models do not support streaming. Please set 'stream: false' in your request.",
+        type: "invalid_request_error",
+        param: "stream",
+        code: "streaming_not_supported"
+      }
+    });
+  }
+
+  // Only o1 doesn't support streaming
+  if (req.body.model === "o1" || req.body.model === "o1-2024-12-17") {
+    req.isChunkedTransfer = true;
+    res.writeHead(200, {
+      'Content-Type': 'application/json',
+      'Transfer-Encoding': 'chunked'
+    });
+
+    // Higher values are required - otherwise Cloudflare will buffer and not pass
+    // the separate chunks, which means that a >100s response will get terminated anyway
+    const keepAlive = setInterval(() => {
+      res.write(' '.repeat(4096));
+    }, 48_000);
+
+    (req as any)._keepAliveInterval = keepAlive;
+  }
+  next();
+};
+
+// Functions to handle model-specific API routing
+function shouldUseResponsesApi(model: string): boolean {
+  return model === "o1-pro" || model.startsWith("o1-pro-") ||
+    model === "o3-pro" || model.startsWith("o3-pro-") ||
+    model === "codex-mini-latest" || model.startsWith("codex-mini-");
+}
+
+// Preprocessor to redirect requests to the responses API
+const routeToResponsesApi: RequestPreprocessor = (req) => {
+  if (shouldUseResponsesApi(req.body.model)) {
+    req.log.info(`Routing ${req.body.model} to OpenAI Responses API`);
+    req.url = "/v1/responses";
+    req.outboundApi = "openai-responses";
+  }
+};
+
 // General chat completion endpoint. Turbo-instruct is not supported here.
 openaiRouter.post(
   "/v1/chat/completions",
   ipLimiter,
-  createPreprocessorMiddleware({
-    inApi: "openai",
-    outApi: "openai",
-    service: "openai",
-  }),
+  createPreprocessorMiddleware(
+    { inApi: "openai", outApi: "openai", service: "openai" },
+    {
+      afterTransform: [
+        fixupMaxTokens,
+        filterGPT5UnsupportedParams,
+        routeToResponsesApi
+      ]
+    }
+  ),
+  setupChunkedTransfer,
+  (req, _res, next) => {
+    // Route to the responses endpoint if needed
+    if (req.outboundApi === "openai-responses") {
+      // Ensure messages is moved to input properly
+      req.log.info("Final check for Responses API format in chat completions");
+      if (req.body.messages) {
+        req.log.info("Moving 'messages' to 'input' for Responses API");
+        req.body.input = req.body.messages;
+        delete req.body.messages;
+      } else if (req.body.input && req.body.input.messages) {
+        req.log.info("Reformatting input.messages for Responses API");
+        req.body.input = req.body.input.messages;
+      }
+
+      return openaiResponsesProxy(req, _res, next);
+    }
+    next();
+  },
   openaiProxy
 );

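The net effect of routeToResponsesApi plus the inline middleware above: a chat-style body is rewritten into Responses API shape before it reaches openaiResponsesProxy. A before/after sketch with illustrative values:

  const before = {
    model: "o1-pro",
    messages: [{ role: "user", content: "Say hi" }],
  };
  // After routeToResponsesApi + the inline middleware, the proxied body is:
  const after = {
    model: "o1-pro",
    input: [{ role: "user", content: "Say hi" }],
  };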
+// New endpoint for OpenAI Responses API
+openaiRouter.post(
+  "/v1/responses",
+  ipLimiter,
+  createPreprocessorMiddleware(
+    { inApi: "openai", outApi: "openai-responses", service: "openai" },
+    { afterTransform: [fixupMaxTokens, filterGPT5UnsupportedParams] }
+  ),
+  // Add final check to ensure the body is in the correct format for Responses API
+  (req, _res, next) => {
+    req.log.info("Final check for Responses API format");
+
+    // Ensure messages is properly formatted for input
+    if (req.body.messages) {
+      req.log.info("Moving 'messages' to 'input' for Responses API");
+      req.body.input = req.body.messages;
+      delete req.body.messages;
+    } else if (req.body.input && req.body.input.messages) {
+      req.log.info("Reformatting input.messages for Responses API");
+      req.body.input = req.body.input.messages;
+    }
+
+    next();
+  },
+  openaiResponsesProxy
+);

 // Embeddings endpoint.
 openaiRouter.post(
   "/v1/embeddings",
@@ -241,4 +442,57 @@ openaiRouter.post(
   openaiEmbeddingsProxy
 );

+function forceModel(model: string): RequestPreprocessor {
+  return (req: Request) => void (req.body.model = model);
+}
+
+function fixupMaxTokens(req: Request) {
+  // For Responses API, use max_output_tokens instead of max_completion_tokens
+  if (req.outboundApi === "openai-responses") {
+    if (!req.body.max_output_tokens) {
+      req.body.max_output_tokens = req.body.max_tokens || req.body.max_completion_tokens;
+    }
+    // Remove the other token params to avoid API errors
+    delete req.body.max_tokens;
+    delete req.body.max_completion_tokens;
+
+    // Remove other parameters not supported by Responses API
+    const unsupportedParams = ['frequency_penalty', 'presence_penalty'];
+    for (const param of unsupportedParams) {
+      if (req.body[param] !== undefined) {
+        req.log.info(`Removing unsupported parameter for Responses API: ${param}`);
+        delete req.body[param];
+      }
+    }
+  } else {
+    // Original behavior for other APIs
+    if (!req.body.max_completion_tokens) {
+      req.body.max_completion_tokens = req.body.max_tokens;
+    }
+    delete req.body.max_tokens;
+  }
+}
+
+// GPT-5, GPT-5-mini, and GPT-5-nano don't support certain parameters
+// Remove them if present to prevent API errors
+function filterGPT5UnsupportedParams(req: Request) {
+  const model = req.body.model;
+
+  // Only apply filtering to these specific models (gpt5-chat-latest supports all params)
+  const restrictedModels = /^gpt-5(-mini|-nano)?(-\d{4}-\d{2}-\d{2})?$/;
+
+  if (!restrictedModels.test(model)) {
+    return; // Not a restricted model, no filtering needed
+  }
+
+  // Remove unsupported parameters if they exist
+  const unsupportedParams = ['temperature', 'top_p', 'presence_penalty', 'frequency_penalty'];
+
+  for (const param of unsupportedParams) {
+    if (req.body[param] !== undefined) {
+      delete req.body[param];
+    }
+  }
+}
+
 export const openai = openaiRouter;
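A quick sanity check on the restrictedModels pattern (model IDs are illustrative; snapshot names assumed to follow the -YYYY-MM-DD convention):

  const restrictedModels = /^gpt-5(-mini|-nano)?(-\d{4}-\d{2}-\d{2})?$/;
  restrictedModels.test("gpt-5");                 // true
  restrictedModels.test("gpt-5-mini");            // true
  restrictedModels.test("gpt-5-nano-2025-08-07"); // true  (snapshot form)
  restrictedModels.test("gpt-5-chat-latest");     // false (keeps all params)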
+55 -67
@@ -13,6 +13,7 @@

 import crypto from "crypto";
 import { Handler, Request } from "express";
+import { config } from "../config";
 import { BadRequestError, TooManyRequestsError } from "../shared/errors";
 import { keyPool } from "../shared/key-management";
 import {
@@ -22,24 +23,25 @@ import {
 } from "../shared/models";
 import { initializeSseStream } from "../shared/streaming";
 import { logger } from "../logger";
-import { getUniqueIps, SHARED_IP_ADDRESSES } from "./rate-limit";
+import { getUniqueIps } from "./rate-limit";
-import { RequestPreprocessor } from "./middleware/request";
+import { ProxyReqMutator, RequestPreprocessor } from "./middleware/request";
-import { handleProxyError } from "./middleware/common";
 import { sendErrorToClient } from "./middleware/response/error-generator";
+import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
+import { classifyErrorAndSend } from "./middleware/common";

 const queue: Request[] = [];
 const log = logger.child({ module: "request-queue" });

-/** Maximum number of queue slots for Agnai.chat requests. */
-const AGNAI_CONCURRENCY_LIMIT = 5;
 /** Maximum number of queue slots for individual users. */
-const USER_CONCURRENCY_LIMIT = 1;
+const USER_CONCURRENCY_LIMIT = parseInt(
+  process.env.USER_CONCURRENCY_LIMIT ?? "1"
+);
 const MIN_HEARTBEAT_SIZE = parseInt(process.env.MIN_HEARTBEAT_SIZE_B ?? "512");
 const MAX_HEARTBEAT_SIZE =
   1024 * parseInt(process.env.MAX_HEARTBEAT_SIZE_KB ?? "1024");
 const HEARTBEAT_INTERVAL =
   1000 * parseInt(process.env.HEARTBEAT_INTERVAL_SEC ?? "5");
-const LOAD_THRESHOLD = parseFloat(process.env.LOAD_THRESHOLD ?? "50");
+const LOAD_THRESHOLD = parseFloat(process.env.LOAD_THRESHOLD ?? "150");
 const PAYLOAD_SCALE_FACTOR = parseFloat(
   process.env.PAYLOAD_SCALE_FACTOR ?? "6"
 );
@@ -58,39 +60,28 @@ const QUEUE_JOIN_TIMEOUT = 5000;
 function getIdentifier(req: Request) {
   if (req.user) return req.user.token;
   if (req.risuToken) return req.risuToken;
-  if (isFromSharedIp(req)) return "shared-ip";
+  // if (isFromSharedIp(req)) return "shared-ip";
   return req.ip;
 }

 const sharesIdentifierWith = (incoming: Request) => (queued: Request) =>
   getIdentifier(queued) === getIdentifier(incoming);

-const isFromSharedIp = (req: Request) => SHARED_IP_ADDRESSES.has(req.ip);
-
 async function enqueue(req: Request) {
-  const enqueuedRequestCount = queue.filter(sharesIdentifierWith(req)).length;
-  let isGuest = req.user?.token === undefined;
-
-  // Requests from shared IP addresses such as Agnai.chat are exempt from IP-
-  // based rate limiting but can only occupy a certain number of slots in the
-  // queue. Authenticated users always get a single spot in the queue.
-  const isSharedIp = isFromSharedIp(req);
-  const maxConcurrentQueuedRequests =
-    isGuest && isSharedIp ? AGNAI_CONCURRENCY_LIMIT : USER_CONCURRENCY_LIMIT;
-  if (enqueuedRequestCount >= maxConcurrentQueuedRequests) {
-    if (isSharedIp) {
-      // Re-enqueued requests are not counted towards the limit since they
-      // already made it through the queue once.
-      if (req.retryCount === 0) {
-        throw new TooManyRequestsError(
-          "Too many agnai.chat requests are already queued"
-        );
-      }
-    } else {
-      throw new TooManyRequestsError(
-        "Your IP or user token already has another request in the queue."
-      );
-    }
+  if (req.socket.destroyed || req.res?.writableEnded) {
+    // In rare cases, a request can be disconnected after it is dequeued for a
+    // retry, but before it is re-enqueued. In this case we may miss the abort
+    // and the request will loop in the queue forever.
+    req.log.warn("Attempt to enqueue aborted request.");
+    throw new Error("Attempt to enqueue aborted request.");
+  }
+
+  const enqueuedRequestCount = queue.filter(sharesIdentifierWith(req)).length;
+  // Do not apply concurrency limit to "special" users
+  if (enqueuedRequestCount >= USER_CONCURRENCY_LIMIT && req.user?.type !== "special") {
+    throw new TooManyRequestsError(
+      "Your IP or user token already has another request in the queue."
+    );
   }

   // shitty hack to remove hpm's event listeners on retried requests
@@ -146,19 +137,7 @@ export async function reenqueueRequest(req: Request) {
 }

 function getQueueForPartition(partition: ModelFamily): Request[] {
-  return queue
-    .filter((req) => getModelFamilyForRequest(req) === partition)
-    .sort((a, b) => {
-      // Certain requests are exempted from IP-based rate limiting because they
-      // come from a shared IP address. To prevent these requests from starving
-      // out other requests during periods of high traffic, we sort them to the
-      // end of the queue.
-      const aIsExempted = isFromSharedIp(a);
-      const bIsExempted = isFromSharedIp(b);
-      if (aIsExempted && !bIsExempted) return 1;
-      if (!aIsExempted && bIsExempted) return -1;
-      return 0;
-    });
+  return queue.filter((req) => getModelFamilyForRequest(req) === partition);
 }

 export function dequeue(partition: ModelFamily): Request | undefined {
@@ -169,7 +148,14 @@ export function dequeue(partition: ModelFamily): Request | undefined {
 }

   const req = modelQueue.reduce((prev, curr) =>
-    prev.startTime < curr.startTime ? prev : curr
+    prev.startTime +
+      config.tokensPunishmentFactor *
+        ((prev.promptTokens ?? 0) + (prev.outputTokens ?? 0)) <
+    curr.startTime +
+      config.tokensPunishmentFactor *
+        ((curr.promptTokens ?? 0) + (curr.outputTokens ?? 0))
+      ? prev
+      : curr
   );
   queue.splice(queue.indexOf(req), 1);

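The new comparator effectively ages each queued request by its token usage: effective priority is startTime + tokensPunishmentFactor * (promptTokens + outputTokens), and the lowest value is dequeued first. A worked example with an assumed factor:

  // Illustrative numbers only; tokensPunishmentFactor is config-driven.
  const factor = 5; // assumed config.tokensPunishmentFactor
  const a = { startTime: 1_000_000, promptTokens: 8_000, outputTokens: 2_000 };
  const b = { startTime: 1_020_000, promptTokens: 500, outputTokens: 100 };
  // a: 1_000_000 + 5 * 10_000 = 1_050_000
  // b: 1_020_000 + 5 * 600   = 1_023_000  -> b is dequeued first despite arriving later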
@@ -261,7 +247,6 @@ let waitTimes: {
   partition: ModelFamily;
   start: number;
   end: number;
-  isDeprioritized: boolean;
 }[] = [];

 /** Adds a successful request to the list of wait times. */
@@ -270,7 +255,6 @@ export function trackWaitTime(req: Request) {
     partition: getModelFamilyForRequest(req),
     start: req.startTime!,
     end: req.queueOutTime ?? Date.now(),
-    isDeprioritized: isFromSharedIp(req),
   });
 }

@@ -296,8 +280,7 @@ function calculateWaitTime(partition: ModelFamily) {
     .filter((wait) => {
       const isSamePartition = wait.partition === partition;
       const isRecent = now - wait.end < 300 * 1000;
-      const isNormalPriority = !wait.isDeprioritized;
-      return isSamePartition && isRecent && isNormalPriority;
+      return isSamePartition && isRecent;
     })
     .map((wait) => wait.end - wait.start);
   const recentAverage = recentWaits.length
@@ -311,11 +294,7 @@ function calculateWaitTime(partition: ModelFamily) {
   );

   const currentWaits = queue
-    .filter((req) => {
-      const isSamePartition = getModelFamilyForRequest(req) === partition;
-      const isNormalPriority = !isFromSharedIp(req);
-      return isSamePartition && isNormalPriority;
-    })
+    .filter((req) => getModelFamilyForRequest(req) === partition)
     .map((req) => now - req.startTime!);
   const longestCurrentWait = Math.max(...currentWaits, 0);

@@ -343,26 +322,35 @@ export function getQueueLength(partition: ModelFamily | "all" = "all") {
 }

 export function createQueueMiddleware({
-  beforeProxy,
+  mutations = [],
   proxyMiddleware,
 }: {
-  beforeProxy?: RequestPreprocessor;
+  mutations?: ProxyReqMutator[];
   proxyMiddleware: Handler;
 }): Handler {
   return async (req, res, next) => {
     req.proceed = async () => {
-      if (beforeProxy) {
-        try {
-          // Hack to let us run asynchronous middleware before the
-          // http-proxy-middleware handler. This is used to sign AWS requests
-          // before they are proxied, as the signing is asynchronous.
-          // Unlike RequestPreprocessors, this runs every time the request is
-          // dequeued, not just the first time.
-          await beforeProxy(req);
-        } catch (err) {
-          return handleProxyError(err, req, res);
+      // canonicalize the stream field which is set in a few places not always
+      // consistently
+      req.isStreaming = req.isStreaming || String(req.body.stream) === "true";
+      req.body.stream = req.isStreaming;
+
+      try {
+        // Just before executing the proxyMiddleware, we will create a
+        // ProxyReqManager to track modifications to the request. This allows
+        // us to revert those changes if the proxied request fails with a
+        // retryable error. That happens in proxyMiddleware's onProxyRes
+        // handler.
+        const changeManager = new ProxyReqManager(req);
+        req.changeManager = changeManager;
+        for (const mutator of mutations) {
+          await mutator(changeManager);
         }
+      } catch (err) {
+        // Failure during request preparation is a fatal error.
+        return classifyErrorAndSend(err, req, res);
       }

       proxyMiddleware(req, res, next);
     };

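For context, a hypothetical mutator sketch. ProxyReqManager's methods are not shown anywhere in this diff, so setHeader() below is an assumed API, used only to illustrate the track-and-revert contract mutations are held to:

  import crypto from "crypto";

  // Hypothetical: stamp each outbound request with a trace ID. The manager is
  // assumed to record the change so it can be reverted before a retry.
  const addTraceHeader: ProxyReqMutator = async (manager) => {
    manager.setHeader("x-trace-id", crypto.randomUUID()); // assumed method
  };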
@@ -0,0 +1,361 @@
+import { Request, RequestHandler, Router } from "express";
+import { createPreprocessorMiddleware } from "./middleware/request";
+import { ipLimiter } from "./rate-limit";
+import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
+import { addKey, finalizeBody } from "./middleware/request";
+import { ProxyResHandlerWithBody } from "./middleware/response";
+import axios from "axios";
+import { QwenKey, keyPool } from "../shared/key-management";
+import {
+  isQwenModel,
+  isQwenThinkingModel,
+  normalizeMessages,
+  isQwen3Model,
+  isThinkingVariant,
+  isNonThinkingVariant,
+  getBaseModelName
+} from "../shared/api-schemas/qwen";
+import { logger } from "../logger";
+
+const log = logger.child({ module: "proxy", service: "qwen" });
+let modelsCache: any = null;
+let modelsCacheTime = 0;
+
+const qwenResponseHandler: ProxyResHandlerWithBody = async (
+  _proxyRes,
+  req,
+  res,
+  body
+) => {
+  if (typeof body !== "object") {
+    throw new Error("Expected body to be an object");
+  }
+
+  res.status(200).json({ ...body, proxy: body.proxy });
+};
+
+const getModelsResponse = async () => {
+  // Return cache if less than 1 minute old
+  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
+    return modelsCache;
+  }
+
+  try {
+    // Get a Qwen key directly
+    const modelToUse = "qwen-plus"; // Use any Qwen model here - just for key selection
+    const qwenKey = keyPool.get(modelToUse, "qwen") as QwenKey;
+
+    if (!qwenKey || !qwenKey.key) {
+      log.warn("No valid Qwen key available for model listing");
+      throw new Error("No valid Qwen API key available");
+    }
+
+    // Fetch models directly from Qwen API
+    const response = await axios.get("https://dashscope-intl.aliyuncs.com/compatible-mode/v1/models", {
+      headers: {
+        "Content-Type": "application/json",
+        "Authorization": `Bearer ${qwenKey.key}`
+      },
+    });
+
+    if (!response.data || !response.data.data) {
+      throw new Error("Unexpected response format from Qwen API");
+    }
+
+    // Extract models
+    const models = response.data;
+
+    // Ensure we have all known Qwen models in the list
+    const knownQwenModels = [
+      "qwen-max",
+      "qwen-max-latest",
+      "qwen-max-2025-01-25",
+      "qwen-plus",
+      "qwen-plus-latest",
+      "qwen-plus-2025-01-25",
+      "qwen-turbo",
+      "qwen-turbo-latest",
+      "qwen-turbo-2024-11-01",
+      "qwen3-235b-a22b",
+      "qwen3-32b",
+      "qwen3-30b-a3b"
+    ];
+
+    // Add thinking capability flag to models that support it
+    if (models.data && Array.isArray(models.data)) {
+      // Create a set of existing model IDs for quick lookup
+      const existingModelIds = new Set(models.data.map((model: any) => model.id));
+
+      // Filter out base Qwen3 models since we'll add variants instead
+      models.data = models.data.filter((model: any) => {
+        return !isQwen3Model(model.id) || isThinkingVariant(model.id) || isNonThinkingVariant(model.id);
+      });
+
+      // Add any missing models from our known list
+      knownQwenModels.forEach(modelId => {
+        if (!existingModelIds.has(modelId)) {
+          models.data.push({
+            id: modelId,
+            object: "model",
+            created: Date.now(),
+            owned_by: "qwen",
+            capabilities: isQwenThinkingModel(modelId) ? { thinking: true } : {}
+          });
+        }
+      });
+
+      // Add thinking capability flag to existing models
+      const processedModelIds = new Set();
+      const originalModelsData = [...models.data];
+
+      models.data = originalModelsData.flatMap((model: any) => {
+        const modelId = model.id;
+        processedModelIds.add(modelId);
+
+        // Apply capabilities to all models
+        if (isQwenThinkingModel(modelId)) {
+          model.capabilities = model.capabilities || {};
+          model.capabilities.thinking = true;
+        }
+
+        // For Qwen3 models, add thinking and non-thinking variants, but not the original
+        if (isQwen3Model(modelId) &&
+            !isThinkingVariant(modelId) &&
+            !isNonThinkingVariant(modelId)) {
+
+          // Create thinking variant
+          const thinkingModel = {
+            id: `${modelId}-thinking`,
+            object: "model",
+            created: model.created || Date.now(),
+            owned_by: model.owned_by || "qwen",
+            capabilities: { thinking: true },
+            proxy_managed: true,
+            display_name: `${model.display_name || modelId} (Thinking Mode)`
+          };
+
+          // Create non-thinking variant
+          const nonThinkingModel = {
+            id: `${modelId}-nonthinking`,
+            object: "model",
+            created: model.created || Date.now(),
+            owned_by: model.owned_by || "qwen",
+            capabilities: { thinking: true },
+            proxy_managed: true,
+            display_name: `${model.display_name || modelId} (Standard Mode)`
+          };
+
+          // Only add variants, not the original model
+          return [thinkingModel, nonThinkingModel];
+        }
+
+        return [model];
+      });
+    } else {
+      // If the API response didn't include models, create our own list
+      models.data = knownQwenModels.flatMap(modelId => {
+        // For Qwen3 models, add only thinking and non-thinking variants (not the base model)
+        if (isQwen3Model(modelId) &&
+            !isThinkingVariant(modelId) &&
+            !isNonThinkingVariant(modelId)) {
+
+          return [
+            {
+              id: `${modelId}-thinking`,
+              object: "model",
+              created: Date.now(),
+              owned_by: "qwen",
+              capabilities: { thinking: true },
+              proxy_managed: true,
+              display_name: `${modelId} (Thinking Mode)`
+            },
+            {
+              id: `${modelId}-nonthinking`,
+              object: "model",
+              created: Date.now(),
+              owned_by: "qwen",
+              capabilities: { thinking: true },
+              proxy_managed: true,
+              display_name: `${modelId} (Standard Mode)`
+            }
+          ];
+        }
+
+        // For non-Qwen3 models, return the base model
+        const baseModel = {
+          id: modelId,
+          object: "model",
+          created: Date.now(),
+          owned_by: "qwen",
+          capabilities: isQwenThinkingModel(modelId) ? { thinking: true } : {}
+        };
+
+        return [baseModel];
+      });
+    }
+
+    log.debug({ modelCount: models.data?.length }, "Retrieved models from Qwen API");
+
+    // Cache the response
+    modelsCache = models;
+    modelsCacheTime = new Date().getTime();
+    return models;
+  } catch (error) {
+    // Provide detailed logging for better troubleshooting
+    if (error instanceof Error) {
+      log.error(
+        { errorMessage: error.message, stack: error.stack },
+        "Error fetching Qwen models"
+      );
+    } else {
+      log.error({ error }, "Unknown error fetching Qwen models");
+    }
+
+    // Return empty list as fallback
+    return {
+      object: "list",
+      data: [],
+    };
+  }
+};
+
+const handleModelRequest: RequestHandler = async (_req, res) => {
+  try {
+    const models = await getModelsResponse();
+    res.status(200).json(models);
+  } catch (error) {
+    if (error instanceof Error) {
+      log.error(
+        { errorMessage: error.message, stack: error.stack },
+        "Error handling model request"
+      );
+    } else {
+      log.error({ error }, "Unknown error handling model request");
+    }
+    res.status(500).json({ error: "Failed to fetch models" });
+  }
+};
+
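The flatMap above means one upstream Qwen3 entry is advertised as two proxy-managed variants. Illustrative output for an upstream "qwen3-32b" (fields abbreviated):

  const advertised = [
    { id: "qwen3-32b-thinking",    proxy_managed: true, display_name: "qwen3-32b (Thinking Mode)" },
    { id: "qwen3-32b-nonthinking", proxy_managed: true, display_name: "qwen3-32b (Standard Mode)" },
  ];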
+// Function to prepare messages for Qwen API
+function prepareMessages(req: Request) {
+  if (req.body.messages && Array.isArray(req.body.messages)) {
+    req.body.messages = normalizeMessages(req.body.messages);
+  }
+}
+
+// Function to handle thinking capability for Qwen models
+function handleThinkingCapability(req: Request) {
+  const model = req.body.model;
+
+  // Special handling for our proxy-managed variants
+  if (isThinkingVariant(model)) {
+    // Set the base model name without the suffix
+    req.body.model = getBaseModelName(model);
+    // Force enable thinking for the -thinking variant
+    req.body.enable_thinking = true;
+
+    // Log the transformation
+    log.debug(
+      { originalModel: model, transformedModel: req.body.model, enableThinking: true },
+      "Transformed request for thinking variant"
+    );
+    return;
+  }
+
+  if (isNonThinkingVariant(model)) {
+    // Set the base model name without the suffix
+    req.body.model = getBaseModelName(model);
+    // Force disable thinking for the -nonthinking variant
+    req.body.enable_thinking = false;
+
+    // Log the transformation
+    log.debug(
+      { originalModel: model, transformedModel: req.body.model, enableThinking: false },
+      "Transformed request for non-thinking variant"
+    );
+    return;
+  }
+
+  // For standard models with thinking capability
+  if (isQwenThinkingModel(model) && req.body.stream === true) {
+    // Only add enable_thinking if it's not already set
+    if (req.body.enable_thinking === undefined) {
+      req.body.enable_thinking = false; // Default to false, let users explicitly enable it
+    }
+
+    // If thinking_budget is provided but enable_thinking is false, enable thinking
+    if (req.body.thinking_budget !== undefined && req.body.enable_thinking === false) {
+      req.body.enable_thinking = true;
+    }
+  } else if (isQwenThinkingModel(model) && req.body.stream !== true) {
+    // For non-streaming requests with thinking-capable models, always disable thinking
+    req.body.enable_thinking = false;
+  }
+}
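Sketch of the request rewrite the variant handling performs, assuming getBaseModelName strips the proxy-managed suffix (its implementation is not in this diff):

  const body = { model: "qwen3-32b-thinking", stream: true, messages: [] };
  // after handleThinkingCapability(req):
  //   body.model === "qwen3-32b"
  //   body.enable_thinking === true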
+
+// Function to remove parameters not supported by Qwen models
+function removeUnsupportedParameters(req: Request) {
+  // Remove parameters that Qwen doesn't support
+  if (req.body.logit_bias !== undefined) {
+    delete req.body.logit_bias;
+  }
+
+  if (req.body.top_logprobs !== undefined) {
+    delete req.body.top_logprobs;
+  }
+
+  // Logging for debugging
+  if (process.env.NODE_ENV !== 'production') {
+    log.debug({ body: req.body }, "Request after parameter cleanup");
+  }
+}
+
+// Set up count token functionality for Qwen models
+function countQwenTokens(req: Request) {
+  const model = req.body.model;
+
+  if (isQwenModel(model)) {
+    // Count tokens using prompt tokens (simplified)
+    if (req.promptTokens) {
+      req.log.debug(
+        { tokens: req.promptTokens },
+        "Estimated token count for Qwen prompt"
+      );
+    }
+  }
+}
+
+const qwenProxy = createQueuedProxyMiddleware({
+  mutations: [
+    addKey,
+    finalizeBody
+  ],
+  target: "https://dashscope-intl.aliyuncs.com/compatible-mode",
+  blockingResponseHandler: qwenResponseHandler,
+});
+
+const qwenRouter = Router();
+
+qwenRouter.post(
+  "/v1/chat/completions",
+  ipLimiter,
+  createPreprocessorMiddleware(
+    { inApi: "openai", outApi: "openai", service: "qwen" },
+    { afterTransform: [ prepareMessages, handleThinkingCapability, removeUnsupportedParameters, countQwenTokens ] }
+  ),
+  qwenProxy
+);
+
+qwenRouter.post(
+  "/v1/embeddings",
+  ipLimiter,
+  createPreprocessorMiddleware(
+    { inApi: "openai", outApi: "openai", service: "qwen" },
+    { afterTransform: [] }
+  ),
+  qwenProxy
+);
+
+qwenRouter.get("/v1/models", handleModelRequest);
+
+export const qwen = qwenRouter;
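A hypothetical client call against the new router; the hostname, mount point, and auth scheme below are placeholders, not confirmed by the patch:

  await fetch("https://proxy.example.com/qwen/v1/chat/completions", {
    method: "POST",
    headers: { "Content-Type": "application/json", Authorization: "Bearer <user-token>" },
    body: JSON.stringify({
      model: "qwen3-32b-thinking", // proxy-managed variant; rewritten upstream
      stream: true,                // thinking is only honored on streaming requests
      messages: [{ role: "user", content: "hello" }],
    }),
  });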
+15 -32
@@ -1,14 +1,6 @@
 import { Request, Response, NextFunction } from "express";
 import { config } from "../config";

-export const SHARED_IP_ADDRESSES = new Set([
-  // Agnai.chat
-  "157.230.249.32", // old
-  "157.245.148.56",
-  "174.138.29.50",
-  "209.97.162.44",
-]);
-
 const ONE_MINUTE_MS = 60 * 1000;

 type Timestamp = number;
@@ -20,7 +12,10 @@ const exemptedRequests: Timestamp[] = [];
 const isRecentAttempt = (now: Timestamp) => (attempt: Timestamp) =>
   attempt > now - ONE_MINUTE_MS;

-const getTryAgainInMs = (ip: string, type: "text" | "image") => {
+/**
+ * Returns duration in seconds to wait before retrying for Retry-After header.
+ */
+const getRetryAfter = (ip: string, type: "text" | "image") => {
   const now = Date.now();
   const attempts = lastAttempts.get(ip) || [];
   const validAttempts = attempts.filter(isRecentAttempt(now));
@@ -29,7 +24,7 @@ const getTryAgainInMs = (ip: string, type: "text" | "image") => {
     type === "text" ? config.textModelRateLimit : config.imageModelRateLimit;

   if (validAttempts.length >= limit) {
-    return validAttempts[0] - now + ONE_MINUTE_MS;
+    return (validAttempts[0] - now + ONE_MINUTE_MS) / 1000;
   } else {
     lastAttempts.set(ip, [...validAttempts, now]);
     return 0;
@@ -96,22 +91,11 @@ export const ipLimiter = async (
   if (!textLimit && !imageLimit) return next();
   if (req.user?.type === "special") return next();

-  // Exempts Agnai.chat from IP-based rate limiting because its IPs are shared
-  // by many users. Instead, the request queue will limit the number of such
-  // requests that may wait in the queue at a time, and sorts them to the end to
-  // let individual users go first.
-  if (SHARED_IP_ADDRESSES.has(req.ip)) {
-    exemptedRequests.push(Date.now());
-    req.log.info(
-      { ip: req.ip, recentExemptions: exemptedRequests.length },
-      "Exempting Agnai request from rate limiting."
-    );
-    return next();
-  }
-
-  const type = (req.baseUrl + req.path).includes("openai-image")
-    ? "image"
-    : "text";
+  const path = req.baseUrl + req.path;
+  const type =
+    path.includes("openai-image") || path.includes("images/generations")
+      ? "image"
+      : "text";
   const limit = type === "image" ? imageLimit : textLimit;

   // If user is authenticated, key rate limiting by their token. Otherwise, key
@@ -123,15 +107,14 @@ export const ipLimiter = async (
   res.set("X-RateLimit-Remaining", remaining.toString());
   res.set("X-RateLimit-Reset", reset.toString());

-  const tryAgainInMs = getTryAgainInMs(rateLimitKey, type);
-  if (tryAgainInMs > 0) {
-    res.set("Retry-After", tryAgainInMs.toString());
+  const retryAfterTime = getRetryAfter(rateLimitKey, type);
+  if (retryAfterTime > 0) {
+    const waitSec = Math.ceil(retryAfterTime).toString();
+    res.set("Retry-After", waitSec);
     res.status(429).json({
       error: {
         type: "proxy_rate_limited",
-        message: `This model type is rate limited to ${limit} prompts per minute. Please try again in ${Math.ceil(
-          tryAgainInMs / 1000
-        )} seconds.`,
+        message: `This model type is rate limited to ${limit} prompts per minute. Please try again in ${waitSec} seconds.`,
       },
     });
   } else {
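Worked example of the changed return value (illustrative numbers): the old code put a raw millisecond count in the Retry-After header, which expects seconds.

  // limit = 4 requests/min, oldest valid attempt was 15s ago
  const now = Date.now();
  const oldest = now - 15_000;                        // validAttempts[0]
  const retryAfter = (oldest - now + 60_000) / 1000;  // (-15000 + 60000) / 1000 = 45
  // Math.ceil(45) -> "Retry-After: 45" (seconds); the old code sent 45000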
+35 -19
@@ -1,42 +1,65 @@
-import express, { Request, Response, NextFunction } from "express";
+import express from "express";
-import { gatekeeper } from "./gatekeeper";
-import { checkRisuToken } from "./check-risu-token";
-import { openai } from "./openai";
-import { openaiImage } from "./openai-image";
+import { addV1 } from "./add-v1";
 import { anthropic } from "./anthropic";
-import { googleAI } from "./google-ai";
-import { mistralAI } from "./mistral-ai";
 import { aws } from "./aws";
 import { azure } from "./azure";
+import { checkRisuToken } from "./check-risu-token";
+import { gatekeeper } from "./gatekeeper";
+import { gcp } from "./gcp";
+import { googleAI } from "./google-ai";
+import { mistralAI } from "./mistral-ai";
+import { openai } from "./openai";
+import { openaiImage } from "./openai-image";
+import { deepseek } from "./deepseek";
+import { xai } from "./xai";
+import { cohere } from "./cohere";
+import { qwen } from "./qwen";
+import { moonshot } from "./moonshot";
 import { sendErrorToClient } from "./middleware/response/error-generator";

 const proxyRouter = express.Router();

+// Remove `expect: 100-continue` header from requests due to incompatibility
+// with node-http-proxy.
 proxyRouter.use((req, _res, next) => {
   if (req.headers.expect) {
-    // node-http-proxy does not like it when clients send `expect: 100-continue`
-    // and will stall. none of the upstream APIs use this header anyway.
     delete req.headers.expect;
   }
   next();
 });

+// Apply body parsers.
 proxyRouter.use(
   express.json({ limit: "100mb" }),
   express.urlencoded({ extended: true, limit: "100mb" })
 );

+// Apply auth/rate limits.
 proxyRouter.use(gatekeeper);
 proxyRouter.use(checkRisuToken);

+// Initialize request queue metadata.
 proxyRouter.use((req, _res, next) => {
   req.startTime = Date.now();
   req.retryCount = 0;
   next();
 });

+// Proxy endpoints.
 proxyRouter.use("/openai", addV1, openai);
 proxyRouter.use("/openai-image", addV1, openaiImage);
 proxyRouter.use("/anthropic", addV1, anthropic);
 proxyRouter.use("/google-ai", addV1, googleAI);
 proxyRouter.use("/mistral-ai", addV1, mistralAI);
-proxyRouter.use("/aws/claude", addV1, aws);
+proxyRouter.use("/aws", aws);
+proxyRouter.use("/gcp/claude", addV1, gcp);
 proxyRouter.use("/azure/openai", addV1, azure);
+proxyRouter.use("/deepseek", addV1, deepseek);
+proxyRouter.use("/xai", addV1, xai);
+proxyRouter.use("/cohere", addV1, cohere);
+proxyRouter.use("/qwen", addV1, qwen);
+proxyRouter.use("/moonshot", addV1, moonshot);

 // Redirect browser requests to the homepage.
 proxyRouter.get("*", (req, res, next) => {
   const isBrowser = req.headers["user-agent"]?.includes("Mozilla");
@@ -46,7 +69,8 @@ proxyRouter.get("*", (req, res, next) => {
     next();
   }
 });
-// Handle 404s.
+
+// Send a fake client error if user specifies an invalid proxy endpoint.
 proxyRouter.use((req, res) => {
   sendErrorToClient({
     req,
@@ -67,11 +91,3 @@ proxyRouter.use((req, res) => {
 });

 export { proxyRouter as proxyRouter };
-
-function addV1(req: Request, res: Response, next: NextFunction) {
-  // Clients don't consistently use the /v1 prefix so we'll add it for them.
-  if (!req.path.startsWith("/v1/")) {
-    req.url = `/v1${req.url}`;
-  }
-  next();
-}
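For reference, the path normalization addV1 performs (the removed inline version is shown above; the new ./add-v1 module is assumed to behave the same):

  // Illustrative: what addV1 does to an incoming URL.
  const url = "/chat/completions";
  const normalized = url.startsWith("/v1/") ? url : `/v1${url}`;
  // normalized === "/v1/chat/completions"; a URL already under /v1/ is untouched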
@@ -0,0 +1,394 @@
+import { Request, RequestHandler, Router } from "express";
+import { createPreprocessorMiddleware } from "./middleware/request";
+import { ipLimiter } from "./rate-limit";
+import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
+import { addKey, finalizeBody } from "./middleware/request";
+import { ProxyResHandlerWithBody } from "./middleware/response";
+import axios from "axios";
+import { XaiKey, keyPool } from "../shared/key-management";
+import { isGrokVisionModel, isGrokImageGenModel, isGrokReasoningModel, isGrokReasoningEffortModel, isGrokReasoningContentModel } from "../shared/api-schemas/xai";
+
+let modelsCache: any = null;
+let modelsCacheTime = 0;
+
+const xaiResponseHandler: ProxyResHandlerWithBody = async (
+  _proxyRes,
+  req,
+  res,
+  body
+) => {
+  if (typeof body !== "object") {
+    throw new Error("Expected body to be an object");
+  }
+
+  // Preserve the original body (including potential reasoning_content) for grok-3-mini models
+  // which support the reasoning feature
+  let newBody = body;
+
+  // Check if this is an image generation response (data array with url or b64_json)
+  if (body.data && Array.isArray(body.data)) {
+    req.log.debug(
+      { imageCount: body.data.length },
+      "Grok image generation response detected"
+    );
+
+    // Transform the image generation response into a chat completion format
+    // that SillyTavern can display
+    const images = body.data;
+
+    // Create a chat completion style response
+    newBody = {
+      id: `grok-image-${Date.now()}`,
+      object: "chat.completion",
+      created: Math.floor(Date.now() / 1000),
+      model: req.body.model,
+      choices: images.map((image, index) => {
+        // Create markdown image content for each generated image
+        let content = '';
+
+        // Add the image using data URL for b64_json
+        if (image.b64_json) {
+          // If it doesn't start with data:image/, add the prefix
+          const imgData = image.b64_json.startsWith('data:image/')
+            ? image.b64_json
+            : `data:image/jpeg;base64,${image.b64_json}`;
+
+          content = `![generated image](${imgData})`;
+        }
+        // Fall back to URL if b64_json isn't available
+        else if (image.url) {
+          content = `![generated image](${image.url})`;
+        }
+
+        return {
+          index,
+          message: {
+            role: "assistant",
+            content
+          },
+          finish_reason: "stop"
+        };
+      }),
+      usage: body.usage || { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 }
+    };
+
+    req.log.debug("Transformed image generation response to chat format");
+  }
+  // Check if this is a chat completion response with choices
+  else if (body.choices && Array.isArray(body.choices) && body.choices.length > 0) {
+    // Make sure each choice's message is preserved, especially reasoning_content
+    // Only grok-3-mini models return reasoning_content
+    const model = req.body.model;
+    if (isGrokReasoningContentModel(model)) {
+      body.choices.forEach(choice => {
+        if (choice.message && choice.message.reasoning_content) {
+          req.log.debug(
+            { reasoning_length: choice.message.reasoning_content.length },
+            "Grok reasoning content detected"
+          );
+        }
+      });
+    }
+  }
+
+  res.status(200).json({ ...newBody, proxy: body.proxy });
+};
+
+const getModelsResponse = async () => {
+  // Return cache if less than 1 minute old
+  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
+    return modelsCache;
+  }
+
+  try {
+    // Get an XAI key directly using keyPool.get()
+    const modelToUse = "grok-3"; // Use any XAI model here - just for key selection
+    const xaiKey = keyPool.get(modelToUse, "xai") as XaiKey;
+
+    if (!xaiKey || !xaiKey.key) {
+      throw new Error("Failed to get valid XAI key");
+    }
+
+    // Fetch models from XAI API with authorization
+    const response = await axios.get("https://api.x.ai/v1/models", {
+      headers: {
+        "Content-Type": "application/json",
+        "Authorization": `Bearer ${xaiKey.key}`
+      },
+    });
+
+    // If successful, update the cache
+    if (response.data && response.data.data) {
+      modelsCache = {
+        object: "list",
+        data: response.data.data.map((model: any) => ({
+          id: model.id,
+          object: "model",
+          owned_by: "xai",
+        })),
+      };
+    } else {
+      throw new Error("Unexpected response format from XAI API");
+    }
+  } catch (error) {
+    console.error("Error fetching XAI models:", error);
+    throw error; // No fallback - error will be passed to caller
+  }
+
+  modelsCacheTime = new Date().getTime();
+  return modelsCache;
+};
+
+const handleModelRequest: RequestHandler = async (_req, res) => {
+  try {
+    const modelsResponse = await getModelsResponse();
+    res.status(200).json(modelsResponse);
+  } catch (error) {
+    console.error("Error in handleModelRequest:", error);
+    res.status(500).json({ error: "Failed to fetch models" });
+  }
+};
+
+const xaiProxy = createQueuedProxyMiddleware({
+  mutations: [addKey, finalizeBody],
+  target: "https://api.x.ai",
+  blockingResponseHandler: xaiResponseHandler,
+});
+
+const xaiRouter = Router();
+
+// combines all the assistant messages at the end of the context and adds the
+// beta 'prefix' option, makes prefills work the same way they work for Claude
+function enablePrefill(req: Request) {
+  // If you want to disable
+  if (process.env.NO_XAI_PREFILL) return
+
+  // Skip if no messages (e.g., for image generation requests)
+  if (!req.body.messages || !Array.isArray(req.body.messages)) return;
+
+  const msgs = req.body.messages;
+  if (msgs.length === 0 || msgs.at(-1)?.role !== 'assistant') return;
+
+  let i = msgs.length - 1;
+  let content = '';
+
+  while (i >= 0 && msgs[i].role === 'assistant') {
+    // maybe we should also add a newline between messages? no for now.
+    content = msgs[i--].content + content;
+  }
+
+  msgs.splice(i + 1, msgs.length, { role: 'assistant', content, prefix: true });
+}
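Worked example of the prefill merge (illustrative messages):

  const messages = [
    { role: "user", content: "Write a haiku." },
    { role: "assistant", content: "Cherry " },
    { role: "assistant", content: "blossoms fall" },
  ];
  // after enablePrefill(req), req.body.messages is:
  // [
  //   { role: "user", content: "Write a haiku." },
  //   { role: "assistant", content: "Cherry blossoms fall", prefix: true },
  // ]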
+
+// Function to redirect image model requests to the image generations endpoint
+function redirectImageRequests(req: Request) {
+  const model = req.body.model;
+
+  // If this is an image generation model but the endpoint is chat/completions,
+  // we need to transform the request to match the image generations endpoint format
+  if (isGrokImageGenModel(model) && req.path === "/v1/chat/completions") {
+    req.log.info(`Redirecting ${model} request to /v1/images/generations endpoint`);
+
+    // Save original URL and path for later
+    const originalUrl = req.url;
+    const originalPath = req.path;
+
+    // Change the request URL and path to the images endpoint
+    req.url = req.url.replace("/v1/chat/completions", "/v1/images/generations");
+    Object.defineProperty(req, 'path', { value: "/v1/images/generations" });
+
+    // Extract the prompt from the messages if present
+    if (req.body.messages && Array.isArray(req.body.messages)) {
+      // Find the last user message and use its content as the prompt
+      for (let i = req.body.messages.length - 1; i >= 0; i--) {
+        const msg = req.body.messages[i];
+        if (msg.role === 'user') {
+          // Extract text content
+          let prompt = "";
+          if (typeof msg.content === 'string') {
+            prompt = msg.content;
+          } else if (Array.isArray(msg.content)) {
+            // Collect all text content items
+            prompt = msg.content
+              .filter((item: any) => item.type === 'text')
+              .map((item: any) => item.text)
+              .join(" ");
+          }
+
+          if (prompt) {
+            // Create a new request body for image generation
+            req.body = {
+              model: model,
+              prompt: prompt,
+              n: req.body.n || 1,
+              response_format: "b64_json", // Always use b64_json for better client compatibility
+              user: req.body.user
+            };
+            req.log.debug({ newBody: req.body }, "Transformed request for image generation");
+            break;
+          }
+        }
+      }
+    }
+
+    // Log transformation
+    req.log.info(`Request transformed from ${originalUrl} to ${req.url}`);
+  }
+}
+
|
// Log transformation
|
||||||
|
req.log.info(`Request transformed from ${originalUrl} to ${req.url}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
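// Editorial sketch (not part of the original file): roughly how
// redirectImageRequests rewrites a chat-formatted request. The model name is
// one the file's isGrokImageGenModel check is assumed to match.
function demoImageRedirectShape() {
  const chatBody = {
    model: "grok-2-image",
    messages: [{ role: "user", content: "A watercolor fox" }],
  };
  // After the transform, the request targets /v1/images/generations with:
  const imageBody = {
    model: chatBody.model,
    prompt: "A watercolor fox",
    n: 1,
    response_format: "b64_json",
    user: undefined,
  };
  return { chatBody, imageBody };
}
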
// Function to remove parameters not supported by X.AI/Grok models and handle special cases
function removeUnsupportedParameters(req: Request) {
  const model = req.body.model;

  // Check if this is a reasoning model (grok-3-mini or grok-4-0709)
  const isReasoningModel = isGrokReasoningModel(model);
  const isReasoningEffortModel = isGrokReasoningEffortModel(model);

  if (isReasoningModel) {
    // List of parameters not supported by reasoning models
    const unsupportedParams = [
      'presence_penalty',
      'frequency_penalty',
      'stop' // stop parameter is not supported by reasoning models
    ];

    for (const param of unsupportedParams) {
      if (req.body[param] !== undefined) {
        req.log.info(`Removing unsupported parameter for reasoning model ${model}: ${param}`);
        delete req.body[param];
      }
    }

    // Handle reasoning_effort parameter - only supported by grok-3-mini
    if (isReasoningEffortModel) {
      // This is grok-3-mini, handle reasoning_effort
      if (req.body.reasoning_effort) {
        // If reasoning_effort is already present in the request, validate it
        if (!['low', 'medium', 'high'].includes(req.body.reasoning_effort)) {
          req.log.warn(`Invalid reasoning_effort value: ${req.body.reasoning_effort}, removing it`);
          delete req.body.reasoning_effort;
        }
      } else {
        // Default to low reasoning effort if not specified
        req.body.reasoning_effort = 'low';
        req.log.debug(`Setting default reasoning_effort=low for Grok-3-mini model`);
      }
    } else {
      // This is grok-4-0709 or another reasoning model that doesn't support reasoning_effort
      if (req.body.reasoning_effort !== undefined) {
        req.log.info(`Removing unsupported reasoning_effort parameter for model ${model}`);
        delete req.body.reasoning_effort;
      }
    }
  }

  // Special handling for vision models
  if (isGrokVisionModel(model)) {
    req.log.debug(`Detected Grok vision model: ${model}`);

    // Check that messages have proper format for vision models
    if (req.body.messages && Array.isArray(req.body.messages)) {
      req.body.messages.forEach((msg: { content: string | any[] }) => {
        // If content is a string but the model is vision-capable,
        // convert it to an array with a single text item for consistency
        if (typeof msg.content === 'string') {
          req.log.debug('Converting string content to array format for vision model');
          msg.content = [{ type: 'text', text: msg.content }];
        }
      });
    }
  }

  // Image generation models are handled by the separate endpoint below
}

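// Editorial sketch (not part of the original file): the effect of
// removeUnsupportedParameters on a grok-3-mini request body.
function demoReasoningParamCleanup() {
  const before: Record<string, unknown> = {
    model: "grok-3-mini",
    presence_penalty: 0.5,
    frequency_penalty: 0.2,
    stop: ["\n\n"],
  };
  // presence_penalty, frequency_penalty, and stop are deleted, and a default
  // reasoning_effort of 'low' is applied:
  const after = { model: "grok-3-mini", reasoning_effort: "low" };
  return { before, after };
}
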
// Handler for image generation requests
const handleImageGenerationRequest: RequestHandler = async (req, res) => {
  try {
    // Get an XAI key directly for image generation
    const modelToUse = req.body.model || "grok-2-image"; // Default model
    const xaiKey = keyPool.get(modelToUse, "xai") as XaiKey;

    if (!xaiKey || !xaiKey.key) {
      throw new Error("Failed to get valid XAI key for image generation");
    }

    // Forward the request to XAI API
    const response = await axios.post("https://api.x.ai/v1/images/generations", req.body, {
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${xaiKey.key}`
      },
    });

    // Return the response directly
    res.status(200).json(response.data);
  } catch (error: any) {
    req.log.error({ error }, "Error in image generation request");
    // Pass through the error response if available
    if (error.response && error.response.data) {
      res.status(error.response.status || 500).json(error.response.data);
    } else {
      res.status(500).json({ error: "Failed to generate image", message: error.message });
    }
  }
};

// Set up count token functionality for XAI models
function countXaiTokens(req: Request) {
  const model = req.body.model;

  // For vision models, estimate image token usage
  if (isGrokVisionModel(model) && req.body.messages && Array.isArray(req.body.messages)) {
    // Initialize image count
    let imageCount = 0;

    // Count images in the request
    for (const msg of req.body.messages) {
      if (Array.isArray(msg.content)) {
        const imagesInMessage = msg.content.filter(
          (item: any) => item.type === "image_url"
        ).length;
        imageCount += imagesInMessage;
      }
    }

    // Apply token estimations for images
    // Each image is approximately 1500 tokens based on documentation
    const TOKENS_PER_IMAGE = 1500;
    const imageTokens = imageCount * TOKENS_PER_IMAGE;

    if (imageTokens > 0) {
      req.log.debug(
        { imageCount, tokenEstimate: imageTokens },
        "Estimated token count for Grok vision images"
      );

      // Add the image tokens to the existing token count if available
      if (req.promptTokens) {
        req.promptTokens += imageTokens;
      }
    }
  }
}

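// Editorial sketch (not part of the original file): the arithmetic used by
// countXaiTokens. Two image_url items add 2 * 1500 = 3000 tokens to the
// running prompt-token estimate.
function demoImageTokenEstimate(imageCount: number): number {
  const TOKENS_PER_IMAGE = 1500;
  return imageCount * TOKENS_PER_IMAGE; // demoImageTokenEstimate(2) === 3000
}
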
xaiRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "xai" },
    { afterTransform: [ redirectImageRequests, enablePrefill, removeUnsupportedParameters, countXaiTokens ] }
  ),
  xaiProxy
);

// Add endpoint for image generation
xaiRouter.post(
  "/v1/images/generations",
  ipLimiter,
  handleImageGenerationRequest
);

xaiRouter.get("/v1/models", handleModelRequest);

export const xai = xaiRouter;
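For context, a minimal sketch of how this router might be mounted in the proxy's Express app. The mount path here is an assumption; the real path comes from the app's proxyEndpointRoute configuration.

import express from "express";
import { xai } from "./proxy/xai"; // import path assumed for the sketch

const app = express();
app.use("/proxy/xai", xai); // clients then call POST /proxy/xai/v1/chat/completions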
+19 -5
@@ -23,6 +23,7 @@ import { init as initTokenizers } from "./shared/tokenization";
 import { checkOrigin } from "./proxy/check-origin";
 import { sendErrorToClient } from "./proxy/middleware/response/error-generator";
 import { initializeDatabase, getDatabase } from "./shared/database";
+import { initializeFirebase } from "./shared/firebase";
 
 const PORT = config.port;
 const BIND_ADDRESS = config.bindAddress;
@@ -49,6 +50,7 @@ app.use(
       // Don't log the prompt text on transform errors
       "body.messages",
       "body.prompt",
+      "body.contents",
     ],
     censor: "********",
   },
@@ -87,6 +89,15 @@ app.use(blacklist);
 app.use(checkOrigin);
 
 app.use("/admin", adminRouter);
+app.use((req, _, next) => {
+  // For whatever reason SillyTavern just ignores the path a user provides
+  // when using Google AI with reverse proxy. We'll fix it here.
+  if (req.path.match(/^\/v1(alpha|beta)\/models(\/|$)/)) {
+    req.url = `${config.proxyEndpointRoute}/google-ai${req.url}`;
+    return next();
+  }
+  next();
+});
 app.use(config.proxyEndpointRoute, proxyRouter);
 app.use("/user", userRouter);
 if (config.staticServiceInfo) {
@@ -127,6 +138,12 @@ async function start() {
   logger.info("Checking configs and external dependencies...");
   await assertConfigIsValid();
 
+  if (config.gatekeeperStore.startsWith("firebase")) {
+    logger.info("Testing Firebase connection...");
+    await initializeFirebase();
+    logger.info("Firebase connection successful.");
+  }
+
   keyPool.init();
 
   await initTokenizers();
@@ -156,7 +173,7 @@ async function start() {
   app.listen(PORT, BIND_ADDRESS, () => {
     logger.info(
       { port: PORT, interface: BIND_ADDRESS },
-      "Now listening for connections."
+      "Server ready to accept connections."
     );
     registerUncaughtExceptionHandler();
   });
@@ -179,10 +196,7 @@ function cleanup() {
   process.exit(0);
 }
 
-process.on("exit", () => cleanup());
-process.on("SIGHUP", () => process.exit(128 + 1));
-process.on("SIGINT", () => process.exit(128 + 2));
-process.on("SIGTERM", () => process.exit(128 + 15));
+process.on("SIGINT", cleanup);
 
 function registerUncaughtExceptionHandler() {
   process.on("uncaughtException", (err: any) => {
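To illustrate the SillyTavern fix added above: the regex only matches bare Google AI model paths, so already-routed URLs pass through untouched. A small sketch:

const pattern = /^\/v1(alpha|beta)\/models(\/|$)/;
const hit = pattern.test("/v1beta/models/gemini-pro:generateContent"); // true, will be rewritten
const miss = pattern.test("/proxy/google-ai/v1beta/models/m"); // false, already routed
// A matching URL becomes `${config.proxyEndpointRoute}/google-ai` + the original URL.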
+422 -135
@@ -2,15 +2,20 @@ import { config, listConfig } from "./config";
 import {
   AnthropicKey,
   AwsBedrockKey,
-  AzureOpenAIKey,
-  GoogleAIKey,
+  DeepseekKey,
+  GcpKey,
   keyPool,
   OpenAIKey,
+  XaiKey,
+  CohereKey,
+  QwenKey,
+  MoonshotKey,
 } from "./shared/key-management";
 import {
   AnthropicModelFamily,
   assertIsKnownModelFamily,
   AwsBedrockModelFamily,
+  GcpModelFamily,
   AzureOpenAIModelFamily,
   GoogleAIModelFamily,
   LLM_SERVICES,
@@ -19,27 +24,117 @@ import {
   MODEL_FAMILY_SERVICE,
   ModelFamily,
   OpenAIModelFamily,
+  DeepseekModelFamily,
+  XaiModelFamily,
+  CohereModelFamily,
+  QwenModelFamily,
+  MoonshotModelFamily,
 } from "./shared/models";
 import { getCostSuffix, getTokenCostUsd, prettyTokens } from "./shared/stats";
 import { getUniqueIps } from "./proxy/rate-limit";
 import { assertNever } from "./shared/utils";
 import { getEstimatedWaitTime, getQueueLength } from "./proxy/queue";
-import { MistralAIKey } from "./shared/key-management/mistral-ai/provider";
 
 const CACHE_TTL = 2000;
+
+// Define the preferred order for model families in the service info display
+// This ensures logical grouping (GPT-4 models together, then GPT-4.1, then GPT-5, etc.)
+const MODEL_FAMILY_ORDER: ModelFamily[] = [
+  // OpenAI models in logical order
+  "turbo",
+  "gpt4",
+  "gpt4-32k",
+  "gpt4-turbo",
+  "gpt4o",
+  "gpt41",
+  "gpt41-mini",
+  "gpt41-nano",
+  "gpt45",
+  "gpt5",
+  "gpt5-mini",
+  "gpt5-nano",
+  "gpt5-chat-latest",
+  "o1",
+  "o1-mini",
+  "o1-pro",
+  "o3",
+  "o3-mini",
+  "o3-pro",
+  "o4-mini",
+  "codex-mini",
+  "dall-e",
+  "gpt-image",
+  // Azure OpenAI models (same order as OpenAI)
+  "azure-turbo",
+  "azure-gpt4",
+  "azure-gpt4-32k",
+  "azure-gpt4-turbo",
+  "azure-gpt4o",
+  "azure-gpt41",
+  "azure-gpt41-mini",
+  "azure-gpt41-nano",
+  "azure-gpt45",
+  "azure-gpt5",
+  "azure-gpt5-mini",
+  "azure-gpt5-nano",
+  "azure-gpt5-chat-latest",
+  "azure-o1",
+  "azure-o1-mini",
+  "azure-o1-pro",
+  "azure-o3",
+  "azure-o3-mini",
+  "azure-o3-pro",
+  "azure-o4-mini",
+  "azure-codex-mini",
+  "azure-dall-e",
+  "azure-gpt-image",
+  // Anthropic models
+  "claude",
+  "claude-opus",
+  // Google AI models
+  "gemini-flash",
+  "gemini-pro",
+  "gemini-ultra",
+  // Mistral AI models
+  "mistral-tiny",
+  "mistral-small",
+  "mistral-medium",
+  "mistral-large",
+  // AWS Bedrock models
+  "aws-claude",
+  "aws-claude-opus",
+  "aws-mistral-tiny",
+  "aws-mistral-small",
+  "aws-mistral-medium",
+  "aws-mistral-large",
+  // GCP models
+  "gcp-claude",
+  "gcp-claude-opus",
+  // Other services
+  "deepseek",
+  "xai",
+  "cohere",
+  "qwen",
+  "moonshot"
+];
 
 type KeyPoolKey = ReturnType<typeof keyPool.list>[0];
 const keyIsOpenAIKey = (k: KeyPoolKey): k is OpenAIKey =>
   k.service === "openai";
-const keyIsAzureKey = (k: KeyPoolKey): k is AzureOpenAIKey =>
-  k.service === "azure";
 const keyIsAnthropicKey = (k: KeyPoolKey): k is AnthropicKey =>
   k.service === "anthropic";
-const keyIsGoogleAIKey = (k: KeyPoolKey): k is GoogleAIKey =>
-  k.service === "google-ai";
-const keyIsMistralAIKey = (k: KeyPoolKey): k is MistralAIKey =>
-  k.service === "mistral-ai";
 const keyIsAwsKey = (k: KeyPoolKey): k is AwsBedrockKey => k.service === "aws";
+const keyIsGcpKey = (k: KeyPoolKey): k is GcpKey => k.service === "gcp";
+const keyIsDeepseekKey = (k: KeyPoolKey): k is DeepseekKey =>
+  k.service === "deepseek";
+const keyIsXaiKey = (k: KeyPoolKey): k is XaiKey =>
+  k.service === "xai";
+const keyIsCohereKey = (k: KeyPoolKey): k is CohereKey =>
+  k.service === "cohere";
+const keyIsQwenKey = (k: KeyPoolKey): k is QwenKey =>
+  k.service === "qwen";
+const keyIsMoonshotKey = (k: KeyPoolKey): k is MoonshotKey =>
+  k.service === "moonshot";
 
 /** Stats aggregated across all keys for a given service. */
 type ServiceAggregate = "keys" | "uncheckedKeys" | "orgs";
@@ -51,18 +146,31 @@ type ModelAggregates = {
   overQuota?: number;
   pozzed?: number;
   awsLogged?: number;
-  awsSonnet?: number;
-  awsHaiku?: number;
+  // needed to disambiguate aws-claude family's variants
+  awsClaude2?: number;
+  awsSonnet3?: number;
+  awsSonnet3_5?: number;
+  awsSonnet3_7?: number;
+  awsSonnet4?: number;
+  awsOpus3?: number;
+  awsOpus4?: number;
+  awsHaiku?: number;
+  gcpSonnet?: number;
+  gcpSonnet35?: number;
+  gcpHaiku?: number;
   queued: number;
-  queueTime: string;
-  tokens: number;
+  inputTokens: number; // Changed from tokens
+  outputTokens: number; // Added
+  legacyTokens?: number; // Added for migrated totals
 };
 /** All possible combinations of model family and aggregate type. */
 type ModelAggregateKey = `${ModelFamily}__${keyof ModelAggregates}`;
 
 type AllStats = {
   proompts: number;
-  tokens: number;
+  inputTokens: number; // Changed from tokens
+  outputTokens: number; // Added
+  legacyTokens?: number; // Added
   tokenCost: number;
 } & { [modelFamily in ModelFamily]?: ModelAggregates } & {
   [service in LLMService as `${service}__${ServiceAggregate}`]?: number;
@@ -86,8 +194,10 @@ type AnthropicInfo = BaseFamilyInfo & {
 };
 type AwsInfo = BaseFamilyInfo & {
   privacy?: string;
-  sonnetKeys?: number;
-  haikuKeys?: number;
+  enabledVariants?: string;
+};
+type GcpInfo = BaseFamilyInfo & {
+  enabledVariants?: string;
 };
 
 // prettier-ignore
@@ -95,12 +205,13 @@ export type ServiceInfo = {
   uptime: number;
   endpoints: {
     openai?: string;
-    openai2?: string;
+    deepseek?: string;
+    xai?: string;
     anthropic?: string;
-    "anthropic-claude-3"?: string;
     "google-ai"?: string;
     "mistral-ai"?: string;
-    aws?: string;
+    "aws"?: string;
+    gcp?: string;
     azure?: string;
     "openai-image"?: string;
     "azure-image"?: string;
|
|||||||
} & { [f in OpenAIModelFamily]?: OpenAIInfo }
|
} & { [f in OpenAIModelFamily]?: OpenAIInfo }
|
||||||
& { [f in AnthropicModelFamily]?: AnthropicInfo; }
|
& { [f in AnthropicModelFamily]?: AnthropicInfo; }
|
||||||
& { [f in AwsBedrockModelFamily]?: AwsInfo }
|
& { [f in AwsBedrockModelFamily]?: AwsInfo }
|
||||||
|
& { [f in GcpModelFamily]?: GcpInfo }
|
||||||
& { [f in AzureOpenAIModelFamily]?: BaseFamilyInfo; }
|
& { [f in AzureOpenAIModelFamily]?: BaseFamilyInfo; }
|
||||||
& { [f in GoogleAIModelFamily]?: BaseFamilyInfo }
|
& { [f in GoogleAIModelFamily]?: BaseFamilyInfo & { overQuotaKeys?: number } }
|
||||||
& { [f in MistralAIModelFamily]?: BaseFamilyInfo };
|
& { [f in MistralAIModelFamily]?: BaseFamilyInfo }
|
||||||
|
& { [f in DeepseekModelFamily]?: BaseFamilyInfo }
|
||||||
|
& { [f in XaiModelFamily]?: BaseFamilyInfo }
|
||||||
|
& { [f in CohereModelFamily]?: BaseFamilyInfo }
|
||||||
|
& { [f in QwenModelFamily]?: BaseFamilyInfo }
|
||||||
|
& { [f in MoonshotModelFamily]?: BaseFamilyInfo };
|
||||||
|
|
||||||
// https://stackoverflow.com/a/66661477
|
// https://stackoverflow.com/a/66661477
|
||||||
// type DeepKeyOf<T> = (
|
// type DeepKeyOf<T> = (
|
||||||
@@ -136,7 +253,6 @@ export type ServiceInfo = {
 const SERVICE_ENDPOINTS: { [s in LLMService]: Record<string, string> } = {
   openai: {
     openai: `%BASE%/openai`,
-    openai2: `%BASE%/openai/turbo-instruct`,
     "openai-image": `%BASE%/openai-image`,
   },
   anthropic: {
@@ -149,15 +265,34 @@ const SERVICE_ENDPOINTS: { [s in LLMService]: Record<string, string> } = {
     "mistral-ai": `%BASE%/mistral-ai`,
   },
   aws: {
-    aws: `%BASE%/aws/claude`,
+    "aws-claude": `%BASE%/aws/claude`,
+    "aws-mistral": `%BASE%/aws/mistral`,
+  },
+  gcp: {
+    gcp: `%BASE%/gcp/claude`,
   },
   azure: {
     azure: `%BASE%/azure/openai`,
     "azure-image": `%BASE%/azure/openai`,
   },
+  deepseek: {
+    deepseek: `%BASE%/deepseek`,
+  },
+  xai: {
+    xai: `%BASE%/xai`,
+  },
+  cohere: {
+    cohere: `%BASE%/cohere`,
+  },
+  qwen: {
+    qwen: `%BASE%/qwen`,
+  },
+  moonshot: {
+    moonshot: `%BASE%/moonshot`,
+  },
 };
 
-const modelStats = new Map<ModelAggregateKey, number>();
+const familyStats = new Map<ModelAggregateKey, number>();
 const serviceStats = new Map<keyof AllStats, number>();
 
 let cachedInfo: ServiceInfo | undefined;
@@ -174,7 +309,7 @@ export function buildInfo(baseUrl: string, forAdmin = false): ServiceInfo {
       .concat("turbo")
   );
 
-  modelStats.clear();
+  familyStats.clear();
   serviceStats.clear();
   keys.forEach(addKeyToAggregates);
 
@@ -246,11 +381,14 @@ function getEndpoints(baseUrl: string, accessibleFamilies: Set<ModelFamily>) {
 type TrafficStats = Pick<ServiceInfo, "proompts" | "tookens" | "proomptersNow">;
 
 function getTrafficStats(): TrafficStats {
-  const tokens = serviceStats.get("tokens") || 0;
+  const inputTokens = serviceStats.get("inputTokens") || 0;
+  const outputTokens = serviceStats.get("outputTokens") || 0;
+  // const legacyTokens = serviceStats.get("legacyTokens") || 0; // Optional: include in total if desired
+  const totalTokens = inputTokens + outputTokens; // + legacyTokens;
   const tokenCost = serviceStats.get("tokenCost") || 0;
   return {
     proompts: serviceStats.get("proompts") || 0,
-    tookens: `${prettyTokens(tokens)}${getCostSuffix(tokenCost)}`,
+    tookens: `${prettyTokens(totalTokens)}${getCostSuffix(tokenCost)}`, // Simplified to show aggregate and cost
     ...(config.textModelRateLimit ? { proomptersNow: getUniqueIps() } : {}),
   };
 }
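A rough illustration of what the rewritten getTrafficStats reports. The exact rendering depends on prettyTokens and getCostSuffix from ./shared/stats, which are outside this diff:

// With 1,200,000 input and 300,000 output tokens and a $4.20 cost,
// totalTokens is 1,500,000 and `tookens` would render roughly as "1.50m ($4.20)".
const inputTokens = 1_200_000;
const outputTokens = 300_000;
const totalTokens = inputTokens + outputTokens; // 1_500_000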
@@ -266,16 +404,18 @@ function getServiceModelStats(accessibleFamilies: Set<ModelFamily>) {
     if (!hasKeys) continue;
 
     serviceInfo[`${service}Keys`] = hasKeys;
-    accessibleFamilies.forEach((f) => {
-      if (MODEL_FAMILY_SERVICE[f] === service) {
-        modelFamilyInfo[f] = getInfoForFamily(f);
-      }
-    });
 
     if (service === "openai" && config.checkKeys) {
       serviceInfo.openaiOrgs = getUniqueOpenAIOrgs(keyPool.list());
     }
   }
 
+  // Build model family info in the defined order for logical grouping
+  for (const family of MODEL_FAMILY_ORDER) {
+    if (accessibleFamilies.has(family)) {
+      modelFamilyInfo[family] = getInfoForFamily(family);
+    }
+  }
   return { serviceInfo, modelFamilyInfo };
 }
@@ -293,131 +433,229 @@ function increment<T extends keyof AllStats | ModelAggregateKey>(
 ) {
   map.set(key, (map.get(key) || 0) + delta);
 }
+const addToService = increment.bind(null, serviceStats);
+const addToFamily = increment.bind(null, familyStats);
 
 function addKeyToAggregates(k: KeyPoolKey) {
-  increment(serviceStats, "proompts", k.promptCount);
-  increment(serviceStats, "openai__keys", k.service === "openai" ? 1 : 0);
-  increment(serviceStats, "anthropic__keys", k.service === "anthropic" ? 1 : 0);
-  increment(serviceStats, "google-ai__keys", k.service === "google-ai" ? 1 : 0);
-  increment(
-    serviceStats,
-    "mistral-ai__keys",
-    k.service === "mistral-ai" ? 1 : 0
-  );
-  increment(serviceStats, "aws__keys", k.service === "aws" ? 1 : 0);
-  increment(serviceStats, "azure__keys", k.service === "azure" ? 1 : 0);
+  addToService("proompts", k.promptCount);
+  addToService("openai__keys", k.service === "openai" ? 1 : 0);
+  addToService("anthropic__keys", k.service === "anthropic" ? 1 : 0);
+  addToService("google-ai__keys", k.service === "google-ai" ? 1 : 0);
+  addToService("mistral-ai__keys", k.service === "mistral-ai" ? 1 : 0);
+  addToService("aws__keys", k.service === "aws" ? 1 : 0);
+  addToService("gcp__keys", k.service === "gcp" ? 1 : 0);
+  addToService("azure__keys", k.service === "azure" ? 1 : 0);
+  addToService("deepseek__keys", k.service === "deepseek" ? 1 : 0);
+  addToService("xai__keys", k.service === "xai" ? 1 : 0);
+  addToService("cohere__keys", k.service === "cohere" ? 1 : 0);
+  addToService("qwen__keys", k.service === "qwen" ? 1 : 0);
+  addToService("moonshot__keys", k.service === "moonshot" ? 1 : 0);
 
-  let sumTokens = 0;
+  let sumInputTokens = 0;
+  let sumOutputTokens = 0;
+  let sumLegacyTokens = 0; // Optional
   let sumCost = 0;
 
+  const incrementGenericFamilyStats = (f: ModelFamily) => {
+    const usage = k.tokenUsage?.[f];
+    let familyInputTokens = 0;
+    let familyOutputTokens = 0;
+    let familyLegacyTokens = 0;
+
+    if (usage) {
+      familyInputTokens = usage.input || 0;
+      familyOutputTokens = usage.output || 0;
+      if (usage.legacy_total && familyInputTokens === 0 && familyOutputTokens === 0) {
+        // This is a migrated key with no new usage, use legacy_total as input for cost
+        familyLegacyTokens = usage.legacy_total;
+        sumCost += getTokenCostUsd(f, usage.legacy_total, 0);
+      } else {
+        sumCost += getTokenCostUsd(f, familyInputTokens, familyOutputTokens);
+      }
+    }
+    // If no k.tokenUsage[f], tokens are 0, cost is 0.
+
+    sumInputTokens += familyInputTokens;
+    sumOutputTokens += familyOutputTokens;
+    sumLegacyTokens += familyLegacyTokens; // Optional
+
+    addToFamily(`${f}__inputTokens`, familyInputTokens);
+    addToFamily(`${f}__outputTokens`, familyOutputTokens);
+    if (familyLegacyTokens > 0) {
+      addToFamily(`${f}__legacyTokens`, familyLegacyTokens); // Optional
+    }
+    addToFamily(`${f}__revoked`, k.isRevoked ? 1 : 0);
+    addToFamily(`${f}__active`, k.isDisabled ? 0 : 1);
+  };
+
   switch (k.service) {
     case "openai":
       if (!keyIsOpenAIKey(k)) throw new Error("Invalid key type");
-      increment(
-        serviceStats,
-        "openai__uncheckedKeys",
-        Boolean(k.lastChecked) ? 0 : 1
-      );
+      addToService("openai__uncheckedKeys", Boolean(k.lastChecked) ? 0 : 1);
 
       k.modelFamilies.forEach((f) => {
-        const tokens = k[`${f}Tokens`];
-        sumTokens += tokens;
-        sumCost += getTokenCostUsd(f, tokens);
-        increment(modelStats, `${f}__tokens`, tokens);
-        increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
-        increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
-        increment(modelStats, `${f}__trial`, k.isTrial ? 1 : 0);
-        increment(modelStats, `${f}__overQuota`, k.isOverQuota ? 1 : 0);
+        incrementGenericFamilyStats(f);
+        addToFamily(`${f}__trial`, k.isTrial ? 1 : 0);
+        addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
       });
       break;
-    case "azure":
-      if (!keyIsAzureKey(k)) throw new Error("Invalid key type");
-      k.modelFamilies.forEach((f) => {
-        const tokens = k[`${f}Tokens`];
-        sumTokens += tokens;
-        sumCost += getTokenCostUsd(f, tokens);
-        increment(modelStats, `${f}__tokens`, tokens);
-        increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
-        increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
-      });
-      break;
-    case "anthropic": {
+    case "anthropic":
       if (!keyIsAnthropicKey(k)) throw new Error("Invalid key type");
+      addToService("anthropic__uncheckedKeys", Boolean(k.lastChecked) ? 0 : 1);
       k.modelFamilies.forEach((f) => {
-        const tokens = k[`${f}Tokens`];
-        sumTokens += tokens;
-        sumCost += getTokenCostUsd(f, tokens);
-        increment(modelStats, `${f}__tokens`, tokens);
-        increment(modelStats, `${f}__trial`, k.tier === "free" ? 1 : 0);
-        increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
-        increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
-        increment(modelStats, `${f}__overQuota`, k.isOverQuota ? 1 : 0);
-        increment(modelStats, `${f}__pozzed`, k.isPozzed ? 1 : 0);
+        incrementGenericFamilyStats(f);
+        addToFamily(`${f}__trial`, k.tier === "free" ? 1 : 0);
+        addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
+        addToFamily(`${f}__pozzed`, k.isPozzed ? 1 : 0);
       });
-      increment(
-        serviceStats,
-        "anthropic__uncheckedKeys",
-        Boolean(k.lastChecked) ? 0 : 1
-      );
       break;
-    }
-    case "google-ai": {
-      if (!keyIsGoogleAIKey(k)) throw new Error("Invalid key type");
-      const family = "gemini-pro";
-      sumTokens += k["gemini-proTokens"];
-      sumCost += getTokenCostUsd(family, k["gemini-proTokens"]);
-      increment(modelStats, `${family}__active`, k.isDisabled ? 0 : 1);
-      increment(modelStats, `${family}__revoked`, k.isRevoked ? 1 : 0);
-      increment(modelStats, `${family}__tokens`, k["gemini-proTokens"]);
-      break;
-    }
-    case "mistral-ai": {
-      if (!keyIsMistralAIKey(k)) throw new Error("Invalid key type");
-      k.modelFamilies.forEach((f) => {
-        const tokens = k[`${f}Tokens`];
-        sumTokens += tokens;
-        sumCost += getTokenCostUsd(f, tokens);
-        increment(modelStats, `${f}__tokens`, tokens);
-        increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
-        increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
-      });
-      break;
-    }
     case "aws": {
       if (!keyIsAwsKey(k)) throw new Error("Invalid key type");
-      k.modelFamilies.forEach((f) => {
-        const tokens = k[`${f}Tokens`];
-        sumTokens += tokens;
-        sumCost += getTokenCostUsd(f, tokens);
-        increment(modelStats, `${f}__tokens`, tokens);
-        increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
-        increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
-      });
-      increment(modelStats, `aws-claude__awsSonnet`, k.sonnetEnabled ? 1 : 0);
-      increment(modelStats, `aws-claude__awsHaiku`, k.haikuEnabled ? 1 : 0);
+      k.modelFamilies.forEach(incrementGenericFamilyStats);
+      if (!k.isDisabled) {
+        // Don't add revoked keys to available AWS variants
+        k.modelIds.forEach((id) => {
+          if (id.includes("claude-3-sonnet")) {
+            addToFamily(`aws-claude__awsSonnet3`, 1);
+            // not ideal but whatever
+          } else if (id.includes("claude-3-5-sonnet")) {
+            addToFamily(`aws-claude__awsSonnet3_5`, 1);
+          } else if (id.includes("claude-3-7-sonnet")) {
+            addToFamily(`aws-claude__awsSonnet3_7`, 1);
+          } else if (id.includes("claude-3-haiku")) {
+            addToFamily(`aws-claude__awsHaiku`, 1);
+          } else if (id.includes("sonnet-4")) {
+            addToFamily(`aws-claude__awsSonnet4`, 1);
+          } else if (id.includes("claude-3-opus")) {
+            addToFamily(`aws-claude__awsOpus3`, 1);
+            addToFamily(`aws-claude-opus__awsOpus3`, 1);
+          } else if (id.includes("opus-4")) {
+            addToFamily(`aws-claude__awsOpus4`, 1);
+            addToFamily(`aws-claude-opus__awsOpus4`, 1);
+          } else if (id.includes("claude-v2")) {
+            addToFamily(`aws-claude__awsClaude2`, 1);
+          }
+        });
+      }
       // Ignore revoked keys for aws logging stats, but include keys where the
       // logging status is unknown.
       const countAsLogged =
         k.lastChecked && !k.isDisabled && k.awsLoggingStatus === "enabled";
-      increment(modelStats, `aws-claude__awsLogged`, countAsLogged ? 1 : 0);
+      addToFamily(`aws-claude__awsLogged`, countAsLogged ? 1 : 0);
       break;
     }
+    case "gcp":
+      if (!keyIsGcpKey(k)) throw new Error("Invalid key type");
+      k.modelFamilies.forEach(incrementGenericFamilyStats);
+      // TODO: add modelIds to GcpKey
+      break;
+    case "deepseek":
+      if (!keyIsDeepseekKey(k)) throw new Error("Invalid key type");
+      k.modelFamilies.forEach((f) => {
+        incrementGenericFamilyStats(f);
+        addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
+      });
+      break;
+    case "xai":
+      if (!keyIsXaiKey(k)) throw new Error("Invalid key type");
+      k.modelFamilies.forEach((f) => {
+        incrementGenericFamilyStats(f);
+        if ('isOverQuota' in k) {
+          addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
+        }
+      });
+      break;
+    case "cohere":
+      if (!keyIsCohereKey(k)) throw new Error("Invalid key type");
+      k.modelFamilies.forEach((f) => {
+        incrementGenericFamilyStats(f);
+        if ('isOverQuota' in k) {
+          addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
+        }
+      });
+      break;
+    // These services don't have any additional stats to track.
+    case "azure":
+    case "mistral-ai":
+      k.modelFamilies.forEach(incrementGenericFamilyStats);
+      break;
+    case "google-ai":
+      // Cast to GoogleAIKey to access GoogleAI-specific properties
+      const googleKey = k as unknown as { overQuotaFamilies?: string[] };
+
+      // First handle general stats for all model families
+      k.modelFamilies.forEach((f) => {
+        incrementGenericFamilyStats(f);
+      });
+
+      // Create a set of model families that are over quota for this key
+      let overQuotaModelFamilies = new Set<string>();
+
+      // Add any model family that's listed in overQuotaFamilies
+      if (googleKey.overQuotaFamilies && Array.isArray(googleKey.overQuotaFamilies)) {
+        googleKey.overQuotaFamilies.forEach(family => {
+          overQuotaModelFamilies.add(family);
+        });
+      }
+      // If key is generally over quota and we don't have specific families, add all families
+      else if ('isOverQuota' in k && k.isOverQuota) {
+        k.modelFamilies.forEach(family => {
+          overQuotaModelFamilies.add(family);
+        });
+      }
+
+      // Now increment the over-quota counter for each affected family
+      // These model families are valid and already defined in the enum
+      overQuotaModelFamilies.forEach(family => {
+        if (family === 'gemini-pro' || family === 'gemini-flash' || family === 'gemini-ultra') {
+          addToFamily(`${family}__overQuota` as any, 1);
+        }
+      });
+      break;
+    case "qwen":
+      k.modelFamilies.forEach(incrementGenericFamilyStats);
+      break;
+    case "moonshot":
+      k.modelFamilies.forEach(incrementGenericFamilyStats);
+      break;
    default:
       assertNever(k.service);
   }
 
-  increment(serviceStats, "tokens", sumTokens);
-  increment(serviceStats, "tokenCost", sumCost);
+  addToService("inputTokens", sumInputTokens);
+  addToService("outputTokens", sumOutputTokens);
+  if (sumLegacyTokens > 0) { // Optional
+    addToService("legacyTokens", sumLegacyTokens);
+  }
+  addToService("tokenCost", sumCost);
 }
 
 function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
-  const tokens = modelStats.get(`${family}__tokens`) || 0;
-  const cost = getTokenCostUsd(family, tokens);
-  let info: BaseFamilyInfo & OpenAIInfo & AnthropicInfo & AwsInfo = {
-    usage: `${prettyTokens(tokens)} tokens${getCostSuffix(cost)}`,
-    activeKeys: modelStats.get(`${family}__active`) || 0,
-    revokedKeys: modelStats.get(`${family}__revoked`) || 0,
+  const inputTokens = familyStats.get(`${family}__inputTokens`) || 0;
+  const outputTokens = familyStats.get(`${family}__outputTokens`) || 0;
+  const legacyTokens = familyStats.get(`${family}__legacyTokens`) || 0; // Optional
+
+  let cost = 0;
+  let displayTokens = 0;
+  let usageString = "";
+
+  if (inputTokens > 0 || outputTokens > 0) {
+    cost = getTokenCostUsd(family, inputTokens, outputTokens);
+    displayTokens = inputTokens + outputTokens;
+    usageString = `${prettyTokens(displayTokens)} (In: ${prettyTokens(inputTokens)}, Out: ${prettyTokens(outputTokens)})${getCostSuffix(cost)}`;
+  } else if (legacyTokens > 0) {
+    // Only show legacy if no new input/output has been recorded for this family aggregate
+    cost = getTokenCostUsd(family, legacyTokens, 0); // Cost legacy as all input
+    displayTokens = legacyTokens;
+    usageString = `${prettyTokens(displayTokens)} tokens (legacy total)${getCostSuffix(cost)}`;
+  } else {
+    usageString = `${prettyTokens(0)} tokens${getCostSuffix(0)}`;
+  }
+
+  let info: BaseFamilyInfo & OpenAIInfo & AnthropicInfo & AwsInfo & GcpInfo = {
+    usage: usageString,
+    activeKeys: familyStats.get(`${family}__active`) || 0,
+    revokedKeys: familyStats.get(`${family}__revoked`) || 0,
   };
 
   // Add service-specific stats to the info object.
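The usage strings produced by the rewritten getInfoForFamily take three shapes. A sketch with naive formatting standing in for prettyTokens and getCostSuffix:

function demoUsageString(input: number, output: number): string {
  // Mirrors the branch structure of getInfoForFamily above; real formatting
  // comes from prettyTokens/getCostSuffix in ./shared/stats.
  const pretty = (n: number) => `${(n / 1e6).toFixed(2)}m`;
  if (input > 0 || output > 0) {
    return `${pretty(input + output)} (In: ${pretty(input)}, Out: ${pretty(output)})`;
  }
  return "0 tokens";
}
// demoUsageString(1_200_000, 300_000) === "1.50m (In: 1.20m, Out: 0.30m)"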
@@ -425,8 +663,8 @@ function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
   const service = MODEL_FAMILY_SERVICE[family];
   switch (service) {
     case "openai":
-      info.overQuotaKeys = modelStats.get(`${family}__overQuota`) || 0;
-      info.trialKeys = modelStats.get(`${family}__trial`) || 0;
+      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
+      info.trialKeys = familyStats.get(`${family}__trial`) || 0;
 
       // Delete trial/revoked keys for non-turbo families.
       // Trials are turbo 99% of the time, and if a key is invalid we don't
@@ -437,21 +675,70 @@ function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
       }
       break;
     case "anthropic":
-      info.overQuotaKeys = modelStats.get(`${family}__overQuota`) || 0;
-      info.trialKeys = modelStats.get(`${family}__trial`) || 0;
-      info.prefilledKeys = modelStats.get(`${family}__pozzed`) || 0;
+      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
+      info.trialKeys = familyStats.get(`${family}__trial`) || 0;
+      info.prefilledKeys = familyStats.get(`${family}__pozzed`) || 0;
       break;
     case "aws":
       if (family === "aws-claude") {
-        info.sonnetKeys = modelStats.get(`${family}__awsSonnet`) || 0;
-        info.haikuKeys = modelStats.get(`${family}__awsHaiku`) || 0;
-        const logged = modelStats.get(`${family}__awsLogged`) || 0;
+        // Original behavior: get logged count from the same family
+        const logged = familyStats.get(`${family}__awsLogged`) || 0;
+        const variants = new Set<string>();
+        if (familyStats.get(`${family}__awsClaude2`) || 0) variants.add("claude2");
+        if (familyStats.get(`${family}__awsSonnet3`) || 0) variants.add("sonnet3");
+        if (familyStats.get(`${family}__awsSonnet3_5`) || 0) variants.add("sonnet3.5");
+        if (familyStats.get(`${family}__awsSonnet3_7`) || 0) variants.add("sonnet3.7");
+        if (familyStats.get(`${family}__awsHaiku`) || 0) variants.add("haiku");
+        if (familyStats.get(`${family}__awsSonnet4`) || 0) variants.add("sonnet4");
+
+        info.enabledVariants = variants.size ? Array.from(variants).join(",") : undefined;
+
         if (logged > 0) {
           info.privacy = config.allowAwsLogging
             ? `AWS logging verification inactive. Prompts could be logged.`
             : `${logged} active keys are potentially logged and can't be used. Set ALLOW_AWS_LOGGING=true to override.`;
         }
+      } else if (family === "aws-claude-opus") {
+        // Get logging info from aws-claude family since that's where it's collected
+        const awsLogged = familyStats.get(`aws-claude__awsLogged`) || 0;
+        const variants = new Set<string>();
+        if (familyStats.get(`${family}__awsOpus3`) || 0) variants.add("opus3");
+        if (familyStats.get(`${family}__awsOpus4`) || 0) variants.add("opus4");
+
+        info.enabledVariants = variants.size ? Array.from(variants).join(",") : undefined;
+
+        // Show privacy warning for Opus if there are active Opus keys AND some AWS keys are logged
+        if (awsLogged > 0 && info.activeKeys > 0) {
+          info.privacy = config.allowAwsLogging
+            ? `AWS logging verification inactive. Prompts could be logged.`
+            : `Some AWS keys are potentially logged. Set ALLOW_AWS_LOGGING=true to override.`;
+        }
       }
+      // TODO: Consider if aws-mistral-* families need similar enabledVariant listings
+      break;
+    case "gcp":
+      if (family === "gcp-claude") {
+        // TODO: implement
+        info.enabledVariants = "not implemented";
+      }
+      break;
+    case "deepseek":
+      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
+      break;
+    case "xai":
+      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
+      break;
+    case "cohere":
+      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
+      break;
+    case "google-ai":
+      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
+      break;
+    case "qwen":
+      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
+      break;
+    case "moonshot":
+      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
       break;
   }
 }
@@ -19,6 +19,13 @@ const AnthropicV1BaseSchema = z
   top_k: z.coerce.number().optional(),
   top_p: z.coerce.number().optional(),
   metadata: z.object({ user_id: z.string().optional() }).optional(),
+  tools: z.array(z.any()).optional(),
+  tool_choice: z.any().optional(),
+  service_tier: z.enum(["auto", "standard_only"]).optional(),
+  cache_control: z.object({
+    type: z.literal("ephemeral"),
+    ttl: z.enum(["5m", "1h"]).optional()
+  }).optional(),
 })
 .strip();
 
@@ -33,16 +40,35 @@ export const AnthropicV1TextSchema = AnthropicV1BaseSchema.merge(
   })
 );
 
+const AnthropicV1BaseContentSchema = z.union([
+  z.object({ type: z.literal("text"), text: z.string() }),
+  z.object({
+    type: z.literal("image"),
+    source: z.object({
+      type: z.literal("base64"),
+      media_type: z.string().max(100),
+      data: z.string(),
+    }),
+  })
+]);
+
 const AnthropicV1MessageMultimodalContentSchema = z.array(
   z.union([
-    z.object({ type: z.literal("text"), text: z.string() }),
+    AnthropicV1BaseContentSchema,
     z.object({
-      type: z.literal("image"),
-      source: z.object({
-        type: z.literal("base64"),
-        media_type: z.string().max(100),
-        data: z.string(),
-      }),
+      type: z.literal("tool_use"),
+      id: z.string(),
+      name: z.string(),
+      input: z.object({}).passthrough(),
+    }),
+    z.object({
+      type: z.literal("tool_result"),
+      tool_use_id: z.string(),
+      is_error: z.boolean().optional(),
+      content: z.union([
+        z.string(),
+        z.array(AnthropicV1BaseContentSchema)
+      ]).optional(),
     }),
   ])
 );
@@ -63,7 +89,16 @@ export const AnthropicV1MessagesSchema = AnthropicV1BaseSchema.merge(
       .number()
       .int()
       .transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
-    system: z.string().optional(),
+    system: z
+      .union([
+        z.string(),
+        z.array(z.object({ type: z.literal("text"), text: z.string() })),
+      ])
+      .optional(),
+    thinking: z.object({
+      type: z.literal("enabled"),
+      budget_tokens: z.number().min(1024),
+    }).optional(),
   })
 );
 export type AnthropicChatMessage = z.infer<
@@ -77,7 +112,7 @@ function openAIMessagesToClaudeTextPrompt(messages: OpenAIChatMessage[]) {
     let role: string = m.role;
     if (role === "assistant") {
       role = "Assistant";
-    } else if (role === "system") {
+    } else if (role === "system" || role === "developer") {
       role = "System";
     } else if (role === "user") {
       role = "Human";
@@ -104,8 +139,10 @@ export const transformOpenAIToAnthropicChat: APIFormatTransformer<
     );
     throw result.error;
   }
-  req.headers["anthropic-version"] = "2023-06-01";
+  if (result.data.max_tokens > 8192) {
+    result.data.max_tokens = 4096;
+  }
 
   const { messages, ...rest } = result.data;
   const { messages: newMessages, system } =
@@ -119,7 +156,8 @@ export const transformOpenAIToAnthropicChat: APIFormatTransformer<
     stream: rest.stream,
     temperature: rest.temperature,
     top_p: rest.top_p,
-    stop_sequences: typeof rest.stop === "string" ? [rest.stop] : rest.stop,
+    stop_sequences:
+      typeof rest.stop === "string" ? [rest.stop] : rest.stop || undefined,
     ...(rest.user ? { metadata: { user_id: rest.user } } : {}),
     // Anthropic supports top_k, but OpenAI does not
     // OpenAI supports frequency_penalty, presence_penalty, logit_bias, n, seed,
@@ -140,8 +178,6 @@ export const transformOpenAIToAnthropicText: APIFormatTransformer<
     throw result.error;
   }
 
-  req.headers["anthropic-version"] = "2023-06-01";
-
   const { messages, ...rest } = result.data;
   const prompt = openAIMessagesToClaudeTextPrompt(messages);
 
@@ -186,8 +222,6 @@ export const transformAnthropicTextToAnthropicChat: APIFormatTransformer<
     throw result.error;
   }
 
-  req.headers["anthropic-version"] = "2023-06-01";
-
   const { model, max_tokens_to_sample, prompt, ...rest } = result.data;
   validateAnthropicTextPrompt(prompt);
 
@@ -365,7 +399,7 @@ function openAIMessagesToClaudeChatPrompt(messages: OpenAIChatMessage[]): {
     // Here we will lose the original name if it was a system message, but that
     // is generally okay because the system message is usually a prompt and not
     // a character in the chat.
-    const name = msg.role === "system" ? "System" : msg.name?.trim();
+    const name = (msg.role === "system" || msg.role === "developer") ? "System" : msg.name?.trim();
     const content = convertOpenAIContent(msg.content);
 
     // Prepend the display name to the first text content in the current message
@@ -395,8 +429,8 @@ function openAIMessagesToClaudeChatPrompt(messages: OpenAIChatMessage[]): {
 
 function isSystemOpenAIRole(
   role: OpenAIChatMessage["role"]
-): role is "system" | "function" | "tool" {
-  return ["system", "function", "tool"].includes(role);
+): role is "developer" | "system" | "function" | "tool" {
+  return ["developer", "system", "function", "tool"].includes(role);
 }
 
 function getFirstTextContent(content: OpenAIChatMessage["content"]) {
@@ -439,9 +473,25 @@ function convertOpenAIContent(
   });
 }
 
-export function containsImageContent(messages: AnthropicChatMessage[]) {
-  return messages.some(
-    ({ content }) =>
-      typeof content !== "string" && content.some((c) => c.type === "image")
-  );
+export function containsImageContent(messages: AnthropicChatMessage[]): boolean {
+  const isImage = (item: any) => item?.type === 'image';
+
+  return messages.some(msg => {
+    if (typeof msg.content === 'string') return false;
+
+    return msg.content.some(item => {
+      if (isImage(item)) return true;
+
+      if (item.type === 'tool_result') {
+        const content = item.content;
+        if (!content) return false;
+
+        if (typeof content === 'string') return false;
+        if (Array.isArray(content)) return content.some(isImage);
+        return isImage(content);
+      }
+
+      return false;
+    });
+  });
 }
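A quick check of the new behavior: images nested inside tool_result blocks are now detected. The ids and data below are invented for the example:

const msgs: any[] = [{
  role: "user",
  content: [{
    type: "tool_result",
    tool_use_id: "toolu_example", // invented id
    content: [{
      type: "image",
      source: { type: "base64", media_type: "image/png", data: "..." },
    }],
  }],
}];
// containsImageContent(msgs) === true; the old implementation returned false
// here because it only inspected top-level content items.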
|||||||
@@ -0,0 +1,69 @@
|
|||||||
|
import { z } from "zod";
|
||||||
|
import { OPENAI_OUTPUT_MAX } from "./openai";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Helper function to check if a model is from Cohere
|
||||||
|
*/
|
||||||
|
export function isCohereModel(model: string): boolean {
|
||||||
|
// Cohere's command model family
|
||||||
|
return model.includes("command") || model.includes("cohere");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Basic chat message schema
|
||||||
|
const CohereChatMessageSchema = z.object({
|
||||||
|
role: z.enum(["user", "assistant", "system", "developer"]),
|
||||||
|
content: z.string().nullable(),
|
||||||
|
name: z.string().optional(),
|
||||||
|
});
|
||||||
|
|
||||||
|
const CohereMessagesSchema = z.array(CohereChatMessageSchema);
|
||||||
|
|
||||||
|
// Schema for Cohere chat completions
|
||||||
|
export const CohereV1ChatCompletionsSchema = z.object({
|
||||||
|
model: z.string(),
|
||||||
|
messages: CohereMessagesSchema,
|
||||||
|
temperature: z.number().optional().default(1),
|
||||||
|
top_p: z.number().optional().default(1),
|
||||||
|
max_tokens: z.coerce
|
||||||
|
.number()
|
||||||
|
.int()
|
||||||
|
.nullish()
|
||||||
|
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
|
||||||
|
stream: z.boolean().optional().default(false),
|
||||||
|
stop: z
|
||||||
|
.union([z.string(), z.array(z.string())])
|
||||||
|
.optional()
|
||||||
|
.default([])
|
||||||
|
.transform((v) => (Array.isArray(v) ? v : [v])),
|
||||||
|
seed: z.number().int().min(0).optional(),
|
||||||
|
response_format: z
|
||||||
|
.object({
|
||||||
|
type: z.enum(["text", "json_object"]),
|
||||||
|
schema: z.any().optional()
|
||||||
|
})
|
||||||
|
.optional(),
|
||||||
|
// Structured output with schema
|
||||||
|
tools: z.array(z.any()).optional(),
|
||||||
|
frequency_penalty: z.number().optional().default(0),
|
||||||
|
presence_penalty: z.number().optional().default(0),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Schema for Cohere embeddings
|
||||||
|
export const CohereV1EmbeddingsSchema = z.object({
|
||||||
|
model: z.string(),
|
||||||
|
input: z.union([z.string(), z.array(z.string())]),
|
||||||
|
encoding_format: z.enum(["float", "base64"]).optional()
|
||||||
|
});
|
||||||
|
|
||||||
|
// Helper function to convert between different message formats if needed
|
||||||
|
export function normalizeMessages(messages: any[]): any[] {
|
||||||
|
// From documentation, Cohere supports roles: developer, user, assistant
|
||||||
|
// The 'developer' role is equivalent to 'system' in OpenAI API
|
||||||
|
return messages.map((msg) => {
|
||||||
|
// Convert system role to developer role for Cohere compatibility
|
||||||
|
if (msg.role === "system") {
|
||||||
|
return { ...msg, role: "developer" };
|
||||||
|
}
|
||||||
|
return msg;
|
||||||
|
});
|
||||||
|
}
|
||||||
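To show how the pieces of the new file compose, a small sketch of validating a body and remapping roles before forwarding (the import path is assumed, since the file's name is not shown in this view):

```ts
import { CohereV1ChatCompletionsSchema, normalizeMessages } from "./cohere"; // path assumed

const body = CohereV1ChatCompletionsSchema.parse({
  model: "command-r-plus",
  messages: [
    { role: "system", content: "You are terse." },
    { role: "user", content: "Hi" },
  ],
});

// normalizeMessages rewrites "system" to Cohere's equivalent "developer" role.
const outbound = { ...body, messages: normalizeMessages(body.messages) };
console.log(outbound.messages[0].role); // "developer"
console.log(body.max_tokens); // clamped to OPENAI_OUTPUT_MAX when omitted
```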
@@ -5,32 +5,92 @@ import {
 } from "./openai";
 import { APIFormatTransformer } from "./index";
 
-// https://developers.generativeai.google/api/rest/generativelanguage/models/generateContent
+const TextPartSchema = z.object({
+  text: z.string(),
+  thought: z.boolean().optional()
+});
+
+const InlineDataPartSchema = z.object({
+  inlineData: z.object({
+    mimeType: z.string(),
+    data: z.string(),
+  }),
+});
+
+const PartSchema = z.union([TextPartSchema, InlineDataPartSchema]);
+
+const GoogleAIV1ContentSchema = z.object({
+  parts: z
+    .union([PartSchema, z.array(PartSchema)])
+    .transform((val) => (Array.isArray(val) ? val : [val])),
+  role: z.enum(["user", "model"]).optional(),
+});
+
+const SafetySettingsSchema = z
+  .array(
+    z.object({
+      category: z.enum([
+        "HARM_CATEGORY_HARASSMENT",
+        "HARM_CATEGORY_HATE_SPEECH",
+        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "HARM_CATEGORY_CIVIC_INTEGRITY",
+      ]),
+      threshold: z.enum([
+        "OFF",
+        "BLOCK_NONE",
+        "BLOCK_ONLY_HIGH",
+        "BLOCK_MEDIUM_AND_ABOVE",
+        "BLOCK_LOW_AND_ABOVE",
+        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+      ]),
+    })
+  )
+  .optional();
+
+const GoogleSearchToolSchema = z.object({
+  googleSearch: z.object({}),
+});
+
+// Directly assign the schema since there's only one tool type for now
+const ToolSchema = GoogleSearchToolSchema;
+
 export const GoogleAIV1GenerateContentSchema = z
   .object({
-    model: z.string().max(100), //actually specified in path but we need it for the router
-    stream: z.boolean().optional().default(false), // also used for router
-    contents: z.array(
-      z.object({
-        parts: z.array(z.object({ text: z.string() })),
-        role: z.enum(["user", "model"]),
-      })
-    ),
-    tools: z.array(z.object({})).max(0).optional(),
-    safetySettings: z.array(z.object({})).max(0).optional(),
-    generationConfig: z.object({
-      temperature: z.number().optional(),
-      maxOutputTokens: z.coerce
-        .number()
-        .int()
-        .optional()
-        .default(16)
-        .transform((v) => Math.min(v, 1024)), // TODO: Add config
-      candidateCount: z.literal(1).optional(),
-      topP: z.number().optional(),
-      topK: z.number().optional(),
-      stopSequences: z.array(z.string().max(500)).max(5).optional(),
-    }),
+    model: z.string().max(100),
+    stream: z.boolean().optional().default(false),
+    contents: z.array(GoogleAIV1ContentSchema),
+    tools: z.array(ToolSchema).optional(),
+    safetySettings: SafetySettingsSchema,
+    systemInstruction: GoogleAIV1ContentSchema.optional(),
+    system_instruction: GoogleAIV1ContentSchema.optional(),
+    generationConfig: z
+      .object({
+        temperature: z.number().min(0).max(2).optional(),
+        maxOutputTokens: z.coerce
+          .number()
+          .int()
+          .optional()
+          .default(16)
+          .transform((v) => Math.min(v, 65536)),
+        candidateCount: z.literal(1).optional(),
+        topP: z.number().min(0).max(1).optional(),
+        topK: z.number().min(0).max(500).optional(),
+        stopSequences: z.array(z.string().max(500)).max(5).optional(),
+        seed: z.number().int().optional(),
+        frequencyPenalty: z.number().optional().default(0),
+        presencePenalty: z.number().optional().default(0),
+        thinkingConfig: z
+          .object({
+            includeThoughts: z.boolean().optional(),
+            thinkingBudget: z.union([z.literal("auto"), z.number().int()]).optional(),
+          })
+          .optional(),
+        responseModalities: z.any().optional(), // responseModalities: z.array(z.enum(["TEXT"])).optional()
+      })
+      .default({}),
   })
   .strip();
 export type GoogleAIChatMessage = z.infer<
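One subtlety in the new schema: `parts` accepts either a single part object or an array, and the transform normalizes both to an array. Since `GoogleAIV1ContentSchema` is module-private, here is a re-stated minimal copy just to demonstrate the coercion:

```ts
import { z } from "zod";

// Minimal re-statement of the parts coercion from the schema above.
const ContentSchema = z.object({
  parts: z
    .union([z.object({ text: z.string() }), z.array(z.object({ text: z.string() }))])
    .transform((val) => (Array.isArray(val) ? val : [val])),
  role: z.enum(["user", "model"]).optional(),
});

const single = ContentSchema.parse({ role: "user", parts: { text: "hello" } });
const listed = ContentSchema.parse({ role: "user", parts: [{ text: "hello" }] });
console.log(single.parts, listed.parts); // both normalize to [ { text: "hello" } ]
```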
@@ -54,15 +114,11 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
   }
 
   const { messages, ...rest } = result.data;
 
   const foundNames = new Set<string>();
   const contents = messages
     .map((m) => {
       const role = m.role === "assistant" ? "model" : "user";
-      // Detects character names so we can set stop sequences for them as Gemini
-      // is prone to continuing as the next character.
-      // If names are not available, we'll still try to prefix the message
-      // with generic names so we can set stops for them but they don't work
-      // as well as real names.
       const text = flattenOpenAIMessageContent(m.content);
       const propName = m.name?.trim();
       const textName =
@@ -72,12 +128,6 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
 
       foundNames.add(name);
 
-      // Prefixing messages with their character name seems to help avoid
-      // Gemini trying to continue as the next character, or at the very least
-      // ensures it will hit the stop sequence. Otherwise it will start a new
-      // paragraph and switch perspectives.
-      // The response will be very likely to include this prefix so frontends
-      // will need to strip it out.
       const textPrefix = textName ? "" : `${name}: `;
       return {
         parts: [{ text: textPrefix + text }],
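The comments deleted in the last two hunks carried the rationale: Gemini tends to keep writing as the next character, so each turn is prefixed with its speaker name and those names later become stop sequences. A tiny sketch of that mechanism with made-up names:

```ts
// Speaker names collected while mapping messages become "\nName:" stops,
// so generation halts before the model impersonates another speaker.
const foundNames = new Set(["Alice", "Bob"]);
const stops = [...new Set(Array.from(foundNames).map((name) => `\n${name}:`))].slice(0, 5);
console.log(stops); // [ "\nAlice:", "\nBob:" ]
// Responses will likely start with "Name: ", which frontends must strip.
```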
@@ -86,7 +136,7 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
     })
     .reduce<GoogleAIChatMessage[]>((acc, msg) => {
       const last = acc[acc.length - 1];
-      if (last?.role === msg.role) {
+      if (last?.role === msg.role && 'text' in last.parts[0] && 'text' in msg.parts[0]) {
         last.parts[0].text += "\n\n" + msg.parts[0].text;
       } else {
         acc.push(msg);
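The added `'text' in …` guards exist because parts may now be `inlineData` images; merging only happens when both adjacent same-role turns lead with text. A self-contained sketch of the reduce step:

```ts
type Part = { text: string } | { inlineData: { mimeType: string; data: string } };
type Turn = { role: "user" | "model"; parts: Part[] };

const turns: Turn[] = [
  { role: "user", parts: [{ text: "first" }] },
  { role: "user", parts: [{ text: "second" }] },
  { role: "user", parts: [{ inlineData: { mimeType: "image/png", data: "" } }] },
];

const merged = turns.reduce<Turn[]>((acc, msg) => {
  const last = acc[acc.length - 1];
  const head = last?.parts[0];
  const incoming = msg.parts[0];
  if (last && last.role === msg.role && head && "text" in head && "text" in incoming) {
    head.text += "\n\n" + incoming.text; // merge adjacent text turns
  } else {
    acc.push(msg); // image turns (or role changes) stay separate
  }
  return acc;
}, []);

console.log(merged.length); // 2: the two text turns merged, the image did not
```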
@@ -102,23 +152,52 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
   stops.push(...Array.from(foundNames).map((name) => `\n${name}:`));
   stops = [...new Set(stops)].slice(0, 5);
 
+  let tools: z.infer<typeof ToolSchema>[] | undefined = undefined;
+  let responseModalities: string[] | undefined = undefined;
+
+  if (req.body.use_google_search === true) {
+    req.log.info("Google Search tool requested.");
+    tools = [{ googleSearch: {} }];
+    responseModalities = ["TEXT"];
+  }
+
+  let thinkingConfig = undefined;
+  if (body.generationConfig?.thinkingConfig || body.thinkingConfig) {
+    thinkingConfig = body.generationConfig?.thinkingConfig || body.thinkingConfig;
+  }
+
   return {
-    model: "gemini-pro",
+    model: req.body.model,
     stream: rest.stream,
     contents,
-    tools: [],
+    tools: tools,
     generationConfig: {
       maxOutputTokens: rest.max_tokens,
       stopSequences: stops,
       topP: rest.top_p,
-      topK: 40, // openai schema doesn't have this, google ai defaults to 40
+      topK: 40,
       temperature: rest.temperature,
+      seed: rest.seed,
+      frequencyPenalty: rest.frequency_penalty,
+      presencePenalty: rest.presence_penalty,
+      responseModalities: responseModalities,
+      ...(thinkingConfig ? { thinkingConfig } : {}),
     },
     safetySettings: [
       { category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_NONE" },
       { category: "HARM_CATEGORY_HATE_SPEECH", threshold: "BLOCK_NONE" },
       { category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold: "BLOCK_NONE" },
       { category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "BLOCK_NONE" },
+      { category: "HARM_CATEGORY_CIVIC_INTEGRITY", threshold: "BLOCK_NONE" },
     ],
+    ...(req.body.system_instruction && { system_instruction: req.body.system_instruction }),
+    ...(req.body.systemInstruction && { systemInstruction: req.body.systemInstruction }),
   };
 };
 
+export function containsImageContent(contents: GoogleAIChatMessage[]): boolean {
+  return contents.some(content => {
+    const parts = Array.isArray(content.parts) ? content.parts : [content.parts];
+    return parts.some(part => 'inlineData' in part);
+  });
+}
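And a usage sketch for the Google-side image check (turn literals abbreviated and cast loosely; assumes the exported function above is in scope):

```ts
const turns = [
  { role: "user", parts: [{ text: "describe this" }] },
  { role: "user", parts: [{ inlineData: { mimeType: "image/jpeg", data: "/9j/..." } }] },
];

// Detects any inlineData part across all turns.
console.log(containsImageContent(turns as any)); // true
```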
Some files were not shown because too many files have changed in this diff.