Compare commits
269 Commits
| SHA1 |
|---|
| 9c0a4fd3a7 |
| bc85a71c2a |
| 1604246cf1 |
| 82028d14b7 |
| f23315d233 |
| 1bf6d6ac99 |
| 09ce6a70d2 |
| 0f8581d340 |
| e8c5d06cd7 |
| 20c9920199 |
| 253a2af13f |
| 2af4a02b15 |
| c8dab8786a |
| 9cc86c2d68 |
| e974da8a58 |
| f114469057 |
| 6e02db4bd7 |
| 1f9af4374d |
| 79a7dee586 |
| e1bd960bb7 |
| 867fda430b |
| bbd2b88503 |
| 08400db220 |
| 5249e1c904 |
| c18df6a546 |
| ceedb52478 |
| fa13d06f45 |
| 0c0dc09020 |
| 317ef03ab4 |
| 7def7c17e4 |
| e201c2cf5e |
| edbbf056a0 |
| 4c214305af |
| cb8f2669ac |
| ed737e43a5 |
| 2bc1a7dbea |
| 5ad22145a0 |
| b7ad5f1dae |
| 2405be71c1 |
| aec3927c94 |
| ec82599e24 |
| 21294abd8e |
| ca4a1f3252 |
| e0270f99ee |
| 38e2980419 |
| 0102c7a6a5 |
| b89439287e |
| 508bb3e08b |
| a17d087928 |
| 3f32a9b14d |
| 3e11b0bf49 |
| 64d26c5c6c |
| 41bc4998fc |
| 4e3fb9d152 |
| 8c98fca56d |
| 2389b30e68 |
| c066a7d46b |
| 7b3cf409e4 |
| 74cbafbb3b |
| 0411b4c3a6 |
| 5988cd7e45 |
| f80873ef8a |
| 45c0b99f20 |
| 692da2b457 |
| 32bc797216 |
| 1a6ce7ea04 |
| fdba7cd7e4 |
| 64d2f78526 |
| 566d42da07 |
| 74bb88daa3 |
| 2ea5fdf902 |
| d5ec6fe1f9 |
| ce9c8ec8b6 |
| 29323fd7bf |
| af162e567a |
| 87c6dd90cb |
| 1d8b13ba70 |
| 8344fd2e2a |
| 8c30088383 |
| dde0183d7d |
| d64edbb3b7 |
| 5f0b5cc4e5 |
| 99269b7cd6 |
| 6870a36a6e |
| 45535de6ae |
| 8ea6fe463b |
| 4fd5d08ed8 |
| 4496afe7a1 |
| cc0ece32d0 |
| be8accbc37 |
| 1d6f3dbf10 |
| f2b55ebabb |
| 6374bfdee1 |
| eb66f6b149 |
| 551a13498b |
| 780b885aeb |
| d9645025c9 |
| c1cb395020 |
| 80d09f470b |
| 44338652fd |
| 8ef272f8b3 |
| 9c804c0560 |
| 2dc7fda2dd |
| 68b199e712 |
| 1b110d3269 |
| abfde6f684 |
| d2d6ff3d52 |
| a5eda7685b |
| cbca37dd77 |
| fc55518cd1 |
| 925a81de43 |
| 989bfc0ca3 |
| a1c04234ab |
| dc0e7498e8 |
| 6628498d5e |
| 31f9b4d536 |
| afe6ad8ac9 |
| a16d66a45b |
| 465b13e5fb |
| 6c8b19651d |
| d3292d8a76 |
| dab5c1bbf0 |
| 2ffce3eff8 |
| 8197192223 |
| 7e6857fcf5 |
| 719cbc3cfa |
| 3beea5dcfc |
| 9213b7088b |
| 86ed19af99 |
| bb75cc668c |
| a6d095dcda |
| 588aaae5d9 |
| 6eec7ff7e6 |
| 272b812db3 |
| 0bcc0c1037 |
| af58d25fb5 |
| 15dc2514ee |
| d650038f7e |
| 6efe09b62e |
| 14a1203be7 |
| 1e8f55f96d |
| 2f8538519b |
| 1b7ce423a6 |
| 799a73655c |
| 96645ba529 |
| de631d3d91 |
| bf2c0dd3d9 |
| 2415be7c51 |
| 4c9a3678ae |
| 19df23f342 |
| 85fafb8edb |
| 5eb4858c69 |
| 8081d9516d |
| 5473ef903e |
| 568288c180 |
| 65f4e14d3b |
| 6479cefe07 |
| 94e2c907b5 |
| af53fc9913 |
| e6cc393296 |
| a9811c2886 |
| 64e07a0429 |
| 83676caa8b |
| a76f8a3c87 |
| ecae252df4 |
| d951989a57 |
| 9deafb445b |
| ee1d8ab1a2 |
| c2bfcdc744 |
| 24b6a090d8 |
| 758ccbf23b |
| 4ad3c217a4 |
| ab1fb89ab9 |
| ac79935205 |
| 2b7c901951 |
| ad13928383 |
| a3869c2d67 |
| 6ebc2f5126 |
| d551f86020 |
| 7cfaf5777e |
| 4f6ef38222 |
| d21b232a8e |
| 72c9516679 |
| fcaad65ccb |
| b3d4650275 |
| 70c7f2aae9 |
| aecc934fad |
| a8d36f832e |
| c1db122016 |
| e9bd6127a4 |
| e230e9acec |
| 239f95e8a1 |
| 17475447a0 |
| d2b37b8455 |
| cec66cdc44 |
| a5c9e95929 |
| c5d4fe44e6 |
| 8ed883eaff |
| 6de338c6ac |
| 45576db441 |
| bcc83f30d9 |
| e5a26215e1 |
| cd6cc76a46 |
| 613bb789fb |
| f1c698388e |
| 75605a2bfb |
| 58e67d40e2 |
| 796b4eee47 |
| 0f482e67d2 |
| 496ec09905 |
| f522dba6a3 |
| 25ba8447d9 |
| 91b8c01a9d |
| 82b88764ba |
| 6ea9235ff8 |
| 372ad85283 |
| c2f5d2fbf3 |
| c264413495 |
| 8d27082ad0 |
| e2b602fd52 |
| b00fb88cab |
| 1cc281f6fe |
| 8f4d00ed26 |
| 36e2430a8f |
| 28447d0811 |
| 6d54cbc785 |
| 9d7a4f4b51 |
| 3496a2a9bd |
| 5072638ec2 |
| 8a325a1e0b |
| 5eeb2875b4 |
| c67dad1617 |
| fe61745e24 |
| 251ea6d412 |
| 55f7337ea4 |
| f3b876887e |
| 49c578f4dc |
| 4190d5fef6 |
| 1644e82f25 |
| 0bbdc0b841 |
| c4a633a5d6 |
| 0c6ec3254f |
| 13aa55cd3d |
| ba4532b38d |
| b57627e69b |
| 536803853a |
| ad0a3c0936 |
| 161f5aba3e |
| 514d1b7e31 |
| 22d7f966c6 |
| cfb6353c65 |
| a7fed3136e |
| 29638cf26e |
| ee26e7be65 |
| ff0d3dfdcd |
| 81a3ae1746 |
| 4dfd57fcb4 |
| d21e274358 |
| 6e97e036b2 |
| 7a4a16dd2f |
| f1cfa644c5 |
| 6a908b09cb |
| 86772ab32a |
| bd87ca60f7 |
| ac1897fd17 |
| 2a6f85e2e2 |
| ffcaa23511 |
| 1d5b8efa23 |
| 905273abf2 |
+98 -61
@@ -8,12 +8,32 @@
# Use production mode unless you are developing locally.
NODE_ENV=production

# Detail level of diagnostic logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info

# ------------------------------------------------------------------------------
# General settings:

# The title displayed on the info page.
# SERVER_TITLE=Coom Tunnel

# URL for the image displayed on the login page.
# If not set, no image will be displayed.
# LOGIN_IMAGE_URL=https://example.com/your-logo.png

# Whether to enable the token-based or password-based login for the main info page.
# Defaults to true. Set to false to disable login and make the info page public.
# ENABLE_INFO_PAGE_LOGIN=true

# Authentication mode for the service info page. (token | password)
# If 'token', any valid user token is used (requires GATEKEEPER='user_token' mode).
# If 'password', SERVICE_INFO_PASSWORD is used.
# Defaults to 'token' if ENABLE_INFO_PAGE_LOGIN is true.
# SERVICE_INFO_AUTH_MODE=token

# Password for the service info page if SERVICE_INFO_AUTH_MODE is 'password'.
# SERVICE_INFO_PASSWORD=your-service-info-password

# The route name used to proxy requests to APIs, relative to the Web site root.
# PROXY_ENDPOINT_ROUTE=/proxy
@@ -24,42 +44,76 @@ NODE_ENV=production

# Max number of context tokens a user can request at once.
# Increase this if your proxy allows GPT 32k or 128k context.
# MAX_CONTEXT_TOKENS_OPENAI=16384
# MAX_CONTEXT_TOKENS_OPENAI=32768
# MAX_CONTEXT_TOKENS_ANTHROPIC=32768

# Max number of output tokens a user can request at once.
# MAX_OUTPUT_TOKENS_OPENAI=400
# MAX_OUTPUT_TOKENS_ANTHROPIC=400
# MAX_OUTPUT_TOKENS_OPENAI=1024
# MAX_OUTPUT_TOKENS_ANTHROPIC=1024

# Whether to show the estimated cost of consumed tokens on the info page.
# SHOW_TOKEN_COSTS=false

# Whether to automatically check API keys for validity.
# Note: CHECK_KEYS is disabled by default in local development mode, but enabled
# by default in production mode.
# Disabled by default in local development mode, but enabled in production.
# CHECK_KEYS=true

# Which model types users are allowed to access.
# The following model families are recognized:

# turbo | gpt4 | gpt4-32k | gpt4-turbo | gpt4o | dall-e | claude | claude-opus
# | gemini-flash | gemini-pro | gemini-ultra | mistral-tiny | mistral-small
# | mistral-medium | mistral-large | aws-claude | aws-claude-opus | gcp-claude
# | gcp-claude-opus | azure-turbo | azure-gpt4 | azure-gpt4-32k
# | azure-gpt4-turbo | azure-gpt4o | azure-dall-e

# By default, all models are allowed except for 'dall-e' / 'azure-dall-e'.
# To allow DALL-E image generation, uncomment the line below and add 'dall-e' or
# 'azure-dall-e' to the list of allowed model families.
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,gpt4o,claude,claude-opus,gemini-flash,gemini-pro,gemini-ultra,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,gcp-claude,gcp-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o
# turbo | gpt4 | gpt4-32k | gpt4-turbo | gpt4o | o1 | dall-e | claude
# | claude-opus | gemini-flash | gemini-pro | gemini-ultra | mistral-tiny |
# | mistral-small | mistral-medium | mistral-large | aws-claude |
# | aws-claude-opus | gcp-claude | gcp-claude-opus | azure-turbo | azure-gpt4
# | azure-gpt4-32k | azure-gpt4-turbo | azure-gpt4o | azure-o1 | azure-dall-e
# | azure-gpt45 | azure-o1-mini | azure-o3-mini | deepseek | xai | o3 | o4-mini | gpt41 | gpt41-mini | gpt41-nano
# By default, all models are allowed.
# To disallow any, uncomment the line below and edit it.
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt45,gpt4-turbo,gpt4o,claude,claude-opus,gemini-flash,gemini-pro,gemini-ultra,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,gcp-claude,gcp-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o,azure-gpt45,azure-o1-mini,azure-o3-mini,deepseek

# Which services can be used to process prompts containing images via multimodal
# models. The following services are recognized:
# openai | anthropic | aws | gcp | azure | google-ai | mistral-ai
# openai | anthropic | aws | gcp | azure | google-ai | mistral-ai | xai
# Do not enable this feature unless all users are trusted, as you will be liable
# for any user-submitted images containing illegal content.
# By default, no image services are allowed and image prompts are rejected.
# ALLOWED_VISION_SERVICES=

# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
# PROMPT_LOGGING=false

# Specifies the number of proxies or load balancers in front of the server.
# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
# For any other deployments, please see config.ts as the correct configuration
# depends on your setup. Misconfiguring this value can result in problems
# accurately tracking IP addresses and enforcing rate limits.
# TRUSTED_PROXIES=1

# Whether cookies should be set without the Secure flag, for hosts that don't
# support SSL. True by default in development, false in production.
# USE_INSECURE_COOKIES=false

# Reorganizes requests in the queue according to their token count, placing
# larger prompts further back. The penalty is determined by (promptTokens *
# TOKENS_PUNISHMENT_FACTOR). A value of 1.0 adds one second per 1000 tokens.
# When there is no queue or it is very short, the effect is negligible (this
# setting only reorders the queue, it does not artificially delay requests).
# TOKENS_PUNISHMENT_FACTOR=0.0
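# Example: with a factor of 1.0, a 6000-token prompt is scheduled as if it
# had arrived six seconds after an otherwise-identical short request.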

# Captcha verification settings. Refer to docs/pow-captcha.md for guidance.
# CAPTCHA_MODE=none
# POW_TOKEN_HOURS=24
# POW_TOKEN_MAX_IPS=2
# POW_DIFFICULTY_LEVEL=low
# POW_CHALLENGE_TIMEOUT=30

# -------------------------------------------------------------------------------
# Blocking settings:
# Allows blocking requests depending on content, referers, or IP addresses.
# This is a convenience feature; if you need more robust functionality it is
# highly recommended to put this application behind nginx or Cloudflare, as they
# will have better performance.

# IP addresses or CIDR blocks from which requests will be blocked.
# IP_BLACKLIST=10.0.0.1/24
# URLs from which requests will be blocked.
@@ -68,35 +122,13 @@ NODE_ENV=production
# BLOCK_MESSAGE="You must be over the age of majority in your country to use this service."
# Destination to redirect blocked requests to.
# BLOCK_REDIRECT="https://roblox.com/"

# Comma-separated list of phrases that will be rejected. Only whole words are matched.
# Surround phrases with quotes if they contain commas.
# Avoid short or common phrases as this tests the entire prompt.
# Comma-separated list of phrases that will be rejected. Surround phrases with
# quotes if they contain commas. You can use regular expression tokens.
# Avoid overly broad phrases, as they will trigger on any match in the entire prompt.
# REJECT_PHRASES="phrase one,phrase two,"phrase three, which has a comma",phrase four"
# Message to show when requests are rejected.
# REJECT_MESSAGE="You can't say that here."

# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
# PROMPT_LOGGING=false

# The port and network interface to listen on.
# PORT=7860
# BIND_ADDRESS=0.0.0.0

# Whether cookies should be set without the Secure flag, for hosts that don't support SSL.
# USE_INSECURE_COOKIES=false

# Detail level of logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info

# Captcha verification settings. Refer to docs/pow-captcha.md for guidance.
# CAPTCHA_MODE=none
# POW_TOKEN_HOURS=24
# POW_TOKEN_MAX_IPS=2
# POW_DIFFICULTY_LEVEL=low
# POW_CHALLENGE_TIMEOUT=30

# ------------------------------------------------------------------------------
# Optional settings for user management, access control, and quota enforcement:
# See `docs/user-management.md` for more information and setup instructions.
@@ -104,8 +136,11 @@ NODE_ENV=production

# Which access control method to use. (none | proxy_key | user_token)
# GATEKEEPER=none
# Which persistence method to use. (memory | firebase_rtdb)
# Which persistence method to use. (memory | firebase_rtdb | sqlite)
# GATEKEEPER_STORE=memory
# If using the sqlite store, path to the SQLite database file for user data.
# Defaults to data/user-store.sqlite in the project directory.
# SQLITE_USER_STORE_PATH=data/user-store.sqlite3

# Maximum number of unique IPs a user can connect from. (0 for unlimited)
# MAX_IPS_PER_USER=0
@@ -116,15 +151,8 @@ NODE_ENV=production
# ALLOW_NICKNAME_CHANGES=true

# Default token quotas for each model family. (0 for unlimited)
# Specify as TOKEN_QUOTA_MODEL_FAMILY=value, replacing dashes with underscores.
# TOKEN_QUOTA_TURBO=0
# TOKEN_QUOTA_GPT4=0
# TOKEN_QUOTA_GPT4_32K=0
# TOKEN_QUOTA_GPT4_TURBO=0
# TOKEN_QUOTA_CLAUDE=0
# TOKEN_QUOTA_GEMINI_PRO=0
# TOKEN_QUOTA_AWS_CLAUDE=0
# TOKEN_QUOTA_GCP_CLAUDE=0
# Specify as TOKEN_QUOTA_MODEL_FAMILY=value (replacing dashes with underscores).
# e.g. TOKEN_QUOTA_TURBO=0, TOKEN_QUOTA_GPT4=1000000, TOKEN_QUOTA_GPT4_32K=100000
# "Tokens" for image-generation models are counted at a rate of 100000 tokens
# per US$1.00 generated, which is similar to the cost of GPT-4 Turbo.
# DALL-E 3 costs around US$0.10 per image (10000 tokens).
@@ -135,12 +163,22 @@ NODE_ENV=production
# Leave unset to never automatically refresh quotas.
# QUOTA_REFRESH_PERIOD=daily

# Specifies the number of proxies or load balancers in front of the server.
# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
# For any other deployments, please see config.ts as the correct configuration
# depends on your setup. Misconfiguring this value can result in problems
# accurately tracking IP addresses and enforcing rate limits.
# TRUSTED_PROXIES=1
# -------------------------------------------------------------------------------
# HTTP agent settings:
# If you need to change how the proxy makes requests to other servers, such
# as when checking keys or forwarding users' requests to external services,
# you can configure an alternative HTTP agent. Otherwise the default OS settings
# will be used.

# The name of the network interface to use. The first external IPv4 address
# belonging to this interface will be used for outgoing requests.
# HTTP_AGENT_INTERFACE=enp0s3

# The URL of a proxy server to use. Supports SOCKS4, SOCKS5, HTTP, and HTTPS.
# Note that if your proxy server issues a self-signed certificate, you may need
# NODE_EXTRA_CA_CERTS set to the path to your certificate. You will need to set
# that variable in your environment, not in this file.
# HTTP_AGENT_PROXY_URL=http://test:test@127.0.0.1:8000

# ------------------------------------------------------------------------------
# Secrets and keys:
@@ -164,11 +202,10 @@ GCP_CREDENTIALS=project-id:client-email:region:private-key

# With user_token gatekeeper, the admin password used to manage users.
# ADMIN_KEY=your-very-secret-key
# To restrict access to the admin interface to specific IP addresses, set the
# ADMIN_WHITELIST environment variable to a comma-separated list of CIDR blocks.
# Restrict access to the admin interface to specific IP addresses, specified
# as a comma-separated list of CIDR ranges.
# ADMIN_WHITELIST=0.0.0.0/0

# With firebase_rtdb gatekeeper storage, the Firebase project credentials.
# FIREBASE_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# FIREBASE_RTDB_URL=https://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.firebaseio.com
+1 -1
@@ -7,5 +7,5 @@
build
greeting.md
node_modules

.windsurfrules
http-client.private.env.json
@@ -0,0 +1,33 @@
You are a Senior Full Stack Developer and an Expert in ReactJS, NextJS, JavaScript, TypeScript, HTML, CSS and modern UI/UX frameworks (e.g., TailwindCSS, Shadcn, Radix). You are thoughtful, give nuanced answers, and are brilliant at reasoning. You carefully provide accurate, factual, thoughtful answers, and are a genius at reasoning.

- Follow the user's requirements carefully and to the letter.
- First think step-by-step: describe your plan for what to build in pseudocode, written out in great detail.
- Confirm, then write code!
- Always write correct, best-practice, DRY (Don't Repeat Yourself), bug-free, fully functional code, aligned with the Code Implementation Guidelines listed below.
- Favor easy-to-read code over performant code.
- Fully implement all requested functionality.
- Leave NO todos, placeholders, or missing pieces.
- Ensure code is complete! Verify it is thoroughly finalized.
- Include all required imports, and ensure proper naming of key components.
- Be concise. Minimize any other prose.
- If you think there might not be a correct answer, say so.
- If you do not know the answer, say so instead of guessing.

### Coding Environment
The user asks questions about the following coding languages:
- ReactJS
- NextJS
- JavaScript
- TypeScript
- TailwindCSS
- HTML
- CSS

### Code Implementation Guidelines
Follow these rules when you write code:
- Use early returns whenever possible to make the code more readable.
- Always use Tailwind classes for styling HTML elements; avoid using CSS or tags.
- Use "class:" instead of the ternary operator in class tags whenever possible.
- Use descriptive variable and function/const names. Also, event functions should be named with a "handle" prefix, like "handleClick" for onClick and "handleKeyDown" for onKeyDown.
- Implement accessibility features on elements. For example, a tag should have a tabindex="0", aria-label, on:click, and on:keydown, and similar attributes.
- Use consts instead of functions, for example, "const toggle = () =>". Also, define a type if possible.
@@ -0,0 +1,321 @@
# Project Codebase Guide

This document serves as a guide and index for the project codebase, designed to help developers and AI agents quickly understand its structure, components, and how to contribute.

## Table of Contents

1. [Project Overview](#project-overview)
2. [Directory Structure](#directory-structure)
3. [Core Components](#core-components)
   * [Configuration (`src/config.ts`)](#configuration)
   * [Server Entry Point (`src/server.ts`)](#server-entry-point)
   * [Proxy Layer (`src/proxy/`)](#proxy-layer)
   * [User Management (`src/user/`)](#user-management)
   * [Admin Interface (`src/admin/`)](#admin-interface)
   * [Shared Utilities (`src/shared/`)](#shared-utilities)
4. [Proxy Functionality](#proxy-functionality)
   * [Routing (`src/proxy/routes.ts`)](#proxy-routing)
   * [Supported Models & Providers](#supported-models--providers)
   * [Middleware (`src/proxy/middleware/`)](#proxy-middleware)
   * [Adding New Models](#adding-new-models)
   * [Adding New APIs/Providers](#adding-new-apisproviders)
5. [Model Management](#model-management)
   * [Model Family Definitions](#model-family-definitions)
   * [Adding OpenAI Models](#adding-openai-models)
   * [Model Mapping & Routing](#model-mapping--routing)
   * [Service Information](#service-information)
   * [Step-by-Step Guide for Adding a New Model](#step-by-step-guide-for-adding-a-new-model)
   * [Model Patterns and Versioning](#model-patterns-and-versioning)
   * [Response Format Handling](#response-format-handling)
6. [Key Management](#key-management)
   * [Key Pool System](#key-pool-system)
   * [Provider-Specific Key Management](#provider-specific-key-management)
   * [Key Rotation and Health Checks](#key-rotation-and-health-checks)
7. [Data Management](#data-management)
   * [Database (`src/shared/database/`)](#database)
   * [File Storage (`src/shared/file-storage/`)](#file-storage)
8. [Authentication & Authorization](#authentication--authorization)
9. [Logging & Monitoring](#logging--monitoring)
10. [Deployment](#deployment)
11. [Contributing](#contributing)

## Project Overview

This project provides a proxy layer for various Large Language Models (LLMs) and potentially other AI APIs. It aims to offer a unified interface, manage API keys securely, handle rate limiting and usage tracking, and potentially add features like response caching or prompt modification.

## Directory Structure

```
.
├── .env.example         # Example environment variables
├── .gitattributes       # Git attributes
├── .gitignore           # Git ignore rules
├── .husky/              # Git hooks
├── .prettierrc          # Code formatting rules
├── CODEBASE_GUIDE.md    # This file
├── README.md            # Project README
├── data/                # Data files (e.g., SQLite DB)
├── docker/              # Docker configuration
├── docs/                # Documentation files
├── http-client.env.json # HTTP client environment
├── package-lock.json    # NPM lock file
├── package.json         # Project dependencies and scripts
├── patches/             # Patches for dependencies
├── public/              # Static assets served by the web server
├── render.yaml          # Render deployment configuration
├── scripts/             # Utility scripts
├── src/                 # Source code
│   ├── admin/           # Admin interface logic
│   ├── config.ts        # Application configuration
│   ├── info-page.ts     # Logic for the info page
│   ├── logger.ts        # Logging setup
│   ├── proxy/           # Core proxy logic for different providers
│   ├── server.ts        # Express server setup and main entry point
│   ├── service-info.ts  # Service information logic
│   ├── shared/          # Shared utilities, types, and modules
│   └── user/            # User management logic
├── tsconfig.json        # TypeScript configuration
```

## Core Components

### Configuration (`src/config.ts`)

* Loads environment variables and defines application settings.
* Contains configuration for database connections, API keys (placeholders/retrieval methods), logging levels, rate limits, etc.
* Uses `dotenv` and potentially a schema validation library (like Zod) to ensure required variables are present.

### Server Entry Point (`src/server.ts`)

* Initializes the Express application.
* Sets up core middleware (e.g., body parsing, CORS, logging).
* Mounts routers for different parts of the application (admin, user, proxy).
* Starts the HTTP server.

### Proxy Layer (`src/proxy/`)

* The heart of the application, handling requests to downstream AI APIs.
* Contains individual modules for each supported provider (e.g., `openai.ts`, `anthropic.ts`).
* Handles request transformation, authentication against the target API, and response handling.
* Uses middleware for common proxy tasks.

### User Management (`src/user/`)

* Handles user registration, login, session management, and potentially API key generation/management for end-users.
* Likely interacts with the database (`src/shared/database/`).

### Admin Interface (`src/admin/`)

* Provides an interface for administrators to manage users, monitor usage, configure settings, etc.
* May have its own set of routes and views.

### Shared Utilities (`src/shared/`)

* Contains reusable code shared across different modules.
* `api-schemas/`: Zod schemas for API request/response validation.
* `database/`: Database connection, schemas (e.g., Prisma), and query logic.
* `errors.ts`: Custom error classes.
* `key-management/`: Logic for managing API keys (if applicable).
* `models.ts`: Core data models/types used throughout the application.
* `prompt-logging/`: Logic for logging prompts and responses.
* `tokenization/`: Utilities for counting tokens.
* `utils.ts`: General utility functions.

## Proxy Functionality

### Proxy Routing (`src/proxy/routes.ts`)

* Defines the API endpoints for the proxy service (e.g., `/v1/chat/completions`).
* Maps incoming requests to the appropriate provider-specific handler based on the request path, headers, or body content (e.g., model requested).
* Applies relevant middleware (authentication, rate limiting, queuing, etc.).

### Supported Models & Providers

* **OpenAI:** Handled in `src/proxy/openai.ts`. Supports models like GPT-4 and GPT-3.5-turbo, as well as o-series models (o1, o1-mini, o1-pro, o3, o3-mini, o3-pro, o4-mini). Handles chat completions and potentially image generation (`src/proxy/openai-image.ts`).
* **Anthropic:** Handled in `src/proxy/anthropic.ts`. Supports Claude models. May use AWS Bedrock (`src/proxy/aws-claude.ts`) or Anthropic's direct API.
* **Google AI / Vertex AI:** Handled in `src/proxy/google-ai.ts` and `src/proxy/gcp.ts`. Supports Gemini models (gemini-flash, gemini-pro, gemini-ultra).
* **Mistral AI:** Handled in `src/proxy/mistral-ai.ts`. Supports Mistral models via their API or potentially AWS (`src/proxy/aws-mistral.ts`).
* **Azure OpenAI:** Handled in `src/proxy/azure.ts`. Provides an alternative endpoint for OpenAI models via Azure.
* **DeepSeek:** Handled in `src/proxy/deepseek.ts`.
* **xAI:** Handled in `src/proxy/xai.ts`.
* **AWS (General):** `src/proxy/aws.ts` might contain shared AWS logic (e.g., authentication).

### Middleware (`src/proxy/middleware/`)

* **`gatekeeper.ts`:** Likely handles initial request validation, authentication, and authorization checks before hitting provider logic. Checks origin (`check-origin.ts`) and potentially custom tokens (`check-risu-token.ts`).
* **`rate-limit.ts`:** Implements rate limiting logic, potentially per-user or per-key.
* **`queue.ts`:** Manages request queuing, possibly to handle concurrency limits or prioritize requests.

### Adding New Models

1. **Identify the Provider:** Determine if the new model belongs to an existing provider (e.g., a new OpenAI model) or a new one.
2. **Update Provider Logic (if existing):**
   * Modify the relevant provider file (e.g., `src/proxy/openai.ts`).
   * Update model lists or logic that selects/validates models.
   * Adjust any request/response transformations if the new model has a different API schema.
   * Update model information in shared files like `src/shared/models.ts` if necessary.
3. **Update Routing (if necessary):** Modify `src/proxy/routes.ts` if the new model requires a different endpoint or routing logic.
4. **Configuration:** Add any new API keys or configuration parameters to `.env.example` and `src/config.ts`.
5. **Testing:** Add unit or integration tests for the new model.

### Adding New APIs/Providers

1. **Create Provider Module:** Create a new file in `src/proxy/` (e.g., `src/proxy/new-provider.ts`).
2. **Implement Handler** (a rough sketch follows this list):
   * Write the core logic to handle requests for this provider. This typically involves:
     * Receiving the standardized request from the router.
     * Transforming the request into the format expected by the new provider's API.
     * Authenticating with the new provider's API (fetching keys from config).
     * Making the API call (consider using a robust HTTP client like `axios` or `node-fetch`).
     * Handling streaming responses if applicable (using helpers from `src/shared/streaming.ts`).
     * Transforming the provider's response back into a standardized format.
     * Handling errors gracefully.
3. **Add Routing:**
   * Import the new handler in `src/proxy/routes.ts`.
   * Add new routes or modify existing routing logic to direct requests to the new handler based on model name, path, or other criteria.
   * Apply necessary middleware (gatekeeper, rate limiter, queue).
4. **Create Key Management:**
   * Create a new directory in `src/shared/key-management/` for the provider.
   * Implement provider-specific key management (key checkers, token counters).
5. **Configuration:**
   * Add configuration variables (API keys, base URLs) to `.env.example` and `src/config.ts`.
   * Update `src/config.ts` to load and validate the new variables.
6. **Model Information:** Add details about the new provider and its models to `src/shared/models.ts` or similar shared locations.
7. **Tokenization (if applicable):** If token counting is needed, add or update tokenization logic in `src/shared/tokenization/`.
8. **Testing:** Implement thorough tests for the new provider integration.
9. **Documentation:** Update this guide and any other relevant documentation.
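
As a rough illustration of step 2, a new provider module might take a shape like the following. This is a hedged sketch, not the project's actual code: the endpoint URL, the `NEW_PROVIDER_KEY` variable, and the response shape are hypothetical, and the real proxy wires requests through `http-proxy` middleware and the key pool rather than calling `fetch` directly.

```typescript
// src/proxy/new-provider.ts (hypothetical sketch)
import { Request, Response } from "express";

export async function handleNewProviderRequest(req: Request, res: Response) {
  // 1. Transform the incoming OpenAI-style body into the provider's format.
  const body = {
    model: req.body.model,
    prompt: (req.body.messages ?? []).map((m: any) => m.content).join("\n"),
    max_tokens: req.body.max_tokens,
  };

  // 2. Authenticate using a key from configuration (hypothetical variable).
  const apiKey = process.env.NEW_PROVIDER_KEY;
  if (!apiKey) return res.status(500).json({ error: "No provider key configured" });

  // 3. Call the provider's API and pass upstream errors through.
  const upstream = await fetch("https://api.new-provider.example/v1/complete", {
    method: "POST",
    headers: { Authorization: `Bearer ${apiKey}`, "Content-Type": "application/json" },
    body: JSON.stringify(body),
  });
  if (!upstream.ok) return res.status(upstream.status).json(await upstream.json());

  // 4. Transform the provider's response back into an OpenAI-compatible shape.
  const data = (await upstream.json()) as { text?: string };
  return res.json({
    choices: [{ index: 0, message: { role: "assistant", content: data.text ?? "" } }],
  });
}
```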

## Model Management

### Model Family Definitions

* **Model Family Definitions:** The project uses a family-based approach to group similar models together. These are defined in `src/shared/models.ts`.
* Each model is part of a model family (e.g., "gpt4", "claude", "gemini-pro"), which helps with routing, key management, and feature support.
* The `MODEL_FAMILIES` array contains all supported model families, and the `MODEL_FAMILY_SERVICE` mapping connects each family to its provider service, as sketched below.
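
The description above suggests roughly the following shape; the entries here are illustrative, not the project's full lists:

```typescript
// Sketch of the family-based approach described above (abbreviated).
type ModelFamily = "turbo" | "gpt4" | "claude" | "gemini-pro";

const MODEL_FAMILY_SERVICE: Record<ModelFamily, string> = {
  turbo: "openai",
  gpt4: "openai",
  claude: "anthropic",
  "gemini-pro": "google-ai",
};

// Regex patterns map concrete model names to a family, in the spirit of
// OPENAI_MODEL_FAMILY_MAP.
const OPENAI_MODEL_FAMILY_MAP: Record<string, ModelFamily> = {
  "^gpt-4(-\\d{4})?$": "gpt4",
  "^gpt-3\\.5-turbo": "turbo",
};

function getFamily(model: string): ModelFamily | undefined {
  for (const [pattern, family] of Object.entries(OPENAI_MODEL_FAMILY_MAP)) {
    if (new RegExp(pattern).test(model)) return family;
  }
  return undefined;
}

// getFamily("gpt-4") === "gpt4"; getFamily("gpt-3.5-turbo-0125") === "turbo"
```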

### Adding OpenAI Models

When adding new OpenAI models to the codebase, there are several files that must be updated (a sketch of the step 2 check follows the list):

1. **Update Model Types (`src/shared/models.ts`):**
   - Add the new model to the `OpenAIModelFamily` type
   - Add the model to the `MODEL_FAMILIES` array
   - Add the Azure variants for the model if applicable
   - Add the model to the `MODEL_FAMILY_SERVICE` mapping
   - Update `OPENAI_MODEL_FAMILY_MAP` with regex patterns to match the model names

2. **Update Context Size Limits (`src/proxy/middleware/request/preprocessors/validate-context-size.ts`):**
   - Add regex matching for the new model
   - Set the appropriate context token limit for the model

3. **Update Token Cost Tracking (`src/shared/stats.ts`):**
   - Add pricing information for the new model in the `getTokenCostUsd` function
   - Include both input and output prices in the comments for clarity

4. **Update Feature Support Checks (`src/proxy/openai.ts`):**
   - If the model supports special features like the reasoning API parameter (`isO1Model` function), update the appropriate function
   - For model feature detection, prefer using regex patterns over explicit lists when possible, as this handles date-stamped versions better

5. **Update Display Names (`src/info-page.ts`):**
   - Add friendly display names for the new models in the `MODEL_FAMILY_FRIENDLY_NAME` object

6. **Update Key Management Provider Files:**
   - For OpenAI keys in `src/shared/key-management/openai/provider.ts`, add token counters for the new models
   - For Azure OpenAI keys in `src/shared/key-management/azure/provider.ts`, add token counters for the Azure versions
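
To make step 2 concrete, a context-size check along the lines described might look like this. The limits and the `gpt-5` pattern are invented for illustration; the real preprocessor's logic differs in detail:

```typescript
// Hypothetical sketch of validate-context-size-style logic (limits invented).
function maxContextFor(model: string): number {
  if (/^gpt-5/.test(model)) return 256_000; // new model being added (hypothetical)
  if (/^gpt-4o/.test(model)) return 128_000;
  return 16_384; // fallback
}

function validateContextSize(model: string, promptTokens: number, maxOutputTokens: number) {
  const limit = maxContextFor(model);
  if (promptTokens + maxOutputTokens > limit) {
    throw new Error(
      `Context too large for ${model}: ${promptTokens + maxOutputTokens} > ${limit}`
    );
  }
}
```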

### Model Patterns and Versioning

The codebase handles several patterns for model naming and versioning:

1. **Date-stamped Models:** Many models include date stamps (e.g., `gpt-4-0125-preview`). The regex patterns in `OPENAI_MODEL_FAMILY_MAP` account for these with patterns like `^gpt-4o(-\\d{4}-\\d{2}-\\d{2})?$`.

2. **O-Series Models:** OpenAI's o-series models (o1, o1-mini, o1-pro, o3, o3-mini, o3-pro, o4-mini) follow a different naming convention. The codebase handles these with dedicated model families and regex patterns.

3. **Preview/Non-Preview Variants:** Some models have preview variants (e.g., `gpt-4.5-preview`). The regex patterns in `OPENAI_MODEL_FAMILY_MAP` account for these with patterns like `^gpt-4\\.5(-preview)?(-\\d{4}-\\d{2}-\\d{2})?$`.

When adding new models, try to follow the existing patterns for consistency; the sketch below shows how the date-stamp pattern behaves.
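
```typescript
// Pattern copied verbatim from the guide above.
const gpt4oPattern = /^gpt-4o(-\d{4}-\d{2}-\d{2})?$/;

console.log(gpt4oPattern.test("gpt-4o"));            // true
console.log(gpt4oPattern.test("gpt-4o-2024-05-13")); // true (date-stamped)
console.log(gpt4oPattern.test("gpt-4o-mini"));       // false (needs its own family)
```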

### Response Format Handling

The codebase includes special handling for different API response formats:

1. **Chat vs. Text Completions:** There is transformation logic in `openai.ts` to convert between chat completions and text completions formats (`transformTurboInstructResponse`).

2. **Newer API Formats:** For newer APIs like the Responses API, there is transformation logic (`transformResponsesApiResponse`) to convert responses to a format compatible with existing clients.

When adding support for new models or APIs, consider whether transformation is needed to maintain compatibility with existing clients, along the lines of the sketch below.
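
As a hedged sketch of the kind of adaptation such a transform performs; the input shape here is invented, not the actual Responses API schema:

```typescript
// Convert a hypothetical newer-API response into a chat-completion-style object.
interface HypotheticalNewApiResponse {
  model: string;
  output_text: string; // invented field name
}

function toChatCompletion(r: HypotheticalNewApiResponse) {
  return {
    object: "chat.completion",
    model: r.model,
    choices: [
      {
        index: 0,
        message: { role: "assistant", content: r.output_text },
        finish_reason: "stop",
      },
    ],
  };
}
```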

## Key Management

### Key Pool System

The project uses a sophisticated key pool system (`src/shared/key-management/key-pool.ts`) to manage API keys for different providers. Key features, illustrated by the minimal sketch after this list, include:

* **Key Selection:** The system selects the appropriate key based on model family, region preferences, and other criteria.
* **Rotation:** Keys are rotated to distribute usage and avoid hitting rate limits.
* **Health Checks:** Keys are checked periodically to ensure they're still valid and within rate limits.
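
A minimal sketch of those three behaviors, assuming a simplified key shape (the real `KeyPool` tracks far more state, such as rate limits and provider-specific quirks):

```typescript
interface ProxyKey {
  key: string;
  families: string[]; // model families this key supports
  disabled: boolean;  // set when a health check fails
  lastUsed: number;
}

class MiniKeyPool {
  constructor(private keys: ProxyKey[]) {}

  // Selection + rotation: pick the least-recently-used healthy key
  // that supports the requested model family.
  get(family: string): ProxyKey {
    const eligible = this.keys.filter(
      (k) => !k.disabled && k.families.includes(family)
    );
    if (eligible.length === 0) throw new Error(`No keys available for ${family}`);
    eligible.sort((a, b) => a.lastUsed - b.lastUsed);
    const chosen = eligible[0];
    chosen.lastUsed = Date.now();
    return chosen;
  }

  // Health checks: a failing key is temporarily sidelined.
  disable(key: string): void {
    const found = this.keys.find((k) => k.key === key);
    if (found) found.disabled = true;
  }
}
```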

### Provider-Specific Key Management

Each provider has its own key management module in `src/shared/key-management/`:

* **Key Checkers:** Each provider implements key checkers to validate keys and check their status.
* **Token Counters:** Providers implement token counting logic specific to their pricing model.
* **Models Support:** Keys are associated with the specific model families they support.

When adding a new model or provider, you'll need to update or create the appropriate key management files.

### Key Rotation and Health Checks

The key pool system includes logic for:

* **Rotation Strategy:** Keys are selected based on a prioritization strategy (`prioritize-keys.ts`).
* **Disabling Unhealthy Keys:** Keys that fail health checks are temporarily disabled.
* **Rate Limit Awareness:** The system tracks usage to avoid hitting provider rate limits.

## Data Management

### Database (`src/shared/database/`)

* Likely uses Prisma or a similar ORM.
* Defines database schemas (e.g., for users, API keys, usage logs).
* Provides functions for interacting with the database.
* Configuration is managed in `src/config.ts`.

### File Storage (`src/shared/file-storage/`)

* May be used for storing logs, cached data, or user-uploaded files.
* Could integrate with local storage or cloud providers (e.g., S3, GCS).

## Authentication & Authorization

* **User Auth:** Handled in `src/user/`, potentially using sessions (`src/shared/with-session.ts`) or JWTs.
* **Proxy Auth:** The `gatekeeper.ts` middleware likely verifies incoming requests to the proxy endpoints (a simplified sketch follows this list). This could involve checking:
  * Custom API keys stored in the database (`src/shared/database/`).
  * Specific tokens (`check-risu-token.ts`).
  * HMAC signatures (`src/shared/hmac-signing.ts`).
  * Origin checks (`check-origin.ts`).
* **Downstream Auth:** Each provider module (`src/proxy/*.ts`) handles authentication with the actual AI service API using keys from the configuration.
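
A simplified sketch of what a token-checking gatekeeper middleware could look like; the real `gatekeeper.ts` supports several modes (`none` | `proxy_key` | `user_token`) and consults the user store rather than an in-memory set:

```typescript
import { NextFunction, Request, Response } from "express";

// Illustrative only: accept requests bearing a known user token.
function gatekeeper(validTokens: Set<string>) {
  return (req: Request, res: Response, next: NextFunction) => {
    const token = req.header("Authorization")?.replace(/^Bearer\s+/, "");
    if (!token || !validTokens.has(token)) {
      return res.status(401).json({ error: "Invalid or missing user token" });
    }
    next();
  };
}
```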

## Logging & Monitoring

* **Logging:** Configured in `src/logger.ts`, likely using a library like `pino` or `winston`. Logs requests, errors, and important events.
* **Prompt Logging:** Specific logic for logging prompts and responses might exist in `src/shared/prompt-logging/`.
* **Stats/Monitoring:** `src/shared/stats.ts` might handle collecting and exposing application metrics.

## Deployment

* **Docker:** The project likely includes Docker configuration for containerized deployment.
* **Render:** The `render.yaml` file suggests the project is or can be deployed on Render.
* **Environment Variables:** The `.env.example` file provides a template for required environment variables in production.

## Contributing

When contributing to this project:

1. **Follow Coding Standards:** Use the established patterns and standards in the codebase. The `.prettierrc` file defines code formatting rules.
2. **Update Documentation:** Keep this guide updated when adding new components or changing existing ones.
3. **Add Tests:** Ensure your changes are tested appropriately.
4. **Update Configuration:** If your changes require new environment variables, update `.env.example`.

*This guide provides a high-level overview. For detailed information, refer to the specific source code files.*
@@ -1,15 +1,20 @@
# OAI Reverse Proxy

# OAI Reverse Proxy - just a shitty fork
Reverse proxy server for various LLM APIs.

### Table of Contents
- [What is this?](#what-is-this)
- [Features](#features)
- [Usage Instructions](#usage-instructions)
- [Self-hosting](#self-hosting)
- [Huggingface (outdated, not advised)](#huggingface-outdated-not-advised)
- [Render (outdated, not advised)](#render-outdated-not-advised)
- [Local Development](#local-development)
<!-- TOC -->
* [OAI Reverse Proxy](#oai-reverse-proxy)
  * [Table of Contents](#table-of-contents)
  * [What is this?](#what-is-this)
  * [Features](#features)
  * [Usage Instructions](#usage-instructions)
    * [Personal Use (single-user)](#personal-use-single-user)
      * [Updating](#updating)
      * [Local Development](#local-development)
  * [Self-hosting](#self-hosting)
  * [Building](#building)
  * [Forking](#forking)
<!-- TOC -->

## What is this?
This project allows you to run a reverse proxy server for various LLM APIs.
@@ -18,7 +23,7 @@ This project allows you to run a reverse proxy server for various LLM APIs.
- [x] Support for multiple APIs
  - [x] [OpenAI](https://openai.com/)
  - [x] [Anthropic](https://www.anthropic.com/)
  - [x] [AWS Bedrock](https://aws.amazon.com/bedrock/)
  - [x] [AWS Bedrock](https://aws.amazon.com/bedrock/) (Claude 4 is fucked, don't care)
  - [x] [Vertex AI (GCP)](https://cloud.google.com/vertex-ai/)
  - [x] [Google MakerSuite/Gemini API](https://ai.google.dev/)
  - [x] [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
@@ -28,40 +33,40 @@ This project allows you to run a reverse proxy server for various LLM APIs.
- [x] Simple role-based permissions
- [x] Per-model token quotas
- [x] Temporary user accounts
- [x] Prompt and completion logging
- [x] Event audit logging
- [x] Optional full logging of prompts and completions
- [x] Abuse detection and prevention

---
- [x] IP address and user token model invocation rate limits
- [x] IP blacklists
- [x] Proof-of-work challenge for access by anonymous users

## Usage Instructions
If you'd like to run your own instance of this server, you'll need to deploy it somewhere and configure it with your API keys. A few easy options are provided below, though you can also deploy it to any other service you'd like if you know what you're doing and the service supports Node.js.

### Personal Use (single-user)
If you just want to run the proxy server to use yourself, without hosting it for others:
1. Install [Node.js](https://nodejs.org/en/download/) >= 18.0.0
2. Clone this repository
3. Create a `.env` file in the root of the project and add your API keys. See the [.env.example](./.env.example) file for an example.
4. Install dependencies with `npm install`
5. Run `npm run build`
6. Run `npm start`

#### Updating
You must re-run `npm install` and `npm run build` whenever you pull new changes from the repository.

#### Local Development
Use `npm run start:dev` to run the proxy in development mode with watch mode enabled. Use `npm run type-check` to run the type checker across the project.

### Self-hosting
[See here for instructions on how to self-host the application on your own VPS or local machine.](./docs/self-hosting.md)
[See here for instructions on how to self-host the application on your own VPS or local machine and expose it to the internet for others to use.](./docs/self-hosting.md)

**Ensure you set the `TRUSTED_PROXIES` environment variable according to your deployment.** Refer to [.env.example](./.env.example) and [config.ts](./src/config.ts) for more information.

### Huggingface (outdated, not advised)
[See here for instructions on how to deploy to a Huggingface Space.](./docs/deploy-huggingface.md)

### Render (outdated, not advised)
[See here for instructions on how to deploy to Render.com.](./docs/deploy-render.md)

## Local Development
To run the proxy locally for development or testing, install Node.js >= 18.0.0 and follow the steps below.

1. Clone the repo
2. Install dependencies with `npm install`
3. Create a `.env` file in the root of the project and add your API keys. See the [.env.example](./.env.example) file for an example.
4. Start the server in development mode with `npm run start:dev`.

You can also use `npm run start:dev:tsc` to enable project-wide type checking at the cost of slower startup times. `npm run type-check` can be used to run type checking without starting the server.

## Building
To build the project, run `npm run build`. This will compile the TypeScript code to JavaScript and output it to the `build` directory.
To build the project, run `npm run build`. This will compile the TypeScript code to JavaScript and output it to the `build` directory. You should run this whenever you pull new changes from the repository.

Note that if you are trying to build the server on a very memory-constrained (<= 1GB) VPS, you may need to run the build with `NODE_OPTIONS=--max_old_space_size=2048 npm run build` to avoid running out of memory during the build process, assuming you have swap enabled. The application itself should run fine on a 512MB VPS for most reasonable traffic levels.

## Forking

If you are forking the repository on GitGud, you may wish to disable GitLab CI/CD, or you will be spammed with emails about failed builds due to not having any CI runners. You can do this by going to *Settings > General > Visibility, project features, permissions* and then disabling the "CI/CD" feature.
@@ -17,9 +17,8 @@ ARG GREETING_URL
RUN if [ -n "$GREETING_URL" ]; then \
      curl -sL "$GREETING_URL" > greeting.md; \
    fi
COPY package*.json greeting.md* ./
RUN npm install
COPY . .
RUN npm install
RUN npm run build
RUN --mount=type=secret,id=_env,dst=/etc/secrets/.env cat /etc/secrets/.env >> .env
EXPOSE 10000
@@ -1,6 +1,6 @@
# Deploy to Render.com

**⚠️ This method is no longer recommended. Please use the [self-hosting instructions](./self-hosting.md) instead.**
**⚠️ This method is no longer supported or recommended and may not work. Please use the [self-hosting instructions](./self-hosting.md) instead.**

Render.com offers a free tier that includes 750 hours of compute time per month. This is enough to run a single proxy instance 24/7. Instances shut down after 15 minutes without traffic but start up again automatically when a request is received. You can use something like https://app.checklyhq.com/ to ping your proxy every 15 minutes to keep it alive.
@@ -12,6 +12,7 @@ Several of these features require you to set secrets in your environment. If usi
- [Memory](#memory)
- [Firebase Realtime Database](#firebase-realtime-database)
  - [Firebase setup instructions](#firebase-setup-instructions)
- [SQLite Database](#sqlite-database)
- [Whitelisting admin IP addresses](#whitelisting-admin-ip-addresses)

## No user management (`GATEKEEPER=none`)
@@ -63,6 +64,17 @@ To use Firebase Realtime Database to persist user data, set the following enviro

The proxy server will attempt to connect to your Firebase Realtime Database at startup and will throw an error if it cannot connect. If you see this error, check that your `FIREBASE_RTDB_URL` and `FIREBASE_KEY` secrets are set correctly.

### SQLite Database

To use a local SQLite database file to persist user data, set the following environment variables:

- `GATEKEEPER_STORE`: Set this to `sqlite`.
- `SQLITE_USER_STORE_PATH` (Optional): Specifies the path to the SQLite database file.
  - If not set, it defaults to `data/user-store.sqlite` within the project directory.
  - Ensure that the directory where the SQLite file will be created (e.g., the `data/` directory) is writable by the application process.

Using SQLite provides a simple way to persist user data locally without relying on external services. User data will be saved to the specified file and will be available across server restarts.
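
For example, a minimal configuration using the SQLite store might look like this (values illustrative):

```
GATEKEEPER=user_token
GATEKEEPER_STORE=sqlite
# Optional; defaults to data/user-store.sqlite
SQLITE_USER_STORE_PATH=data/user-store.sqlite
```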

## Whitelisting admin IP addresses
You can add your own IP ranges to the `ADMIN_WHITELIST` environment variable for additional security.
Generated file: +1440 -440 (diff suppressed because it is too large)
+15 -10
@@ -5,10 +5,11 @@
  "scripts": {
    "build": "tsc && copyfiles -u 1 src/**/*.ejs build",
    "database:migrate": "ts-node scripts/migrate.ts",
    "postinstall": "patch-package",
    "prepare": "husky install",
    "start": "node build/server.js",
    "start": "node --trace-deprecation --trace-warnings build/server.js",
    "start:dev": "nodemon --watch src --exec ts-node --transpile-only src/server.ts",
    "start:replit": "tsc && node build/server.js",
    "start:debug": "ts-node --inspect --transpile-only src/server.ts",
    "start:watch": "nodemon --require source-map-support/register build/server.js",
    "type-check": "tsc --noEmit"
  },
@@ -36,24 +37,28 @@
    "csrf-csrf": "^2.3.0",
    "dotenv": "^16.3.1",
    "ejs": "^3.1.10",
    "express": "^4.18.2",
    "express": "^4.19.3",
    "express-session": "^1.17.3",
    "firebase-admin": "^12.3.1",
    "firebase-admin": "^12.5.0",
    "glob": "^10.3.12",
    "googleapis": "^122.0.0",
    "http-proxy-middleware": "^3.0.0-beta.1",
    "http-proxy": "1.18.1",
    "http-proxy-middleware": "^3.0.2",
    "ipaddr.js": "^2.1.0",
    "memorystore": "^1.6.7",
    "multer": "^1.4.5-lts.1",
    "node-schedule": "^2.1.1",
    "patch-package": "^8.0.0",
    "pino": "^8.11.0",
    "pino-http": "^8.3.3",
    "proxy-agent": "^6.4.0",
    "sanitize-html": "^2.13.0",
    "sharp": "^0.32.6",
    "sharp": "^0.34.2",
    "showdown": "^2.1.0",
    "source-map-support": "^0.5.21",
    "stream-json": "^1.8.0",
    "tiktoken": "^1.0.10",
    "tinyws": "^0.1.0",
    "uuid": "^9.0.0",
    "zlib": "^1.0.5",
    "zod": "^3.22.3",
@@ -73,7 +78,7 @@
    "@types/stream-json": "^1.7.7",
    "@types/uuid": "^9.0.1",
    "concurrently": "^8.0.1",
    "esbuild": "^0.17.16",
    "esbuild": "^0.25.5",
    "esbuild-register": "^3.4.2",
    "husky": "^8.0.3",
    "nodemon": "^3.0.1",
@@ -84,8 +89,8 @@
    "typescript": "^5.4.2"
  },
  "overrides": {
    "braces": "^3.0.3",
    "fast-xml-parser": "^4.4.1",
    "follow-redirects": "^1.15.4"
    "node-fetch@2.x": {
      "whatwg-url": "14.x"
    }
  }
}
@@ -0,0 +1,23 @@
# Patches
Contains monkey patches for certain packages, applied using `patch-package`.

## `http-proxy+1.18.1.patch`
Modifies the `http-proxy` package to work around an incompatibility with
body-parser and SOCKS5 proxies due to some esoteric stream handling behavior
when `socks-proxy-agent` is used instead of a generic http.Agent.

The modification adjusts the `buffer` property on ProxyServer's `options`
object to be a function that returns a stream, instead of a stream itself. This
allows us to give it a function which produces a new Readable from the
already-parsed request body.

With the old implementation we would need to create an entirely new ProxyServer
instance for each request, which is not ideal under heavy load.

`http-proxy` hasn't been updated in six years, so it's unlikely that this patch
will be broken by future updates, but it's still pinned to 1.18.1 for now.
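
To illustrate, usage of the patched option might look roughly like this: a sketch, not the project's actual wiring. Note that passing a function for `buffer` only works with this patch applied (hence the cast):

```typescript
import { Readable } from "stream";
import httpProxy from "http-proxy";

// One shared ProxyServer for all requests; the patched `buffer` option is
// invoked per request to rebuild a body stream after body-parser has already
// consumed the original request stream.
const proxy = httpProxy.createProxyServer({
  target: "https://api.example.com",
  changeOrigin: true,
  buffer: ((req: any) => Readable.from(JSON.stringify(req.body))) as any,
});
```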

### See also
https://github.com/chimurai/http-proxy-middleware/issues/40
https://github.com/chimurai/http-proxy-middleware/issues/299
https://github.com/http-party/node-http-proxy/pull/1027
@@ -0,0 +1,13 @@
diff --git a/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js b/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
index 7ae7355..c825c27 100644
--- a/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
+++ b/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
@@ -167,7 +167,7 @@ module.exports = {
      }
    }

-    (options.buffer || req).pipe(proxyReq);
+    (options.buffer(req) || req).pipe(proxyReq);

    proxyReq.on('response', function(proxyRes) {
      if(server) { server.emit('proxyRes', proxyRes, req, res); }
@@ -30,7 +30,6 @@ self.onmessage = async (event) => {
    nonce = data.nonce;

    const c = data.challenge;
    // decode salt to Uint8Array
    const salt = new Uint8Array(c.s.length / 2);
    for (let i = 0; i < c.s.length; i += 2) {
      salt[i / 2] = parseInt(c.s.slice(i, i + 2), 16);
@@ -99,7 +98,7 @@ const solve = async () => {
      self.postMessage({ type: "solved", nonce: solution.nonce });
      active = false;
    } else {
      if (Date.now() - lastNotify > 1000) {
      if (Date.now() - lastNotify >= 500) {
        console.log("Last nonce", nonce, "Hashes", hashesSinceLastNotify);
        self.postMessage({ type: "progress", hashes: hashesSinceLastNotify });
        lastNotify = Date.now();
@@ -0,0 +1,53 @@
const axios = require("axios");

function randomInteger(max) {
  return Math.floor(Math.random() * max + 1);
}

async function testQueue() {
  const requests = Array(10).fill(undefined).map(async function () {
    const maxTokens = randomInteger(2000);

    const headers = {
      "Authorization": "Bearer test",
      "Content-Type": "application/json",
      "X-Forwarded-For": `${randomInteger(255)}.${randomInteger(255)}.${randomInteger(255)}.${randomInteger(255)}`,
    };

    const payload = {
      model: "gpt-4o-mini-2024-07-18",
      max_tokens: 20 + maxTokens,
      stream: false,
      messages: [{ role: "user", content: "You are being benchmarked regarding your reliability at outputting exact, machine-comprehensible data. Output the sentence \"The quick brown fox jumps over the lazy dog.\" Do not precede it with quotemarks or any form of preamble, and do not output anything after the sentence." }],
      temperature: 0,
    };

    try {
      const response = await axios.post(
        "http://localhost:7860/proxy/openai/v1/chat/completions",
        payload,
        { headers }
      );

      if (response.status !== 200) {
        console.error(`Request ${maxTokens} finished with status code ${response.status} and response`, response.data);
        return;
      }

      const content = response.data.choices[0].message.content;

      console.log(
        `Request ${maxTokens} `,
        content === "The quick brown fox jumps over the lazy dog." ? "OK" : `mangled: ${content}`
      );
    } catch (error) {
      const msg = error.response;
      console.error(`Error in req ${maxTokens}:`, error.message, msg || "");
    }
  });

  await Promise.all(requests);
  console.log("All requests finished");
}

testQueue();
@@ -13,6 +13,7 @@ import { eventsApiRouter } from "./api/events";
|
||||
import { usersApiRouter } from "./api/users";
|
||||
import { usersWebRouter as webRouter } from "./web/manage";
|
||||
import { logger } from "../logger";
|
||||
import { keyPool } from "../shared/key-management";
|
||||
|
||||
const adminRouter = Router();
|
||||
|
||||
@@ -36,6 +37,43 @@ adminRouter.use(injectCsrfToken);
|
||||
adminRouter.use("/users", authorize({ via: "header" }), usersApiRouter);
|
||||
adminRouter.use("/events", authorize({ via: "header" }), eventsApiRouter);
|
||||
|
||||
// Special endpoint to validate organization verification status for all OpenAI keys
|
||||
// This checks both gpt-image-1 and o3 streaming access which require verified organizations
|
||||
adminRouter.post("/validate-gpt-image-keys", authorize({ via: "header" }), async (req, res) => {
|
||||
try {
|
||||
logger.info("Manual validation of organization verification status initiated");
|
||||
|
||||
// Use the specialized validation function that tests each key's organization verification
|
||||
// status using o3 streaming and waits for the results
|
||||
const results = await keyPool.validateGptImageAccess();
|
||||
|
||||
logger.info({
|
||||
total: results.total,
|
||||
verified: results.verified.length,
|
||||
removed: results.removed.length,
|
||||
errors: results.errors.length
|
||||
}, "Manual organization verification check completed");
|
||||
|
||||
return res.json({
|
||||
success: true,
|
||||
message: "Organization verification check completed",
|
||||
results: {
|
||||
total: results.total,
|
||||
verified: results.verified.length,
|
||||
removed: results.removed.length,
|
||||
errors: results.errors.length,
|
||||
// Only include hashes, not full keys
|
||||
verified_keys: results.verified,
|
||||
removed_keys: results.removed,
|
||||
error_details: results.errors
|
||||
}
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error({ error }, "Error validating organization verification status for OpenAI keys");
|
||||
return res.status(500).json({ error: "Failed to validate keys", details: error.message });
|
||||
}
|
||||
});
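
// Illustrative invocation of the endpoint above (assumptions: the proxy is
// listening on localhost:7860 and $ADMIN_KEY holds your admin bearer token):
//
//   curl -X POST http://localhost:7860/admin/validate-gpt-image-keys \
//     -H "Authorization: Bearer $ADMIN_KEY"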

adminRouter.use(checkCsrfToken);
adminRouter.use(injectLocals);
adminRouter.use("/", loginRouter);

+81 -10
@@ -132,10 +132,11 @@ router.post("/create-user", (req, res) => {
  )
  .transform((data: any) => {
    const expiresAt = Date.now() + data.temporaryUserDuration * 60 * 1000;
-   const tokenLimits = MODEL_FAMILIES.reduce((limits, model) => {
-     limits[model] = data[`temporaryUserQuota_${model}`];
+   const tokenLimits = MODEL_FAMILIES.reduce((limits, modelFamily) => {
+     const quotaValue = data[`temporaryUserQuota_${modelFamily}`];
+     limits[modelFamily] = typeof quotaValue === 'number' ? quotaValue : 0;
      return limits;
-   }, {} as UserTokenCounts);
+   }, {} as any);
    return { ...data, expiresAt, tokenLimits };
  });

@@ -189,7 +190,70 @@ router.post("/import-users", upload.single("users"), (req, res) => {
  if (!req.file) throw new HttpError(400, "No file uploaded");

  const data = JSON.parse(req.file.buffer.toString());
- const result = z.array(UserPartialSchema).safeParse(data.users);

  // Transform old token count format to new format
  const transformedUsers = data.users.map((user: any) => {
    if (user.tokenCounts) {
      const transformedTokenCounts: any = {};
      for (const [family, value] of Object.entries(user.tokenCounts)) {
        if (typeof value === 'number') {
          // Old format: just a number (legacy_total)
          transformedTokenCounts[family] = {
            input: 0,
            output: 0,
            legacy_total: value
          };
        } else if (typeof value === 'object' && value !== null) {
          // New format or partially new format
          const transformedCounts: { input: number; output: number; legacy_total?: number } = {
            input: (value as any).input || 0,
            output: (value as any).output || 0
          };
          if ((value as any).legacy_total !== undefined) {
            transformedCounts.legacy_total = (value as any).legacy_total;
          }
          transformedTokenCounts[family] = transformedCounts;
        }
      }
      user.tokenCounts = transformedTokenCounts;
    }

    // Handle tokenLimits - should be flat numbers
    if (user.tokenLimits) {
      const transformedTokenLimits: any = {};
      for (const [family, value] of Object.entries(user.tokenLimits)) {
        if (typeof value === 'number') {
          // Already in correct format
          transformedTokenLimits[family] = value;
        } else if (typeof value === 'object' && value !== null) {
          // Old format with input/output/legacy_total - sum them up
          const val = value as any;
          transformedTokenLimits[family] = (val.input ?? 0) + (val.output ?? 0) + (val.legacy_total ?? 0);
        }
      }
      user.tokenLimits = transformedTokenLimits;
    }

    // Handle tokenRefresh - should be flat numbers
    if (user.tokenRefresh) {
      const transformedTokenRefresh: any = {};
      for (const [family, value] of Object.entries(user.tokenRefresh)) {
        if (typeof value === 'number') {
          // Already in correct format
          transformedTokenRefresh[family] = value;
        } else if (typeof value === 'object' && value !== null) {
          // Old format with input/output/legacy_total - sum them up
          const val = value as any;
          transformedTokenRefresh[family] = (val.input ?? 0) + (val.output ?? 0) + (val.legacy_total ?? 0);
        }
      }
      user.tokenRefresh = transformedTokenRefresh;
    }

    return user;
});
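
// For reference, the two tokenCounts shapes this migration reconciles
// (illustrative values only, derived from the branches above):
//   legacy:  { "turbo": 123456 }
//   current: { "turbo": { "input": 0, "output": 0, "legacy_total": 123456 } }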

+ const result = z.array(UserPartialSchema).safeParse(transformedUsers);
  if (!result.success) throw new HttpError(400, result.error.toString());

  const upserts = result.data.map((user) => userStore.upsertUser(user));
@@ -274,6 +338,7 @@ router.post("/maintenance", (req, res) => {
    "aws",
    "gcp",
    "azure",
+   "google-ai"
  ];
  checkable.forEach((s) => keyPool.recheck(s));
  const keyCount = keyPool
@@ -344,10 +409,11 @@ router.post("/maintenance", (req, res) => {
  case "setDifficulty": {
    const selected = req.body["pow-difficulty"];
    const valid = ["low", "medium", "high", "extreme"];
-   if (!selected || !valid.includes(selected)) {
-     throw new HttpError(400, "Invalid difficulty" + selected);
+   const isNumber = Number.isInteger(Number(selected));
+   if (!selected || (!valid.includes(selected) && !isNumber)) {
+     throw new HttpError(400, "Invalid difficulty " + selected);
    }
-   config.powDifficultyLevel = selected;
+   config.powDifficultyLevel = isNumber ? Number(selected) : selected;
    invalidatePowChallenges();
    break;
  }
@@ -545,9 +611,14 @@ router.post("/generate-stats", (req, res) => {
function getSumsForUser(user: User) {
  const sums = MODEL_FAMILIES.reduce(
    (s, model) => {
-     const tokens = user.tokenCounts[model] ?? 0;
-     s.sumTokens += tokens;
-     s.sumCost += getTokenCostUsd(model, tokens);
+     const counts = user.tokenCounts[model] ?? { input: 0, output: 0 };
+     // Ensure inputTokens and outputTokens are numbers, defaulting to 0 if NaN or undefined
+     const inputTokens = Number(counts.input) || 0;
+     const outputTokens = Number(counts.output) || 0;
+     // We could also consider legacy_total here if input and output are 0.
+     // For now, sumTokens and sumCost will be based on current input/output.
+     s.sumTokens += inputTokens + outputTokens;
+     s.sumCost += getTokenCostUsd(model, inputTokens, outputTokens);
      return s;
    },
    { sumTokens: 0, sumCost: 0, prettyUsage: "" }

@@ -38,15 +38,20 @@
<h3>Difficulty Level</h3>
<div>
  <label for="difficulty">Difficulty Level:</label>
- <span id="currentDifficulty">Current: <%= difficulty %></span>
- <select name="difficulty" id="difficulty">
+ <select name="difficulty" id="difficulty" onchange="difficultyChanged(event)">
    <option value="low">Low</option>
    <option value="medium">Medium</option>
    <option value="high">High</option>
    <option value="extreme">Extreme</option>
+   <option value="custom">Custom</option>
  </select>
+ <div id="custom-difficulty-container" style="display: none">
+   <label for="customDifficulty">Hashes required (average):</label>
+   <input type="number" id="customDifficulty" value="0" min="1" max="1000000000" />
+ </div>
  <button onclick='doAction("setDifficulty")'>Update Difficulty</button>
</div>
+ <div><span id="currentDifficulty">Current Difficulty: <%= difficulty %></span></div>
<% } %>
<form id="maintenanceForm" action="/admin/manage/maintenance" method="post">
  <input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
@@ -63,15 +68,15 @@
<div>
  <h2>IP Whitelists and Blacklists</h2>
  <p>
-   You can specify IP ranges to whitelist or blacklist from accessing the proxy. Note that changes here are not
-   persisted across server restarts. If you want to make changes permanent, you can copy the values to your deployment
-   configuration.
- </p>
- <p>
-   Entries can be specified as single addresses or
+   You can specify IP ranges to whitelist or blacklist from accessing the proxy. Entries can be specified as single
+   addresses or
    <a href="https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation">CIDR notation</a>. IPv6 is
    supported but not recommended for use with the current version of the proxy.
  </p>
+ <p>
+   <strong>Note:</strong> Changes here are not persisted across server restarts. If you want to make changes permanent,
+   you can copy the values to your deployment configuration.
+ </p>
  <% for (let i = 0; i < whitelists.length; i++) { %>
    <%- include("partials/admin-cidr-widget", { list: whitelists[i] }) %>
  <% } %>
@@ -99,10 +104,25 @@
</div>

<script>
+ function difficultyChanged(event) {
+   const value = event.target.value;
+   if (value === "custom") {
+     document.getElementById("custom-difficulty-container").style.display = "block";
+   } else {
+     document.getElementById("custom-difficulty-container").style.display = "none";
+   }
+ }

  function doAction(action) {
    document.getElementById("hiddenAction").value = action;
    if (action === "setDifficulty") {
-     document.getElementById("hiddenDifficulty").value = document.getElementById("difficulty").value;
+     const selected = document.getElementById("difficulty").value;
+     const hiddenDifficulty = document.getElementById("hiddenDifficulty");
+     if (selected === "custom") {
+       hiddenDifficulty.value = document.getElementById("customDifficulty").value;
+     } else {
+       hiddenDifficulty.value = selected;
+     }
    }
    document.getElementById("maintenanceForm").submit();
  }

@@ -18,13 +18,19 @@
</li>
<li>
  <code>tokenCounts</code> (optional): the number of tokens the user has
- consumed. This should be an object with keys <code>turbo</code>,
- <code>gpt4</code>, and <code>claude</code>.
+ consumed. This should be an object with model family keys (e.g. <code>turbo</code>,
+ <code>gpt4</code>, <code>claude</code>), each containing an object with
+ <code>input</code> and <code>output</code> token counts.
</li>
<li>
- <code>tokenLimits</code> (optional): the number of tokens the user can
- consume. This should be an object with keys <code>turbo</code>,
- <code>gpt4</code>, and <code>claude</code>.
+ <code>tokenLimits</code> (optional): the maximum number of tokens the user can
+ consume. This should be an object with model family keys (e.g. <code>turbo</code>,
+ <code>gpt4</code>, <code>claude</code>), each containing a single number
+ representing the total token quota.
</li>
+ <li>
+   <code>tokenRefresh</code> (optional): the amount of tokens to refresh when quotas
+   are reset. Same format as <code>tokenLimits</code>; see the combined example below.
+ </li>
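<li>
  Putting the three fields together, an imported user entry might look like this
  (illustrative values only; the model family keys depend on your configuration):
  <pre>
{
  "token": "...",
  "tokenCounts": { "turbo": { "input": 1000, "output": 250 } },
  "tokenLimits": { "turbo": 500000 },
  "tokenRefresh": { "turbo": 100000 }
}
  </pre>
</li>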
<li>
  <code>createdAt</code> (optional): the timestamp when the user was created

+155 -40
@@ -29,10 +29,40 @@ type Config = {
   * same but the APIs are different. Vertex is the GCP product for enterprise.
   **/
  googleAIKey?: string;
  /**
   * Comma-delimited list of Google AI experimental model names that are
   * allowed to bypass the experimental model block. By default, all models
   * containing "exp" are blocked, but specific models listed here will be
   * permitted.
   *
   * @example "gemini-2.0-flash-exp,gemini-exp-1206"
   */
  allowedExpModels?: string;
  /**
   * Comma-delimited list of Mistral AI API keys.
   */
  mistralAIKey?: string;
  /**
   * Comma-delimited list of Deepseek API keys.
   */
  deepseekKey?: string;
  /**
   * Comma-delimited list of xAI (Grok) API keys.
   */
  xaiKey?: string;
  /**
   * Comma-delimited list of Cohere API keys.
   */
  cohereKey?: string;
  /**
   * Comma-delimited list of Qwen API keys.
   */
  qwenKey?: string;
  /**
   * Comma-delimited list of Moonshot API keys.
   */
  moonshotKey?: string;

  /**
   * Comma-delimited list of AWS credentials. Each credential item should be a
   * colon-delimited list of access key, secret key, and AWS region.
@@ -73,11 +103,6 @@ type Config = {
   * management mode is set to 'user_token'.
   */
  adminKey?: string;
- /**
-  * The password required to view the service info/status page. If not set, the
-  * info page will be publicly accessible.
-  */
- serviceInfoPassword?: string;
  /**
   * Which user management mode to use.
   * - `none`: No user management. Proxy is open to all requests with basic
@@ -94,10 +119,14 @@ type Config = {
   * - `memory`: Users are stored in memory and are lost on restart (default)
   * - `firebase_rtdb`: Users are stored in a Firebase Realtime Database;
   *   requires `firebaseKey` and `firebaseRtdbUrl` to be set.
+  * - `sqlite`: Users are stored in an SQLite database; requires
+  *   `sqliteUserStorePath` to be set.
   */
- gatekeeperStore: "memory" | "firebase_rtdb";
+ gatekeeperStore: "memory" | "firebase_rtdb" | "sqlite";
  /** URL of the Firebase Realtime Database if using the Firebase RTDB store. */
  firebaseRtdbUrl?: string;
+ /** Path to the SQLite database file for storing user data. */
+ sqliteUserStorePath?: string;
  /**
   * Base64-encoded Firebase service account key if using the Firebase RTDB
   * store. Note that you should encode the *entire* JSON key file, not just the
@@ -356,7 +385,7 @@ type Config = {
   *
   * Defaults to no services, meaning image prompts are disabled. Use a comma-
   * separated list. Available services are:
-  * openai,anthropic,google-ai,mistral-ai,aws,gcp,azure
+  * openai,anthropic,google-ai,mistral-ai,aws,gcp,azure,xai
   */
  allowedVisionServices: LLMService[];
  /**
@@ -378,6 +407,51 @@ type Config = {
   * Takes precedence over the adminWhitelist.
   */
  ipBlacklist: string[];
  /**
   * If set, pushes requests further back into the queue according to their
   * token costs by factor*tokens*milliseconds (or more intuitively
   * factor*thousands_of_tokens*seconds).
   * Accepts floats.
   */
tokensPunishmentFactor: number;
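  // Worked example for the formula above (illustrative, not an extra option):
  // with TOKENS_PUNISHMENT_FACTOR=0.5, a 4000-token request is pushed back by
  // 0.5 * 4000 tokens * 1 ms = 2000 ms, i.e. 0.5 * 4 thousand-tokens * 1 s = 2 s.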

  /**
   * Configuration for HTTP requests made by the proxy to other servers, such
   * as when checking keys or forwarding users' requests to external services.
   * If not set, all requests will be made using the default agent.
   *
   * If set, the proxy may make requests to other servers using the specified
   * settings. This is useful if you wish to route users' requests through
   * another proxy or VPN, or if you have multiple network interfaces and want
   * to use a specific one for outgoing requests.
   */
  httpAgent?: {
    /**
     * The name of the network interface to use. The first external IPv4 address
     * belonging to this interface will be used for outgoing requests.
     */
    interface?: string;
    /**
     * The URL of a proxy server to use. Supports SOCKS4, SOCKS5, HTTP, and
     * HTTPS. If not set, requests will be made using the default agent.
     * - SOCKS4: `socks4://some-socks-proxy.com:9050`
     * - SOCKS5: `socks5://username:password@some-socks-proxy.com:9050`
     * - HTTP: `http://proxy-server-over-tcp.com:3128`
     * - HTTPS: `https://proxy-server-over-tls.com:3129`
     *
     * **Note:** If your proxy server issues a certificate, you may need to set
     * `NODE_EXTRA_CA_CERTS` to the path to your certificate, otherwise this
     * application will reject TLS connections.
     */
    proxyUrl?: string;
};
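  // Illustrative .env usage of the two options above (they are mutually
  // exclusive, per the validation in assertConfigIsValid below; values are
  // examples, not defaults):
  //   HTTP_AGENT_INTERFACE=eth0
  //   HTTP_AGENT_PROXY_URL=socks5://username:password@some-socks-proxy.com:9050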
  /** URL for the image on the login page. Defaults to empty string (no image). */
  loginImageUrl?: string;
  /** Whether to enable the token-based login page for the service info page. Defaults to true. */
  enableInfoPageLogin?: boolean;
  /** Authentication mode for the service info page. (token | password) */
  serviceInfoAuthMode: "token" | "password";
  /** Password for the service info page if serviceInfoAuthMode is 'password'. */
  serviceInfoPassword?: string;
};

// To change configs, create a file called .env in the root directory.
@@ -387,14 +461,19 @@ export const config: Config = {
  bindAddress: getEnvWithDefault("BIND_ADDRESS", "0.0.0.0"),
  openaiKey: getEnvWithDefault("OPENAI_KEY", ""),
  anthropicKey: getEnvWithDefault("ANTHROPIC_KEY", ""),
  qwenKey: getEnvWithDefault("QWEN_KEY", ""),
  googleAIKey: getEnvWithDefault("GOOGLE_AI_KEY", ""),
  allowedExpModels: getEnvWithDefault("ALLOWED_EXP_MODELS", ""),
  mistralAIKey: getEnvWithDefault("MISTRAL_AI_KEY", ""),
  deepseekKey: getEnvWithDefault("DEEPSEEK_KEY", ""),
  xaiKey: getEnvWithDefault("XAI_KEY", ""),
  cohereKey: getEnvWithDefault("COHERE_KEY", ""),
  moonshotKey: getEnvWithDefault("MOONSHOT_KEY", ""),
  awsCredentials: getEnvWithDefault("AWS_CREDENTIALS", ""),
  gcpCredentials: getEnvWithDefault("GCP_CREDENTIALS", ""),
  azureCredentials: getEnvWithDefault("AZURE_CREDENTIALS", ""),
  proxyKey: getEnvWithDefault("PROXY_KEY", ""),
  adminKey: getEnvWithDefault("ADMIN_KEY", ""),
  serviceInfoPassword: getEnvWithDefault("SERVICE_INFO_PASSWORD", ""),
  sqliteDataPath: getEnvWithDefault(
    "SQLITE_DATA_PATH",
    path.join(DATA_DIR, "database.sqlite")
@@ -402,7 +481,11 @@ export const config: Config = {
  eventLogging: getEnvWithDefault("EVENT_LOGGING", false),
  eventLoggingTrim: getEnvWithDefault("EVENT_LOGGING_TRIM", 5),
  gatekeeper: getEnvWithDefault("GATEKEEPER", "none"),
- gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory"),
+ gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory") as Config["gatekeeperStore"],
  sqliteUserStorePath: getEnvWithDefault(
    "SQLITE_USER_STORE_PATH",
    path.join(DATA_DIR, "user-store.sqlite")
  ),
  maxIpsPerUser: getEnvWithDefault("MAX_IPS_PER_USER", 0),
  maxIpsAutoBan: getEnvWithDefault("MAX_IPS_AUTO_BAN", false),
  captchaMode: getEnvWithDefault("CAPTCHA_MODE", "none"),
@@ -483,6 +566,15 @@ export const config: Config = {
    getEnvWithDefault("ADMIN_WHITELIST", "0.0.0.0/0,::/0")
  ),
  ipBlacklist: parseCsv(getEnvWithDefault("IP_BLACKLIST", "")),
  tokensPunishmentFactor: getEnvWithDefault("TOKENS_PUNISHMENT_FACTOR", 0.0),
  httpAgent: {
    interface: getEnvWithDefault("HTTP_AGENT_INTERFACE", undefined),
    proxyUrl: getEnvWithDefault("HTTP_AGENT_PROXY_URL", undefined),
  },
  loginImageUrl: getEnvWithDefault("LOGIN_IMAGE_URL", ""),
  enableInfoPageLogin: getEnvWithDefault("ENABLE_INFO_PAGE_LOGIN", true),
  serviceInfoAuthMode: getEnvWithDefault("SERVICE_INFO_AUTH_MODE", "token") as Config["serviceInfoAuthMode"],
  serviceInfoPassword: getEnvWithDefault("SERVICE_INFO_PASSWORD", undefined),
} as const;

function generateSigningKey() {
@@ -499,6 +591,8 @@
    config.anthropicKey,
    config.googleAIKey,
    config.mistralAIKey,
+   config.deepseekKey,
+   config.xaiKey,
    config.awsCredentials,
    config.gcpCredentials,
    config.azureCredentials,
@@ -602,6 +696,41 @@ export async function assertConfigIsValid() {
    );
  }

  if (config.gatekeeperStore === "sqlite" && !config.sqliteUserStorePath) {
    throw new Error(
      "SQLite user store requires `SQLITE_USER_STORE_PATH` to be set."
    );
  }

  if (Object.values(config.httpAgent || {}).filter(Boolean).length === 0) {
    delete config.httpAgent;
  } else if (config.httpAgent) {
    if (config.httpAgent.interface && config.httpAgent.proxyUrl) {
      throw new Error(
        "Cannot set both `HTTP_AGENT_INTERFACE` and `HTTP_AGENT_PROXY_URL`."
      );
    }
  }

  if (config.enableInfoPageLogin) {
    if (!["token", "password"].includes(config.serviceInfoAuthMode)) {
      throw new Error(
        `Invalid SERVICE_INFO_AUTH_MODE: ${config.serviceInfoAuthMode}. Must be 'token' or 'password'.`
      );
    }
    if (config.serviceInfoAuthMode === "password" && !config.serviceInfoPassword) {
      throw new Error(
        "SERVICE_INFO_AUTH_MODE is 'password' but SERVICE_INFO_PASSWORD is not set."
      );
    }
    // If service info login is token-based, gatekeeper must be 'user_token' mode for getUser() to be effective.
    if (config.serviceInfoAuthMode === "token" && config.gatekeeper !== "user_token") {
      throw new Error(
        "SERVICE_INFO_AUTH_MODE is 'token' for info page login, but GATEKEEPER is not 'user_token'. User token authentication will not work."
      );
    }
  }

  // Ensure forks which add new secret-like config keys don't unwittingly expose
  // them to users.
  for (const key of getKeys(config)) {
@@ -615,15 +744,16 @@ export async function assertConfigIsValid() {
      `Config key "${key}" may be sensitive but is exposed. Add it to SENSITIVE_KEYS or OMITTED_KEYS.`
    );
  }

  await maybeInitializeFirebase();
}

/**
 * Config keys that are masked on the info page, but not hidden as their
 * presence may be relevant to the user due to privacy implications.
 */
- export const SENSITIVE_KEYS: (keyof Config)[] = ["googleSheetsSpreadsheetId"];
+ export const SENSITIVE_KEYS: (keyof Config)[] = [
+   "googleSheetsSpreadsheetId",
+   "httpAgent",
+ ];

/**
 * Config keys that are not displayed on the info page at all, generally because
@@ -636,13 +766,17 @@ export const OMITTED_KEYS = [
  "openaiKey",
  "anthropicKey",
  "googleAIKey",
  "deepseekKey",
  "xaiKey",
  "cohereKey",
  "qwenKey",
  "moonshotKey",
  "mistralAIKey",
  "awsCredentials",
  "gcpCredentials",
  "azureCredentials",
  "proxyKey",
  "adminKey",
  "serviceInfoPassword",
  "rejectPhrases",
  "rejectMessage",
  "showTokenCosts",
@@ -651,6 +785,7 @@ export const OMITTED_KEYS = [
  "firebaseKey",
  "firebaseRtdbUrl",
  "sqliteDataPath",
  "sqliteUserStorePath",
  "eventLogging",
  "eventLoggingTrim",
  "gatekeeperStore",
@@ -669,6 +804,9 @@ export const OMITTED_KEYS = [
  "adminWhitelist",
  "ipBlacklist",
  "powTokenPurgeHours",
  "loginImageUrl",
  "enableInfoPageLogin",
  "serviceInfoPassword",
] satisfies (keyof Config)[];
type OmitKeys = (typeof OMITTED_KEYS)[number];

@@ -731,6 +869,7 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
      "AWS_CREDENTIALS",
      "GCP_CREDENTIALS",
      "AZURE_CREDENTIALS",
+     "QWEN_KEY",
    ].includes(String(env))
  ) {
    return value as unknown as T;
@@ -747,32 +886,6 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
  }
}

- let firebaseApp: firebase.app.App | undefined;
-
- async function maybeInitializeFirebase() {
-   if (!config.gatekeeperStore.startsWith("firebase")) {
-     return;
-   }
-
-   const firebase = await import("firebase-admin");
-   const firebaseKey = Buffer.from(config.firebaseKey!, "base64").toString();
-   const app = firebase.initializeApp({
-     credential: firebase.credential.cert(JSON.parse(firebaseKey)),
-     databaseURL: config.firebaseRtdbUrl,
-   });
-
-   await app.database().ref("connection-test").set(Date.now());
-
-   firebaseApp = app;
- }
-
- export function getFirebaseApp(): firebase.app.App {
-   if (!firebaseApp) {
-     throw new Error("Firebase app not initialized.");
-   }
-   return firebaseApp;
- }

function parseCsv(val: string): string[] {
  if (!val) return [];

@@ -782,5 +895,7 @@ function parseCsv(val: string): string[] {
}

function getDefaultModelFamilies(): ModelFamily[] {
- return MODEL_FAMILIES.filter((f) => !f.includes("dall-e")) as ModelFamily[];
+ return MODEL_FAMILIES.filter(
+   (f) => !f.includes("o1-pro") && !f.includes("o3-pro")
+ ) as ModelFamily[];
}

+243 -127
@@ -1,4 +1,8 @@
- /** This whole module kinda sucks */
+ /* ──────────────────────────────────────────────────────────────
+    Login-gated info page
+    drop-in replacement for src/info-page.ts
+    ──────────────────────────────────────────────────────────── */

import fs from "fs";
import express, { Router, Request, Response } from "express";
import showdown from "showdown";
@@ -8,16 +12,49 @@ import { getLastNImages } from "./shared/file-storage/image-history";
import { keyPool } from "./shared/key-management";
import { MODEL_FAMILY_SERVICE, ModelFamily } from "./shared/models";
import { withSession } from "./shared/with-session";
- import { checkCsrfToken, injectCsrfToken } from "./shared/inject-csrf";
+ import { injectCsrfToken, checkCsrfToken } from "./shared/inject-csrf";
+ import { getUser } from "./shared/users/user-store";

/* ──────────────── TYPES: extend express-session ──────────── */
declare module "express-session" {
  interface Session {
    infoPageAuthed?: boolean;
  }
}

/* ──────────────── misc constants ─────────────────────────── */
- const INFO_PAGE_TTL = 2000;
+ const INFO_PAGE_TTL = 2_000; // ms
+ const LOGIN_ROUTE = "/";

const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
  qwen: "Qwen",
  cohere: "Cohere",
  deepseek: "Deepseek",
  xai: "Grok",
  moonshot: "Moonshot",
  turbo: "GPT-4o Mini / 3.5 Turbo",
  gpt4: "GPT-4",
  "gpt4-32k": "GPT-4 32k",
  "gpt4-turbo": "GPT-4 Turbo",
  gpt4o: "GPT-4o",
  gpt41: "GPT-4.1",
  "gpt41-mini": "GPT-4.1 Mini",
  "gpt41-nano": "GPT-4.1 Nano",
  gpt5: "GPT-5",
  "gpt5-mini": "GPT-5 Mini",
  "gpt5-nano": "GPT-5 Nano",
  "gpt5-chat-latest": "GPT-5 Chat Latest",
  gpt45: "GPT-4.5",
  o1: "OpenAI o1",
  "o1-mini": "OpenAI o1 mini",
  "o1-pro": "OpenAI o1 pro",
  "o3-pro": "OpenAI o3 pro",
  "o3-mini": "OpenAI o3 mini",
  "o3": "OpenAI o3",
  "o4-mini": "OpenAI o4 mini",
  "codex-mini": "OpenAI Codex Mini",
  "dall-e": "DALL-E",
  "gpt-image": "GPT Image",
  claude: "Claude (Sonnet)",
  "claude-opus": "Claude (Opus)",
  "gemini-flash": "Gemini Flash",
@@ -40,17 +77,101 @@ const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
  "azure-gpt4-32k": "Azure GPT-4 32k",
  "azure-gpt4-turbo": "Azure GPT-4 Turbo",
  "azure-gpt4o": "Azure GPT-4o",
  "azure-gpt45": "Azure GPT-4.5",
  "azure-gpt41": "Azure GPT-4.1",
  "azure-gpt41-mini": "Azure GPT-4.1 Mini",
  "azure-gpt41-nano": "Azure GPT-4.1 Nano",
  "azure-gpt5": "Azure GPT-5",
  "azure-gpt5-mini": "Azure GPT-5 Mini",
  "azure-gpt5-nano": "Azure GPT-5 Nano",
  "azure-gpt5-chat-latest": "Azure GPT-5 Chat Latest",
  "azure-o1": "Azure o1",
  "azure-o1-mini": "Azure o1 mini",
  "azure-o1-pro": "Azure o1 pro",
  "azure-o3-pro": "Azure o3 pro",
  "azure-o3-mini": "Azure o3 mini",
  "azure-o3": "Azure o3",
  "azure-o4-mini": "Azure o4 mini",
  "azure-codex-mini": "Azure Codex Mini",
  "azure-dall-e": "Azure DALL-E",
  "azure-gpt-image": "Azure GPT Image",
};

const converter = new showdown.Converter();

/* optional markdown greeting */
const customGreeting = fs.existsSync("greeting.md")
  ? `<div id="servergreeting">${fs.readFileSync("greeting.md", "utf8")}</div>`
  : "";

/* ──────────────── Login page ──────────────────────── */
function renderLoginPage(csrf: string, error?: string) {
  const errBlock = error
    ? `<div class="error-message">${escapeHtml(error)}</div>`
    : "";
  const pageTitle = getServerTitle();
  return `<!DOCTYPE html>
<html>
<head>
  <title>${pageTitle} – Login</title>
  <style>
    body{font-family:Arial, sans-serif;display:flex;justify-content:center;
      align-items:center;height:100vh;margin:0;padding:20px;background:#f5f5f5;}
    .login-container{background:#fff;border-radius:8px;box-shadow:0 4px 8px rgba(0,0,0,.1);
      padding:30px;width:100%;max-width:400px;text-align:center;}
    .logo-image{max-width:200px;margin-bottom:20px;}
    .form-group{margin-bottom:20px;}
    input[type=text], input[type=password]{width:100%;padding:10px;border:1px solid #ddd;border-radius:4px;
      box-sizing:border-box;font-size:16px;}
    button{background:#4caf50;color:#fff;border:none;padding:12px 20px;border-radius:4px;
      cursor:pointer;font-size:16px;width:100%;}
    button:hover{background:#45a049;}
    .error-message{color:#f44336;margin-bottom:15px;}

    @media (prefers-color-scheme: dark) {
      body { background: #2c2c2c; color: #e0e0e0; }
      .login-container { background: #383838; box-shadow: 0 4px 12px rgba(0,0,0,0.4); border: 1px solid #4a4a4a; }
      input[type=text], input[type=password] { background: #4a4a4a; color: #e0e0e0; border: 1px solid #5a5a5a; }
      input[type=text]::placeholder, input[type=password]::placeholder { color: #999; }
      button { background: #007bff; } /* Using a blue for dark mode button */
      button:hover { background: #0056b3; }
      .error-message { color: #ff8a80; } /* Lighter red for errors in dark mode */
    }
  </style>
</head>
<body>
  <div class="login-container">
    ${config.loginImageUrl ? `<img src="${config.loginImageUrl}" alt="Logo" class="logo-image">` : ''}
    ${errBlock}
    <form method="POST" action="${LOGIN_ROUTE}">
      <div class="form-group">
        ${config.serviceInfoAuthMode === "password"
          ? `<input type="password" id="password" name="password" required placeholder="Service Password">`
          : `<input type="text" id="token" name="token" required placeholder="Your token">`}
        <input type="hidden" name="_csrf" value="${csrf}">
      </div>
      <button type="submit">Access Dashboard</button>
    </form>
  </div>
</body>
</html>`;
}

/* ──────────────── login-required middleware ──────────────── */
function requireLogin(
  req: Request,
  res: Response,
  next: express.NextFunction
) {
  if (req.session?.infoPageAuthed) return next();
  return res.send(renderLoginPage(res.locals.csrfToken));
}

/* ──────────────── INFO PAGE CACHING ──────────────────────── */
let infoPageHtml: string | undefined;
let infoPageLastUpdated = 0;

- export const handleInfoPage = (req: Request, res: Response) => {
+ export function handleInfoPage(req: Request, res: Response) {
  if (infoPageLastUpdated + INFO_PAGE_TTL > Date.now()) {
    return res.send(infoPageHtml);
  }
@@ -65,60 +186,46 @@ export const handleInfoPage = (req: Request, res: Response) => {
  infoPageLastUpdated = Date.now();

  res.send(infoPageHtml);
- };
+ }

/* ──────────────── RENDER FULL INFO PAGE ──────────────────── */
export function renderPage(info: ServiceInfo) {
  const title = getServerTitle();
  const headerHtml = buildInfoPageHeader(info);

  return `<!doctype html>
<html lang="en">
- <head>
-   <meta charset="utf-8" />
-   <meta name="robots" content="noindex" />
-   <title>${title}</title>
-   <link rel="stylesheet" href="/res/css/reset.css" media="screen" />
-   <link rel="stylesheet" href="/res/css/sakura.css" media="screen" />
-   <link rel="stylesheet" href="/res/css/sakura-dark.css" media="screen and (prefers-color-scheme: dark)" />
-   <style>
-     body {
-       font-family: sans-serif;
-       padding: 1em;
-       max-width: 900px;
-       margin: 0;
-     }
-
-     .self-service-links {
-       display: flex;
-       justify-content: center;
-       margin-bottom: 1em;
-       padding: 0.5em;
-       font-size: 0.8em;
-     }
-
-     .self-service-links a {
-       margin: 0 0.5em;
-     }
-   </style>
- </head>
- <body>
-   ${headerHtml}
-   <hr />
-   ${getSelfServiceLinks()}
-   <h2>Service Info</h2>
-   <pre>${JSON.stringify(info, null, 2)}</pre>
- </body>
+ <head>
+   <meta charset="utf-8" />
+   <meta name="robots" content="noindex" />
+   <title>${title}</title>
+   <link rel="stylesheet" href="/res/css/reset.css" />
+   <link rel="stylesheet" href="/res/css/sakura.css" />
+   <link rel="stylesheet" href="/res/css/sakura-dark.css"
+     media="screen and (prefers-color-scheme: dark)" />
+   <style>
+     body{font-family:sans-serif;padding:1em;max-width:900px;margin:0;}
+     .self-service-links{display:flex;justify-content:center;margin-bottom:1em;
+       padding:0.5em;font-size:0.8em;}
+     .self-service-links a{margin:0 0.5em;}
+   </style>
+ </head>
+ <body>
+   ${headerHtml}
+   <hr/>
+   ${getSelfServiceLinks()}
+   <h2>Service Info</h2>
+   <pre>${JSON.stringify(info, null, 2)}</pre>
+ </body>
</html>`;
}

- /**
-  * If the server operator provides a `greeting.md` file, it will be included in
-  * the rendered info page.
-  **/
/* ──────────────── header & helper functions ──────────────── */
/* (all copied verbatim from original file) */
function buildInfoPageHeader(info: ServiceInfo) {
  const title = getServerTitle();
  // TODO: use some templating engine instead of this mess
  let infoBody = `# ${title}`;

  if (config.promptLogging) {
    infoBody += `\n## Prompt Logging Enabled
This proxy keeps full logs of all prompts and AI responses. Prompt logs are anonymous and do not contain IP addresses or timestamps.
@@ -137,9 +244,9 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon
  for (const modelFamily of config.allowedModelFamilies) {
    const service = MODEL_FAMILY_SERVICE[modelFamily];

-   const hasKeys = keyPool.list().some((k) => {
-     return k.service === service && k.modelFamilies.includes(modelFamily);
-   });
+   const hasKeys = keyPool.list().some(
+     (k) => k.service === service && k.modelFamilies.includes(modelFamily)
+   );

    const wait = info[modelFamily]?.estimatedQueueTime;
    if (hasKeys && wait) {
@@ -150,9 +257,7 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon
  }

  infoBody += "\n\n" + waits.join(" / ");

  infoBody += customGreeting;

  infoBody += buildRecentImageSection();

  return converter.makeHtml(infoBody);

@@ -160,63 +265,60 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon

function getSelfServiceLinks() {
  if (config.gatekeeper !== "user_token") return "";

  const links = [["Check your user token", "/user/lookup"]];
  if (config.captchaMode !== "none") {
    links.unshift(["Request a user token", "/user/captcha"]);
  }

  return `<div class="self-service-links">${links
-   .map(([text, link]) => `<a target="_blank" href="${link}">${text}</a>`)
+   .map(([t, l]) => `<a href="${l}">${t}</a>`)
    .join(" | ")}</div>`;
}

function getServerTitle() {
- // Use manually set title if available
- if (process.env.SERVER_TITLE) {
-   return process.env.SERVER_TITLE;
- }
-
- // Huggingface
- if (process.env.SPACE_ID) {
+ if (process.env.SERVER_TITLE) return process.env.SERVER_TITLE;
+ if (process.env.SPACE_ID)
    return `${process.env.SPACE_AUTHOR_NAME} / ${process.env.SPACE_TITLE}`;
- }
-
- // Render
- if (process.env.RENDER) {
+ if (process.env.RENDER)
    return `Render / ${process.env.RENDER_SERVICE_NAME}`;
- }
-
- return "OAI Reverse Proxy";
+ return "Tunnel";
}

function buildRecentImageSection() {
- const dalleModels: ModelFamily[] = ["azure-dall-e", "dall-e"];
+ const imageModels: ModelFamily[] = [
+   "azure-dall-e",
+   "dall-e",
+   "gpt-image",
+   "azure-gpt-image",
+ ];
+ // Condition 1: Is the feature enabled via config?
+ // Condition 2: Is at least one relevant image model family allowed in config?
  if (
    !config.showRecentImages ||
-   dalleModels.every((f) => !config.allowedModelFamilies.includes(f))
+   imageModels.every((f) => !config.allowedModelFamilies.includes(f))
  ) {
    return ""; // Exit if feature is disabled or no relevant models are allowed
  }

+ // Condition 3: Are there any actual images to display?
+ const recentImages = getLastNImages(12).reverse();
+ if (recentImages.length === 0) {
+   // If the feature is enabled and models are allowed, but no images exist,
+   // do not render the section, including its title.
+   return "";
+ }

- let html = `<h2>Recent DALL-E Generations</h2>`;
- const recentImages = getLastNImages(12).reverse();
- if (recentImages.length === 0) {
-   html += `<p>No images yet.</p>`;
-   return html;
- }
-
- html += `<div style="display: flex; flex-wrap: wrap;" id="recent-images">`;
+ // If all conditions pass (feature enabled, models allowed, images exist), build and return the HTML
+ let html = `<h2>Recent Image Generations</h2>`;
+ html += `<div style="display:flex;flex-wrap:wrap;" id="recent-images">`;
  for (const { url, prompt } of recentImages) {
    const thumbUrl = url.replace(/\.png$/, "_t.jpg");
    const escapedPrompt = escapeHtml(prompt);
-   html += `<div style="margin: 0.5em;" class="recent-image">
-     <a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}" alt="${escapedPrompt}" style="max-width: 150px; max-height: 150px;" /></a>
-   </div>`;
+   html += `<div style="margin:0.5em" class="recent-image">
+     <a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}"
+     alt="${escapedPrompt}" style="max-width:150px;max-height:150px;"/></a></div>`;
  }
- html += `</div>`;
- html += `<p style="clear: both; text-align: center;"><a href="/user/image-history">View all recent images</a></p>`;
+ html += `</div><p style="clear:both;text-align:center;">
+   <a href="/user/image-history">View all recent images</a></p>`;
  return html;
}

@@ -231,57 +333,71 @@ function escapeHtml(unsafe: string) {
    .replace(/]/g, "&#93;");
}

function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
  try {
-   const [username, spacename] = spaceId.split("/");
-   return `https://${username}-${spacename.replace(/_/g, "-")}.hf.space`;
- } catch (e) {
+   const [u, s] = spaceId.split("/");
+   return `https://${u}-${s.replace(/_/g, "-")}.hf.space`;
+ } catch {
    return "";
  }
}

- function checkIfUnlocked(
-   req: Request,
-   res: Response,
-   next: express.NextFunction
- ) {
-   if (config.serviceInfoPassword?.length && !req.session?.unlocked) {
-     return res.redirect("/unlock-info");
-   }
-   next();
- }

/* ──────────────── ROUTER ─────────────────────────────────── */
const infoPageRouter = Router();
- if (config.serviceInfoPassword?.length) {
-   infoPageRouter.use(
-     express.json({ limit: "1mb" }),
-     express.urlencoded({ extended: true, limit: "1mb" })
-   );
-   infoPageRouter.use(withSession);
-   infoPageRouter.use(injectCsrfToken, checkCsrfToken);
-   infoPageRouter.post("/unlock-info", (req, res) => {
-     if (req.body.password !== config.serviceInfoPassword) {
-       return res.status(403).send("Incorrect password");
-     }
-     req.session!.unlocked = true;
-     res.redirect("/");
-   });
-   infoPageRouter.get("/unlock-info", (_req, res) => {
-     if (_req.session?.unlocked) return res.redirect("/");
-
-     res.send(`
-       <form method="post" action="/unlock-info">
-         <h1>Unlock Service Info</h1>
-         <input type="hidden" name="_csrf" value="${res.locals.csrfToken}" />
-         <input type="password" name="password" placeholder="Password" />
-         <button type="submit">Unlock</button>
-       </form>
-     `);
-   });
-   infoPageRouter.use(checkIfUnlocked);
- }
- infoPageRouter.get("/", handleInfoPage);
- infoPageRouter.get("/status", (req, res) => {
-   res.json(buildInfo(req.protocol + "://" + req.get("host"), false));
+ infoPageRouter.use(
+   express.json({ limit: "1mb" }),
+   express.urlencoded({ extended: true, limit: "1mb" }),
+   withSession,
+   injectCsrfToken,
+   checkCsrfToken
+ );
+
+ /* login attempt */
+ infoPageRouter.post(LOGIN_ROUTE, (req, res) => {
+   if (config.serviceInfoAuthMode === "password") {
+     const password = (req.body.password || "").trim();
+     // Simple string comparison; for production, consider a timing-safe comparison (see the sketch below)
+     if (config.serviceInfoPassword && password === config.serviceInfoPassword) {
+       req.session!.infoPageAuthed = true;
+       return res.redirect("/");
+     } else {
+       return res
+         .status(401)
+         .send(renderLoginPage(res.locals.csrfToken, "Invalid password. Please try again."));
+     }
+   } else {
+     // Token-based authentication (using any valid user token)
+     const token = (req.body.token || "").trim();
+     const user = getUser(token); // returns undefined if invalid
+
+     if (user && !user.disabledAt) {
+       // Only allow access if user exists AND is not disabled
+       req.session!.infoPageAuthed = true;
+       return res.redirect("/");
+     } else if (user && user.disabledAt) {
+       // User exists but is disabled
+       const reason = user.disabledReason || "Your account has been disabled";
+       return res
+         .status(401)
+         .send(renderLoginPage(res.locals.csrfToken, `Access denied: ${reason}`));
+     } else {
+       // User doesn't exist
+       return res
+         .status(401)
+         .send(renderLoginPage(res.locals.csrfToken, "Invalid token. Please try again."));
+     }
+   }
});
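
// A minimal timing-safe variant of the password check above, using Node's
// built-in crypto module (a sketch of the suggestion in the comment, not what
// the handler actually calls):
import { timingSafeEqual } from "crypto";

function passwordsMatch(supplied: string, expected: string): boolean {
  const a = Buffer.from(supplied);
  const b = Buffer.from(expected);
  // timingSafeEqual throws if the buffers differ in length, so compare lengths
  // first; this leaks only the password length, not its contents.
  return a.length === b.length && timingSafeEqual(a, b);
}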

/* GET / – either login form or info page */
if (config.enableInfoPageLogin) {
  infoPageRouter.get(LOGIN_ROUTE, requireLogin, handleInfoPage);
} else {
  infoPageRouter.get(LOGIN_ROUTE, handleInfoPage);
}

/* ─── Removed the public /status route: simply not added ─── */

export { infoPageRouter };

+1 -1
@@ -2,7 +2,7 @@ import { NextFunction, Request, Response } from "express";

export function addV1(req: Request, res: Response, next: NextFunction) {
  // Clients don't consistently use the /v1 prefix so we'll add it for them.
- if (!req.path.startsWith("/v1/") && !req.path.startsWith("/v1beta/")) {
+ if (!req.path.startsWith("/v1/") && !req.path.match(/^\/(v1alpha|v1beta)\//)) {
    req.url = `/v1${req.url}`;
  }
next();
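  // e.g. "/chat/completions" is rewritten to "/v1/chat/completions", while
  // "/v1beta/..." and (with this change) "/v1alpha/..." paths pass through untouched.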

+176 -111
@@ -1,22 +1,16 @@
- import { Request, Response, RequestHandler, Router } from "express";
- import { createProxyMiddleware } from "http-proxy-middleware";
+ import { Request, RequestHandler, Router } from "express";
import { config } from "../config";
- import { logger } from "../logger";
- import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
- import { handleProxyError } from "./middleware/common";
import {
  addKey,
  addAnthropicPreamble,
  createPreprocessorMiddleware,
  finalizeBody,
- createOnProxyReqHandler,
} from "./middleware/request";
- import {
-   ProxyResHandlerWithBody,
-   createOnProxyResHandler,
- } from "./middleware/response";
- import { sendErrorToClient } from "./middleware/response/error-generator";
+ import { ProxyResHandlerWithBody } from "./middleware/response";
+ import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
+ import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
+ import { claudeModels } from "../shared/claude-models";
+ import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";

let modelsCache: any = null;
let modelsCacheTime = 0;
@@ -26,41 +20,32 @@ const getModelsResponse = () => {
    return modelsCache;
  }

- if (!config.anthropicKey) return { object: "list", data: [] };
+ if (!config.anthropicKey) return { object: "list", data: [], has_more: false, first_id: null, last_id: null };

- const claudeVariants = [
-   "claude-v1",
-   "claude-v1-100k",
-   "claude-instant-v1",
-   "claude-instant-v1-100k",
-   "claude-v1.3",
-   "claude-v1.3-100k",
-   "claude-v1.2",
-   "claude-v1.0",
-   "claude-instant-v1.1",
-   "claude-instant-v1.1-100k",
-   "claude-instant-v1.0",
-   "claude-2",
-   "claude-2.0",
-   "claude-2.1",
-   "claude-3-haiku-20240307",
-   "claude-3-opus-20240229",
-   "claude-3-sonnet-20240229",
-   "claude-3-5-sonnet-20240620",
- ];
-
- const models = claudeVariants.map((id) => ({
-   id,
-   object: "model",
-   created: new Date().getTime(),
+ const date = new Date();
+ const models = claudeModels.map(model => ({
+   // Common
+   id: model.anthropicId,
    owned_by: "anthropic",
    permission: [],
    root: "claude",
    parent: null,
+   // Anthropic
+   type: "model",
+   display_name: model.displayName,
+   created_at: date.toISOString(),
+   // OpenAI
+   object: "model",
+   created: date.getTime(),
  }));

- modelsCache = { object: "list", data: models };
- modelsCacheTime = new Date().getTime();
+ modelsCache = {
+   // Common
+   object: "list",
+   data: models,
+   // Anthropic
+   has_more: false,
+   first_id: models[0]?.id,
+   last_id: models[models.length - 1]?.id,
+ };
+ modelsCacheTime = date.getTime();

  return modelsCache;
};
@@ -69,7 +54,6 @@ const handleModelRequest: RequestHandler = (_req, res) => {
  res.status(200).json(getModelsResponse());
};

/** Only used for non-streaming requests. */
const anthropicBlockingResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
@@ -123,13 +107,7 @@ export function transformAnthropicChatResponseToAnthropicText(
  };
}

- /**
-  * Transforms a model response from the Anthropic API to match those from the
-  * OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
-  * is only used for non-streaming requests as streaming requests are handled
-  * on-the-fly.
-  */
- export function transformAnthropicTextResponseToOpenAI(
+ function transformAnthropicTextResponseToOpenAI(
  anthropicBody: Record<string, any>,
  req: Request
): Record<string, any> {
@@ -181,100 +159,185 @@ export function transformAnthropicChatResponseToOpenAI(

/**
 * If a client using the OpenAI compatibility endpoint requests an actual OpenAI
- * model, reassigns it to Claude 3 Sonnet.
+ * model, reassigns it to Sonnet.
 */
function maybeReassignModel(req: Request) {
  const model = req.body.model;
- if (!model.startsWith("gpt-")) return;
- req.body.model = "claude-3-sonnet-20240229";
+ if (model.includes("claude")) return; // use whatever model the user requested
+ req.body.model = "claude-3-5-sonnet-latest";
}

/**
 * If the client requests more than 4096 output tokens, the request must carry a
 * particular version header.
 * https://docs.anthropic.com/en/release-notes/api#july-15th-2024
 *
 * Also adds the required beta header for 1-hour cache duration if requested,
 * and validates Claude 4.1 Opus parameters (temperature/top_p).
 */
function setAnthropicBetaHeader(req: Request) {
  // Validate Claude 4.1 Opus parameters before processing
  validateClaude41OpusParameters(req);

  const { max_tokens_to_sample } = req.body;

  // Initialize beta headers array
  const betaHeaders: string[] = [];

  // Add max tokens beta header if needed
  if (max_tokens_to_sample > 4096) {
-   req.headers["anthropic-beta"] = "max-tokens-3-5-sonnet-2024-07-15";
+   betaHeaders.push("max-tokens-3-5-sonnet-2024-07-15");
  }

  // Add extended cache TTL beta header if 1h cache is requested
  if (req.body.cache_control?.ttl === "1h") {
    betaHeaders.push("extended-cache-ttl-2025-04-11");
  }

  // Set the combined beta headers if any were added
  if (betaHeaders.length > 0) {
    req.headers["anthropic-beta"] = betaHeaders.join(",");
  }
}
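
// e.g. a request with max_tokens_to_sample > 4096 and cache_control.ttl === "1h"
// leaves the function above with:
//   anthropic-beta: max-tokens-3-5-sonnet-2024-07-15,extended-cache-ttl-2025-04-11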

- const anthropicProxy = createQueueMiddleware({
-   proxyMiddleware: createProxyMiddleware({
-     target: "https://api.anthropic.com",
-     changeOrigin: true,
-     selfHandleResponse: true,
-     logger,
-     on: {
-       proxyReq: createOnProxyReqHandler({
-         pipeline: [addKey, addAnthropicPreamble, finalizeBody],
-       }),
-       proxyRes: createOnProxyResHandler([anthropicBlockingResponseHandler]),
-       error: handleProxyError,
-     },
-     // Abusing pathFilter to rewrite the paths dynamically.
-     pathFilter: (pathname, req) => {
-       const isText = req.outboundApi === "anthropic-text";
-       const isChat = req.outboundApi === "anthropic-chat";
-       if (isChat && pathname === "/v1/complete") {
-         req.url = "/v1/messages";
-       }
-       if (isText && pathname === "/v1/chat/completions") {
-         req.url = "/v1/complete";
-       }
-       if (isChat && pathname === "/v1/chat/completions") {
-         req.url = "/v1/messages";
-       }
-       if (isChat && ["sonnet", "opus"].includes(req.params.type)) {
-         req.url = "/v1/messages";
-       }
-       return true;
-     },
-   }),
/**
 * Adds the web search tool for Claude 3.5, Claude 3.7, and Claude 4 models when
 * enable_web_search is true.
 *
 * Supports all optional parameters documented in the Claude API:
 * - max_uses: Limit the number of searches per request
 * - allowed_domains: Only include results from these domains
 * - blocked_domains: Never include results from these domains
 * - user_location: Localize search results
 */
function addWebSearchTool(req: Request) {
  // Check if this is a Claude model that supports web search and if web search is enabled
  const isClaude35 = req.body.model?.includes("claude-3-5") || req.body.model?.includes("claude-3.5");
  const isClaude37 = req.body.model?.includes("claude-3-7") || req.body.model?.includes("claude-3.7");
  const isClaude4 = req.body.model?.includes("claude-sonnet-4") || req.body.model?.includes("claude-opus-4");
  const useWebSearch = (isClaude35 || isClaude37 || isClaude4) && Boolean(req.body.enable_web_search);

  if (useWebSearch) {
    // Create the base web search tool
    const webSearchTool: any = {
      'type': 'web_search_20250305',
      'name': 'web_search',
    };

    // Add optional parameters if provided by the client

    // max_uses: Limit the number of searches per request
    if (typeof req.body.web_search_max_uses === 'number') {
      webSearchTool.max_uses = req.body.web_search_max_uses;
      delete req.body.web_search_max_uses;
    }

    // allowed_domains: Only include results from these domains
    if (Array.isArray(req.body.web_search_allowed_domains)) {
      webSearchTool.allowed_domains = req.body.web_search_allowed_domains;
      delete req.body.web_search_allowed_domains;
    }

    // blocked_domains: Never include results from these domains
    if (Array.isArray(req.body.web_search_blocked_domains)) {
      webSearchTool.blocked_domains = req.body.web_search_blocked_domains;
      delete req.body.web_search_blocked_domains;
    }

    // user_location: Localize search results
    if (req.body.web_search_user_location) {
      webSearchTool.user_location = req.body.web_search_user_location;
      delete req.body.web_search_user_location;
    }

    // Add the web search tool to the tools array
    req.body.tools = [...(req.body.tools || []), webSearchTool];
  }

  // Delete custom parameters as they're not standard Claude API parameters
  delete req.body.enable_web_search;
  delete req.body.reasoning_effort;
}
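
// Illustrative client request body that exercises the custom parameters above
// (field names per the function; model id and values are examples only):
// {
//   "model": "claude-3-7-sonnet-latest",
//   "enable_web_search": true,
//   "web_search_max_uses": 3,
//   "web_search_allowed_domains": ["example.com"],
//   "messages": [ ... ]
// }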
|
||||
|
||||
function selectUpstreamPath(manager: ProxyReqManager) {
|
||||
const req = manager.request;
|
||||
const pathname = req.url.split("?")[0];
|
||||
req.log.debug({ pathname }, "Anthropic path filter");
|
||||
const isText = req.outboundApi === "anthropic-text";
|
||||
const isChat = req.outboundApi === "anthropic-chat";
|
||||
if (isChat && pathname === "/v1/complete") {
|
||||
manager.setPath("/v1/messages");
|
||||
}
|
||||
if (isText && pathname === "/v1/chat/completions") {
|
||||
manager.setPath("/v1/complete");
|
||||
}
|
||||
if (isChat && pathname === "/v1/chat/completions") {
|
||||
manager.setPath("/v1/messages");
|
||||
}
|
||||
if (isChat && ["sonnet", "opus"].includes(req.params.type)) {
|
||||
manager.setPath("/v1/messages");
|
||||
}
|
||||
}

const anthropicProxy = createQueuedProxyMiddleware({
  target: "https://api.anthropic.com",
  mutations: [selectUpstreamPath, addKey, finalizeBody],
  blockingResponseHandler: anthropicBlockingResponseHandler,
});

const nativeAnthropicChatPreprocessor = createPreprocessorMiddleware(
  { inApi: "anthropic-chat", outApi: "anthropic-chat", service: "anthropic" },
-  { afterTransform: [setAnthropicBetaHeader] }
  { afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);

-const nativeTextPreprocessor = createPreprocessorMiddleware({
-  inApi: "anthropic-text",
-  outApi: "anthropic-text",
-  service: "anthropic",
-});
const nativeTextPreprocessor = createPreprocessorMiddleware(
  {
    inApi: "anthropic-text",
    outApi: "anthropic-text",
    service: "anthropic",
  },
  { afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);

-const textToChatPreprocessor = createPreprocessorMiddleware({
-  inApi: "anthropic-text",
-  outApi: "anthropic-chat",
-  service: "anthropic",
-});
const textToChatPreprocessor = createPreprocessorMiddleware(
  {
    inApi: "anthropic-text",
    outApi: "anthropic-chat",
    service: "anthropic",
  },
  { afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);

/**
 * Routes text completion prompts to anthropic-chat if they need translation
 * (claude-3 based models do not support the old text completion endpoint).
 */
const preprocessAnthropicTextRequest: RequestHandler = (req, res, next) => {
-  if (req.body.model?.startsWith("claude-3")) {
  const model = req.body.model;
  const isClaude4Model = model?.includes("claude-sonnet-4") || model?.includes("claude-opus-4");
  if (model?.startsWith("claude-3") || isClaude4Model) {
    textToChatPreprocessor(req, res, next);
  } else {
    nativeTextPreprocessor(req, res, next);
  }
};

-const oaiToTextPreprocessor = createPreprocessorMiddleware({
-  inApi: "openai",
-  outApi: "anthropic-text",
-  service: "anthropic",
-});
const oaiToTextPreprocessor = createPreprocessorMiddleware(
  {
    inApi: "openai",
    outApi: "anthropic-text",
    service: "anthropic",
  },
  { afterTransform: [setAnthropicBetaHeader] }
);

-const oaiToChatPreprocessor = createPreprocessorMiddleware({
-  inApi: "openai",
-  outApi: "anthropic-chat",
-  service: "anthropic",
-});
const oaiToChatPreprocessor = createPreprocessorMiddleware(
  {
    inApi: "openai",
    outApi: "anthropic-chat",
    service: "anthropic",
  },
  { afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);

/**
 * Routes an OpenAI prompt to either the legacy Claude text completion endpoint
@@ -282,7 +345,9 @@ const oaiToChatPreprocessor = createPreprocessorMiddleware({
 */
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
  maybeReassignModel(req);
-  if (req.body.model?.includes("claude-3")) {
  const model = req.body.model;
  const isClaude4 = model?.includes("claude-sonnet-4") || model?.includes("claude-opus-4");
  if (model?.includes("claude-3") || isClaude4) {
    oaiToChatPreprocessor(req, res, next);
  } else {
    oaiToTextPreprocessor(req, res, next);

+151 -63
@@ -1,27 +1,21 @@
import { Request, RequestHandler, Router } from "express";
-import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
-import { logger } from "../logger";
-import { createQueueMiddleware } from "./queue";
-import { ipLimiter } from "./rate-limit";
-import { handleProxyError } from "./middleware/common";
-import {
-  createPreprocessorMiddleware,
-  signAwsRequest,
-  finalizeSignedRequest,
-  createOnProxyReqHandler,
-} from "./middleware/request";
-import {
-  ProxyResHandlerWithBody,
-  createOnProxyResHandler,
-} from "./middleware/response";
import {
  transformAnthropicChatResponseToAnthropicText,
  transformAnthropicChatResponseToOpenAI,
} from "./anthropic";
import { ipLimiter } from "./rate-limit";
import {
  createPreprocessorMiddleware,
  finalizeSignedRequest,
  signAwsRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";

/** Only used for non-streaming requests. */
-const awsResponseHandler: ProxyResHandlerWithBody = async (
const awsBlockingResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
@@ -55,12 +49,6 @@ const awsResponseHandler: ProxyResHandlerWithBody = async (
  res.status(200).json({ ...newBody, proxy: body.proxy });
};

-/**
- * Transforms a model response from the Anthropic API to match those from the
- * OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
- * is only used for non-streaming requests as streaming requests are handled
- * on-the-fly.
- */
function transformAwsTextResponseToOpenAI(
  awsBody: Record<string, any>,
  req: Request
@@ -89,23 +77,13 @@ function transformAwsTextResponseToOpenAI(
  };
}

-const awsClaudeProxy = createQueueMiddleware({
-  beforeProxy: signAwsRequest,
-  proxyMiddleware: createProxyMiddleware({
-    target: "bad-target-will-be-rewritten",
-    router: ({ signedRequest }) => {
-      if (!signedRequest) throw new Error("Must sign request before proxying");
-      return `${signedRequest.protocol}//${signedRequest.hostname}`;
-    },
-    changeOrigin: true,
-    selfHandleResponse: true,
-    logger,
-    on: {
-      proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
-      proxyRes: createOnProxyResHandler([awsResponseHandler]),
-      error: handleProxyError,
-    },
-  }),
const awsClaudeProxy = createQueuedProxyMiddleware({
  target: ({ signedRequest }) => {
    if (!signedRequest) throw new Error("Must sign request before proxying");
    return `${signedRequest.protocol}//${signedRequest.hostname}`;
  },
  mutations: [signAwsRequest, finalizeSignedRequest],
  blockingResponseHandler: awsBlockingResponseHandler,
});

const nativeTextPreprocessor = createPreprocessorMiddleware(
@@ -191,6 +169,9 @@ awsClaudeRouter.post(
 * strategies are used to try to map a non-AWS model name to AWS model ID.
 */
function maybeReassignModel(req: Request) {
  // Validate Claude 4.1 Opus parameters before processing
  validateClaude41OpusParameters(req);

  const model = req.body.model;

  // If it looks like an AWS model, use it as-is
@@ -201,26 +182,73 @@ function maybeReassignModel(req: Request) {
  // Anthropic model names can look like:
  // - claude-v1
  // - claude-2.1
  // - claude-3-5-sonnet-20240620-v1:0
  // - claude-3-5-sonnet-20240620 (old format: number-model)
  // - claude-3-opus-latest (old format: number-model)
  // - claude-sonnet-4-20250514 (new format: model-number)
  // - claude-opus-4-latest (new format: model-number)
  // - anthropic.claude-3-sonnet-20240229-v1:0 (AWS format with old naming)
  // - anthropic.claude-sonnet-4-20250514-v1:0 (AWS format with new naming)
-  const pattern =
-    /^(claude-)?(instant-)?(v)?(\d+)([.-](\d))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(\d*)/i;
  const pattern =
    /^(?:anthropic\.)?claude-(?:(?:(instant-)?(v)?(\d+)([.-](\d))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(latest|\d*))|(?:(sonnet-|opus-|haiku-)(\d+)([.-](\d))?(-\d+k)?-(latest|\d+)))(?:-v\d+(?::\d+)?)?$/i;
  const match = model.match(pattern);

-  // If there's no match, fallback to Claude v2 as it is most likely to be
-  // available on AWS.
  if (!match) {
-    req.body.model = `anthropic.claude-v2:1`;
-    return;
    throw new Error(`Provided model name (${model}) doesn't resemble a Claude model ID.`);
  }

-  const [_, _cl, instant, _v, major, _sep, minor, _ctx, name, _rev] = match;
-
-  if (instant) {
-    req.body.model = "anthropic.claude-instant-v1";
-    return;
-  }
  // Check which format matched (old or new)
  // New format: claude-sonnet-4-20250514 or anthropic.claude-sonnet-4-20250514-v1:0
  // Old format: claude-3-sonnet-20240229 or anthropic.claude-3-sonnet-20240229-v1:0
  const isNewFormat = !!match[9];

  let major, minor, name, rev;

  if (isNewFormat) {
    // New format: claude-sonnet-4-20250514
    // match[9] = sonnet-/opus-/haiku-
    // match[10] = 4 (major version)
    // match[12] = minor version (if any, from [.-](\d) pattern)
    // match[14] = revision (latest or date)
    const modelType = match[9]?.match(/([a-z]+)/)?.[1] || "";
    name = modelType;
    major = match[10];
    minor = match[12];
    rev = match[14];

    // Special case: if revision is a single digit and no minor version,
    // treat revision as minor version (e.g., claude-opus-4-1 -> version 4.1)
    if (!minor && rev && /^\d$/.test(rev)) {
      minor = rev;
      rev = undefined;
    }

    // Handle instant case for completeness
    const instant = match[1];
    if (instant) {
      req.body.model = "anthropic.claude-instant-v1";
      return;
    }
  } else {
    // Old format: claude-3-sonnet-20240229
    // match[1] = instant- (if any)
    // match[3] = 3 (major version)
    // match[5] = minor version (if any)
    // match[7] = -sonnet-/-opus-/-haiku- (if any)
    // match[8] = revision (latest or date)
    const instant = match[1];
    if (instant) {
      req.body.model = "anthropic.claude-instant-v1";
      return;
    }

    major = match[3];
    minor = match[5];
    name = match[7]?.match(/([a-z]+)/)?.[1] || "";
    rev = match[8];
  }

  const ver = minor ? `${major}.${minor}` : major;

  switch (ver) {
    case "1":
    case "1.0":
@@ -230,24 +258,84 @@ function maybeReassignModel(req: Request) {
    case "2.0":
      req.body.model = "anthropic.claude-v2";
      return;
    case "2.1":
      req.body.model = "anthropic.claude-v2:1";
      return;
    case "3":
    case "3.0":
-      if (name.includes("opus")) {
-        req.body.model = "anthropic.claude-3-opus-20240229-v1:0";
-      } else if (name.includes("haiku")) {
-        req.body.model = "anthropic.claude-3-haiku-20240307-v1:0";
-      } else {
-        req.body.model = "anthropic.claude-3-sonnet-20240229-v1:0";
-      }
-      return;
      // there is only one snapshot for all Claude 3 models so there is no need
      // to check the revision
      switch (name) {
        case "sonnet":
          req.body.model = "anthropic.claude-3-sonnet-20240229-v1:0";
          return;
        case "haiku":
          req.body.model = "anthropic.claude-3-haiku-20240307-v1:0";
          return;
        case "opus":
          req.body.model = "anthropic.claude-3-opus-20240229-v1:0";
          return;
      }
      break;
    case "3.5":
-      req.body.model = "anthropic.claude-3-5-sonnet-20240620-v1:0";
-      return;
      switch (name) {
        case "sonnet":
          switch (rev) {
            case "20241022":
            case "latest":
              req.body.model = "anthropic.claude-3-5-sonnet-20241022-v2:0";
              return;
            case "20240620":
              req.body.model = "anthropic.claude-3-5-sonnet-20240620-v1:0";
              return;
          }
          break;
        case "haiku":
          switch (rev) {
            case "20241022":
            case "latest":
              req.body.model = "anthropic.claude-3-5-haiku-20241022-v1:0";
              return;
          }
          break;
        case "opus":
          // Add once a 3.5 Opus model ID is announced, if ever
          break;
      }
      break;
    case "3.7":
      switch (name) {
        case "sonnet":
          req.body.model = "anthropic.claude-3-7-sonnet-20250219-v1:0";
          return;
      }
      break;
    case "4":
    case "4.0":
      // Mapping "claude-4-..." variants to their actual AWS Bedrock IDs
      // as defined in src/shared/claude-models.ts.
      switch (name) {
        case "sonnet":
          req.body.model = "anthropic.claude-sonnet-4-20250514-v1:0";
          return;
        case "opus":
          req.body.model = "anthropic.claude-opus-4-20250514-v1:0";
          return;
        // No case for "haiku" here, as "claude-4-haiku" is not defined
        // in claude-models.ts. It will fall through and throw an error.
      }
      break;
    case "4.1":
      // Mapping "claude-4.1-..." variants to their actual AWS Bedrock IDs
      // as defined in src/shared/claude-models.ts.
      switch (name) {
        case "opus":
          req.body.model = "anthropic.claude-opus-4-1-20250805-v1:0";
          return;
        // No sonnet or haiku variants for 4.1 yet
      }
      break;
  }

-  // Fallback to Claude 2.1
-  req.body.model = `anthropic.claude-v2:1`;
-  return;
  throw new Error(`Provided model name (${model}) could not be mapped to a known AWS Claude model ID.`);
}
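A few illustrative mappings implied by the rework above. This is a sketch, not part of the diff; it assumes validateClaude41OpusParameters tolerates a minimal request object with only a body.

const cases: Array<[string, string]> = [
  ["claude-3-5-sonnet-latest", "anthropic.claude-3-5-sonnet-20241022-v2:0"],
  ["claude-sonnet-4-20250514", "anthropic.claude-sonnet-4-20250514-v1:0"],
  ["claude-opus-4-1", "anthropic.claude-opus-4-1-20250805-v1:0"], // rev "1" treated as minor -> 4.1
];
for (const [input, expected] of cases) {
  const req = { body: { model: input } } as any;
  maybeReassignModel(req);
  console.assert(req.body.model === expected, `${input} -> ${req.body.model}`);
}
// Unrecognized names such as "gpt-4o" now throw instead of silently
// falling back to anthropic.claude-v2:1.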

export const awsClaude = awsClaudeRouter;

+14 -29
@@ -1,21 +1,16 @@
-import { Request } from "express";
import { Request, Router } from "express";
-import {
-  createOnProxyResHandler,
-  ProxyResHandlerWithBody,
-} from "./middleware/response";
-import { createQueueMiddleware } from "./queue";
-import {
-  detectMistralInputApi,
-  transformMistralTextToMistralChat,
-} from "./mistral-ai";
-import { ipLimiter } from "./rate-limit";
import { ProxyResHandlerWithBody } from "./middleware/response";
import {
-  createOnProxyReqHandler,
  createPreprocessorMiddleware,
  finalizeSignedRequest,
  signAwsRequest,
} from "./middleware/request";
-import { createProxyMiddleware } from "http-proxy-middleware";
-import { logger } from "../logger";
-import { handleProxyError } from "./middleware/common";
-import { Router } from "express";
import { ipLimiter } from "./rate-limit";
import { detectMistralInputApi, transformMistralTextToMistralChat } from "./mistral-ai";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";

const awsMistralBlockingResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
@@ -39,23 +34,13 @@ const awsMistralBlockingResponseHandler: ProxyResHandlerWithBody = async (
  res.status(200).json({ ...newBody, proxy: body.proxy });
};

-const awsMistralProxy = createQueueMiddleware({
-  beforeProxy: signAwsRequest,
-  proxyMiddleware: createProxyMiddleware({
-    target: "bad-target-will-be-rewritten",
-    router: ({ signedRequest }) => {
-      if (!signedRequest) throw new Error("Must sign request before proxying");
-      return `${signedRequest.protocol}//${signedRequest.hostname}`;
-    },
-    changeOrigin: true,
-    selfHandleResponse: true,
-    logger,
-    on: {
-      proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
-      proxyRes: createOnProxyResHandler([awsMistralBlockingResponseHandler]),
-      error: handleProxyError,
-    },
-  }),
const awsMistralProxy = createQueuedProxyMiddleware({
  target: ({ signedRequest }) => {
    if (!signedRequest) throw new Error("Must sign request before proxying");
    return `${signedRequest.protocol}//${signedRequest.hostname}`;
  },
  mutations: [signAwsRequest, finalizeSignedRequest],
  blockingResponseHandler: awsMistralBlockingResponseHandler,
});

function maybeReassignModel(req: Request) {

+47 -24
@@ -6,6 +6,7 @@ import { addV1 } from "./add-v1";
import { awsClaude } from "./aws-claude";
import { awsMistral } from "./aws-mistral";
import { AwsBedrockKey, keyPool } from "../shared/key-management";
import { claudeModels, findByAwsId } from "../shared/claude-models";

const awsRouter = Router();
awsRouter.get(["/:vendor?/v1/models", "/:vendor?/models"], handleModelsRequest);
@@ -29,45 +30,67 @@ function handleModelsRequest(req: Request, res: Response) {
    return res.json(modelsCache[vendor]);
  }

-  const availableModelIds = new Set<string>();
  const availableAwsModelIds = new Set<string>();
  for (const key of keyPool.list()) {
    if (key.isDisabled || key.service !== "aws") continue;
-    (key as AwsBedrockKey).modelIds.forEach((id) => availableModelIds.add(id));
    (key as AwsBedrockKey).modelIds.forEach((id) => availableAwsModelIds.add(id));
  }

-  // https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
-  const models = [
-    "anthropic.claude-v2",
-    "anthropic.claude-v2:1",
-    "anthropic.claude-3-haiku-20240307-v1:0",
-    "anthropic.claude-3-sonnet-20240229-v1:0",
-    "anthropic.claude-3-5-sonnet-20240620-v1:0",
-    "anthropic.claude-3-opus-20240229-v1:0",
-    "mistral.mistral-7b-instruct-v0:2",
-    "mistral.mixtral-8x7b-instruct-v0:1",
-    "mistral.mistral-large-2402-v1:0",
-    "mistral.mistral-large-2407-v1:0",
-    "mistral.mistral-small-2402-v1:0",
-  ]
-    .filter((id) => availableModelIds.has(id))
-    .map((id) => {
-      const vendor = id.match(/^(.*)\./)?.[1];
  const mistralMappings = new Map([
    ["mistral.mistral-7b-instruct-v0:2", "Mistral 7B Instruct"],
    ["mistral.mixtral-8x7b-instruct-v0:1", "Mixtral 8x7B Instruct"],
    ["mistral.mistral-large-2402-v1:0", "Mistral Large 2402"],
    ["mistral.mistral-large-2407-v1:0", "Mistral Large 2407"],
    ["mistral.mistral-small-2402-v1:0", "Mistral Small 2402"],
  ]);

  const date = new Date();

  const claudeModelsList = claudeModels
    .filter(model => availableAwsModelIds.has(model.awsId))
    .map(model => ({
      id: model.anthropicId,
      owned_by: "anthropic",
      type: "model",
      display_name: model.displayName,
      created_at: date.toISOString(),
      object: "model",
      created: date.getTime(),
      permission: [],
      root: "anthropic",
      parent: null,
    }));

  const mistralModelsList = Array.from(mistralMappings.keys())
    .filter(id => availableAwsModelIds.has(id))
    .map(id => {
      return {
        id,
        owned_by: "mistral",
        type: "model",
        display_name: mistralMappings.get(id) || id.split('.')[1],
        created_at: date.toISOString(),
        object: "model",
-        created: new Date().getTime(),
-        owned_by: vendor,
        created: date.getTime(),
        permission: [],
-        root: vendor,
        root: "mistral",
        parent: null,
      };
    });

  const allModels = [...claudeModelsList, ...mistralModelsList];
  const filteredModels = vendor === "all"
    ? allModels
    : allModels.filter(m => m.root === vendor);

  modelsCache[vendor] = {
    object: "list",
-    data: models.filter((m) => vendor === "all" || m.root === vendor),
    data: filteredModels,
    has_more: false,
    first_id: filteredModels[0]?.id,
    last_id: filteredModels[filteredModels.length - 1]?.id,
  };
-  modelsCacheTime[vendor] = new Date().getTime();
  modelsCacheTime[vendor] = date.getTime();

  return res.json(modelsCache[vendor]);
}

+23 -75
@@ -1,73 +1,30 @@
import { RequestHandler, Router } from "express";
-import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
-import { keyPool } from "../shared/key-management";
-import {
-  AzureOpenAIModelFamily,
-  getAzureOpenAIModelFamily,
-  ModelFamily,
-} from "../shared/models";
-import { logger } from "../logger";
-import { KNOWN_OPENAI_MODELS } from "./openai";
-import { createQueueMiddleware } from "./queue";
import { generateModelList } from "./openai";
import { ipLimiter } from "./rate-limit";
-import { handleProxyError } from "./middleware/common";
import {
  addAzureKey,
-  createOnProxyReqHandler,
  createPreprocessorMiddleware,
  finalizeSignedRequest,
} from "./middleware/request";
-import {
-  createOnProxyResHandler,
-  ProxyResHandlerWithBody,
-} from "./middleware/response";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";

let modelsCache: any = null;
let modelsCacheTime = 0;

-function getModelsResponse() {
-  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
-    return modelsCache;
-  }
-
-  let available = new Set<AzureOpenAIModelFamily>();
-  for (const key of keyPool.list()) {
-    if (key.isDisabled || key.service !== "azure") continue;
-    key.modelFamilies.forEach((family) =>
-      available.add(family as AzureOpenAIModelFamily)
-    );
-  }
-  const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
-  available = new Set([...available].filter((x) => allowed.has(x)));
-
-  const models = KNOWN_OPENAI_MODELS.map((id) => ({
-    id,
-    object: "model",
-    created: new Date().getTime(),
-    owned_by: "azure",
-    permission: [
-      {
-        id: "modelperm-" + id,
-        object: "model_permission",
-        created: new Date().getTime(),
-        organization: "*",
-        group: null,
-        is_blocking: false,
-      },
-    ],
-    root: id,
-    parent: null,
-  })).filter((model) => available.has(getAzureOpenAIModelFamily(model.id)));
-
-  modelsCache = { object: "list", data: models };
-  modelsCacheTime = new Date().getTime();
-
-  return modelsCache;
-}
-
const handleModelRequest: RequestHandler = (_req, res) => {
-  res.status(200).json(getModelsResponse());
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return res.status(200).json(modelsCache);
  }

  if (!config.azureCredentials) {
    return res.status(200).json({ object: "list", data: [] });
  }

  const result = generateModelList("azure");

  modelsCache = { object: "list", data: result };
  modelsCacheTime = new Date().getTime();
  res.status(200).json(modelsCache);
};

const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
@@ -83,26 +40,17 @@ const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
  res.status(200).json({ ...body, proxy: body.proxy });
};

-const azureOpenAIProxy = createQueueMiddleware({
-  beforeProxy: addAzureKey,
-  proxyMiddleware: createProxyMiddleware({
-    target: "will be set by router",
-    router: (req) => {
-      if (!req.signedRequest) throw new Error("signedRequest not set");
-      const { hostname, path } = req.signedRequest;
-      return `https://${hostname}${path}`;
-    },
-    changeOrigin: true,
-    selfHandleResponse: true,
-    logger,
-    on: {
-      proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
-      proxyRes: createOnProxyResHandler([azureOpenaiResponseHandler]),
-      error: handleProxyError,
-    },
-  }),
const azureOpenAIProxy = createQueuedProxyMiddleware({
  target: ({ signedRequest }) => {
    if (!signedRequest) throw new Error("Must sign request before proxying");
    const { hostname, protocol } = signedRequest;
    return `${protocol}//${hostname}`;
  },
  mutations: [addAzureKey, finalizeSignedRequest],
  blockingResponseHandler: azureOpenaiResponseHandler,
});

const azureOpenAIRouter = Router();
azureOpenAIRouter.get("/v1/models", handleModelRequest);
azureOpenAIRouter.post(
@@ -0,0 +1,222 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { CohereKey, keyPool } from "../shared/key-management";
import { isCohereModel, normalizeMessages } from "../shared/api-schemas/cohere";
import { logger } from "../logger";

const log = logger.child({ module: "proxy", service: "cohere" });
let modelsCache: any = null;
let modelsCacheTime = 0;

const cohereResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  res.status(200).json({ ...body, proxy: body.proxy });
};

const getModelsResponse = async () => {
  // Return cache if less than 1 minute old
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return modelsCache;
  }

  try {
    // Get a Cohere key directly
    const modelToUse = "command"; // Use any Cohere model here - just for key selection
    const cohereKey = keyPool.get(modelToUse, "cohere") as CohereKey;

    if (!cohereKey || !cohereKey.key) {
      log.warn("No valid Cohere key available for model listing");
      throw new Error("No valid Cohere API key available");
    }

    // Fetch models directly from Cohere API
    const response = await axios.get("https://api.cohere.com/v1/models", {
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${cohereKey.key}`,
        "Cohere-Version": "2022-12-06"
      },
    });

    if (!response.data || !response.data.models) {
      throw new Error("Unexpected response format from Cohere API");
    }

    // Extract models and filter by those that support the chat endpoint
    const filteredModels = response.data.models
      .filter((model: any) => {
        return model.endpoints && model.endpoints.includes("chat");
      })
      .map((model: any) => ({
        id: model.name,
        name: model.name,
        // Adding additional OpenAI-compatible fields
        context_window: model.context_window_size || 4096,
        max_tokens: model.max_tokens || 4096
      }));

    log.debug({ modelCount: filteredModels.length, models: filteredModels.map((m: any) => m.id) }, "Filtered models from Cohere API");

    // Format response to ensure OpenAI compatibility
    const models = {
      object: "list",
      data: filteredModels.map((model: any) => ({
        id: model.id,
        object: "model",
        created: Math.floor(Date.now() / 1000),
        owned_by: "cohere",
        permission: [],
        root: model.id,
        parent: null,
        context_length: model.context_window,
      })),
    };

    log.debug({ modelCount: filteredModels.length }, "Retrieved models from Cohere API");

    // Cache the response
    modelsCache = models;
    modelsCacheTime = new Date().getTime();
    return models;
  } catch (error) {
    // Provide detailed logging for better troubleshooting
    if (error instanceof Error) {
      log.error(
        { errorMessage: error.message, stack: error.stack },
        "Error fetching Cohere models"
      );
    } else {
      log.error({ error }, "Unknown error fetching Cohere models");
    }

    // Return empty list as fallback
    return {
      object: "list",
      data: [],
    };
  }
};
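The resulting payload mimics OpenAI's /v1/models list. Roughly, with an illustrative model ID and values:

// {
//   object: "list",
//   data: [{
//     id: "command-r-plus",
//     object: "model",
//     created: 1714089600,
//     owned_by: "cohere",
//     permission: [],
//     root: "command-r-plus",
//     parent: null,
//     context_length: 128000,
//   }],
// }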

const handleModelRequest: RequestHandler = async (_req, res) => {
  try {
    const models = await getModelsResponse();
    res.status(200).json(models);
  } catch (error) {
    if (error instanceof Error) {
      log.error(
        { errorMessage: error.message, stack: error.stack },
        "Error handling model request"
      );
    } else {
      log.error({ error }, "Unknown error handling model request");
    }
    res.status(500).json({ error: "Failed to fetch models" });
  }
};

// Function to prepare messages for Cohere API
function prepareMessages(req: Request) {
  if (req.body.messages && Array.isArray(req.body.messages)) {
    req.body.messages = normalizeMessages(req.body.messages);
  }
}

// Function to remove parameters not supported by Cohere models
function removeUnsupportedParameters(req: Request) {
  const model = req.body.model;

  // Remove parameters that Cohere doesn't support
  if (req.body.logit_bias !== undefined) {
    delete req.body.logit_bias;
  }

  if (req.body.top_logprobs !== undefined) {
    delete req.body.top_logprobs;
  }

  if (req.body.max_completion_tokens !== undefined) {
    delete req.body.max_completion_tokens;
  }

  // Handle structured output format
  if (req.body.response_format && req.body.response_format.schema) {
    // Transform to Cohere's format if needed
    const jsonSchema = req.body.response_format.schema;
    req.body.response_format = {
      type: "json_object",
      schema: jsonSchema
    };
  }

  // Logging for debugging
  if (process.env.NODE_ENV !== 'production') {
    log.debug({ body: req.body }, "Request after parameter cleanup");
  }
}
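A before/after sketch of the cleanup (illustrative payload, not taken from the diff):

const before = {
  model: "command-r",
  logit_bias: { "50256": -100 },  // removed: unsupported
  top_logprobs: 5,                // removed: unsupported
  max_completion_tokens: 512,     // removed: unsupported
  response_format: { schema: { type: "object" } },
};
// After removeUnsupportedParameters(req), req.body is roughly:
// { model: "command-r",
//   response_format: { type: "json_object", schema: { type: "object" } } }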

// Set up count token functionality for Cohere models
function countCohereTokens(req: Request) {
  const model = req.body.model;

  if (isCohereModel(model)) {
    // Count tokens using prompt tokens (simplified)
    if (req.promptTokens) {
      req.log.debug(
        { tokens: req.promptTokens },
        "Estimated token count for Cohere prompt"
      );
    }
  }
}

const cohereProxy = createQueuedProxyMiddleware({
  mutations: [
    addKey,
    // Add Cohere-Version header to every request
    (manager) => {
      manager.setHeader("Cohere-Version", "2022-12-06");
    },
    finalizeBody
  ],
  target: "https://api.cohere.ai/compatibility",
  blockingResponseHandler: cohereResponseHandler,
});

const cohereRouter = Router();

cohereRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "cohere" },
    { afterTransform: [prepareMessages, removeUnsupportedParameters, countCohereTokens] }
  ),
  cohereProxy
);

cohereRouter.post(
  "/v1/embeddings",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "cohere" },
    { afterTransform: [] }
  ),
  cohereProxy
);

cohereRouter.get("/v1/models", handleModelRequest);

export const cohere = cohereRouter;
@@ -0,0 +1,135 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { DeepseekKey, keyPool } from "../shared/key-management";

let modelsCache: any = null;
let modelsCacheTime = 0;

const deepseekResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  let newBody = body;

  res.status(200).json({ ...newBody, proxy: body.proxy });
};

const getModelsResponse = async () => {
  // Return cache if less than 1 minute old
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return modelsCache;
  }

  try {
    // Get a Deepseek key directly using keyPool.get()
    const modelToUse = "deepseek-chat"; // Use any Deepseek model here - just for key selection
    const deepseekKey = keyPool.get(modelToUse, "deepseek") as DeepseekKey;

    if (!deepseekKey || !deepseekKey.key) {
      throw new Error("Failed to get valid Deepseek key");
    }

    // Fetch models from Deepseek API with authorization
    const response = await axios.get("https://api.deepseek.com/models", {
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${deepseekKey.key}`
      },
    });

    // If successful, update the cache
    if (response.data && response.data.data) {
      modelsCache = {
        object: "list",
        data: response.data.data.map((model: any) => ({
          id: model.id,
          object: "model",
          owned_by: "deepseek",
        })),
      };
    } else {
      throw new Error("Unexpected response format from Deepseek API");
    }
  } catch (error) {
    console.error("Error fetching Deepseek models:", error);
    throw error; // No fallback - error will be passed to caller
  }

  modelsCacheTime = new Date().getTime();
  return modelsCache;
};

const handleModelRequest: RequestHandler = async (_req, res) => {
  try {
    const modelsResponse = await getModelsResponse();
    res.status(200).json(modelsResponse);
  } catch (error) {
    console.error("Error in handleModelRequest:", error);
    res.status(500).json({ error: "Failed to fetch models" });
  }
};

const deepseekProxy = createQueuedProxyMiddleware({
  mutations: [addKey, finalizeBody],
  target: "https://api.deepseek.com/beta",
  blockingResponseHandler: deepseekResponseHandler,
});

const deepseekRouter = Router();

// Combines all the assistant messages at the end of the context and adds the
// beta 'prefix' option; makes prefills work the same way they do for Claude.
function enablePrefill(req: Request) {
  // Can be disabled via the NO_DEEPSEEK_PREFILL environment variable
  if (process.env.NO_DEEPSEEK_PREFILL) return;

  const msgs = req.body.messages;
  if (msgs.at(-1)?.role !== 'assistant') return;

  let i = msgs.length - 1;
  let content = '';

  while (i >= 0 && msgs[i].role === 'assistant') {
    // maybe we should also add a newline between messages? no for now.
    content = msgs[i--].content + content;
  }

  msgs.splice(i + 1, msgs.length, { role: 'assistant', content, prefix: true });
}
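A worked example of the merge (a sketch; message contents are illustrative):

// Input:
//   [{ role: "user", content: "Write a haiku." },
//    { role: "assistant", content: "Autumn " },
//    { role: "assistant", content: "moonlight-" }]
//
// enablePrefill walks backwards over the trailing assistant messages,
// concatenating them in their original order, then splices them into one:
//   [{ role: "user", content: "Write a haiku." },
//    { role: "assistant", content: "Autumn moonlight-", prefix: true }]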

function removeReasonerStuff(req: Request) {
  if (req.body.model === "deepseek-reasoner") {
    // https://api-docs.deepseek.com/guides/reasoning_model
    delete req.body.presence_penalty;
    delete req.body.frequency_penalty;
    delete req.body.temperature;
    delete req.body.top_p;
    delete req.body.logprobs;
    delete req.body.top_logprobs;
  }
}

deepseekRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "deepseek" },
    { afterTransform: [enablePrefill, removeReasonerStuff] }
  ),
  deepseekProxy
);

deepseekRouter.get("/v1/models", handleModelRequest);

export const deepseek = deepseekRouter;

@@ -25,6 +25,12 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
    delete req.headers["x-api-key"];
    return token;
  }

  if (req.headers["x-goog-api-key"]) {
    const token = req.headers["x-goog-api-key"]?.toString();
    delete req.headers["x-goog-api-key"];
    return token;
  }

  if (req.query.key) {
    const token = req.query.key?.toString();

+121 -57
@@ -1,21 +1,16 @@
import { Request, RequestHandler, Router } from "express";
-import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
-import { logger } from "../logger";
-import { createQueueMiddleware } from "./queue";
import { transformAnthropicChatResponseToOpenAI } from "./anthropic";
import { ipLimiter } from "./rate-limit";
-import { handleProxyError } from "./middleware/common";
import {
  createPreprocessorMiddleware,
-  signGcpRequest,
  finalizeSignedRequest,
-  createOnProxyReqHandler,
  signGcpRequest,
} from "./middleware/request";
-import {
-  ProxyResHandlerWithBody,
-  createOnProxyResHandler,
-} from "./middleware/response";
-import { transformAnthropicChatResponseToOpenAI } from "./anthropic";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";

const LATEST_GCP_SONNET_MINOR_VERSION = "20240229";

let modelsCache: any = null;
@@ -31,9 +26,13 @@ const getModelsResponse = () => {
  // https://docs.anthropic.com/en/docs/about-claude/models
  const variants = [
    "claude-3-haiku@20240307",
    "claude-3-sonnet@20240229",
    "claude-3-opus@20240229",
    "claude-3-5-haiku@20241022",
    "claude-3-5-sonnet@20240620",
    "claude-3-5-sonnet-v2@20241022",
    "claude-3-7-sonnet@20250219",
    "claude-sonnet-4@20250514",
    "claude-opus-4@20250514",
    "claude-opus-4-1@20250805",
  ];

  const models = variants.map((id) => ({
@@ -56,8 +55,7 @@ const handleModelRequest: RequestHandler = (_req, res) => {
  res.status(200).json(getModelsResponse());
};

-/** Only used for non-streaming requests. */
-const gcpResponseHandler: ProxyResHandlerWithBody = async (
const gcpBlockingResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
@@ -78,23 +76,13 @@ const gcpResponseHandler: ProxyResHandlerWithBody = async (
  res.status(200).json({ ...newBody, proxy: body.proxy });
};

-const gcpProxy = createQueueMiddleware({
-  beforeProxy: signGcpRequest,
-  proxyMiddleware: createProxyMiddleware({
-    target: "bad-target-will-be-rewritten",
-    router: ({ signedRequest }) => {
-      if (!signedRequest) throw new Error("Must sign request before proxying");
-      return `${signedRequest.protocol}//${signedRequest.hostname}`;
-    },
-    changeOrigin: true,
-    selfHandleResponse: true,
-    logger,
-    on: {
-      proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
-      proxyRes: createOnProxyResHandler([gcpResponseHandler]),
-      error: handleProxyError,
-    },
-  }),
const gcpProxy = createQueuedProxyMiddleware({
  target: ({ signedRequest }) => {
    if (!signedRequest) throw new Error("Must sign request before proxying");
    return `${signedRequest.protocol}//${signedRequest.hostname}`;
  },
  mutations: [signGcpRequest, finalizeSignedRequest],
  blockingResponseHandler: gcpBlockingResponseHandler,
});

const oaiToChatPreprocessor = createPreprocessorMiddleware(
@@ -138,56 +126,132 @@ gcpRouter.post(
 * - frontends sending Anthropic model names that GCP doesn't recognize
 * - frontends sending OpenAI model names because they expect the proxy to
 *   translate them
 *
 * If client sends GCP model ID it will be used verbatim. Otherwise, various
 * strategies are used to try to map a non-GCP model name to GCP model ID.
 */
function maybeReassignModel(req: Request) {
  // Validate Claude 4.1 Opus parameters before processing
  validateClaude41OpusParameters(req);

  const model = req.body.model;
  const DEFAULT_MODEL = "claude-3-5-sonnet-v2@20241022";

  // If it looks like a GCP model, use it as-is
  // if (model.includes("anthropic.claude")) {
  if (model.startsWith("claude-") && model.includes("@")) {
    return;
  }

  // Anthropic model names can look like:
  // - claude-v1
  // - claude-2.1
  // - claude-3-5-sonnet-20240620-v1:0
  // - claude-3-sonnet
  // - claude-3.5-sonnet
  // - claude-3-5-haiku
  // - claude-3-5-haiku-latest
  // - claude-3-5-sonnet-20240620
  // - claude-opus-4-1 (new format)
  // - claude-4.1-opus (alternative format)
-  const pattern =
-    /^(claude-)?(instant-)?(v)?(\d+)([.-](\d{1}))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(\d*)/i;
  const pattern = /^claude-(?:(\d+)[.-]?(\d)?-(sonnet|opus|haiku)(?:-(latest|\d+))?|(opus|sonnet|haiku)-(\d+)[.-]?(\d)?(?:-(latest|\d+))?)/i;
  const match = model.match(pattern);

-  // If there's no match, fallback to Claude3 Sonnet as it is most likely to be
-  // available on GCP.
  if (!match) {
-    req.body.model = `claude-3-sonnet@${LATEST_GCP_SONNET_MINOR_VERSION}`;
    req.body.model = DEFAULT_MODEL;
    return;
  }

-  const [_, _cl, instant, _v, major, _sep, minor, _ctx, name, _rev] = match;
  // Handle both formats: claude-3-5-sonnet and claude-opus-4-1
  const [_, major1, minor1, flavor1, rev1, flavor2, major2, minor2, rev2] = match;

  let major, minor, flavor, rev;
  if (major1) {
    // Old format: claude-3-5-sonnet
    major = major1;
    minor = minor1;
    flavor = flavor1;
    rev = rev1;
  } else {
    // New format: claude-opus-4-1
    major = major2;
    minor = minor2;
    flavor = flavor2;
    rev = rev2;
  }

  const ver = minor ? `${major}.${minor}` : major;

  switch (ver) {
    case "3":
    case "3.0":
-      if (name.includes("opus")) {
-        req.body.model = "claude-3-opus@20240229";
-      } else if (name.includes("haiku")) {
-        req.body.model = "claude-3-haiku@20240307";
-      } else {
-        req.body.model = "claude-3-sonnet@20240229";
-      }
      switch (flavor) {
        case "haiku":
          req.body.model = "claude-3-haiku@20240307";
          break;
        case "opus":
          req.body.model = "claude-3-opus@20240229";
          break;
        case "sonnet":
          req.body.model = "claude-3-sonnet@20240229";
          break;
        default:
          req.body.model = "claude-3-sonnet@20240229";
      }
      return;
-    case "3.5":
-      req.body.model = "claude-3-5-sonnet@20240620";
-      return;
-  }
-
-  // Fallback to Claude3 Sonnet
-  req.body.model = `claude-3-sonnet@${LATEST_GCP_SONNET_MINOR_VERSION}`;
-  return;
    case "3.5":
      switch (flavor) {
        case "haiku":
          req.body.model = "claude-3-5-haiku@20241022";
          return;
        case "opus":
          // no 3.5 opus yet
          req.body.model = DEFAULT_MODEL;
          return;
        case "sonnet":
          if (rev === "20240620") {
            req.body.model = "claude-3-5-sonnet@20240620";
          } else {
            // includes -latest, edit if anthropic actually releases 3.5 sonnet v3
            req.body.model = DEFAULT_MODEL;
          }
          return;
        default:
          req.body.model = DEFAULT_MODEL;
      }
      return;

    case "3.7":
      switch (flavor) {
        case "sonnet":
          req.body.model = "claude-3-7-sonnet@20250219";
          return;
      }
      break;

    case "4":
    case "4.0":
      switch (flavor) {
        case "opus":
          req.body.model = "claude-opus-4@20250514";
          return;
        case "sonnet":
          req.body.model = "claude-sonnet-4@20250514";
          return;
        default:
          req.body.model = DEFAULT_MODEL;
      }
      break;

    case "4.1":
      switch (flavor) {
        case "opus":
          req.body.model = "claude-opus-4-1@20250805";
          return;
        default:
          req.body.model = DEFAULT_MODEL;
      }
      break;

    default:
      req.body.model = DEFAULT_MODEL;
  }
}

export const gcp = gcpRouter;

+173 -84
@@ -1,26 +1,24 @@
-import { Request, RequestHandler, Router } from "express";
-import { createProxyMiddleware } from "http-proxy-middleware";
import { Request, RequestHandler, Router, Response, NextFunction } from "express";
import { v4 } from "uuid";
-import { GoogleAIKey, keyPool } from "../shared/key-management";
import { config } from "../config";
-import { logger } from "../logger";
-import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
-import { handleProxyError } from "./middleware/common";
import {
-  createOnProxyReqHandler,
  createPreprocessorMiddleware,
  finalizeSignedRequest,
} from "./middleware/request";
-import {
-  createOnProxyResHandler,
-  ProxyResHandlerWithBody,
-} from "./middleware/response";
-import { addGoogleAIKey } from "./middleware/request/preprocessors/add-google-ai-key";
import { GoogleAIKey, keyPool } from "../shared/key-management";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { addGoogleAIKey } from "./middleware/request/mutators/add-google-ai-key";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import axios from "axios";

let modelsCache: any = null;
let modelsCacheTime = 0;

// Cache for native Google AI models
let nativeModelsCache: any = null;
let nativeModelsCacheTime = 0;

// https://ai.google.dev/models/gemini
// TODO: list models https://ai.google.dev/tutorials/rest_quickstart#list_models

@@ -40,11 +38,15 @@ const getModelsResponse = () => {
    return modelsCache;
  }

  // Get all model IDs from keys, excluding any with "bard" in the name
  const modelIds = Array.from(
    new Set(keys.map((k) => k.modelIds).flat())
-  ).filter((id) => id.startsWith("models/gemini"));
  ).filter((id) => id.startsWith("models/") && !id.includes("bard"));

  // Strip "models/" prefix from IDs before creating model objects
  const models = modelIds.map((id) => ({
-    id,
    // Strip "models/" prefix from ID for consistency with request processing
    id: id.startsWith("models/") ? id.slice("models/".length) : id,
    object: "model",
    created: new Date().getTime(),
    owned_by: "google",
@@ -59,12 +61,51 @@ const getModelsResponse = () => {
  return modelsCache;
};

-const handleModelRequest: RequestHandler = (_req, res) => {
// Function to build the native Google AI model list
const getNativeModelsResponse = async () => {
  // Return cached value if it was refreshed in the last minute
  if (new Date().getTime() - nativeModelsCacheTime < 1000 * 60) {
    return nativeModelsCache;
  }

  /*
   * The official Google API requires an API key. However SillyTavern only needs
   * a list of model IDs and does not care about any other model metadata. We
   * can therefore generate a **synthetic** response from the keys already
   * loaded into the proxy (same source we use for the OpenAI-compatible
   * endpoint) and completely avoid the outbound request. This removes the
   * need for the frontend to supply the proxy password as an API key and
   * prevents 4xx/5xx errors when the real Google API is unreachable or the key
   * is missing.
   */
  const openaiStyle = getModelsResponse();
  const models = (openaiStyle.data || []).map((m: any) => ({
    // Google AI Studio returns names in the format "models/<id>"
    name: `models/${m.id}`,
    supportedGenerationMethods: ["generateContent"],
  }));

  nativeModelsCache = { models };
  nativeModelsCacheTime = new Date().getTime();
  return nativeModelsCache;
};
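The synthetic response carries only the fields the frontend reads. Assuming a loaded key exposes gemini-1.5-pro (illustrative), the shape is roughly:

// {
//   models: [
//     { name: "models/gemini-1.5-pro",
//       supportedGenerationMethods: ["generateContent"] },
//     ...
//   ]
// }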

const handleModelRequest: RequestHandler = (_req: Request, res: any) => {
  res.status(200).json(getModelsResponse());
};

-/** Only used for non-streaming requests. */
-const googleAIResponseHandler: ProxyResHandlerWithBody = async (
// Native Gemini API model list request
const handleNativeModelRequest: RequestHandler = async (_req: Request, res: any) => {
  try {
    const modelsResponse = await getNativeModelsResponse();
    res.status(200).json(modelsResponse);
  } catch (error) {
    console.error("Error in handleNativeModelRequest:", error);
    res.status(500).json({ error: "Failed to fetch models" });
  }
};

const googleAIBlockingResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
@@ -88,8 +129,30 @@ function transformGoogleAIResponse(
  req: Request
): Record<string, any> {
  const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
-  const parts = resBody.candidates[0].content?.parts ?? [{ text: "" }];
-  const content = parts[0].text.replace(/^(.{0,50}?): /, () => "");

  // Handle the case where content might have different structures
  let content = "";

  // Check if the response has the expected structure
  if (resBody.candidates && resBody.candidates[0]) {
    const candidate = resBody.candidates[0];

    // Extract content text with multiple fallbacks
    if (candidate.content?.parts && candidate.content.parts[0]?.text) {
      // Regular format with parts array containing text
      content = candidate.content.parts[0].text;
    } else if (candidate.content?.text) {
      // Alternate format with direct text property
      content = candidate.content.text;
    } else if (typeof candidate.content?.parts?.[0] === 'string') {
      // Some formats might have string parts
      content = candidate.content.parts[0];
    }

    // Apply cleanup to the content if needed
    content = content.replace(/^(.{0,50}?): /, () => "");
  }

  return {
    id: "goo-" + v4(),
    object: "chat.completion",
@@ -103,70 +166,35 @@ function transformGoogleAIResponse(
    choices: [
      {
        message: { role: "assistant", content },
-        finish_reason: resBody.candidates[0].finishReason,
        finish_reason: resBody.candidates?.[0]?.finishReason || "STOP",
        index: 0,
      },
    ],
  };
}
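For reference, a sketch of the transformation (values illustrative):

const geminiBody = {
  candidates: [{ content: { parts: [{ text: "Hello!" }] }, finishReason: "STOP" }],
};
// transformGoogleAIResponse(geminiBody, req) returns roughly:
// {
//   id: "goo-<uuid>",
//   object: "chat.completion",
//   choices: [{ message: { role: "assistant", content: "Hello!" },
//               finish_reason: "STOP", index: 0 }],
// }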
|
||||
|
||||
const googleAIProxy = createQueueMiddleware({
|
||||
beforeProxy: addGoogleAIKey,
|
||||
proxyMiddleware: createProxyMiddleware({
|
||||
target: "bad-target-will-be-rewritten",
|
||||
router: ({ signedRequest }) => {
|
||||
const { protocol, hostname, path } = signedRequest;
|
||||
return `${protocol}//${hostname}${path}`;
|
||||
},
|
||||
changeOrigin: true,
|
||||
selfHandleResponse: true,
|
||||
// Prevent logging of the API key by HPM
|
||||
logger: logger.child(
|
||||
{},
|
||||
{
|
||||
redact: {
|
||||
paths: ["*"],
|
||||
censor: (v) =>
|
||||
typeof v === "string" ? v.replace(/key=\S+/g, "key=xxxxxxx") : v,
|
||||
},
|
||||
}
|
||||
),
|
||||
on: {
|
||||
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
|
||||
proxyRes: createOnProxyResHandler([googleAIResponseHandler]),
|
||||
error: handleProxyError,
|
||||
},
|
||||
}),
|
||||
const googleAIProxy = createQueuedProxyMiddleware({
|
||||
target: ({ signedRequest }: { signedRequest: any }) => {
|
||||
if (!signedRequest) throw new Error("Must sign request before proxying");
|
||||
const { protocol, hostname} = signedRequest;
|
||||
return `${protocol}//${hostname}`;
|
||||
},
|
||||
mutations: [addGoogleAIKey, finalizeSignedRequest],
|
||||
blockingResponseHandler: googleAIBlockingResponseHandler,
|
||||
});
|
||||
|
||||
const googleAIRouter = Router();
|
||||
googleAIRouter.get("/v1/models", handleModelRequest);
|
||||
googleAIRouter.get("/:apiVersion(v1alpha|v1beta)/models", handleNativeModelRequest);
|
||||
|
||||
// Native Google AI chat completion endpoint
|
||||
googleAIRouter.post(
|
||||
"/v1beta/models/:modelId:(generateContent|streamGenerateContent)",
|
||||
ipLimiter,
|
||||
createPreprocessorMiddleware(
|
||||
{
|
||||
inApi: "google-ai",
|
||||
outApi: "google-ai",
|
||||
service: "google-ai",
|
||||
},
|
||||
{ beforeTransform: [maybeReassignModel], afterTransform: [setStreamFlag] }
|
||||
),
|
||||
googleAIProxy
|
||||
);
|
||||
|
||||
// OpenAI-to-Google AI compatibility endpoint.
|
||||
googleAIRouter.post(
|
||||
"/v1/chat/completions",
|
||||
ipLimiter,
|
||||
createPreprocessorMiddleware(
|
||||
{ inApi: "openai", outApi: "google-ai", service: "google-ai" },
|
||||
{ afterTransform: [maybeReassignModel] }
|
||||
),
|
||||
googleAIProxy
|
||||
);
|
||||
/**
|
||||
* Processes the thinking budget for Gemini 2.5 Flash model.
|
||||
* Validation has been disabled - budget is passed through without limits.
|
||||
*/
|
||||
function processThinkingBudget(req: Request) {
|
||||
// Validation disabled - budget is passed through without any range limits
|
||||
// Previously enforced 0-24576 token limit
|
||||
}
|
||||
|
||||
function setStreamFlag(req: Request) {
|
||||
const isStreaming = req.url.includes("streamGenerateContent");
|
||||
@@ -180,8 +208,8 @@ function setStreamFlag(req: Request) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Replaces requests for non-Google AI models with gemini-pro-1.5-latest.
|
||||
* Also strips models/ from the beginning of the model IDs.
|
||||
* Strips 'models/' prefix from the beginning of model IDs if present.
|
||||
* No longer forces redirection to gemini-1.5-pro-latest for non-Gemini models.
|
||||
**/
|
||||
function maybeReassignModel(req: Request) {
|
||||
// Ensure model is on body as a lot of middleware will expect it.
|
||||
@@ -191,17 +219,78 @@ function maybeReassignModel(req: Request) {
|
||||
}
|
||||
req.body.model = model;
|
||||
|
||||
const requested = model;
|
||||
if (requested.startsWith("models/")) {
|
||||
req.body.model = requested.slice("models/".length);
|
||||
// Only strip the 'models/' prefix if present
|
||||
if (model.startsWith("models/")) {
|
||||
req.body.model = model.slice("models/".length);
|
||||
req.log.info({ originalModel: model, updatedModel: req.body.model }, "Stripped 'models/' prefix from model ID");
|
||||
}
|
||||
|
||||
if (requested.includes("gemini")) {
|
||||
return;
|
||||
}
|
||||
|
||||
req.log.info({ requested }, "Reassigning model to gemini-pro-1.5-latest");
|
||||
req.body.model = "gemini-pro-1.5-latest";
|
||||
|
||||
// No longer redirecting non-Gemini models to gemini-1.5-pro-latest
|
||||
// This allows the original model to be passed through to the API
|
||||
// If it's an invalid model, the Google AI API will return the appropriate error
|
||||
}

/**
 * Middleware to check for and block requests to experimental models.
 * This function is intended to be used as a RequestPreprocessor.
 * It throws an error if an experimental model is detected, which should be
 * caught by the proxy's onError handler.
 *
 * Models can be allowed through the ALLOWED_EXP_MODELS environment variable.
 */
function checkAndBlockExperimentalModels(req: Request) { // Changed signature
  const modelId = req.body.model as string | undefined;

  // Check if the model ID contains "exp" (case-insensitive)
  if (modelId && modelId.toLowerCase().includes("exp")) {
    // Check if this specific model is in the allowlist
    const allowedModels = config.allowedExpModels
      ?.split(",")
      .map(model => model.trim())
      .filter(model => model.length > 0) || [];

    const isAllowed = allowedModels.some(allowedModel =>
      modelId.toLowerCase() === allowedModel.toLowerCase()
    );

    if (isAllowed) {
      req.log.info({ modelId }, "Allowing experimental Google AI model via allowlist.");
      return; // Allow the request to proceed
    }

    req.log.warn({ modelId }, "Blocking request to experimental Google AI model.");
    const err: any = new Error("Experimental models are too unstable to be supported in proxy code. Please use preview models instead.");
    err.statusCode = 400;
    throw err;
  }
  // If no experimental model, do nothing, allowing request to proceed.
}
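
For reference, a sketch of how the allowlist behaves, based only on the parsing
logic above (the model IDs are hypothetical examples, not values from this
repository):

// With config.allowedExpModels = "gemini-2.0-flash-exp, gemini-exp-1206":
const allowedModels = "gemini-2.0-flash-exp, gemini-exp-1206"
  .split(",")
  .map((model) => model.trim())
  .filter((model) => model.length > 0);
// => ["gemini-2.0-flash-exp", "gemini-exp-1206"]
// A request for "gemini-exp-1206" passes the allowlist check; any other model
// ID containing "exp" is rejected with a 400 before reaching Google.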

// Native Google AI chat completion endpoint
googleAIRouter.post(
  "/:apiVersion(v1alpha|v1beta)/models/:modelId:(generateContent|streamGenerateContent)",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "google-ai", outApi: "google-ai", service: "google-ai" },
    {
      beforeTransform: [maybeReassignModel],
      afterTransform: [checkAndBlockExperimentalModels, setStreamFlag, processThinkingBudget]
    }
  ),
  googleAIProxy
);

// OpenAI-to-Google AI compatibility endpoint.
googleAIRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "google-ai", service: "google-ai" },
    {
      afterTransform: [maybeReassignModel, checkAndBlockExperimentalModels, processThinkingBudget]
    }
  ),
  googleAIProxy
);

export const googleAI = googleAIRouter;
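
For orientation, the two routes above accept the following request shapes. This
is a minimal client sketch; the base URL is a placeholder and whatever
authentication the proxy's gatekeeper requires is omitted:

// Hypothetical client usage; BASE_URL depends on the deployment.
const BASE_URL = "https://proxy.example.com/google-ai";

// Native Gemini format:
await fetch(`${BASE_URL}/v1beta/models/gemini-1.5-pro:generateContent`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ contents: [{ parts: [{ text: "Hi" }] }] }),
});

// OpenAI-compatible format, translated to Google AI by the proxy:
await fetch(`${BASE_URL}/v1/chat/completions`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "gemini-1.5-pro",
    messages: [{ role: "user", content: "Hi" }],
  }),
});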

@@ -1,6 +1,6 @@
import { Request, Response } from "express";
import http from "http";
import httpProxy from "http-proxy";
import { Socket } from "net";
import { ZodError } from "zod";
import { generateErrorMessage } from "zod-error";
import { HttpError } from "../../shared/errors";
@@ -12,11 +12,13 @@ const OPENAI_CHAT_COMPLETION_ENDPOINT = "/v1/chat/completions";
const OPENAI_TEXT_COMPLETION_ENDPOINT = "/v1/completions";
const OPENAI_EMBEDDINGS_ENDPOINT = "/v1/embeddings";
const OPENAI_IMAGE_COMPLETION_ENDPOINT = "/v1/images/generations";
const OPENAI_RESPONSES_ENDPOINT = "/v1/responses";
const ANTHROPIC_COMPLETION_ENDPOINT = "/v1/complete";
const ANTHROPIC_MESSAGES_ENDPOINT = "/v1/messages";
const ANTHROPIC_SONNET_COMPAT_ENDPOINT = "/v1/sonnet";
const ANTHROPIC_OPUS_COMPAT_ENDPOINT = "/v1/opus";
const GOOGLE_AI_COMPLETION_ENDPOINT = "/v1beta/models";
const GOOGLE_AI_ALPHA_COMPLETION_ENDPOINT = "/v1alpha/models";
const GOOGLE_AI_BETA_COMPLETION_ENDPOINT = "/v1beta/models";

export function isTextGenerationRequest(req: Request) {
  return (
@@ -24,11 +26,13 @@ export function isTextGenerationRequest(req: Request) {
    [
      OPENAI_CHAT_COMPLETION_ENDPOINT,
      OPENAI_TEXT_COMPLETION_ENDPOINT,
      OPENAI_RESPONSES_ENDPOINT,
      ANTHROPIC_COMPLETION_ENDPOINT,
      ANTHROPIC_MESSAGES_ENDPOINT,
      ANTHROPIC_SONNET_COMPAT_ENDPOINT,
      ANTHROPIC_OPUS_COMPAT_ENDPOINT,
      GOOGLE_AI_COMPLETION_ENDPOINT,
      GOOGLE_AI_ALPHA_COMPLETION_ENDPOINT,
      GOOGLE_AI_BETA_COMPLETION_ENDPOINT,
    ].some((endpoint) => req.path.startsWith(endpoint))
  );
}
@@ -72,16 +76,23 @@ export function sendProxyError(
  });
}

export const handleProxyError: httpProxy.ErrorCallback = (err, req, res) => {
  req.log.error(err, `Error during http-proxy-middleware request`);
  classifyErrorAndSend(err, req as Request, res as Response);
};

/**
 * Handles errors thrown during preparation of a proxy request (before it is
 * sent to the upstream API), typically due to validation, quota, or other
 * pre-flight checks. Depending on the error class, this function will send an
 * appropriate error response to the client, streaming it if necessary.
 */
export const classifyErrorAndSend = (
  err: Error,
  req: Request,
  res: Response
  res: Response | Socket
) => {
  if (res instanceof Socket) {
    // We should always have an Express response object here, but http-proxy's
    // ErrorCallback type says it could be just a Socket.
    req.log.error(err, "Caught error while proxying request to target but cannot send error response to client.");
    return res.destroy();
  }
  try {
    const { statusCode, statusMessage, userMessage, ...errorDetails } =
      classifyError(err);
@@ -227,6 +238,22 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
      // - choices[0].message.content
      // - choices[0].message with no content if model is invoking a tool
      return body.choices?.[0]?.message?.content || "";
    case "openai-responses":
      // Handle the original Responses API format
      if (body.output && Array.isArray(body.output)) {
        // Look for a message type in the output array
        for (const item of body.output) {
          if (item.type === "message" && item.content && Array.isArray(item.content)) {
            // Extract text content from each content item
            return item.content
              .filter((contentItem: any) => contentItem.type === "output_text")
              .map((contentItem: any) => contentItem.text)
              .join("");
          }
        }
      }
      // If we've been transformed to chat completion format already
      return body.choices?.[0]?.message?.content || "";
    case "mistral-text":
      return body.outputs?.[0]?.text || "";
    case "openai-text":
@@ -257,7 +284,15 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
      if ("choices" in body) {
        return body.choices[0].message.content;
      }
      return body.candidates[0].content.parts[0].text;
      const text = body.candidates[0].content?.parts?.[0]?.text;
      if (!text) {
        req.log.warn(
          { body: JSON.stringify(body) },
          "Received empty Google AI text completion"
        );
        return "";
      }
      return text;
    case "openai-image":
      return body.data?.map((item: any) => item.url).join("\n");
    default:
@@ -270,6 +305,7 @@ export function getModelFromBody(req: Request, resBody: Record<string, any>) {
  switch (format) {
    case "openai":
    case "openai-text":
    case "openai-responses":
      return resBody.model;
    case "mistral-ai":
    case "mistral-text":

@@ -1,44 +1,38 @@
import type { Request } from "express";
import type { ClientRequest } from "http";
import type { ProxyReqCallback } from "http-proxy";

export { createOnProxyReqHandler } from "./onproxyreq-factory";
import { ProxyReqManager } from "./proxy-req-manager";
export {
  createPreprocessorMiddleware,
  createEmbeddingsPreprocessorMiddleware,
} from "./preprocessor-factory";

// Express middleware (runs before http-proxy-middleware, can be async)
export { addAzureKey } from "./preprocessors/add-azure-key";
// Preprocessors (runs before request is queued, usually body transformation/validation)
export { applyQuotaLimits } from "./preprocessors/apply-quota-limits";
export { blockZoomerOrigins } from "./preprocessors/block-zoomer-origins";
export { countPromptTokens } from "./preprocessors/count-prompt-tokens";
export { languageFilter } from "./preprocessors/language-filter";
export { setApiFormat } from "./preprocessors/set-api-format";
export { signAwsRequest } from "./preprocessors/sign-aws-request";
export { signGcpRequest } from "./preprocessors/sign-vertex-ai-request";
export { transformOutboundPayload } from "./preprocessors/transform-outbound-payload";
export { validateContextSize } from "./preprocessors/validate-context-size";
export { validateModelFamily } from "./preprocessors/validate-model-family";
export { validateVision } from "./preprocessors/validate-vision";

// http-proxy-middleware callbacks (runs on onProxyReq, cannot be async)
export { addAnthropicPreamble } from "./onproxyreq/add-anthropic-preamble";
export { addKey, addKeyForEmbeddingsRequest } from "./onproxyreq/add-key";
export { blockZoomerOrigins } from "./onproxyreq/block-zoomer-origins";
export { checkModelFamily } from "./onproxyreq/check-model-family";
export { finalizeBody } from "./onproxyreq/finalize-body";
export { finalizeSignedRequest } from "./onproxyreq/finalize-signed-request";
export { stripHeaders } from "./onproxyreq/strip-headers";
// Proxy request mutators (runs every time request is dequeued, before proxying, usually for auth/signing)
export { addKey, addKeyForEmbeddingsRequest } from "./mutators/add-key";
export { addAzureKey } from "./mutators/add-azure-key";
export { finalizeBody } from "./mutators/finalize-body";
export { finalizeSignedRequest } from "./mutators/finalize-signed-request";
export { signAwsRequest } from "./mutators/sign-aws-request";
export { signGcpRequest } from "./mutators/sign-vertex-ai-request";
export { stripHeaders } from "./mutators/strip-headers";

/**
 * Middleware that runs prior to the request being handled by http-proxy-
 * middleware.
 * Middleware that runs prior to the request being queued or handled by
 * http-proxy-middleware. You will not have access to the proxied
 * request/response objects since they have not yet been sent to the API.
 *
 * Async functions can be used here, but you will not have access to the proxied
 * request/response objects, nor the data set by ProxyRequestMiddleware
 * functions as they have not yet been run.
 *
 * User will have been authenticated by the time this middleware runs, but your
 * request won't have been assigned an API key yet.
 * User will have been authenticated by the proxy's gatekeeper, but the request
 * won't have been assigned an upstream API key yet.
 *
 * Note that these functions only run once ever per request, even if the request
 * is automatically retried by the request queue middleware.
@@ -46,17 +40,14 @@ export { stripHeaders } from "./onproxyreq/strip-headers";
export type RequestPreprocessor = (req: Request) => void | Promise<void>;

/**
 * Callbacks that run immediately before the request is sent to the API in
 * response to http-proxy-middleware's `proxyReq` event.
 * Middleware that runs immediately before the request is proxied to the
 * upstream API, after dequeueing the request from the request queue.
 *
 * Async functions cannot be used here as HPM's event emitter is not async and
 * will not wait for the promise to resolve before sending the request.
 *
 * Note that these functions may be run multiple times per request if the
 * first attempt is rate limited and the request is automatically retried by the
 * request queue middleware.
 * Because these middleware may be run multiple times per request if a retryable
 * error occurs and the request is put back in the queue, they must be idempotent.
 * A change manager is provided to allow the middleware to make changes to the
 * request which can be automatically reverted.
 */
export type HPMRequestCallback = ProxyReqCallback<ClientRequest, Request>;

export const forceModel = (model: string) => (req: Request) =>
  void (req.body.model = model);
export type ProxyReqMutator = (
  changeManager: ProxyReqManager
) => void | Promise<void>;
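
To make the idempotency contract concrete, here is a minimal sketch of a custom
mutator. It assumes only the ProxyReqManager surface used elsewhere in this
diff (request, setHeader); the attempt-counter field is hypothetical and not
part of this codebase:

import { ProxyReqMutator } from "./index";

// Tags each proxied attempt with a header. Because the header is set through
// the manager, the change is tracked and can be reverted automatically when
// the request is put back in the queue, so re-running the mutator is safe.
export const tagAttempt: ProxyReqMutator = (manager) => {
  const req = manager.request;
  const attempt = ((req as any).attemptCount ?? 0) + 1; // hypothetical field
  (req as any).attemptCount = attempt;
  manager.setHeader("x-proxy-attempt", String(attempt));
};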

+13 -7
@@ -3,14 +3,16 @@ import {
  AzureOpenAIKey,
  keyPool,
} from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
import { ProxyReqMutator } from "../index";

export const addAzureKey: RequestPreprocessor = (req) => {
export const addAzureKey: ProxyReqMutator = async (manager) => {
  const req = manager.request;
  const validAPIs: APIFormat[] = ["openai", "openai-image"];
  const apisValid = [req.outboundApi, req.inboundApi].every((api) =>
    validAPIs.includes(api)
  );
  const serviceValid = req.service === "azure";

  if (!apisValid || !serviceValid) {
    throw new Error("addAzureKey called on invalid request");
  }
@@ -22,11 +24,15 @@ export const addAzureKey: RequestPreprocessor = (req) => {
  const model = req.body.model.startsWith("azure-")
    ? req.body.model
    : `azure-${req.body.model}`;

  req.key = keyPool.get(model, "azure");
  // TODO: untracked mutation to body, I think this should just be a
  // RequestPreprocessor because we don't need to do it every dequeue.
  req.body.model = model;

  const key = keyPool.get(model, "azure");
  manager.setKey(key);

  // Handles the sole Azure API deviation from the OpenAI spec (that I know of)
  // TODO: this should also probably be a RequestPreprocessor
  const notNullOrUndefined = (x: any) => x !== null && x !== undefined;
  if ([req.body.logprobs, req.body.top_logprobs].some(notNullOrUndefined)) {
    // OpenAI wants logprobs: true/false and top_logprobs: number
@@ -43,7 +49,7 @@ export const addAzureKey: RequestPreprocessor = (req) => {
  }

  req.log.info(
    { key: req.key.hash, model },
    { key: key.hash, model },
    "Assigned Azure OpenAI key to request"
  );

@@ -55,7 +61,7 @@ export const addAzureKey: RequestPreprocessor = (req) => {
  const apiVersion =
    req.outboundApi === "openai" ? "2023-09-01-preview" : "2024-02-15-preview";

  req.signedRequest = {
  manager.setSignedRequest({
    method: "POST",
    protocol: "https:",
    hostname: `${resourceName}.openai.azure.com`,
@@ -66,7 +72,7 @@ export const addAzureKey: RequestPreprocessor = (req) => {
      ["api-key"]: apiKey,
    },
    body: JSON.stringify(req.body),
  };
  });
};

function getCredentialsFromKey(key: AzureOpenAIKey) {

+21 -13
@@ -1,39 +1,47 @@
import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
import { ProxyReqMutator } from "../index";

export const addGoogleAIKey: RequestPreprocessor = (req) => {
export const addGoogleAIKey: ProxyReqMutator = (manager) => {
  const req = manager.request;
  const inboundValid =
    req.inboundApi === "openai" || req.inboundApi === "google-ai";
  const outboundValid = req.outboundApi === "google-ai";

  const serviceValid = req.service === "google-ai";
  if (!inboundValid || !outboundValid || !serviceValid) {
    throw new Error("addGoogleAIKey called on invalid request");
  }

  const model = req.body.model;
  req.isStreaming = req.isStreaming || req.body.stream;
  req.key = keyPool.get(model, "google-ai");
  const key = keyPool.get(model, "google-ai");
  manager.setKey(key);

  req.log.info(
    { key: req.key.hash, model, stream: req.isStreaming },
    { key: key.hash, model, stream: req.isStreaming },
    "Assigned Google AI API key to request"
  );

  // https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:generateContent?key=$API_KEY
  // https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}
  const payload = { ...req.body, stream: undefined, model: undefined };

  req.signedRequest = {
  // For OpenAI -> Google conversion we don't actually have the API version
  const apiVersion = req.params.apiVersion || "v1beta";

  // TODO: this isn't actually signed, so the manager api is a little unclear
  // with the ProxyReqManager refactor, it's probably no longer necessary to
  // do this because we can modify the path using Manager.setPath.
  manager.setSignedRequest({
    method: "POST",
    protocol: "https:",
    hostname: "generativelanguage.googleapis.com",
    path: `/v1beta/models/${model}:${
      req.isStreaming ? "streamGenerateContent" : "generateContent"
    }?key=${req.key.key}`,
    path: `/${apiVersion}/models/${model}:${
      req.isStreaming ? "streamGenerateContent?alt=sse&" : "generateContent?"
    }key=${key.key}`,
    headers: {
      ["host"]: `generativelanguage.googleapis.com`,
      ["content-type"]: "application/json",
    },
    body: JSON.stringify(payload),
  };
  });
};
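
For illustration, evaluating the new path template with placeholder values
(model and key are invented) yields the documented Generative Language API
URLs; the added alt=sse parameter asks the API to frame the streaming response
as server-sent events:

const model = "gemini-1.5-pro"; // placeholder
const apiVersion = "v1beta";
const isStreaming = true;
const path = `/${apiVersion}/models/${model}:${
  isStreaming ? "streamGenerateContent?alt=sse&" : "generateContent?"
}key=API_KEY`;
// => "/v1beta/models/gemini-1.5-pro:streamGenerateContent?alt=sse&key=API_KEY"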

+44 -19
@@ -2,10 +2,12 @@ import { AnthropicChatMessage } from "../../../../shared/api-schemas";
import { containsImageContent } from "../../../../shared/api-schemas/anthropic";
import { Key, OpenAIKey, keyPool } from "../../../../shared/key-management";
import { isEmbeddingsRequest } from "../../common";
import { HPMRequestCallback } from "../index";
import { assertNever } from "../../../../shared/utils";
import { ProxyReqMutator } from "../index";

export const addKey: ProxyReqMutator = (manager) => {
  const req = manager.request;

export const addKey: HPMRequestCallback = (proxyReq, req) => {
  let assignedKey: Key;
  const { service, inboundApi, outboundApi, body } = req;

@@ -29,7 +31,9 @@ export const addKey: HPMRequestCallback = (proxyReq, req) => {
  }

  if (inboundApi === outboundApi) {
    assignedKey = keyPool.get(body.model, service, needsMultimodal);
    // Pass streaming information for GPT-5 models that require verified keys for streaming
    const isStreaming = body.stream === true;
    assignedKey = keyPool.get(body.model, service, needsMultimodal, isStreaming);
  } else {
    switch (outboundApi) {
      // If we are translating between API formats we may need to select a model
@@ -47,7 +51,12 @@ export const addKey: HPMRequestCallback = (proxyReq, req) => {
        assignedKey = keyPool.get("gpt-3.5-turbo-instruct", service);
        break;
      case "openai-image":
        assignedKey = keyPool.get("dall-e-3", service);
        // Use the actual model from the request body instead of defaulting to dall-e-3
        // This ensures that gpt-image-1 requests get keys that are verified for gpt-image-1
        assignedKey = keyPool.get(body.model, service);
        break;
      case "openai-responses":
        assignedKey = keyPool.get(body.model, service);
        break;
      case "openai":
        throw new Error(
@@ -58,7 +67,7 @@ export const addKey: HPMRequestCallback = (proxyReq, req) => {
    }
  }

  req.key = assignedKey;
  manager.setKey(assignedKey);
  req.log.info(
    { key: assignedKey.hash, model: body.model, inboundApi, outboundApi },
    "Assigned key to request"
@@ -67,21 +76,39 @@ export const addKey: HPMRequestCallback = (proxyReq, req) => {
  // TODO: KeyProvider should assemble all necessary headers
  switch (assignedKey.service) {
    case "anthropic":
      proxyReq.setHeader("X-API-Key", assignedKey.key);
      manager.setHeader("X-API-Key", assignedKey.key);
      if (!manager.request.headers["anthropic-version"]) {
        manager.setHeader("anthropic-version", "2023-06-01");
      }
      break;
    case "openai":
      const key: OpenAIKey = assignedKey as OpenAIKey;
      if (key.organizationId) {
        proxyReq.setHeader("OpenAI-Organization", key.organizationId);
      if (key.organizationId && !key.key.includes("svcacct")) {
        manager.setHeader("OpenAI-Organization", key.organizationId);
      }
      proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      break;
    case "mistral-ai":
      proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      break;
    case "azure":
      const azureKey = assignedKey.key;
      proxyReq.setHeader("api-key", azureKey);
      manager.setHeader("api-key", azureKey);
      break;
    case "deepseek":
      manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      break;
    case "xai":
      manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      break;
    case "cohere":
      manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      break;
    case "qwen":
      manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      break;
    case "moonshot":
      manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      break;
    case "aws":
    case "gcp":
@@ -96,10 +123,8 @@ export const addKey: HPMRequestCallback = (proxyReq, req) => {
 * Special case for embeddings requests which don't go through the normal
 * request pipeline.
 */
export const addKeyForEmbeddingsRequest: HPMRequestCallback = (
  proxyReq,
  req
) => {
export const addKeyForEmbeddingsRequest: ProxyReqMutator = (manager) => {
  const req = manager.request;
  if (!isEmbeddingsRequest(req)) {
    throw new Error(
      "addKeyForEmbeddingsRequest called on non-embeddings request"
@@ -110,18 +135,18 @@ export const addKeyForEmbeddingsRequest: HPMRequestCallback = (
    throw new Error("Embeddings requests must be from OpenAI");
  }

  req.body = { input: req.body.input, model: "text-embedding-ada-002" };
  manager.setBody({ input: req.body.input, model: "text-embedding-ada-002" });

  const key = keyPool.get("text-embedding-ada-002", "openai") as OpenAIKey;

  req.key = key;
  manager.setKey(key);
  req.log.info(
    { key: key.hash, toApi: req.outboundApi },
    "Assigned Turbo key to embeddings request"
  );

  proxyReq.setHeader("Authorization", `Bearer ${key.key}`);
  manager.setHeader("Authorization", `Bearer ${key.key}`);
  if (key.organizationId) {
    proxyReq.setHeader("OpenAI-Organization", key.organizationId);
    manager.setHeader("OpenAI-Organization", key.organizationId);
  }
};

@@ -0,0 +1,67 @@
import type { ProxyReqMutator } from "../index";

/** Finalize the rewritten request body. Must be the last mutator. */
export const finalizeBody: ProxyReqMutator = (manager) => {
  const req = manager.request;

  if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
    // For image generation requests, remove stream flag.
    if (req.outboundApi === "openai-image") {
      delete req.body.stream;
    }
    // For anthropic text to chat requests, remove undefined prompt.
    if (req.outboundApi === "anthropic-chat") {
      delete req.body.prompt;
    }
    // For OpenAI Responses API, ensure messages is in the correct format
    if (req.outboundApi === "openai-responses") {
      // Format messages for the Responses API
      if (req.body.messages) {
        req.log.info("Formatting messages for Responses API in finalizeBody");
        // The Responses API expects input to be an array, not an object
        req.body.input = req.body.messages;
        delete req.body.messages;
      } else if (req.body.input && req.body.input.messages) {
        req.log.info("Reformatting input.messages for Responses API in finalizeBody");
        // If input already exists but contains a messages object, replace input with the messages array
        req.body.input = req.body.input.messages;
      }

      // Final check to ensure max_completion_tokens is converted to max_output_tokens
      if (req.body.max_completion_tokens) {
        req.log.info("Converting max_completion_tokens to max_output_tokens in finalizeBody");
        if (!req.body.max_output_tokens) {
          req.body.max_output_tokens = req.body.max_completion_tokens;
        }
        delete req.body.max_completion_tokens;
      }

      // Final check to ensure max_tokens is converted to max_output_tokens
      if (req.body.max_tokens) {
        req.log.info("Converting max_tokens to max_output_tokens in finalizeBody");
        if (!req.body.max_output_tokens) {
          req.body.max_output_tokens = req.body.max_tokens;
        }
        delete req.body.max_tokens;
      }

      // Remove all parameters not supported by Responses API
      const unsupportedParams = [
        'frequency_penalty',
        'presence_penalty',
      ];

      for (const param of unsupportedParams) {
        if (req.body[param] !== undefined) {
          req.log.info(`Removing unsupported parameter for Responses API: ${param}`);
          delete req.body[param];
        }
      }
    }

    const serialized =
      typeof req.body === "string" ? req.body : JSON.stringify(req.body);
    manager.setHeader("Content-Length", String(Buffer.byteLength(serialized)));
    manager.setBody(serialized);
  }
};
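
As a worked example of the rewrites above (all values invented for
illustration), a chat-style body bound for the Responses API would pass through
finalizeBody like this:

// Hypothetical input body for an openai-responses request:
const before = {
  model: "gpt-4.1", // placeholder model name
  messages: [{ role: "user", content: "Hi" }],
  max_tokens: 256,
  frequency_penalty: 0.5,
};

// After finalizeBody: messages -> input, max_tokens -> max_output_tokens,
// unsupported penalty params removed; the result is then serialized and
// Content-Length is computed with Buffer.byteLength over the JSON string.
const after = {
  model: "gpt-4.1",
  input: [{ role: "user", content: "Hi" }],
  max_output_tokens: 256,
};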

@@ -0,0 +1,32 @@
import { ProxyReqMutator } from "../index";

/**
 * For AWS/GCP/Azure/Google requests, the body is signed earlier in the request
 * pipeline, before the proxy middleware. This function just assigns the path
 * and headers to the proxy request.
 */
export const finalizeSignedRequest: ProxyReqMutator = (manager) => {
  const req = manager.request;
  if (!req.signedRequest) {
    throw new Error("Expected req.signedRequest to be set");
  }

  // The path depends on the selected model and the assigned key's region.
  manager.setPath(req.signedRequest.path);

  // Amazon doesn't want extra headers, so we need to remove all of them and
  // reassign only the ones specified in the signed request.
  const headers = req.signedRequest.headers;
  Object.keys(headers).forEach((key) => {
    manager.removeHeader(key);
  });
  Object.entries(req.signedRequest.headers).forEach(([key, value]) => {
    manager.setHeader(key, value);
  });
  const serialized =
    typeof req.signedRequest.body === "string"
      ? req.signedRequest.body
      : JSON.stringify(req.signedRequest.body);
  manager.setHeader("Content-Length", String(Buffer.byteLength(serialized)));
  manager.setBody(serialized);
};

+21 -24
@@ -7,11 +7,11 @@ import {
  AnthropicV1MessagesSchema,
} from "../../../../shared/api-schemas";
import { AwsBedrockKey, keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
import {
  AWSMistralV1ChatCompletionsSchema,
  AWSMistralV1TextCompletionsSchema,
} from "../../../../shared/api-schemas/mistral-ai";
import { ProxyReqMutator } from "../index";

const AMZ_HOST =
  process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";
@@ -21,16 +21,18 @@
 * request object in place to fix the path.
 * This happens AFTER request transformation.
 */
export const signAwsRequest: RequestPreprocessor = async (req) => {
export const signAwsRequest: ProxyReqMutator = async (manager) => {
  const req = manager.request;
  const { model, stream } = req.body;
  req.key = keyPool.get(model, "aws");
  const key = keyPool.get(model, "aws") as AwsBedrockKey;
  manager.setKey(key);

  req.isStreaming = stream === true || stream === "true";

  // same as addAnthropicPreamble for non-AWS requests, but has to happen here
  if (req.outboundApi === "anthropic-text") {
    let preamble = req.body.prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
    req.body.prompt = preamble + req.body.prompt;
    let system = req.body.system ?? "";
    if (Array.isArray(system)) {
      system = system
        .map((m: { type: string; text: string }) => m.text)
        .join("\n");
      req.body.system = system;
    }

    const credential = getCredentialParts(req);
@@ -38,15 +40,13 @@ export const signAwsRequest: RequestPreprocessor = async (req) => {

  // AWS only uses 2023-06-01 and does not actually check this header, but we
  // set it so that the stream adapter always selects the correct transformer.
  req.headers["anthropic-version"] = "2023-06-01";
  manager.setHeader("anthropic-version", "2023-06-01");

  // If our key has an inference profile compatible with the requested model,
  // we want to use the inference profile instead of the model ID when calling
  // InvokeModel as that will give us higher rate limits.
  const profile =
    (req.key as AwsBedrockKey).inferenceProfileIds.find((p) =>
      p.includes(model)
    ) || model;
    key.inferenceProfileIds.find((p) => p.includes(model)) || model;

  // Uses the AWS SDK to sign a request, then modifies our HPM proxy request
  // with the headers generated by the SDK.
@@ -59,7 +59,7 @@ export const signAwsRequest: RequestPreprocessor = async (req) => {
      ["Host"]: host,
      ["content-type"]: "application/json",
    },
    body: JSON.stringify(applyAwsStrictValidation(req)),
    body: JSON.stringify(getStrictlyValidatedBodyForAws(req)),
  });

  if (stream) {
@@ -68,19 +68,13 @@ export const signAwsRequest: RequestPreprocessor = async (req) => {
    newRequest.headers["accept"] = "*/*";
  }

  const { key, body, inboundApi, outboundApi } = req;
  const { body, inboundApi, outboundApi } = req;
  req.log.info(
    {
      key: key.hash,
      model: body.model,
      inferenceProfile: profile,
      inboundApi,
      outboundApi,
    },
    { key: key.hash, model: body.model, profile, inboundApi, outboundApi },
    "Assigned AWS credentials to request"
  );

  req.signedRequest = await sign(newRequest, getCredentialParts(req));
  manager.setSignedRequest(await sign(newRequest, getCredentialParts(req)));
};

type Credential = {
@@ -116,7 +110,7 @@ async function sign(request: HttpRequest, credential: Credential) {
  return signer.sign(request);
}

function applyAwsStrictValidation(req: Request): unknown {
function getStrictlyValidatedBodyForAws(req: Readonly<Request>): unknown {
  // AWS uses vendor API formats but imposes additional (more strict) validation
  // rules, namely that extraneous parameters are not allowed. We will validate
  // using the vendor's zod schema but apply `.strip` to ensure that any
@@ -144,6 +138,9 @@ function applyAwsStrictValidation(req: Request): unknown {
      temperature: true,
      top_k: true,
      top_p: true,
      tools: true,
      tool_choice: true,
      thinking: true
    })
    .strip()
    .parse(req.body);
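
The .pick(...).strip().parse(...) pattern above is standard zod; a
self-contained sketch of the same idea, with a schema and fields invented for
illustration:

import { z } from "zod";

const Schema = z.object({
  max_tokens: z.number(),
  temperature: z.number().optional(),
  metadata: z.any().optional(),
});

// Keep only the whitelisted fields and silently drop everything else, which
// is what AWS's stricter validation effectively requires.
const body = Schema.pick({ max_tokens: true, temperature: true })
  .strip()
  .parse({ max_tokens: 100, temperature: 0.7, metadata: { a: 1 } });
// => { max_tokens: 100, temperature: 0.7 }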

@@ -0,0 +1,78 @@
import { AnthropicV1MessagesSchema } from "../../../../shared/api-schemas";
import { GcpKey, keyPool } from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";
import {
  getCredentialsFromGcpKey,
  refreshGcpAccessToken,
} from "../../../../shared/key-management/gcp/oauth";

const GCP_HOST = process.env.GCP_HOST || "%REGION%-aiplatform.googleapis.com";

export const signGcpRequest: ProxyReqMutator = async (manager) => {
  const req = manager.request;
  const serviceValid = req.service === "gcp";
  if (!serviceValid) {
    throw new Error("addVertexAIKey called on invalid request");
  }

  if (!req.body?.model) {
    throw new Error("You must specify a model with your request.");
  }

  const { model } = req.body;
  const key: GcpKey = keyPool.get(model, "gcp") as GcpKey;

  if (!key.accessToken || Date.now() > key.accessTokenExpiresAt) {
    const [token, durationSec] = await refreshGcpAccessToken(key);
    keyPool.update(key, {
      accessToken: token,
      accessTokenExpiresAt: Date.now() + durationSec * 1000 * 0.95,
    } as GcpKey);
    // nb: key received by `get` is a clone and will not have the new access
    // token we just set, so it must be manually updated.
    key.accessToken = token;
  }

  manager.setKey(key);
  req.log.info({ key: key.hash, model }, "Assigned GCP key to request");

  // TODO: This should happen in transform-outbound-payload.ts
  // TODO: Support tools
  let strippedParams: Record<string, unknown>;
  strippedParams = AnthropicV1MessagesSchema.pick({
    messages: true,
    system: true,
    max_tokens: true,
    stop_sequences: true,
    temperature: true,
    top_k: true,
    top_p: true,
    stream: true,
    tools: true,
    tool_choice: true,
    thinking: true
  })
    .strip()
    .parse(req.body);
  strippedParams.anthropic_version = "vertex-2023-10-16";

  const credential = await getCredentialsFromGcpKey(key);

  const host = GCP_HOST.replace("%REGION%", credential.region);
  // GCP doesn't use the anthropic-version header, but we set it to ensure the
  // stream adapter selects the correct transformer.
  manager.setHeader("anthropic-version", "2023-06-01");

  manager.setSignedRequest({
    method: "POST",
    protocol: "https:",
    hostname: host,
    path: `/v1/projects/${credential.projectId}/locations/${credential.region}/publishers/anthropic/models/${model}:streamRawPredict`,
    headers: {
      ["host"]: host,
      ["content-type"]: "application/json",
      ["authorization"]: `Bearer ${key.accessToken}`,
    },
    body: JSON.stringify(strippedParams),
  });
};

@@ -0,0 +1,33 @@
import { ProxyReqMutator } from "../index";

/**
 * Removes origin and referer headers before sending the request to the API for
 * privacy reasons.
 */
export const stripHeaders: ProxyReqMutator = (manager) => {
  manager.removeHeader("origin");
  manager.removeHeader("referer");

  // Some APIs refuse requests coming from browsers to discourage embedding
  // API keys in client-side code, so we must remove all CORS/fetch headers.
  Object.keys(manager.request.headers).forEach((key) => {
    if (key.startsWith("sec-")) {
      manager.removeHeader(key);
    }
  });

  manager.removeHeader("tailscale-user-login");
  manager.removeHeader("tailscale-user-name");
  manager.removeHeader("tailscale-headers-info");
  manager.removeHeader("tailscale-user-profile-pic");
  manager.removeHeader("cf-connecting-ip");
  manager.removeHeader("cf-ray");
  manager.removeHeader("cf-visitor");
  manager.removeHeader("cf-warp-tag-id");
  manager.removeHeader("forwarded");
  manager.removeHeader("true-client-ip");
  manager.removeHeader("x-forwarded-for");
  manager.removeHeader("x-forwarded-host");
  manager.removeHeader("x-forwarded-proto");
  manager.removeHeader("x-real-ip");
};
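
The removal policy sketched as a standalone predicate, for reference; note this
slightly generalizes the mutator above by prefix-matching cf-* and tailscale-*
instead of listing each header explicitly:

const SENSITIVE = new Set([
  "origin", "referer", "forwarded", "true-client-ip", "x-real-ip",
  "x-forwarded-for", "x-forwarded-host", "x-forwarded-proto",
]);
const shouldStrip = (name: string) => {
  const n = name.toLowerCase();
  return (
    SENSITIVE.has(n) ||
    n.startsWith("sec-") ||      // CORS/fetch metadata headers
    n.startsWith("cf-") ||       // Cloudflare client-identifying headers
    n.startsWith("tailscale-")   // Tailscale identity headers
  );
};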

@@ -1,45 +0,0 @@
import {
  applyQuotaLimits,
  blockZoomerOrigins,
  checkModelFamily,
  HPMRequestCallback,
  stripHeaders,
} from "./index";

type ProxyReqHandlerFactoryOptions = { pipeline: HPMRequestCallback[] };

/**
 * Returns an http-proxy-middleware request handler that runs the given set of
 * onProxyReq callback functions in sequence.
 *
 * These will run each time a request is proxied, including on automatic retries
 * by the queue after encountering a rate limit.
 */
export const createOnProxyReqHandler = ({
  pipeline,
}: ProxyReqHandlerFactoryOptions): HPMRequestCallback => {
  const callbackPipeline = [
    checkModelFamily,
    applyQuotaLimits,
    blockZoomerOrigins,
    stripHeaders,
    ...pipeline,
  ];
  return (proxyReq, req, res, options) => {
    // The streaming flag must be set before any other onProxyReq handler runs,
    // as it may influence the behavior of subsequent handlers.
    // Image generation requests can't be streamed.
    // TODO: this flag is set in too many places
    req.isStreaming =
      req.isStreaming || req.body.stream === true || req.body.stream === "true";
    req.body.stream = req.isStreaming;

    try {
      for (const fn of callbackPipeline) {
        fn(proxyReq, req, res, options);
      }
    } catch (error) {
      proxyReq.destroy(error);
    }
  };
};

@@ -1,33 +0,0 @@
import { AnthropicKey, Key } from "../../../../shared/key-management";
import { isTextGenerationRequest } from "../../common";
import { HPMRequestCallback } from "../index";

/**
 * Some keys require the prompt to start with `\n\nHuman:`. There is no way to
 * know this without trying to send the request and seeing if it fails. If a
 * key is marked as requiring a preamble, it will be added here.
 */
export const addAnthropicPreamble: HPMRequestCallback = (_proxyReq, req) => {
  if (
    !isTextGenerationRequest(req) ||
    req.key?.service !== "anthropic" ||
    req.outboundApi !== "anthropic-text"
  ) {
    return;
  }

  let preamble = "";
  let prompt = req.body.prompt;
  assertAnthropicKey(req.key);
  if (req.key.requiresPreamble && prompt) {
    preamble = prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
    req.log.debug({ key: req.key.hash, preamble }, "Adding preamble to prompt");
  }
  req.body.prompt = preamble + prompt;
};

function assertAnthropicKey(key: Key): asserts key is AnthropicKey {
  if (key.service !== "anthropic") {
    throw new Error(`Expected an Anthropic key, got '${key.service}'`);
  }
}

@@ -1,23 +0,0 @@
import { fixRequestBody } from "http-proxy-middleware";
import type { HPMRequestCallback } from "../index";

/** Finalize the rewritten request body. Must be the last rewriter. */
export const finalizeBody: HPMRequestCallback = (proxyReq, req) => {
  if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
    // For image generation requests, remove stream flag.
    if (req.outboundApi === "openai-image") {
      delete req.body.stream;
    }
    // For anthropic text to chat requests, remove undefined prompt.
    if (req.outboundApi === "anthropic-chat") {
      delete req.body.prompt;
    }

    const updatedBody = JSON.stringify(req.body);
    proxyReq.setHeader("Content-Length", Buffer.byteLength(updatedBody));
    (req as any).rawBody = Buffer.from(updatedBody);

    // body-parser and http-proxy-middleware don't play nice together
    fixRequestBody(proxyReq, req);
  }
};

@@ -1,26 +0,0 @@
import type { HPMRequestCallback } from "../index";

/**
 * For AWS/GCP/Azure/Google requests, the body is signed earlier in the request
 * pipeline, before the proxy middleware. This function just assigns the path
 * and headers to the proxy request.
 */
export const finalizeSignedRequest: HPMRequestCallback = (proxyReq, req) => {
  if (!req.signedRequest) {
    throw new Error("Expected req.signedRequest to be set");
  }

  // The path depends on the selected model and the assigned key's region.
  proxyReq.path = req.signedRequest.path;

  // Amazon doesn't want extra headers, so we need to remove all of them and
  // reassign only the ones specified in the signed request.
  proxyReq.getRawHeaderNames().forEach(proxyReq.removeHeader.bind(proxyReq));
  Object.entries(req.signedRequest.headers).forEach(([key, value]) => {
    proxyReq.setHeader(key, value);
  });

  // Don't use fixRequestBody here because it adds a content-length header.
  // Amazon doesn't want that and it breaks the signature.
  proxyReq.write(req.signedRequest.body);
};

@@ -1,21 +0,0 @@
import { HPMRequestCallback } from "../index";

/**
 * Removes origin and referer headers before sending the request to the API for
 * privacy reasons.
 */
export const stripHeaders: HPMRequestCallback = (proxyReq) => {
  proxyReq.setHeader("origin", "");
  proxyReq.setHeader("referer", "");
  proxyReq.removeHeader("tailscale-user-login");
  proxyReq.removeHeader("tailscale-user-name");
  proxyReq.removeHeader("tailscale-headers-info");
  proxyReq.removeHeader("tailscale-user-profile-pic");
  proxyReq.removeHeader("cf-connecting-ip");
  proxyReq.removeHeader("forwarded");
  proxyReq.removeHeader("true-client-ip");
  proxyReq.removeHeader("x-forwarded-for");
  proxyReq.removeHeader("x-forwarded-host");
  proxyReq.removeHeader("x-forwarded-proto");
  proxyReq.removeHeader("x-real-ip");
};

@@ -4,12 +4,15 @@ import { initializeSseStream } from "../../../shared/streaming";
import { classifyErrorAndSend } from "../common";
import {
  RequestPreprocessor,
  blockZoomerOrigins,
  countPromptTokens,
  languageFilter,
  setApiFormat,
  transformOutboundPayload,
  validateContextSize,
  validateModelFamily,
  validateVision,
  applyQuotaLimits,
} from ".";

type RequestPreprocessorOptions = {
@@ -30,14 +33,15 @@ type RequestPreprocessorOptions = {
/**
 * Returns a middleware function that processes the request body into the given
 * API format, and then sequentially runs the given additional preprocessors.
 * These should be used for validation and transformations that only need to
 * happen once per request.
 *
 * These run first in the request lifecycle, a single time per request before it
 * is added to the request queue. They aren't run again if the request is
 * re-attempted after a rate limit.
 *
 * To run a preprocessor on every re-attempt, pass it to createQueueMiddleware.
 * It will run after these preprocessors, but before the request is sent to
 * http-proxy-middleware.
 * To run functions against requests every time they are re-attempted, write a
 * ProxyReqMutator and pass it to createQueuedProxyMiddleware instead.
 */
export const createPreprocessorMiddleware = (
  apiFormat: Parameters<typeof setApiFormat>[0],
@@ -45,6 +49,7 @@
): RequestHandler => {
  const preprocessors: RequestPreprocessor[] = [
    setApiFormat(apiFormat),
    blockZoomerOrigins,
    ...(beforeTransform ?? []),
    transformOutboundPayload,
    countPromptTokens,
@@ -52,6 +57,8 @@
    ...(afterTransform ?? []),
    validateContextSize,
    validateVision,
    validateModelFamily,
    applyQuotaLimits,
  ];
  return async (...args) => executePreprocessors(preprocessors, args);
};
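
Putting the factory together with the hooks used by the Google AI router
earlier in this diff, the effective one-time pipeline for a request is ordered
as follows. This is a sketch of ordering only, not new behavior, and any
preprocessors hidden by the elided hunks run in between:

// createPreprocessorMiddleware(format, {
//   beforeTransform: [maybeReassignModel],
//   afterTransform: [checkAndBlockExperimentalModels],
// }) runs, in order:
const order = [
  "setApiFormat",                    // tag request with in/out API formats
  "blockZoomerOrigins",
  "maybeReassignModel",              // caller's beforeTransform hooks
  "transformOutboundPayload",
  "countPromptTokens",
  "checkAndBlockExperimentalModels", // caller's afterTransform hooks
  "validateContextSize",
  "validateVision",
  "validateModelFamily",
  "applyQuotaLimits",
];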

@@ -83,10 +90,10 @@ async function executePreprocessors(
    next();
  } catch (error) {
    if (error.constructor.name === "ZodError") {
      const msg = error?.issues
      const issues = error?.issues
        ?.map((issue: ZodIssue) => `${issue.path.join(".")}: ${issue.message}`)
        .join("; ");
      req.log.warn({ issues: msg }, "Prompt validation failed.");
      req.log.warn({ issues }, "Prompt failed preprocessor validation.");
    } else {
      req.log.error(error, "Error while executing request preprocessor");
    }
@@ -136,8 +143,15 @@ const handleTestMessage: RequestHandler = (req, res) => {
    completion: "Hello!",
    // anthropic chat
    content: [{ type: "text", text: "Hello!" }],
    // gemini
    candidates: [
      {
        content: { parts: [{ text: "Hello!" }] },
        finishReason: "stop",
      },
    ],
    proxy_note:
      "This response was generated by the proxy's test message handler and did not go to the API.",
      "SillyTavern connection test detected. Your prompt was not sent to the actual model and this response was generated by the proxy.",
    });
  }
};
@@ -152,10 +166,7 @@ function isTestMessage(body: any) {
      messages[0].content === "Hi"
    );
  } else if (contents) {
    return (
      contents.length === 1 &&
      contents[0].parts[0]?.text === "Hi"
    );
    return contents.length === 1 && contents[0].parts[0]?.text === "Hi";
  } else {
    return (
      prompt?.trim() === "Human: Hi\n\nAssistant:" ||
@@ -1,6 +1,6 @@
import { hasAvailableQuota } from "../../../../shared/users/user-store";
import { isImageGenerationRequest, isTextGenerationRequest } from "../../common";
import { HPMRequestCallback } from "../index";
import { RequestPreprocessor } from "../index";

export class QuotaExceededError extends Error {
  public quotaInfo: any;
@@ -11,7 +11,7 @@ export class QuotaExceededError extends Error {
  }
}

export const applyQuotaLimits: HPMRequestCallback = (_proxyReq, req) => {
export const applyQuotaLimits: RequestPreprocessor = (req) => {
  const subjectToQuota =
    isTextGenerationRequest(req) || isImageGenerationRequest(req);
  if (!subjectToQuota || !req.user) return;
@@ -34,4 +34,4 @@ export const applyQuotaLimits: HPMRequestCallback = (_proxyReq, req) => {
      }
    );
  }
};
};

+4 -4
@@ -1,6 +1,6 @@
import { HPMRequestCallback } from "../index";
import { RequestPreprocessor } from "../index";

const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai".split(",");
const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai,vip.jewproxy.tech,jewproxy.tech".split(",");

class ZoomerForbiddenError extends Error {
  constructor(message: string) {
@@ -13,8 +13,8 @@ class ZoomerForbiddenError extends Error {
 * Blocks requests from Janitor AI users with a fake, scary error message so I
 * stop getting emails asking for tech support.
 */
export const blockZoomerOrigins: HPMRequestCallback = (_proxyReq, req) => {
  const origin = req.headers.origin || req.headers.referer;
export const blockZoomerOrigins: RequestPreprocessor = (req) => {
  const origin = req.headers.origin || req.headers.referer || req.headers.host;
  if (origin && DISALLOWED_ORIGIN_SUBSTRINGS.some((s) => origin.includes(s))) {
    // Venus-derivatives send a test prompt to check if the proxy is working.
    // We don't want to block that just yet.
@@ -1,11 +1,18 @@
import { RequestPreprocessor } from "../index";
import { countTokens } from "../../../../shared/tokenization";
import { assertNever } from "../../../../shared/utils";
import { OpenAIChatMessage } from "../../../../shared/api-schemas";
import { GoogleAIChatMessage } from "../../../../shared/api-schemas/google-ai";
import {
  GoogleAIChatMessage,
  MistralAIChatMessage,
  OpenAIChatMessage,
} from "../../../../shared/api-schemas";
  AnthropicChatMessage,
  flattenAnthropicMessages,
} from "../../../../shared/api-schemas/anthropic";
import {
  MistralAIChatMessage,
  ContentItem,
  isMistralVisionModel
} from "../../../../shared/api-schemas/mistral-ai";
import { isGrokVisionModel } from "../../../../shared/api-schemas/xai";

/**
 * Given a request with an already-transformed body, counts the number of
@@ -17,7 +24,13 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {

  switch (service) {
    case "openai": {
      req.outputTokens = req.body.max_tokens;
      req.outputTokens = req.body.max_completion_tokens || req.body.max_tokens;
      const prompt: OpenAIChatMessage[] = req.body.messages;
      result = await countTokens({ req, prompt, service });
      break;
    }
    case "openai-responses": {
      req.outputTokens = req.body.max_completion_tokens || req.body.max_tokens;
      const prompt: OpenAIChatMessage[] = req.body.messages;
      result = await countTokens({ req, prompt, service });
      break;
@@ -55,9 +68,47 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
    case "mistral-ai":
    case "mistral-text": {
      req.outputTokens = req.body.max_tokens;
      const prompt: string | MistralAIChatMessage[] =
        req.body.messages ?? req.body.prompt;

      // Handle multimodal content (vision) in Mistral models
      const isVisionModel = isMistralVisionModel(req.body.model);
      const messages = req.body.messages;

      // Check if this is a vision request with images
      const hasImageContent = Array.isArray(messages) && messages.some(
        (msg: MistralAIChatMessage) => Array.isArray(msg.content) &&
          msg.content.some((item: ContentItem) => item.type === "image_url")
      );

      // For vision content, we add a fixed token count per image
      // This is an estimate as the actual token count depends on image size and complexity
      const TOKENS_PER_IMAGE = 1200; // Conservative estimate
      let imageTokens = 0;

      if (hasImageContent && Array.isArray(messages)) {
        // Count images in the request
        for (const msg of messages) {
          if (Array.isArray(msg.content)) {
            const imageCount = msg.content.filter(
              (item: ContentItem) => item.type === "image_url"
            ).length;
            imageTokens += imageCount * TOKENS_PER_IMAGE;
          }
        }

        req.log.debug(
          { imageCount: imageTokens / TOKENS_PER_IMAGE, tokenEstimate: imageTokens },
          "Estimated token count for Mistral vision images"
        );
      }

      const prompt: string | MistralAIChatMessage[] = messages ?? req.body.prompt;
      result = await countTokens({ req, prompt, service });

      // Add the image tokens to the total count
      if (imageTokens > 0) {
        result.token_count += imageTokens;
      }

      break;
    }
    case "openai-image": {
@@ -65,6 +116,10 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
      result = await countTokens({ req, service });
      break;
    }

    // Handle XAI (Grok) vision models
    // Since it uses the OpenAI API format, it's caught in the "openai" case,
    // but we need to add additional handling for image tokens after that
    default:
      assertNever(service);
  }
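
As a worked example of the image-token estimate (message contents invented): a
request whose two messages carry three image_url items in total adds
3 x 1200 = 3600 tokens on top of the text token count:

const TOKENS_PER_IMAGE = 1200;
const imagesPerMessage = [2, 1]; // hypothetical request
const imageTokens = imagesPerMessage.reduce(
  (sum, n) => sum + n * TOKENS_PER_IMAGE,
  0
);
// => 3600, added to result.token_count after countTokens runs on the text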

@@ -1,4 +1,5 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../../config";
import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
@@ -8,6 +9,7 @@ import {
  OpenAIChatMessage,
  flattenAnthropicMessages,
} from "../../../../shared/api-schemas";
import { GoogleAIV1GenerateContentSchema } from "../../../../shared/api-schemas/google-ai";

const rejectedClients = new Map<string, number>();

@@ -50,6 +52,10 @@ export const languageFilter: RequestPreprocessor = async (req) => {
  }
};

/*
TODO: this is not type safe and does not raise errors if request body zod schema
is changed.
*/
function getPromptFromRequest(req: Request) {
  const service = req.outboundApi;
  const body = req.body;
@@ -72,11 +78,17 @@ function getPromptFromRequest(req: Request) {
        .join("\n\n");
    case "anthropic-text":
    case "openai-text":
    case "openai-responses":
    case "openai-image":
    case "mistral-text":
      return body.prompt;
    case "google-ai":
      return body.prompt.text;
    case "google-ai": {
      const b = body as z.infer<typeof GoogleAIV1GenerateContentSchema>;
      return [
        b.systemInstruction?.parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text),
        ...b.contents.flatMap((c) => c.parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text)),
      ].join("\n");
    }
    default:
      assertNever(service);
  }

@@ -4,8 +4,22 @@ import { LLMService } from "../../../../shared/models";
import { RequestPreprocessor } from "../index";

export const setApiFormat = (api: {
  /**
   * The API format the user made the request in and expects the response to be
   * in.
   */
  inApi: Request["inboundApi"];
  /**
   * The API format the proxy will make the request in and expects the response
   * to be in. If different from `inApi`, the proxy will transform the user's
   * request body to this format, and will transform the response body or stream
   * events from this format.
   */
  outApi: APIFormat;
  /**
   * The service the request will be sent to, which determines authentication
   * and possibly the streaming transport.
   */
  service: LLMService;
}): RequestPreprocessor => {
  return function configureRequestApiFormat(req) {

@@ -1,201 +0,0 @@
import express from "express";
import crypto from "crypto";
import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
import { AnthropicV1MessagesSchema } from "../../../../shared/api-schemas";

const GCP_HOST = process.env.GCP_HOST || "%REGION%-aiplatform.googleapis.com";

export const signGcpRequest: RequestPreprocessor = async (req) => {
  const serviceValid = req.service === "gcp";
  if (!serviceValid) {
    throw new Error("addVertexAIKey called on invalid request");
  }

  if (!req.body?.model) {
    throw new Error("You must specify a model with your request.");
  }

  const { model, stream } = req.body;
  req.key = keyPool.get(model, "gcp");

  req.log.info({ key: req.key.hash, model }, "Assigned GCP key to request");

  req.isStreaming = String(stream) === "true";

  // TODO: This should happen in transform-outbound-payload.ts
  // TODO: Support tools
  let strippedParams: Record<string, unknown>;
  strippedParams = AnthropicV1MessagesSchema.pick({
    messages: true,
    system: true,
    max_tokens: true,
    stop_sequences: true,
    temperature: true,
    top_k: true,
    top_p: true,
    stream: true,
  })
    .strip()
    .parse(req.body);
  strippedParams.anthropic_version = "vertex-2023-10-16";

  const [accessToken, credential] = await getAccessToken(req);

  const host = GCP_HOST.replace("%REGION%", credential.region);
  // GCP doesn't use the anthropic-version header, but we set it to ensure the
  // stream adapter selects the correct transformer.
  req.headers["anthropic-version"] = "2023-06-01";

  req.signedRequest = {
    method: "POST",
    protocol: "https:",
    hostname: host,
    path: `/v1/projects/${credential.projectId}/locations/${credential.region}/publishers/anthropic/models/${model}:streamRawPredict`,
    headers: {
      ["host"]: host,
      ["content-type"]: "application/json",
      ["authorization"]: `Bearer ${accessToken}`,
    },
    body: JSON.stringify(strippedParams),
  };
};

async function getAccessToken(
  req: express.Request
): Promise<[string, Credential]> {
  // TODO: access token caching to reduce latency
  const credential = getCredentialParts(req);
  const signedJWT = await createSignedJWT(
    credential.clientEmail,
    credential.privateKey
  );
  const [accessToken, jwtError] = await exchangeJwtForAccessToken(signedJWT);
  if (accessToken === null) {
    req.log.warn(
      { key: req.key!.hash, jwtError },
      "Unable to get the access token"
    );
    throw new Error("The access token is invalid.");
  }
  return [accessToken, credential];
}

async function createSignedJWT(email: string, pkey: string): Promise<string> {
  let cryptoKey = await crypto.subtle.importKey(
    "pkcs8",
    str2ab(atob(pkey)),
    {
      name: "RSASSA-PKCS1-v1_5",
      hash: { name: "SHA-256" },
    },
    false,
    ["sign"]
  );

  const authUrl = "https://www.googleapis.com/oauth2/v4/token";
  const issued = Math.floor(Date.now() / 1000);
  const expires = issued + 600;

  const header = {
    alg: "RS256",
    typ: "JWT",
  };

  const payload = {
    iss: email,
    aud: authUrl,
    iat: issued,
    exp: expires,
    scope: "https://www.googleapis.com/auth/cloud-platform",
  };

  const encodedHeader = urlSafeBase64Encode(JSON.stringify(header));
  const encodedPayload = urlSafeBase64Encode(JSON.stringify(payload));

  const unsignedToken = `${encodedHeader}.${encodedPayload}`;

  const signature = await crypto.subtle.sign(
    "RSASSA-PKCS1-v1_5",
    cryptoKey,
    str2ab(unsignedToken)
  );

  const encodedSignature = urlSafeBase64Encode(signature);
  return `${unsignedToken}.${encodedSignature}`;
}

async function exchangeJwtForAccessToken(
  signedJwt: string
): Promise<[string | null, string]> {
  const authUrl = "https://www.googleapis.com/oauth2/v4/token";
  const params = {
    grant_type: "urn:ietf:params:oauth:grant-type:jwt-bearer",
    assertion: signedJwt,
  };

  const r = await fetch(authUrl, {
    method: "POST",
    headers: { "Content-Type": "application/x-www-form-urlencoded" },
    body: Object.entries(params)
      .map(([k, v]) => `${k}=${v}`)
      .join("&"),
  }).then((res) => res.json());

  if (r.access_token) {
    return [r.access_token, ""];
  }

  return [null, JSON.stringify(r)];
}

function str2ab(str: string): ArrayBuffer {
  const buffer = new ArrayBuffer(str.length);
  const bufferView = new Uint8Array(buffer);
  for (let i = 0; i < str.length; i++) {
    bufferView[i] = str.charCodeAt(i);
  }
  return buffer;
}

function urlSafeBase64Encode(data: string | ArrayBuffer): string {
  let base64: string;
  if (typeof data === "string") {
    base64 = btoa(
      encodeURIComponent(data).replace(/%([0-9A-F]{2})/g, (match, p1) =>
        String.fromCharCode(parseInt("0x" + p1, 16))
      )
    );
  } else {
    base64 = btoa(String.fromCharCode(...new Uint8Array(data)));
  }
  return base64.replace(/\+/g, "-").replace(/\//g, "_").replace(/=+$/, "");
}

type Credential = {
  projectId: string;
  clientEmail: string;
  region: string;
  privateKey: string;
};

function getCredentialParts(req: express.Request): Credential {
  const [projectId, clientEmail, region, rawPrivateKey] =
    req.key!.key.split(":");
  if (!projectId || !clientEmail || !region || !rawPrivateKey) {
    req.log.error(
      { key: req.key!.hash },
      "GCP_CREDENTIALS isn't correctly formatted; refer to the docs"
    );
    throw new Error("The key assigned to this request is invalid.");
  }

  const privateKey = rawPrivateKey
    .replace(
      /-----BEGIN PRIVATE KEY-----|-----END PRIVATE KEY-----|\r|\n|\\n/g,
      ""
    )
    .trim();

  return { projectId, clientEmail, region, privateKey };
}

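For reference (not part of the diff, and the values below are invented): given the split(":") in getCredentialParts, a GCP_CREDENTIALS entry is expected to look roughly like:

  // projectId:clientEmail:region:privateKey
  // "my-project:sa@my-project.iam.gserviceaccount.com:us-east5:MIIEvQIBADANBg..."
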
@@ -4,7 +4,7 @@ import {
  API_REQUEST_TRANSFORMERS,
} from "../../../../shared/api-schemas";
import { BadRequestError } from "../../../../shared/errors";
import { fixMistralPrompt } from "../../../../shared/api-schemas/mistral-ai";
import { fixMistralPrompt, isMistralVisionModel } from "../../../../shared/api-schemas/mistral-ai";
import {
  isImageGenerationRequest,
  isTextGenerationRequest,

@@ -30,20 +30,15 @@ export const transformOutboundPayload: RequestPreprocessor = async (req) => {
  }

  applyMistralPromptFixes(req);
  applyGoogleAIKeyTransforms(req);
  applyOpenAIResponsesTransform(req);

  // Native prompts are those which were already provided by the client in the
  // target API format. We don't need to transform them.
  const isNativePrompt = req.inboundApi === req.outboundApi;
  if (isNativePrompt) {
    const result = API_REQUEST_VALIDATORS[req.inboundApi].safeParse(req.body);
    if (!result.success) {
      req.log.warn(
        { issues: result.error.issues, body: req.body },
        "Native prompt request validation failed."
      );
      throw result.error;
    }
    req.body = result.data;
    const result = API_REQUEST_VALIDATORS[req.inboundApi].parse(req.body);
    req.body = result;
    return;
  }

@@ -62,6 +57,58 @@ export const transformOutboundPayload: RequestPreprocessor = async (req) => {
  );
};

// Handle OpenAI Responses API transformation
function applyOpenAIResponsesTransform(req: Request): void {
  if (req.outboundApi === "openai-responses") {
    req.log.info("Transforming request to OpenAI Responses API format");

    // Store the original body for reference if needed
    const originalBody = { ...req.body };

    // Map standard OpenAI chat completions format to Responses API format
    // The main differences are:
    // 1. Endpoint is /v1/responses instead of /v1/chat/completions
    // 2. 'messages' field moves to 'input.messages'

    // Move messages to input.messages
    if (req.body.messages && !req.body.input) {
      req.body.input = {
        messages: req.body.messages
      };
      delete req.body.messages;
    }

    // Keep all the original properties of the request but ensure compatibility
    // with Responses API specifics
    if (!req.body.previousResponseId && req.body.conversation_id) {
      req.body.previousResponseId = req.body.conversation_id;
      delete req.body.conversation_id;
    }

    // Convert max_tokens to max_output_tokens if present and not already set
    if (req.body.max_tokens && !req.body.max_output_tokens) {
      req.body.max_output_tokens = req.body.max_tokens;
      delete req.body.max_tokens;
    }

    // Set the correct tools format if needed
    if (req.body.tools) {
      // Tools structure is maintained but might need conversion if non-standard
      if (!req.body.tools.some((tool: any) => tool.type === "function" || tool.type === "web_search")) {
        req.body.tools = req.body.tools.map((tool: any) => ({
          ...tool,
          type: tool.type || "function"
        }));
      }
    }

    req.log.info({
      originalModel: originalBody.model,
      newFormat: "openai-responses"
    }, "Successfully transformed request to Responses API format");
  }
}

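A before/after sketch (not part of the diff) of what applyOpenAIResponsesTransform does to a chat-completions-style body; the values are invented:

  // before: { model: "gpt-4.1", messages: [...], max_tokens: 256 }
  // after:  { model: "gpt-4.1", input: { messages: [...] }, max_output_tokens: 256 }
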
// handles weird cases that don't fit into our abstractions
function applyMistralPromptFixes(req: Request): void {
  if (req.inboundApi === "mistral-ai") {
@@ -70,12 +117,66 @@ function applyMistralPromptFixes(req: Request): void {
    // mistral prompt and try to fix it if it fails. It will be re-validated
    // after this function returns.
    const result = API_REQUEST_VALIDATORS["mistral-ai"].parse(req.body);

    // Check if this is a vision model request
    const isVisionModel = isMistralVisionModel(req.body.model);

    // Check if the request contains image content
    const hasImageContent = result.messages?.some((msg: { content: string | any[] }) =>
      Array.isArray(msg.content) &&
      msg.content.some((item: any) => item.type === "image_url")
    );

    // For vision requests, normalize the image_url format
    if (hasImageContent && Array.isArray(result.messages)) {
      // Process each message with image content
      result.messages.forEach((msg: any) => {
        if (Array.isArray(msg.content)) {
          // Process each content item
          msg.content.forEach((item: any) => {
            if (item.type === "image_url") {
              // Normalize the image_url field to a string format that Mistral expects
              if (typeof item.image_url === "object") {
                // If it's an object, extract the URL or base64 data
                if (item.image_url.url) {
                  item.image_url = item.image_url.url;
                } else if (item.image_url.data) {
                  item.image_url = item.image_url.data;
                }

                req.log.info(
                  { model: req.body.model },
                  "Normalized object-format image_url to string format"
                );
              }
            }
          });
        }
      });
    }

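An input/output sketch (not part of the diff) of the image_url normalization above; the URL is invented:

  // before: { type: "image_url", image_url: { url: "https://example.com/cat.png" } }
  // after:  { type: "image_url", image_url: "https://example.com/cat.png" }
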
    // Apply Mistral prompt fixes while preserving multimodal content
    req.body.messages = fixMistralPrompt(result.messages);
    req.log.info(
      { n: req.body.messages.length, prev: result.messages.length },
      {
        n: req.body.messages.length,
        prev: result.messages.length,
        isVisionModel,
        hasImageContent
      },
      "Applied Mistral chat prompt fixes."
    );

    // If this is a vision model with image content, it MUST use the chat API
    // and cannot be converted to text completions
    if (hasImageContent) {
      req.log.info(
        { model: req.body.model },
        "Detected Mistral vision request with image content. Keeping as chat format."
      );
      return;
    }

    // If the prompt relies on `prefix: true` for the last message, we need to
    // convert it to a text completions request because AWS Mistral support for
    // this feature is broken.

@@ -94,3 +195,43 @@ function applyMistralPromptFixes(req: Request): void {
    }
  }
}

function toCamelCase(str: string): string {
  return str.replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
}

function transformKeysToCamelCase(obj: any, hasTransformed = { value: false }): any {
  if (Array.isArray(obj)) {
    return obj.map(item => transformKeysToCamelCase(item, hasTransformed));
  }

  if (obj !== null && typeof obj === 'object') {
    return Object.fromEntries(
      Object.entries(obj).map(([key, value]) => {
        const camelKey = toCamelCase(key);
        if (camelKey !== key) {
          hasTransformed.value = true;
        }
        return [
          camelKey,
          transformKeysToCamelCase(value, hasTransformed)
        ];
      })
    );
  }

  return obj;
}

function applyGoogleAIKeyTransforms(req: Request): void {
  // Google (Gemini) API in their infinite wisdom accepts both snake_case and
  // camelCase for some params, even though the docs use snake_case. Some
  // frontends (e.g. ST) mix the two, so we normalize all keys to camelCase.
  if (req.outboundApi === "google-ai") {
    const hasTransformed = { value: false };
    req.body = transformKeysToCamelCase(req.body, hasTransformed);
    if (hasTransformed.value) {
      req.log.info("Applied Gemini snake_case -> camelCase transform");
    }
  }
}

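A quick sketch (not part of the diff) of the key transform; the field names are illustrative:

  // transformKeysToCamelCase({ generation_config: { max_output_tokens: 64 } })
  //   -> { generationConfig: { maxOutputTokens: 64 } }
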
@@ -7,7 +7,7 @@ import { RequestPreprocessor } from "../index";
const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
// todo: make configurable
const GOOGLE_AI_MAX_CONTEXT = 1024000;
const GOOGLE_AI_MAX_CONTEXT = 2048000;
const MISTRAL_AI_MAX_CONTENT = 131072;

/**
@@ -28,6 +28,7 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
  switch (req.outboundApi) {
    case "openai":
    case "openai-text":
    case "openai-responses":
      proxyMax = OPENAI_MAX_CONTEXT;
      break;
    case "anthropic-chat":

@@ -58,6 +59,22 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
    modelMax = 16384;
  } else if (model.match(/^gpt-4o/)) {
    modelMax = 128000;
  } else if (model.match(/^gpt-4.5/)) {
    modelMax = 128000;
  } else if (model.match(/^gpt-4\.1(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 1000000;
  } else if (model.match(/^gpt-4\.1-mini(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 1000000;
  } else if (model.match(/^gpt-4\.1-nano(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 1000000;
  } else if (model.match(/^gpt-5(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 400000;
  } else if (model.match(/^gpt-5-mini(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 400000;
  } else if (model.match(/^gpt-5-nano(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 400000;
  } else if (model.match(/^gpt-5-chat-latest$/)) {
    modelMax = 400000;
  } else if (model.match(/^chatgpt-4o/)) {
    modelMax = 128000;
  } else if (model.match(/gpt-4-turbo(-\d{4}-\d{2}-\d{2})?$/)) {

@@ -68,6 +85,24 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
    modelMax = 131072;
  } else if (model.match(/^gpt-4(-\d{4})?-vision(-preview)?$/)) {
    modelMax = 131072;
  } else if (model.match(/^o3-mini(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 200000;
  } else if (model.match(/^o3(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 200000;
  } else if (model.match(/^o4-mini(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 200000;
  } else if (model.match(/^codex-mini(-latest|-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 200000; // 200k context window for codex-mini-latest
  } else if (model.match(/^o1(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 200000;
  } else if (model.match(/^o1-mini(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 128000;
  } else if (model.match(/^o1-pro(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 200000;
  } else if (model.match(/^o3-pro(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 200000;
  } else if (model.match(/^o1-preview(-\d{4}-\d{2}-\d{2})?$/)) {
    modelMax = 128000;
  } else if (model.match(/gpt-3.5-turbo/)) {
    modelMax = 16384;
  } else if (model.match(/gpt-4-32k/)) {

@@ -84,14 +119,38 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
    modelMax = 200000;
  } else if (model.match(/^claude-3/)) {
    modelMax = 200000;
  } else if (model.match(/^claude-(?:sonnet|opus)-4/)) {
    modelMax = 200000;
  } else if (model.match(/^gemini-/)) {
    modelMax = 1024000;
  } else if (model.match(/^anthropic\.claude-3/)) {
    modelMax = 200000;
  } else if (model.match(/^anthropic\.claude-(?:sonnet|opus)-4/)) {
    modelMax = 200000;
  } else if (model.match(/^anthropic\.claude-v2:\d/)) {
    modelMax = 200000;
  } else if (model.match(/^anthropic\.claude/)) {
    modelMax = 100000;
  } else if (model.match(/^deepseek/)) {
    modelMax = 64000;
  } else if (model.match(/^kimi-k2/)) {
    // Kimi K2 models have 131k context window
    modelMax = 131000;
  } else if (model.match(/moonshot/)) {
    // Moonshot models typically have 200k context window
    modelMax = 200000;
  } else if (model.match(/command[\w-]*-03-202[0-9]/)) {
    // Cohere's command-a-03 models have 256k context window
    modelMax = 256000;
  } else if (model.match(/command/) || model.match(/cohere/)) {
    // Default for all other Cohere models
    modelMax = 128000;
  } else if (model.match(/^grok-4/)) {
    modelMax = 256000;
  } else if (model.match(/^grok/)) {
    modelMax = 128000;
  } else if (model.match(/^magistral/)) {
    modelMax = 40000;
  } else if (model.match(/tral/)) {
    // catches mistral, mixtral, codestral, mathstral, etc. mistral models have
    // no name convention and wildly different context windows so this is a

@@ -132,4 +191,4 @@ function assertRequestHasTokenCounts(
    })
    .nonstrict()
    .parse({ promptTokens: req.promptTokens, outputTokens: req.outputTokens });
}
}

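A sanity check (not part of the diff) on the date-suffix pattern used throughout this chain; the model names are examples:

  // "gpt-4.1".match(/^gpt-4\.1(-\d{4}-\d{2}-\d{2})?$/)            -> match, modelMax = 1000000
  // "gpt-4.1-2025-04-14".match(/^gpt-4\.1(-\d{4}-\d{2}-\d{2})?$/) -> match, modelMax = 1000000
  // "gpt-4.1-mini".match(/^gpt-4\.1(-\d{4}-\d{2}-\d{2})?$/)       -> null; falls through to the -mini branch
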
@@ -1,12 +1,12 @@
import { config } from "../../../../config";
import { ForbiddenError } from "../../../../shared/errors";
import { getModelFamilyForRequest } from "../../../../shared/models";
import { HPMRequestCallback } from "../index";
import { RequestPreprocessor } from "../index";

/**
 * Ensures the selected model family is enabled by the proxy configuration.
 */
export const checkModelFamily: HPMRequestCallback = (_proxyReq, req) => {
export const validateModelFamily: RequestPreprocessor = (req) => {
  const family = getModelFamilyForRequest(req);
  if (!config.allowedModelFamilies.includes(family)) {
    throw new ForbiddenError(

@@ -3,6 +3,7 @@ import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
import { containsImageContent as containsImageContentOpenAI } from "../../../../shared/api-schemas/openai";
import { containsImageContent as containsImageContentAnthropic } from "../../../../shared/api-schemas/anthropic";
import { containsImageContent as containsImageContentGoogleAI } from "../../../../shared/api-schemas/google-ai";
import { ForbiddenError } from "../../../../shared/errors";

/**
@@ -22,11 +23,16 @@ export const validateVision: RequestPreprocessor = async (req) => {
    case "openai":
      hasImage = containsImageContentOpenAI(req.body.messages);
      break;
    case "openai-responses":
      hasImage = containsImageContentOpenAI(req.body.messages);
      break;
    case "anthropic-chat":
      hasImage = containsImageContentAnthropic(req.body.messages);
      break;
    case "anthropic-text":
    case "google-ai":
      hasImage = containsImageContentGoogleAI(req.body.contents);
      break;
    case "anthropic-text":
    case "mistral-ai":
    case "mistral-text":
    case "openai-image":

@@ -0,0 +1,135 @@
import { Request, Response } from "express";
import http from "http";
import ProxyServer from "http-proxy";
import { Readable } from "stream";
import {
  createProxyMiddleware,
  Options,
  debugProxyErrorsPlugin,
  proxyEventsPlugin,
} from "http-proxy-middleware";
import { ProxyReqMutator, stripHeaders } from "./index";
import { createOnProxyResHandler, ProxyResHandlerWithBody } from "../response";
import { createQueueMiddleware } from "../../queue";
import { getHttpAgents } from "../../../shared/network";
import { classifyErrorAndSend } from "../common";

/**
 * Options for the `createQueuedProxyMiddleware` factory function.
 */
type ProxyMiddlewareFactoryOptions = {
  /**
   * Functions which receive a ProxyReqManager and can modify the request before
   * it is proxied. The modifications will be automatically reverted if the
   * request needs to be returned to the queue.
   */
  mutations?: ProxyReqMutator[];
  /**
   * The target URL to proxy requests to. This can be a string or a function
   * which accepts the request and returns a string.
   */
  target: string | Options<Request>["router"];
  /**
   * A function which receives the proxy response and the JSON-decoded request
   * body. Only fired for non-streaming responses; streaming responses are
   * handled in `handle-streaming-response.ts`.
   */
  blockingResponseHandler?: ProxyResHandlerWithBody;
};

/**
 * Returns a middleware function that accepts incoming requests and places them
 * into the request queue. When the request is dequeued, it is proxied to the
 * target URL using the given options and middleware. Non-streaming responses
 * are handled by the given `blockingResponseHandler`.
 */
export function createQueuedProxyMiddleware({
  target,
  mutations,
  blockingResponseHandler,
}: ProxyMiddlewareFactoryOptions) {
  const hpmTarget = typeof target === "string" ? target : "https://setbyrouter";
  const hpmRouter = typeof target === "function" ? target : undefined;

  const [httpAgent, httpsAgent] = getHttpAgents();
  const agent = hpmTarget.startsWith("http:") ? httpAgent : httpsAgent;

  const proxyMiddleware = createProxyMiddleware<Request, Response>({
    target: hpmTarget,
    router: hpmRouter,
    agent,
    changeOrigin: true,
    toProxy: true,
    selfHandleResponse: typeof blockingResponseHandler === "function",
    // Disable HPM logger plugin (requires re-adding the other default plugins).
    // Contrary to name, debugProxyErrorsPlugin is not just for debugging and
    // fixes several error handling/connection close issues in http-proxy core.
    ejectPlugins: true,
    // Inferred (via Options<express.Request>) as Plugin<express.Request>, but
    // the default plugins only allow http.IncomingMessage for TReq. They are
    // compatible with express.Request, so we can use them. `Plugin` type is not
    // exported for some reason.
    plugins: [
      debugProxyErrorsPlugin,
      pinoLoggerPlugin,
      proxyEventsPlugin,
    ] as any,
    on: {
      proxyRes: createOnProxyResHandler(
        blockingResponseHandler ? [blockingResponseHandler] : []
      ),
      error: classifyErrorAndSend,
    },
    buffer: ((req: Request) => {
      // This is a hack/monkey patch and is not part of the official
      // http-proxy-middleware package. See patches/http-proxy+1.18.1.patch.
      let payload = req.body;
      if (typeof payload === "string") {
        payload = Buffer.from(payload);
      }
      const stream = new Readable();
      stream.push(payload);
      stream.push(null);
      return stream;
    }) as any,
  });

  return createQueueMiddleware({
    mutations: [stripHeaders, ...(mutations ?? [])],
    proxyMiddleware,
  });
}

type ProxiedResponse = http.IncomingMessage & Response & any;
function pinoLoggerPlugin(proxyServer: ProxyServer<Request>) {
  proxyServer.on("error", (err, req, res, target) => {
    req.log.error(
      { originalUrl: req.originalUrl, targetUrl: String(target), err },
      "Error occurred while proxying request to target"
    );
  });
  proxyServer.on("proxyReq", (proxyReq, req) => {
    const { protocol, host, path } = proxyReq;
    req.log.info(
      {
        from: req.originalUrl,
        to: `${protocol}//${host}${path}`,
      },
      "Sending request to upstream API..."
    );
  });
  proxyServer.on("proxyRes", (proxyRes: ProxiedResponse, req, _res) => {
    const { protocol, host, path } = proxyRes.req;
    req.log.info(
      {
        target: `${protocol}//${host}${path}`,
        status: proxyRes.statusCode,
        contentType: proxyRes.headers["content-type"],
        contentEncoding: proxyRes.headers["content-encoding"],
        contentLength: proxyRes.headers["content-length"],
        transferEncoding: proxyRes.headers["transfer-encoding"],
      },
      "Got response from upstream API."
    );
  });
}

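A minimal sketch (not part of the diff) of how a route might use the factory; addKeyMutator and openaiResponseHandler are hypothetical names:

  // Hypothetical wiring; only createQueuedProxyMiddleware is from the diff.
  const openaiProxy = createQueuedProxyMiddleware({
    target: "https://api.openai.com",
    mutations: [addKeyMutator],
    blockingResponseHandler: openaiResponseHandler,
  });
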
@@ -0,0 +1,112 @@
import { Request } from "express";
import { Key } from "../../../shared/key-management";
import { assertNever } from "../../../shared/utils";

/**
 * Represents a change to the request that will be reverted if the request
 * fails.
 */
interface ProxyReqMutation {
  target: "header" | "path" | "body" | "api-key" | "signed-request";
  key?: string;
  originalValue: any | undefined;
}

/**
 * Manages a request's headers, body, and path, allowing them to be modified
 * before the request is proxied and automatically reverted if the request
 * needs to be retried.
 */
export class ProxyReqManager {
  private req: Request;
  private mutations: ProxyReqMutation[] = [];

  /**
   * A read-only proxy of the request object. Avoid changing any properties
   * here as they will persist across retries.
   */
  public readonly request: Readonly<Request>;

  constructor(req: Request) {
    this.req = req;

    this.request = new Proxy(req, {
      get: (target, prop) => {
        if (typeof prop === "string") return target[prop as keyof Request];
        return undefined;
      },
    });
  }

  setHeader(name: string, newValue: string): void {
    const originalValue = this.req.get(name);
    this.mutations.push({ target: "header", key: name, originalValue });
    this.req.headers[name.toLowerCase()] = newValue;
  }

  removeHeader(name: string): void {
    const originalValue = this.req.get(name);
    this.mutations.push({ target: "header", key: name, originalValue });
    delete this.req.headers[name.toLowerCase()];
  }

  setBody(newBody: any): void {
    const originalValue = this.req.body;
    this.mutations.push({ target: "body", key: "body", originalValue });
    this.req.body = newBody;
  }

  setKey(newKey: Key): void {
    const originalValue = this.req.key;
    this.mutations.push({ target: "api-key", key: "key", originalValue });
    this.req.key = newKey;
  }

  setPath(newPath: string): void {
    const originalValue = this.req.path;
    this.mutations.push({ target: "path", key: "path", originalValue });
    this.req.url = newPath;
  }

  setSignedRequest(newSignedRequest: typeof this.req.signedRequest): void {
    const originalValue = this.req.signedRequest;
    this.mutations.push({ target: "signed-request", key: "signedRequest", originalValue });
    this.req.signedRequest = newSignedRequest;
  }

  hasChanged(): boolean {
    return this.mutations.length > 0;
  }

  revert(): void {
    for (const mutation of this.mutations.reverse()) {
      switch (mutation.target) {
        case "header":
          if (mutation.originalValue === undefined) {
            delete this.req.headers[mutation.key!.toLowerCase()];
            continue;
          } else {
            this.req.headers[mutation.key!.toLowerCase()] =
              mutation.originalValue;
          }
          break;
        case "path":
          this.req.url = mutation.originalValue;
          break;
        case "body":
          this.req.body = mutation.originalValue;
          break;
        case "api-key":
          // We don't reset the key here because it's not a property of the
          // inbound request, so we'd only ever be reverting it to null.
          break;
        case "signed-request":
          this.req.signedRequest = mutation.originalValue;
          break;
        default:
          assertNever(mutation.target);
      }
    }
    this.mutations = [];
  }
}

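A short sketch (not part of the diff) of the mutate-then-revert lifecycle; the header value is invented:

  // Hypothetical usage of the class above.
  const manager = new ProxyReqManager(req);
  manager.setHeader("authorization", "Bearer sk-example");
  // ...if the request must be returned to the queue:
  if (manager.hasChanged()) manager.revert(); // restores the original header
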
@@ -0,0 +1,36 @@
import util from "util";
import zlib from "zlib";
import { PassThrough } from "stream";

const BUFFER_DECODER_MAP = {
  gzip: util.promisify(zlib.gunzip),
  deflate: util.promisify(zlib.inflate),
  br: util.promisify(zlib.brotliDecompress),
  text: (data: Buffer) => data,
};

const STREAM_DECODER_MAP = {
  gzip: zlib.createGunzip,
  deflate: zlib.createInflate,
  br: zlib.createBrotliDecompress,
  text: () => new PassThrough(),
};

type SupportedContentEncoding = keyof typeof BUFFER_DECODER_MAP;
const isSupportedContentEncoding = (
  encoding: string
): encoding is SupportedContentEncoding => encoding in BUFFER_DECODER_MAP;

export async function decompressBuffer(buf: Buffer, encoding: string = "text") {
  if (isSupportedContentEncoding(encoding)) {
    return (await BUFFER_DECODER_MAP[encoding](buf)).toString();
  }
  throw new Error(`Unsupported content-encoding: ${encoding}`);
}

export function getStreamDecompressor(encoding: string = "text") {
  if (isSupportedContentEncoding(encoding)) {
    return STREAM_DECODER_MAP[encoding]();
  }
  throw new Error(`Unsupported content-encoding: ${encoding}`);
}

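A usage sketch (not part of the diff); the gzipped buffer is synthesized inline:

  import zlib from "zlib";
  const gzipped = zlib.gzipSync(Buffer.from('{"ok":true}'));
  const text = await decompressBuffer(gzipped, "gzip"); // -> '{"ok":true}'
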
@@ -2,36 +2,33 @@ import express from "express";
import { APIFormat } from "../../../shared/key-management";
import { assertNever } from "../../../shared/utils";
import { initializeSseStream } from "../../../shared/streaming";
import http from "http";

function getMessageContent({
  title,
  message,
  obj,
}: {
/**
 * Returns a Markdown-formatted message that renders semi-nicely in most chat
 * frontends. For example:
 *
 * **Proxy error (HTTP 404 Not Found)**
 * The proxy encountered an error while trying to send your prompt to the upstream service. Further technical details are provided below.
 * ***
 * *The requested Claude model might not exist, or the key might not be provisioned for it.*
 * ```
 * {
 *   "type": "error",
 *   "error": {
 *     "type": "not_found_error",
 *     "message": "model: some-invalid-model-id",
 *   },
 *   "proxy_note": "The requested Claude model might not exist, or the key might not be provisioned for it."
 * }
 * ```
 */
function getMessageContent(params: {
  title: string;
  message: string;
  obj?: Record<string, any>;
}) {
  /*
  Constructs a Markdown-formatted message that renders semi-nicely in most chat
  frontends. For example:

  **Proxy error (HTTP 404 Not Found)**
  The proxy encountered an error while trying to send your prompt to the upstream service. Further technical details are provided below.
  ***
  *The requested Claude model might not exist, or the key might not be provisioned for it.*
  ```
  {
    "type": "error",
    "error": {
      "type": "not_found_error",
      "message": "model: some-invalid-model-id",
    },
    "proxy_note": "The requested Claude model might not exist, or the key might not be provisioned for it."
  }
  ```
  */

  const { title, message, obj } = params;
  const note = obj?.proxy_note || obj?.error?.message || "";
  const header = `### **${title}**`;
  const friendlyMessage = note ? `${message}\n\n----\n\n*${note}*` : message;
@@ -71,7 +68,11 @@ type ErrorGeneratorOptions = {
  statusCode?: number;
};

export function tryInferFormat(body: any): APIFormat | "unknown" {
/**
 * Very crude inference of the request format based on the request body. Don't
 * rely on this to be very accurate.
 */
function tryInferFormat(body: any): APIFormat | "unknown" {
  if (typeof body !== "object" || !body.model) {
    return "unknown";
  }

@@ -95,7 +96,11 @@ export function tryInferFormat(body: any): APIFormat | "unknown" {
  return "unknown";
}

// avoid leaking upstream hostname on dns resolution error
/**
 * Redacts the hostname from the error message if it contains a DNS resolution
 * error. This is to avoid leaking upstream hostnames on DNS resolution errors,
 * as those may contain sensitive information about the proxy's configuration.
 */
function redactHostname(options: ErrorGeneratorOptions): ErrorGeneratorOptions {
  if (!options.message.includes("getaddrinfo")) return options;

@@ -112,46 +117,61 @@ function redactHostname(options: ErrorGeneratorOptions): ErrorGeneratorOptions {
  return redacted;
}

export function sendErrorToClient({
  options,
  req,
  res,
}: {
/**
 * Generates an appropriately-formatted error response and sends it to the
 * client over their requested transport (blocking or SSE stream).
 */
export function sendErrorToClient(params: {
  options: ErrorGeneratorOptions;
  req: express.Request;
  res: express.Response;
}) {
  const redactedOpts = redactHostname(options);
  const { format: inputFormat } = redactedOpts;
  const { req, res } = params;
  const options = redactHostname(params.options);
  const { statusCode, message, title, obj: details } = options;

  // Since we want to send the error in a format the client understands, we
  // need to know the request format. `setApiFormat` might not have been called
  // yet, so we'll try to infer it from the request body.
  const format =
    inputFormat === "unknown" ? tryInferFormat(req.body) : inputFormat;
    options.format === "unknown" ? tryInferFormat(req.body) : options.format;
  if (format === "unknown") {
    return res.status(redactedOpts.statusCode || 400).json({
      error: redactedOpts.message,
      details: redactedOpts.obj,
    // Early middleware error (auth, rate limit) so we can only send something
    // generic.
    const code = statusCode || 400;
    const hasDetails = details && Object.keys(details).length > 0;
    return res.status(code).json({
      error: {
        message,
        type: http.STATUS_CODES[code]!.replace(/\s+/g, "_").toLowerCase(),
      },
      ...(hasDetails ? { details } : {}),
    });
  }

  const completion = buildSpoofedCompletion({ ...redactedOpts, format });
  const event = buildSpoofedSSE({ ...redactedOpts, format });
  const isStreaming =
    req.isStreaming || req.body.stream === true || req.body.stream === "true";

  // Cannot modify headers if client opted into streaming and made it into the
  // proxy request queue, because that immediately starts an SSE stream.
  if (!res.headersSent) {
    res.setHeader("x-oai-proxy-error", redactedOpts.title);
    res.setHeader("x-oai-proxy-error-status", redactedOpts.statusCode || 500);
    res.setHeader("x-oai-proxy-error", title);
    res.setHeader("x-oai-proxy-error-status", statusCode || 500);
  }

  // By this point, we know the request format. To get the error to display in
  // chat clients' UIs, we'll send it as a 200 response as a spoofed completion
  // from the language model. Depending on whether the client is streaming, we
  // will either send an SSE event or a JSON response.
  const isStreaming = req.isStreaming || String(req.body.stream) === "true";
  if (isStreaming) {
    // User can have opted into streaming but not made it into the queue yet,
    // in which case the stream must be started first.
    if (!res.headersSent) {
      initializeSseStream(res);
    }
    res.write(event);
    res.write(buildSpoofedSSE({ ...options, format }));
    res.write(`data: [DONE]\n\n`);
    res.end();
  } else {
    res.status(200).json(completion);
    res.status(200).json(buildSpoofedCompletion({ ...options, format }));
  }
}

@@ -174,6 +194,21 @@ export function buildSpoofedCompletion({

  switch (format) {
    case "openai":
    case "openai-responses":
      return {
        id: "error-" + id,
        object: "chat.completion",
        created: Date.now(),
        model,
        usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
        choices: [
          {
            message: { role: "assistant", content },
            finish_reason: title,
            index: 0,
          },
        ],
      };
    case "mistral-ai":
      return {
        id: "error-" + id,

@@ -193,7 +228,7 @@ export function buildSpoofedCompletion({
      return {
        outputs: [{ text: content, stop_reason: title }],
        model,
      }
      };
    case "openai-text":
      return {
        id: "error-" + id,

@@ -263,6 +298,15 @@ export function buildSpoofedSSE({

  switch (format) {
    case "openai":
    case "openai-responses":
      event = {
        id: "chatcmpl-" + id,
        object: "chat.completion.chunk",
        created: Date.now(),
        model,
        choices: [{ delta: { content }, index: 0, finish_reason: title }],
      };
      break;
    case "mistral-ai":
      event = {
        id: "chatcmpl-" + id,

@@ -1,19 +1,6 @@
import util from "util";
import zlib from "zlib";
import { sendProxyError } from "../common";
import type { RawResponseBodyHandler } from "./index";

const DECODER_MAP = {
  gzip: util.promisify(zlib.gunzip),
  deflate: util.promisify(zlib.inflate),
  br: util.promisify(zlib.brotliDecompress),
};

const isSupportedContentEncoding = (
  contentEncoding: string
): contentEncoding is keyof typeof DECODER_MAP => {
  return contentEncoding in DECODER_MAP;
};
import { decompressBuffer } from "./compression";

/**
 * Handles the response from the upstream service and decodes the body if
@@ -35,42 +22,49 @@ export const handleBlockingResponse: RawResponseBodyHandler = async (
    throw err;
  }

  return new Promise<string>((resolve, reject) => {
  return new Promise((resolve, reject) => {
    let chunks: Buffer[] = [];
    proxyRes.on("data", (chunk) => chunks.push(chunk));
    proxyRes.on("end", async () => {
      let body = Buffer.concat(chunks);

      const contentEncoding = proxyRes.headers["content-encoding"];
      if (contentEncoding) {
        if (isSupportedContentEncoding(contentEncoding)) {
          const decoder = DECODER_MAP[contentEncoding];
          // @ts-ignore - started failing after upgrading TypeScript, don't care
          // as it was never a problem.
          body = await decoder(body);
        } else {
          const error = `Proxy received response with unsupported content-encoding: ${contentEncoding}`;
          req.log.warn({ contentEncoding, key: req.key?.hash }, error);
          sendProxyError(req, res, 500, "Internal Server Error", {
            error,
            contentEncoding,
          });
          return reject(error);
        }
      const contentType = proxyRes.headers["content-type"];
      let body: string | Buffer = Buffer.concat(chunks);
      const rejectWithMessage = function (msg: string, err: Error) {
        const error = `${msg} (${err.message})`;
        req.log.warn(
          { msg: error, stack: err.stack },
          "Error in blocking response handler"
        );
        sendProxyError(req, res, 500, "Internal Server Error", { error });
        return reject(error);
      };

      try {
        body = await decompressBuffer(body, contentEncoding);
      } catch (e) {
        return rejectWithMessage(`Could not decode response body`, e);
      }

      try {
        if (proxyRes.headers["content-type"]?.includes("application/json")) {
          const json = JSON.parse(body.toString());
          return resolve(json);
        }
        return resolve(body.toString());
        return resolve(tryParseAsJson(body, contentType));
      } catch (e) {
        const msg = `Proxy received response with invalid JSON: ${e.message}`;
        req.log.warn({ error: e.stack, key: req.key?.hash }, msg);
        sendProxyError(req, res, 500, "Internal Server Error", { error: msg });
        return reject(msg);
        return rejectWithMessage("API responded with invalid JSON", e);
      }
    });
  });
};

function tryParseAsJson(body: string, contentType?: string) {
  // If the response is declared as JSON, it must parse or we will throw
  if (contentType?.includes("application/json")) {
    return JSON.parse(body);
  }
  // If it's not declared as JSON, we'll try to parse it as JSON anyway, since
  // some APIs return the wrong content-type header. If it fails to parse,
  // we'll just return the raw body without throwing.
  try {
    return JSON.parse(body);
  } catch (e) {
    return body;
  }
}

@@ -1,6 +1,5 @@
import express from "express";
import { pipeline, Readable, Transform } from "stream";
import StreamArray from "stream-json/streamers/StreamArray";
import { StringDecoder } from "string_decoder";
import { promisify } from "util";
import type { logger } from "../../../logger";

@@ -18,6 +17,7 @@ import { getAwsEventStreamDecoder } from "./streaming/aws-event-stream-decoder";
import { EventAggregator } from "./streaming/event-aggregator";
import { SSEMessageTransformer } from "./streaming/sse-message-transformer";
import { SSEStreamAdapter } from "./streaming/sse-stream-adapter";
import { getStreamDecompressor } from "./compression";

const pipelineAsync = promisify(pipeline);

@@ -41,21 +41,21 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
  req,
  res
) => {
  const { hash } = req.key!;
  const { headers, statusCode } = proxyRes;
  if (!req.isStreaming) {
    throw new Error("handleStreamedResponse called for non-streaming request.");
  }

  if (proxyRes.statusCode! > 201) {
  if (statusCode! > 201) {
    req.isStreaming = false;
    req.log.warn(
      { statusCode: proxyRes.statusCode, key: hash },
      { statusCode },
      `Streaming request returned error status code. Falling back to non-streaming response handler.`
    );
    return handleBlockingResponse(proxyRes, req, res);
  }

  req.log.debug({ headers: proxyRes.headers }, `Starting to proxy SSE stream.`);
  req.log.debug({ headers }, `Starting to proxy SSE stream.`);

  // Typically, streaming will have already been initialized by the request
  // queue to send heartbeat pings.

@@ -66,7 +66,7 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (

  const prefersNativeEvents = req.inboundApi === req.outboundApi;
  const streamOptions = {
    contentType: proxyRes.headers["content-type"],
    contentType: headers["content-type"],
    api: req.outboundApi,
    logger: req.log,
  };

@@ -78,11 +78,10 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
  // only have to write one aggregator (OpenAI input) for each output format.
  const aggregator = new EventAggregator(req);

  // Decoder reads from the raw response buffer and produces a stream of
  // discrete events in some format (text/event-stream, vnd.amazon.event-stream,
  // streaming JSON, etc).
  const decompressor = getStreamDecompressor(headers["content-encoding"]);
  // Decoder reads from the response bytes to produce a stream of plaintext.
  const decoder = getDecoder({ ...streamOptions, input: proxyRes });
  // Adapter consumes the decoded events and produces server-sent events so we
  // Adapter consumes the decoded text and produces server-sent events so we
  // have a standard event format for the client and to translate between API
  // message formats.
  const adapter = new SSEStreamAdapter(streamOptions);

@@ -107,7 +106,7 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
  try {
    await Promise.race([
      handleAbortedStream(req, res),
      pipelineAsync(proxyRes, decoder, adapter, transformer),
      pipelineAsync(proxyRes, decompressor, decoder, adapter, transformer),
    ]);
    req.log.debug(`Finished proxying SSE stream.`);
    res.end();

@@ -174,14 +173,13 @@ function getDecoder(options: {
  logger: typeof logger;
  contentType?: string;
}) {
  const { api, contentType, input, logger } = options;
  const { contentType, input, logger } = options;
  if (contentType?.includes("application/vnd.amazon.eventstream")) {
    return getAwsEventStreamDecoder({ input, logger });
  } else if (api === "google-ai") {
    return StreamArray.withParser();
  } else if (contentType?.includes("application/json")) {
    throw new Error("JSON streaming not supported, request SSE instead");
  } else {
    // Passthrough stream, but ensures split chunks across multi-byte characters
    // are handled correctly.
    // Ensures split chunks across multi-byte characters are handled correctly.
    const stringDecoder = new StringDecoder("utf8");
    return new Transform({
      readableObjectMode: true,

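A stage-by-stage sketch (not part of the diff) of the pipeline assembled above:

  // proxyRes     -> raw (possibly compressed) bytes from the upstream API
  // decompressor -> plain bytes (gzip/deflate/br via getStreamDecompressor)
  // decoder      -> plaintext, or discrete AWS event-stream messages
  // adapter      -> normalized server-sent events
  // transformer  -> SSE messages in the client's requested API format
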
@@ -4,8 +4,9 @@ import { Request, Response } from "express";
|
||||
import * as http from "http";
|
||||
import { config } from "../../../config";
|
||||
import { HttpError, RetryableError } from "../../../shared/errors";
|
||||
import { keyPool } from "../../../shared/key-management";
|
||||
import { getOpenAIModelFamily } from "../../../shared/models";
|
||||
import { keyPool, GoogleAIKey } from "../../../shared/key-management";
|
||||
import { logger } from "../../../logger";
|
||||
import { getOpenAIModelFamily, GoogleAIModelFamily } from "../../../shared/models";
|
||||
import { countTokens } from "../../../shared/tokenization";
|
||||
import {
|
||||
incrementPromptCount,
|
||||
@@ -47,7 +48,7 @@ export type ProxyResHandlerWithBody = (
|
||||
*/
|
||||
body: string | Record<string, any>
|
||||
) => Promise<void>;
|
||||
export type ProxyResMiddleware = ProxyResHandlerWithBody[];
|
||||
export type ProxyResMiddleware = ProxyResHandlerWithBody[] | undefined;
|
||||
|
||||
/**
|
||||
* Returns a on.proxyRes handler that executes the given middleware stack after
|
||||
@@ -71,11 +72,22 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
|
||||
req: Request,
|
||||
res: Response
|
||||
) => {
|
||||
const initialHandler: RawResponseBodyHandler = req.isStreaming
|
||||
// Proxied request has by now been sent to the upstream API, so we revert
|
||||
// tracked mutations that were only needed to send the request.
|
||||
// This generally means path adjustment, headers, and body serialization.
|
||||
if (req.changeManager) {
|
||||
req.changeManager.revert();
|
||||
}
|
||||
|
||||
const initialHandler = req.isStreaming
|
||||
? handleStreamedResponse
|
||||
: handleBlockingResponse;
|
||||
let lastMiddleware = initialHandler.name;
|
||||
|
||||
if (Buffer.isBuffer(req.body)) {
|
||||
req.body = JSON.parse(req.body.toString());
|
||||
}
|
||||
|
||||
try {
|
||||
const body = await initialHandler(proxyRes, req, res);
|
||||
const middlewareStack: ProxyResMiddleware = [];
|
||||
@@ -100,7 +112,7 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
|
||||
saveImage,
|
||||
logPrompt,
|
||||
logEvent,
|
||||
...apiMiddleware
|
||||
...(apiMiddleware ?? [])
|
||||
);
|
||||
}
|
||||
|
||||
@@ -124,15 +136,15 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
|
||||
}
|
||||
|
||||
const { stack, message } = error;
|
||||
const info = { stack, lastMiddleware, key: req.key?.hash };
|
||||
const details = { stack, message, lastMiddleware, key: req.key?.hash };
|
||||
const description = `Error while executing proxy response middleware: ${lastMiddleware} (${message})`;
|
||||
|
||||
if (res.headersSent) {
|
||||
req.log.error(info, description);
|
||||
req.log.error(details, description);
|
||||
if (!res.writableEnded) res.end();
|
||||
return;
|
||||
} else {
|
||||
req.log.error(info, description);
|
||||
req.log.error(details, description);
|
||||
res
|
||||
.status(500)
|
||||
.json({ error: "Internal server error", proxy_note: description });
|
||||
@@ -163,60 +175,61 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
|
||||
) => {
|
||||
const statusCode = proxyRes.statusCode || 500;
|
||||
const statusMessage = proxyRes.statusMessage || "Internal Server Error";
|
||||
let errorPayload: ProxiedErrorPayload;
|
||||
|
||||
const service = req.key!.service;
|
||||
// Not an error, continue to next response handler
|
||||
if (statusCode < 400) return;
|
||||
|
||||
// Parse the error response body
|
||||
let errorPayload: ProxiedErrorPayload;
|
||||
try {
|
||||
assertJsonResponse(body);
|
||||
errorPayload = body;
|
||||
} catch (parseError) {
|
||||
// Likely Bad Gateway or Gateway Timeout from upstream's reverse proxy
|
||||
const hash = req.key?.hash;
|
||||
req.log.warn({ statusCode, statusMessage, key: hash }, parseError.message);
|
||||
const strBody = String(body).slice(0, 128);
|
||||
req.log.error({ statusCode, strBody }, "Error body is not JSON");
|
||||
|
||||
const errorObject = {
|
||||
const details = {
|
||||
error: parseError.message,
|
||||
status: statusCode,
|
||||
statusMessage,
|
||||
proxy_note: `Proxy got back an error, but it was not in JSON format. This is likely a temporary problem with the upstream service.`,
|
||||
proxy_note: `Proxy got back an error, but it was not in JSON format. This is likely a temporary problem with the upstream service. Response body: ${strBody}`,
|
||||
};
|
||||
|
||||
sendProxyError(req, res, statusCode, statusMessage, errorObject);
|
||||
sendProxyError(req, res, statusCode, statusMessage, details);
|
||||
throw new HttpError(statusCode, parseError.message);
|
||||
}
|
||||
|
||||
const service = req.key!.service;
|
||||
// Extract the error type from the response body depending on the service
|
||||
if (service === "gcp") {
|
||||
if (Array.isArray(errorPayload)) {
|
||||
errorPayload = errorPayload[0];
|
||||
}
|
||||
}
|
||||
|
||||
const errorType =
|
||||
errorPayload.error?.code ||
|
||||
errorPayload.error?.type ||
|
||||
getAwsErrorType(proxyRes.headers["x-amzn-errortype"]);
|
||||
|
||||
req.log.warn(
|
||||
{ statusCode, type: errorType, errorPayload, key: req.key?.hash },
|
||||
`Received error response from upstream. (${proxyRes.statusMessage})`
|
||||
{ statusCode, statusMessage, errorType, errorPayload, key: req.key?.hash },
|
||||
`API returned an error.`
|
||||
);
|
||||
|
||||
// TODO: split upstream error handling into separate modules for each service,
|
||||
// this is out of control.
|
||||
|
||||
// Try to convert response body to a ProxiedErrorPayload with message/type
|
||||
if (service === "aws") {
|
||||
// Try to standardize the error format for AWS
|
||||
errorPayload.error = { message: errorPayload.message, type: errorType };
|
||||
delete errorPayload.message;
|
||||
} else if (service === "gcp") {
|
||||
// Try to standardize the error format for GCP
|
||||
if (errorPayload.error?.code) { // GCP Error
|
||||
errorPayload.error = { message: errorPayload.error.message, type: errorPayload.error.status || errorPayload.error.code };
|
||||
if (errorPayload.error?.code) {
|
||||
errorPayload.error = {
|
||||
message: errorPayload.error.message,
|
||||
type: errorPayload.error.status || errorPayload.error.code,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Figure out what to do with the error
|
||||
// TODO: separate error handling for each service
|
||||
if (statusCode === 400) {
|
||||
switch (service) {
|
||||
case "openai":
|
||||
@@ -231,9 +244,15 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
|
||||
// same 429 billing error that other models return.
|
||||
await handleOpenAIRateLimitError(req, errorPayload);
|
||||
} else {
|
||||
errorPayload.proxy_note = `The upstream API rejected the request. Your prompt may be too long for ${req.body?.model}.`;
|
||||
errorPayload.proxy_note = `The upstream API rejected the request. Check the error message for details.`;
|
||||
}
|
||||
break;
|
||||
case "deepseek":
|
||||
await handleDeepseekBadRequestError(req, errorPayload);
|
||||
break;
|
||||
case "xai":
|
||||
await handleXaiBadRequestError(req, errorPayload);
|
||||
break;
|
||||
case "anthropic":
|
||||
case "aws":
|
||||
case "gcp":
|
||||
@@ -242,13 +261,37 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
|
||||
case "google-ai":
|
||||
await handleGoogleAIBadRequestError(req, errorPayload);
|
||||
break;
|
||||
case "cohere":
|
||||
errorPayload.proxy_note = `The upstream Cohere API rejected the request. Check the error message for details.`;
|
||||
break;
|
||||
case "qwen":
|
||||
// No special handling yet
|
||||
break;
|
||||
case "moonshot":
|
||||
errorPayload.proxy_note = `The Moonshot API rejected the request. Check the error message for details.`;
|
||||
break;
|
||||
default:
|
||||
assertNever(service);
|
||||
}
|
||||
} else if (statusCode === 401) {
|
||||
// Key is invalid or was revoked
|
||||
// Universal 401 handling - authentication failed, retry with different key
|
||||
keyPool.disable(req.key!, "revoked");
|
||||
errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
|
||||
await reenqueueRequest(req);
|
||||
throw new RetryableError(`${service} key authentication failed, retrying with different key.`);
|
||||
} else if (statusCode === 402) {
|
||||
// Deepseek specific - insufficient balance
|
||||
if (service === "deepseek") {
|
||||
keyPool.disable(req.key!, "quota");
|
||||
await reenqueueRequest(req);
|
||||
throw new RetryableError("Deepseek key has insufficient balance, retrying with different key.");
|
||||
}
|
||||
} else if (statusCode === 405) {
|
||||
// Xai specific - insufficient balance
|
||||
if (service === "xai") {
|
||||
keyPool.disable(req.key!, "quota");
|
||||
await reenqueueRequest(req);
|
||||
throw new RetryableError("XAI key has insufficient balance, retrying with different key.");
|
||||
}
|
||||
} else if (statusCode === 403) {
|
||||
switch (service) {
|
||||
case "anthropic":
|
||||
@@ -256,10 +299,6 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
|
||||
errorType === "permission_error" &&
|
||||
errorPayload.error?.message?.toLowerCase().includes("multimodal")
|
||||
) {
|
||||
req.log.warn(
|
||||
{ key: req.key?.hash },
|
||||
"This Anthropic key does not support multimodal prompts."
|
||||
);
|
||||
keyPool.update(req.key!, { allowsMultimodality: false });
|
||||
await reenqueueRequest(req);
|
||||
throw new RetryableError(
|
||||
@@ -275,7 +314,8 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
|
||||
case "UnrecognizedClientException":
|
||||
// Key is invalid.
|
||||
keyPool.disable(req.key!, "revoked");
|
||||
errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
|
||||
await reenqueueRequest(req);
|
||||
throw new RetryableError("AWS key is invalid, retrying with different key.");
|
||||
break;
|
||||
case "AccessDeniedException":
|
||||
const isModelAccessError =
|
||||
@@ -293,11 +333,15 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
|
||||
errorPayload.proxy_note = `Received 403 error. Key may be invalid.`;
|
||||
}
|
||||
return;
|
||||
case "mistral-ai":
|
||||
case "gcp":
|
||||
case "mistral-ai":
|
||||
case "gcp":
|
||||
keyPool.disable(req.key!, "revoked");
|
||||
errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
|
||||
return;
|
||||
await reenqueueRequest(req);
|
||||
throw new RetryableError("GCP key is invalid, retrying with different key.");
|
||||
case "moonshot":
|
||||
keyPool.disable(req.key!, "revoked");
|
||||
await reenqueueRequest(req);
|
||||
throw new RetryableError("Moonshot key is invalid, retrying with different key.");
|
||||
}
|
||||
  } else if (statusCode === 429) {
    switch (service) {
@@ -320,14 +364,30 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
      case "google-ai":
        await handleGoogleAIRateLimitError(req, errorPayload);
        break;
      case "deepseek":
        await handleDeepseekRateLimitError(req, errorPayload);
        break;
      case "xai":
        await handleXaiRateLimitError(req, errorPayload);
        break;
      case "cohere":
        await handleCohereRateLimitError(req, errorPayload);
        break;
      case "qwen":
        // Rate limits are handled the same way as OpenAI's
        await handleOpenAIRateLimitError(req, errorPayload);
        break;
      case "moonshot":
        await handleMoonshotRateLimitError(req, errorPayload);
        break;
      default:
        assertNever(service as never);
    }
  } else if (statusCode === 404) {
    // Most likely model not found
    switch (service) {
      case "openai":
        if (errorType === "model_not_found") {
          const requestedModel = req.body.model;
          const modelFamily = getOpenAIModelFamily(requestedModel);
          errorPayload.proxy_note = `The key assigned to your prompt does not support the requested model (${requestedModel}, family: ${modelFamily}).`;
@@ -338,31 +398,41 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
        }
        break;
      case "anthropic":
        errorPayload.proxy_note = `The requested Claude model might not exist, or the key might not be provisioned for it.`;
        break;
      case "google-ai":
        errorPayload.proxy_note = `The requested Google AI model might not exist, or the key might not be provisioned for it.`;
        break;
      case "mistral-ai":
        errorPayload.proxy_note = `The requested Mistral AI model might not exist, or the key might not be provisioned for it.`;
        break;
      case "aws":
        errorPayload.proxy_note = `The requested AWS resource might not exist, or the key might not have access to it.`;
        break;
      case "gcp":
        errorPayload.proxy_note = `The requested GCP resource might not exist, or the key might not have access to it.`;
        break;
      case "azure":
        errorPayload.proxy_note = `The assigned Azure deployment does not support the requested model.`;
        break;
      case "deepseek":
      case "xai":
      case "cohere":
      case "qwen":
        errorPayload.proxy_note = `The key assigned to your prompt does not support the requested model.`;
        break;
      default:
        assertNever(service as never);
    }
  } else if (statusCode === 503) {
    switch (service) {
      case "aws":
        // Re-enqueue on any 503 from AWS Bedrock
        req.log.warn(
          { key: req.key?.hash, errorType, errorPayload },
          `AWS Bedrock service unavailable (503). Re-enqueueing request.`
        );
        await reenqueueRequest(req);
        throw new RetryableError(
          "AWS Bedrock service unavailable (503), re-enqueued request."
        );
      default:
        errorPayload.proxy_note = `Upstream service unavailable. Try again later.`;
        break;
    }
  } else {
    errorPayload.proxy_note = `Unrecognized error from upstream service.`;
  }

  // Some OAI errors contain the organization ID, which we don't want to
  // reveal, so redact it from the error message.
  if (errorPayload.error?.message) {
    errorPayload.error.message = errorPayload.error.message.replace(
      /org-.{24}/gm,
@@ -370,9 +440,10 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
    );
  }

  // Send the error to the client, then re-throw so that onProxyRes's handler
  // can log it. The re-thrown error will not trigger another write to the
  // response because `sendProxyError` has already written it.
  sendProxyError(req, res, statusCode, statusMessage, errorPayload);
  throw new HttpError(statusCode, errorPayload.error?.message);
};
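
// Editor's note: a minimal, self-contained sketch of the org-id redaction
// above. The replacement literal sits outside this hunk, so the one used here
// is an assumption, and the sample org ID is fabricated.
const sampleMessage =
  "Quota exceeded for organization org-AbCdEfGhIjKlMnOpQrStUvWx this month.";
const redactedMessage = sampleMessage.replace(
  /org-.{24}/gm,
  "org-xxxxxxxxxxxxxxxxxxxxxxxx"
);
// => "Quota exceeded for organization org-xxxxxxxxxxxxxxxxxxxxxxxx this month."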
@@ -400,27 +471,32 @@ async function handleAnthropicAwsBadRequestError(
  // {"type":"error","error":{"type":"invalid_request_error","message":"Your credit balance is too low to access the Claude API. Please go to Plans & Billing to upgrade or purchase credits."}}
  const isOverQuota =
    error?.message?.match(/usage blocked until/i) ||
    error?.message?.match(/credit balance is too low/i) ||
    error?.message?.match(/You will regain access on/i) ||
    error?.message?.match(/reached your specified API usage limits/i);
  if (isOverQuota) {
    req.log.warn(
      { key: req.key?.hash, message: error?.message },
      "Anthropic key has hit spending limit and will be disabled."
    );
    keyPool.disable(req.key!, "quota");
    errorPayload.proxy_note = `Assigned key has hit its spending limit. ${error?.message}`;
    await reenqueueRequest(req);
    throw new RetryableError("Claude key hit spending limit, retrying with different key.");
  }

  const isDisabled =
    error?.message?.match(/organization has been disabled/i) ||
    error?.message?.match(/^operation not allowed/i) ||
    error?.message?.match(/credential is only authorized for use with Claude Code/i);
  if (isDisabled) {
    req.log.warn(
      { key: req.key?.hash, message: error?.message },
      "Anthropic/AWS key has been disabled."
    );
    keyPool.disable(req.key!, "revoked");
    errorPayload.proxy_note = `Assigned key has been disabled. (${error?.message})`;
    await reenqueueRequest(req);
    throw new RetryableError("Claude key has been disabled, retrying with different key.");
  }

@@ -471,6 +547,106 @@ async function handleGcpRateLimitError(
  }
}

async function handleDeepseekRateLimitError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  keyPool.markRateLimited(req.key!);
  await reenqueueRequest(req);
  throw new RetryableError("Deepseek rate-limited request re-enqueued.");
}

async function handleDeepseekBadRequestError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  // Per the key checker, a 400 response means the key is valid but the
  // request itself was rejected.
  errorPayload.proxy_note = `The API rejected the request. Check the error message for details.`;
}

async function handleXaiRateLimitError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  keyPool.markRateLimited(req.key!);
  await reenqueueRequest(req);
  throw new RetryableError("Xai rate-limited request re-enqueued.");
}

async function handleXaiBadRequestError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  // Per the key checker, a 400 response means the key is valid but the
  // request itself was rejected.
  errorPayload.proxy_note = `The API rejected the request. Check the error message for details.`;
}

async function handleCohereRateLimitError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  // Mark the current key as rate limited
  keyPool.markRateLimited(req.key!);

  // Store the original request attempt count or initialize it
  req.retryCount = (req.retryCount || 0) + 1;

  // Only retry up to 3 times
  if (req.retryCount <= 3) {
    try {
      // Add a small delay before retrying (1-5 seconds)
      const delayMs = 1000 + Math.floor(Math.random() * 4000);
      await new Promise(resolve => setTimeout(resolve, delayMs));

      // Re-enqueue the request to try with a different key
      await reenqueueRequest(req);
      req.log.info({ attempt: req.retryCount }, "Cohere rate-limited request re-enqueued");
      throw new RetryableError(`Cohere rate-limited request re-enqueued (attempt ${req.retryCount}/3).`);
    } catch (error) {
      if (error instanceof RetryableError) {
        throw error; // Rethrow RetryableError to continue the flow
      }
      req.log.error({ error }, "Failed to re-enqueue rate-limited Cohere request");
    }
  }

  // If we've already retried 3 times, show the error to the user
  errorPayload.proxy_note = "Too many requests to the Cohere API. Please try again later.";
}

async function handleMoonshotRateLimitError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  // Mark the current key as rate limited
  keyPool.markRateLimited(req.key!);

  // Store the original request attempt count or initialize it
  req.retryCount = (req.retryCount || 0) + 1;

  // Only retry up to 3 times with different keys
  if (req.retryCount <= 3) {
    try {
      // Add a small delay before retrying (2-6 seconds for Moonshot)
      const delayMs = 2000 + Math.floor(Math.random() * 4000);
      await new Promise(resolve => setTimeout(resolve, delayMs));

      // Re-enqueue the request to try with a different key
      await reenqueueRequest(req);
      req.log.info({ attempt: req.retryCount }, "Moonshot rate-limited request re-enqueued");
      throw new RetryableError(`Moonshot rate-limited request re-enqueued (attempt ${req.retryCount}/3).`);
    } catch (error) {
      if (error instanceof RetryableError) {
        throw error; // Rethrow RetryableError to continue the flow
      }
      req.log.error({ error }, "Failed to re-enqueue rate-limited Moonshot request");
    }
  }

  // If we've already retried 3 times, show the error to the user
  errorPayload.proxy_note = "Too many requests to the Moonshot API. Please try again later.";
}
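
// Editor's note: the Cohere and Moonshot handlers above share the same
// jittered-retry shape. A hypothetical helper (not part of this diff; the
// name and signature are illustrative assumptions) could factor it out:
async function reenqueueWithJitter(req: Request, baseMs: number, label: string) {
  keyPool.markRateLimited(req.key!);
  req.retryCount = (req.retryCount || 0) + 1;
  if (req.retryCount <= 3) {
    // Wait baseMs..baseMs+4000 ms, then hand the request back to the queue.
    const delayMs = baseMs + Math.floor(Math.random() * 4000);
    await new Promise((resolve) => setTimeout(resolve, delayMs));
    await reenqueueRequest(req);
    throw new RetryableError(
      `${label} rate-limited request re-enqueued (attempt ${req.retryCount}/3).`
    );
  }
}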

async function handleOpenAIRateLimitError(
  req: Request,
  errorPayload: ProxiedErrorPayload
@@ -481,17 +657,20 @@ async function handleOpenAIRateLimitError(
    case "invalid_request_error": // this is the billing_hard_limit_reached error seen in some cases
      // Billing quota exceeded (key is dead, disable it)
      keyPool.disable(req.key!, "quota");
      errorPayload.proxy_note = `Assigned key's quota has been exceeded. Please try again.`;
      await reenqueueRequest(req);
      throw new RetryableError("OpenAI key quota exceeded, retrying with different key.");
    case "access_terminated":
      // Account banned (key is dead, disable it)
      keyPool.disable(req.key!, "revoked");
      errorPayload.proxy_note = `Assigned key has been banned by OpenAI for policy violations. Please try again.`;
      await reenqueueRequest(req);
      throw new RetryableError("OpenAI key banned for policy violations, retrying with different key.");
    case "billing_not_active":
      // Key valid but account billing is delinquent
      keyPool.disable(req.key!, "quota");
      errorPayload.proxy_note = `Assigned key has been disabled due to delinquent billing. Please try again.`;
      await reenqueueRequest(req);
      throw new RetryableError("OpenAI key billing not active, retrying with different key.");
    case "requests":
    case "tokens":
@@ -505,56 +684,6 @@ async function handleOpenAIRateLimitError(
      // Per-minute request or token rate limit is exceeded, which we can retry
      await reenqueueRequest(req);
      throw new RetryableError("Rate-limited request re-enqueued.");
      // WIP/nonfunctional
      // case "tokens_usage_based":
      //   // Weird new rate limit type that seems limited to preview models.
      //   // Distinct from `tokens` type. Can be per-minute or per-day.
      //
      //   // I've seen reports of this error for 500k tokens/day and 10k tokens/min.
      //   // 10k tokens per minute is problematic, because this is much less than
      //   // GPT4-Turbo's max context size for a single prompt and is effectively a
      //   // cap on the max context size for just that key+model, which the app is
      //   // not able to deal with.
      //
      //   // Similarly if there is a 500k tokens per day limit and 450k tokens have
      //   // been used today, the max context for that key becomes 50k tokens until
      //   // the next day and becomes progressively smaller as more tokens are used.
      //
      //   // To work around these keys we will first retry the request a few times.
      //   // After that we will reject the request, and if it's a per-day limit we
      //   // will also disable the key.
      //
      //   // "Rate limit reached for gpt-4-1106-preview in organization org-xxxxxxxxxxxxxxxxxxx on tokens_usage_based per day: Limit 500000, Used 460000, Requested 50000"
      //   // "Rate limit reached for gpt-4-1106-preview in organization org-xxxxxxxxxxxxxxxxxxx on tokens_usage_based per min: Limit 10000, Requested 40000"
      //
      //   const regex =
      //     /Rate limit reached for .+ in organization .+ on \w+ per (day|min): Limit (\d+)(?:, Used (\d+))?, Requested (\d+)/;
      //   const [, period, limit, used, requested] =
      //     errorPayload.error?.message?.match(regex) || [];
      //
      //   req.log.warn(
      //     { key: req.key?.hash, period, limit, used, requested },
      //     "Received `tokens_usage_based` rate limit error from OpenAI."
      //   );
      //
      //   if (!period || !limit || !requested) {
      //     errorPayload.proxy_note = `Unrecognized rate limit error from OpenAI. (${errorPayload.error?.message})`;
      //     break;
      //   }
      //
      //   if (req.retryCount < 2) {
      //     await reenqueueRequest(req);
      //     throw new RetryableError("Rate-limited request re-enqueued.");
      //   }
      //
      //   if (period === "min") {
      //     errorPayload.proxy_note = `Assigned key can't be used for prompts longer than ${limit} tokens, and no other keys are available right now. Reduce the length of your prompt or try again in a few minutes.`;
      //   } else {
      //     errorPayload.proxy_note = `Assigned key has reached its per-day request limit for this model. Try another model.`;
      //   }
      //
      //   keyPool.markRateLimited(req.key!);
      //   break;
    default:
      errorPayload.proxy_note = `This is likely a temporary error with the API. Try again in a few seconds.`;
      break;
@@ -585,46 +714,186 @@ async function handleGoogleAIBadRequestError(
  errorPayload: ProxiedErrorPayload
) {
  const error = errorPayload.error || {};

  // google changes this shit every few months
  // i don't want to deal with it
  const keyDeadMsgs = [
    /please enable billing/i,
    /API key not valid/i,
    /API key expired/i,
    /pass a valid API/i,
  ];
  const text = JSON.stringify(error);
  if (keyDeadMsgs.some((msg) => text.match(msg))) {
    req.log.warn(
      { key: req.key?.hash, error: text },
      "Google API key appears to be inoperative."
    );
    keyPool.disable(req.key!, "revoked");
    await reenqueueRequest(req);
    throw new RetryableError("Google API key inoperative, retrying with different key.");
  } else {
    req.log.warn(
      { key: req.key?.hash, error: text },
      "Unknown Google API error."
    );
    errorPayload.proxy_note = `Unrecognized error from Google AI.`;
  }

  // Previous status-based handling, kept for reference:
  // const { message, status, details } = error;
  //
  // if (status === "INVALID_ARGUMENT") {
  //   const reason = details?.[0]?.reason;
  //   if (reason === "API_KEY_INVALID") {
  //     req.log.warn(
  //       { key: req.key?.hash, status, reason, msg: error.message },
  //       "Received `API_KEY_INVALID` error from Google AI. Check the configured API key."
  //     );
  //     keyPool.disable(req.key!, "revoked");
  //     errorPayload.proxy_note = `Assigned API key is invalid.`;
  //   }
  // } else if (status === "FAILED_PRECONDITION") {
  //   if (message.match(/please enable billing/i)) {
  //     req.log.warn(
  //       { key: req.key?.hash, status, msg: error.message },
  //       "Cannot use key due to billing restrictions."
  //     );
  //     keyPool.disable(req.key!, "revoked");
  //     errorPayload.proxy_note = `Assigned API key cannot be used.`;
  //   }
  // } else {
  //   req.log.warn(
  //     { key: req.key?.hash, status, msg: error.message },
  //     "Received unexpected 400 error from Google AI."
  //   );
  // }
}

//{"error":{"code":429,"message":"Resource has been exhausted (e.g. check quota).","status":"RESOURCE_EXHAUSTED"}}
async function handleGoogleAIRateLimitError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  const status = errorPayload.error?.status;
  const text = JSON.stringify(errorPayload.error);
  const errorMessage = errorPayload.error?.message?.toLowerCase() || '';

  // Sometimes Google blocks keys by rate limiting them to 0 requests per
  // minute for some indefinite period of time.
  const keyDeadMsgs = [
    /GenerateContentRequestsPerMinutePerProjectPerRegion/i,
    /"quota_limit_value":"0"/i,
  ];

  // Quota exhaustion indicators in error messages
  const quotaExhaustedMsgs = [
    /quota exceeded/i,
    /free tier|free_tier/i,
    /quota limit/i,
  ];

  // If we don't have a key in the request, we can't process rate limits
  if (!req.key) {
    errorPayload.proxy_note = `Rate limit error but no key was found in the request.`;
    return;
  }

  switch (status) {
    case "RESOURCE_EXHAUSTED": {
      // Hard-disabled keys - these are completely blocked
      if (keyDeadMsgs.some((msg) => msg.test(text))) {
        req.log.warn(
          { key: req.key.hash, error: text },
          "Google API key appears to be completely disabled and will be removed from rotation."
        );
        keyPool.disable(req.key, "revoked");
        errorPayload.proxy_note = `Assigned API key cannot be used.`;
        return;
      }

      // Check if this is a quota exhaustion error rather than just a rate limit
      const isQuotaExhausted = quotaExhaustedMsgs.some(
        (pattern) => pattern.test(text) || pattern.test(errorMessage)
      );

      if (isQuotaExhausted && req.body?.model) {
        // Work out the model family for the current request
        const modelName = req.body.model;
        const isPro = modelName.includes('pro');
        const isFlash = modelName.includes('flash');
        const isUltra = modelName.includes('ultra');

        req.log.warn(
          { key: req.key.hash, model: modelName, error: text },
          "Google API key has exhausted its quota for this model family and will be marked as overquota."
        );

        // Work out which model family to remove from the key's rotation
        let familyToRemove: GoogleAIModelFamily | null = null;
        if (isPro) {
          familyToRemove = 'gemini-pro';
          errorPayload.proxy_note = `Assigned API key has exhausted quota for Gemini Pro models.`;
        } else if (isFlash) {
          familyToRemove = 'gemini-flash';
          errorPayload.proxy_note = `Assigned API key has exhausted quota for Gemini Flash models.`;
        } else if (isUltra) {
          familyToRemove = 'gemini-ultra';
          errorPayload.proxy_note = `Assigned API key has exhausted quota for Gemini Ultra models.`;
        } else {
          // If the model family can't be determined, just mark the key as rate limited
          keyPool.markRateLimited(req.key);
          errorPayload.proxy_note = `Assigned API key has exhausted quota but model family couldn't be determined.`;
        }

        // Update the key's modelFamilies if we identified a family to remove
        if (familyToRemove) {
          // Get the current model families, filtering out the one that's over quota
          const updatedFamilies = [...req.key.modelFamilies].filter(f => f !== familyToRemove);

          // Cast the key to GoogleAIKey type to access its specific properties
          const googleKey = req.key as GoogleAIKey;

          // Track which families are over quota for future rechecking
          const overQuotaFamilies = googleKey.overQuotaFamilies || [];
          if (!overQuotaFamilies.includes(familyToRemove)) {
            overQuotaFamilies.push(familyToRemove);
          }

          // Mark the key as over quota but still usable for other model families
          req.log.info(
            { key: req.key.hash, family: familyToRemove },
            "Marking Google AI key as over quota for specific model family"
          );

          // A typed update object that includes only the properties we want to update
          interface GoogleAIPartialUpdate {
            modelFamilies: GoogleAIModelFamily[];
            isOverQuota: boolean;
            overQuotaFamilies: GoogleAIModelFamily[];
          }

          const update: GoogleAIPartialUpdate = {
            modelFamilies: updatedFamilies as GoogleAIModelFamily[],
            isOverQuota: true,
            overQuotaFamilies,
          };

          // Use the standard KeyPool interface; letting KeyPool handle the
          // routing works around the TypeScript issues with service-specific
          // updates.
          const clonedKey = { ...req.key }; // Clone since we'll be modifying it
          keyPool.update(clonedKey, update as any);
        }

        // Re-enqueue with a different key
        await reenqueueRequest(req);
        throw new RetryableError("Quota-exhausted request re-enqueued with a different key.");
      }

      // Standard rate limiting - just mark the key as rate limited temporarily
      req.log.debug({ key: req.key.hash, error: text }, "Google API request rate limited, will retry.");
      keyPool.markRateLimited(req.key);
      await reenqueueRequest(req);
      throw new RetryableError("Rate-limited request re-enqueued.");
    }
    default:
      errorPayload.proxy_note = `Unrecognized rate limit error from Google AI (${status}). Please report this.`;
      break;
@@ -644,10 +913,12 @@ const incrementUsage: ProxyResHandlerWithBody = async (_proxyRes, req) => {
      },
      `Incrementing usage for model`
    );
    // Get the model family for the key usage log; it should have been set by
    // getModelFamilyForRequest earlier.
    const modelFamilyForKeyPool = req.modelFamily!;
    keyPool.incrementUsage(req.key!, modelFamilyForKeyPool, { input: req.promptTokens!, output: req.outputTokens! });
    if (req.user) {
      incrementPromptCount(req.user.token);
      incrementTokenCount(req.user.token, model, req.outboundApi, { input: req.promptTokens!, output: req.outputTokens! });
    }
  }
};
@@ -673,16 +944,24 @@ const countResponseTokens: ProxyResHandlerWithBody = async (
    const service = req.outboundApi;
    const completion = getCompletionFromBody(req, body);
    const tokens = await countTokens({ req, completion, service });

    if (req.service === "openai" || req.service === "azure" || req.service === "deepseek" || req.service === "cohere" || req.service === "qwen") {
      // O1 consumes (a significant amount of) invisible tokens for the chain-
      // of-thought reasoning. We have no way to count these other than to check
      // the response body.
      tokens.reasoning_tokens =
        body.usage?.completion_tokens_details?.reasoning_tokens;
    }

    req.log.debug(
      { service, prevOutputTokens: req.outputTokens, tokens },
      `Counted tokens for completion`
    );
    if (req.tokenizerInfo) {
      req.tokenizerInfo.completion_tokens = tokens;
    }

    req.outputTokens = tokens.token_count + (tokens.reasoning_tokens ?? 0);
  } catch (error) {
    req.log.warn(
      error,
@@ -697,22 +976,30 @@ const trackKeyRateLimit: ProxyResHandlerWithBody = async (proxyRes, req) => {
  keyPool.updateRateLimits(req.key!, proxyRes.headers);
};

const omittedHeaders = new Set<string>([
  // Omit content-encoding because we will always decode the response body
  "content-encoding",
  // Omit transfer-encoding because we are using response.json, which sets a
  // content-length header; that is not valid for chunked responses.
  "transfer-encoding",
  // Don't set cookies from upstream APIs because proxied requests are stateless
  "set-cookie",
  "openai-organization",
  "x-request-id",
  "x-ds-request-id",
  "x-ds-trace-id",
  "cf-ray",
]);

const copyHttpHeaders: ProxyResHandlerWithBody = async (
  proxyRes,
  _req,
  res
) => {
  // Hack: don't copy headers for chunked transfers, since we've already sent
  // them by the time this runs.
  if (_req.isChunkedTransfer) return;

  Object.keys(proxyRes.headers).forEach((key) => {
    if (omittedHeaders.has(key)) return;
    res.setHeader(key, proxyRes.headers[key] as string);
  });
};
@@ -756,6 +1043,6 @@ function getAwsErrorType(header: string | string[] | undefined) {

function assertJsonResponse(body: any): asserts body is Record<string, any> {
  if (typeof body !== "object") {
    throw new Error(`Expected response to be an object, got ${typeof body}`);
  }
}

@@ -72,6 +72,8 @@ const getPromptForRequest = (
  // format.
  switch (req.outboundApi) {
    case "openai":
    case "openai-responses":
      return req.body.messages;
    case "mistral-ai":
      return req.body.messages;
    case "anthropic-chat":
@@ -120,7 +122,7 @@ const flattenMessages = (
  if (isGoogleAIChatPrompt(val)) {
    return val.contents
      .map(({ parts, role }) => {
        const text = parts
          .filter((p) => "text" in p)
          .map((p) => (p as { text: string }).text)
          .join("\n");
        return `${role}: ${text}`;
      })
      .join("\n");

@@ -84,7 +84,8 @@ export class EventAggregator {
  getFinalResponse() {
    switch (this.responseFormat) {
      case "openai":
      case "openai-responses":
      case "google-ai":
        return mergeEventsForOpenAIChat(this.events);
      case "openai-text":
        return mergeEventsForOpenAIText(this.events);

@@ -158,6 +158,8 @@ function getTransformer(
      : mistralAIToOpenAI;
    case "openai-image":
      throw new Error(`SSE transformation not supported for ${responseApi}`);
    case "openai-responses":
      return passthroughToOpenAI;
    default:
      assertNever(responseApi);
  }

@@ -2,7 +2,6 @@ import pino from "pino";
import { Transform, TransformOptions } from "stream";
import { Message } from "@smithy/eventstream-codec";
import { APIFormat } from "../../../../shared/key-management";
import { buildSpoofedSSE } from "../error-generator";
import { BadRequestError, RetryableError } from "../../../../shared/errors";

type SSEStreamAdapterOptions = TransformOptions & {
@@ -20,7 +19,6 @@ type SSEStreamAdapterOptions = TransformOptions & {
 */
export class SSEStreamAdapter extends Transform {
  private readonly isAwsStream;
  private readonly isGoogleStream;
  private api: APIFormat;
  private partialMessage = "";
  private textDecoder = new TextDecoder("utf8");
@@ -30,7 +28,6 @@ export class SSEStreamAdapter extends Transform {
    super({ ...options, objectMode: true });
    this.isAwsStream =
      options?.contentType === "application/vnd.amazon.eventstream";
    this.isGoogleStream = options?.api === "google-ai";
    this.api = options.api;
    this.log = options.logger.child({ module: "sse-stream-adapter" });
  }
@@ -110,44 +107,12 @@ export class SSEStreamAdapter extends Transform {
    }
  }

  /** Processes an incoming array element from the Google AI JSON stream. */
  protected processGoogleObject(data: any): string | null {
    // Sometimes data has fields key and value, sometimes it's just the
    // candidates array.
    const candidates = data.value?.candidates ?? data.candidates ?? [{}];
    try {
      const hasParts = candidates[0].content?.parts?.length > 0;
      if (hasParts) {
        return `data: ${JSON.stringify(data.value ?? data)}`;
      } else {
        this.log.error({ event: data }, "Received bad Google AI event");
        return `data: ${buildSpoofedSSE({
          format: "google-ai",
          title: "Proxy stream error",
          message:
            "The proxy received malformed or unexpected data from Google AI while streaming.",
          obj: data,
          reqId: "proxy-sse-adapter-message",
          model: "",
        })}`;
      }
    } catch (error) {
      error.lastEvent = data;
      this.emit("error", error);
    }
    return null;
  }

  _transform(data: any, _enc: string, callback: (err?: Error | null) => void) {
    try {
      if (this.isAwsStream) {
        // `data` is a Message object
        const message = this.processAwsMessage(data);
        if (message) this.push(message + "\n\n");
      } else if (this.isGoogleStream) {
        // `data` is an element from the Google AI JSON stream
        const message = this.processGoogleObject(data);
        if (message) this.push(message + "\n\n");
      } else {
        // `data` is a string, but possibly only a partial message
        const fullMessages = (this.partialMessage + data).split(

@@ -9,7 +9,7 @@ const log = logger.child({

type GoogleAIStreamEvent = {
  candidates: {
    content?: { parts?: { text: string }[]; role: string };
    finishReason?: "STOP" | "MAX_TOKENS" | "SAFETY" | "RECITATION" | "OTHER";
    index: number;
    tokenCount?: number;
@@ -34,9 +34,15 @@ export const googleAIToOpenAI: StreamingCompletionTransformer = (params) => {
    return { position: -1 };
  }

  const parts = completionEvent.candidates[0].content?.parts || [];
  let content = parts[0]?.text ?? "";

  if (isSafetyStop(completionEvent)) {
    content = `[Proxy Warning] Gemini safety filter triggered: ${JSON.stringify(
      completionEvent.candidates[0].safetyRatings
    )}`;
  }

  // If this is the first chunk, try stripping speaker names from the response
  // e.g. "John: Hello" -> "Hello"
  if (index === 0) {
@@ -60,6 +66,14 @@ export const googleAIToOpenAI: StreamingCompletionTransformer = (params) => {
  return { position: -1, event: newEvent };
};

function isSafetyStop(completion: GoogleAIStreamEvent) {
  const isSafetyStop = ["SAFETY", "OTHER"].includes(
    completion.candidates[0].finishReason ?? ""
  );
  const hasNoContent = completion.candidates[0].content?.parts?.length === 0;
  return isSafetyStop && hasNoContent;
}
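
// Editor's note: an illustrative event (fabricated values) that isSafetyStop
// would treat as a safety stop: finishReason is "SAFETY" and the parts array
// is empty, so the transformer substitutes the [Proxy Warning] text above.
const exampleSafetyStop = {
  candidates: [
    {
      content: { parts: [], role: "model" },
      finishReason: "SAFETY",
      index: 0,
      safetyRatings: [{ category: "HARM_CATEGORY_HARASSMENT", probability: "HIGH" }],
    },
  ],
};
// isSafetyStop(exampleSafetyStop as GoogleAIStreamEvent) === true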

function asCompletion(event: ServerSentEvent): GoogleAIStreamEvent | null {
  try {
    const parsed = JSON.parse(event.data) as GoogleAIStreamEvent;

+62 -53
@@ -1,66 +1,85 @@
import { Request, RequestHandler, Router } from "express";
import { BadRequestError } from "../shared/errors";
import { keyPool } from "../shared/key-management";
import {
  getMistralAIModelFamily,
  MistralAIModelFamily,
  ModelFamily,
} from "../shared/models";
import { logger } from "../logger";
import { config } from "../config";
import { ipLimiter } from "./rate-limit";
import {
  addKey,
  createPreprocessorMiddleware,
  finalizeBody,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";

// Mistral can't settle on a single naming scheme and deprecates models within
// months of releasing them so this list is hard to keep up to date. 2024-07-28
// https://docs.mistral.ai/platform/endpoints
export const KNOWN_MISTRAL_AI_MODELS = [
  /* Premier models */
  // Mistral Large (top-tier reasoning model)
  "mistral-large-latest",
  "mistral-large-2411",
  "mistral-large-2407",
  "mistral-large-2402", // older version

  // Pixtral Large (multimodal/vision model)
  "pixtral-large-latest",
  "pixtral-large-2411",

  // Mistral Saba (language-specialized model)
  "mistral-saba-latest",
  "mistral-saba-2502",

  // Codestral (code model)
  "codestral-latest",
  "codestral-2501",
  "codestral-2405",

  // Ministral models (edge models)
  "ministral-8b-latest",
  "ministral-8b-2410",
  "ministral-3b-latest",
  "ministral-3b-2410",

  // Embedding & Moderation
  "mistral-embed",
  "mistral-embed-2312",
  "mistral-moderation-latest",
  "mistral-moderation-2411",

  /* Free models */
  // Mistral Small (with vision in latest version)
  "mistral-small-latest",
  "mistral-small-2503", // v3.1 with vision
  "mistral-small-2402", // older version
  "magistral-small-latest",

  // Pixtral 12B (vision model)
  "pixtral-12b-latest",
  "pixtral-12b-2409",

  /* Research & Open Models */
  // Mistral Nemo
  "open-mistral-nemo",
  "open-mistral-nemo-2407",

  // Earlier Mixtral & Mistral models
  "open-mistral-7b",
  "open-mixtral-8x7b",
  "open-mistral-8x22b",
  "open-mixtral-8x22b",
  "open-codestral-mamba",
  "mathstral",

  /* Other, too lazy to do it properly now */
  "mistral-medium-latest",
  "mistral-medium-2312",
  "mistral-medium-2505",
  "magistral-medium-latest",
  "mistral-tiny",
  "mistral-tiny-2312",
];
@@ -127,20 +146,10 @@ export function transformMistralTextToMistralChat(textBody: any) {
  };
}

const mistralAIProxy = createQueuedProxyMiddleware({
  target: "https://api.mistral.ai",
  mutations: [addKey, finalizeBody],
  blockingResponseHandler: mistralAIResponseHandler,
});

const mistralAIRouter = Router();

@@ -0,0 +1,219 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { MoonshotKey, keyPool } from "../shared/key-management";
import { isMoonshotModel, isMoonshotVisionModel } from "../shared/api-schemas/moonshot";
import { logger } from "../logger";

const log = logger.child({ module: "proxy", service: "moonshot" });
let modelsCache: any = null;
let modelsCacheTime = 0;

const moonshotResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  res.status(200).json({ ...body, proxy: body.proxy });
};

const getModelsResponse = async () => {
  // Return cache if less than 1 minute old
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return modelsCache;
  }

  try {
    const modelToUse = "moonshot-v1-8k";
    const moonshotKey = keyPool.get(modelToUse, "moonshot") as MoonshotKey;

    if (!moonshotKey || !moonshotKey.key) {
      log.warn("No valid Moonshot key available for model listing");
      throw new Error("No valid Moonshot API key available");
    }

    // Fetch models from the Moonshot API
    const response = await axios.get("https://api.moonshot.cn/v1/models", {
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${moonshotKey.key}`
      },
    });

    if (!response.data || !response.data.data) {
      throw new Error("Unexpected response format from Moonshot API");
    }

    // Format the response to ensure OpenAI compatibility
    const models = {
      object: "list",
      data: response.data.data.map((model: any) => ({
        id: model.id,
        object: "model",
        created: model.created || Math.floor(Date.now() / 1000),
        owned_by: model.owned_by || "moonshot",
        permission: model.permission || [],
        root: model.root || model.id,
        parent: model.parent || null,
      })),
    };

    log.debug({ modelCount: models.data.length }, "Retrieved models from Moonshot API");

    // Cache the response
    modelsCache = models;
    modelsCacheTime = new Date().getTime();
    return models;
  } catch (error) {
    if (error instanceof Error) {
      log.error(
        { errorMessage: error.message, stack: error.stack },
        "Error fetching Moonshot models"
      );
    } else {
      log.error({ error }, "Unknown error fetching Moonshot models");
    }

    // Return a default list of known Moonshot models as a fallback
    return {
      object: "list",
      data: [
        { id: "moonshot-v1-8k", object: "model", created: 1678888000, owned_by: "moonshot" },
        { id: "moonshot-v1-32k", object: "model", created: 1678888000, owned_by: "moonshot" },
        { id: "moonshot-v1-128k", object: "model", created: 1678888000, owned_by: "moonshot" },
      ],
    };
  }
};

const handleModelRequest: RequestHandler = async (_req, res) => {
  try {
    const models = await getModelsResponse();
    res.status(200).json(models);
  } catch (error) {
    if (error instanceof Error) {
      log.error(
        { errorMessage: error.message, stack: error.stack },
        "Error handling model request"
      );
    } else {
      log.error({ error }, "Unknown error handling model request");
    }
    res.status(500).json({ error: "Failed to fetch models" });
  }
};

// Function to handle partial mode for Moonshot
function handlePartialMode(req: Request) {
  if (!process.env.NO_MOONSHOT_PARTIAL && req.body.messages && Array.isArray(req.body.messages)) {
    const msgs = req.body.messages;
    if (msgs.at(-1)?.role !== 'assistant') return;

    let i = msgs.length - 1;
    let content = '';

    while (i >= 0 && msgs[i].role === 'assistant') {
      // Consolidate consecutive assistant messages
      content = msgs[i--].content + content;
    }

    // Replace the consecutive assistant messages with a single message with partial: true
    msgs.splice(i + 1, msgs.length, { role: 'assistant', content, partial: true });
    log.debug("Consolidated assistant messages and enabled partial mode for Moonshot request");
  }
}
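
// Editor's note: an illustrative before/after (fabricated messages). Given
//   [{ role: "user", content: "Hi" },
//    { role: "assistant", content: "Hello, " },
//    { role: "assistant", content: "world" }]
// handlePartialMode rewrites the trailing assistant run into
//   [{ role: "user", content: "Hi" },
//    { role: "assistant", content: "Hello, world", partial: true }]
// so Moonshot continues the assistant turn instead of starting a new one.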

// Function to handle vision model content transformation
function handleVisionContent(req: Request) {
  const model = req.body.model;

  if (isMoonshotVisionModel(model) && req.body.messages) {
    // Ensure vision content is properly formatted
    req.body.messages = req.body.messages.map((msg: any) => {
      if (msg.content && typeof msg.content === 'string') {
        // Keep string content as-is for non-vision requests
        return msg;
      }
      return msg;
    });
  }
}

// Function to count tokens for Moonshot models
function countMoonshotTokens(req: Request) {
  const model = req.body.model;

  if (isMoonshotModel(model)) {
    if (req.promptTokens) {
      log.debug(
        { tokens: req.promptTokens, model },
        "Estimated token count for Moonshot prompt"
      );
    }
  }
}

// Handle rate limit errors for Moonshot
async function handleMoonshotRateLimitError(req: Request, error: any) {
  if (error.response?.status === 429) {
    log.warn({ model: req.body.model }, "Moonshot rate limit hit, rotating key");

    const currentKey = req.key as MoonshotKey;
    keyPool.markRateLimited(currentKey);

    // Try to get a new key
    const newKey = keyPool.get(req.body.model, "moonshot") as MoonshotKey;
    if (newKey.hash !== currentKey.hash) {
      req.key = newKey;
      return true; // Retry with the new key
    }
  }
  return false;
}

const moonshotProxy = createQueuedProxyMiddleware({
  mutations: [addKey, finalizeBody],
  target: "https://api.moonshot.cn",
  blockingResponseHandler: moonshotResponseHandler,
});

const moonshotRouter = Router();

// Chat completions endpoint
moonshotRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "moonshot" },
    { afterTransform: [ handlePartialMode, handleVisionContent, countMoonshotTokens ] }
  ),
  moonshotProxy
);

// Embeddings endpoint
moonshotRouter.post(
  "/v1/embeddings",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "moonshot" },
    { afterTransform: [ countMoonshotTokens ] }
  ),
  moonshotProxy
);

// Models endpoint
moonshotRouter.get("/v1/models", handleModelRequest);

export const moonshot = moonshotRouter;

+123 -37
@@ -1,24 +1,17 @@
import { Request, RequestHandler, Router } from "express";
import { OpenAIImageGenerationResult } from "../shared/file-storage/mirror-generated-image";
import { generateModelList } from "./openai";
import { ipLimiter } from "./rate-limit";
import {
  addKey,
  createPreprocessorMiddleware,
  finalizeBody,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";

const KNOWN_MODELS = ["dall-e-2", "dall-e-3", "gpt-image-1"];

let modelListCache: any = null;
let modelListValid = 0;
@@ -26,7 +19,9 @@ const handleModelRequest: RequestHandler = (_req, res) => {
  if (new Date().getTime() - modelListValid < 1000 * 60) {
    return res.status(200).json(modelListCache);
  }
  const result = generateModelList("openai").filter((m: { id: string }) =>
    KNOWN_MODELS.includes(m.id)
  );
  modelListCache = { object: "list", data: result };
  modelListValid = new Date().getTime();
  res.status(200).json(modelListCache);
@@ -63,27 +58,46 @@ function transformResponseForChat(
  req: Request
): Record<string, any> {
  const prompt = imageBody.data[0].revised_prompt ?? req.body.prompt;
  const isGptImage = req.body.model?.includes("gpt-image") || false;

  const content = imageBody.data
    .map((item) => {
      const { url, b64_json } = item;
      // The gpt-image-1 model always returns b64_json. The format depends on
      // the output_format parameter (defaults to png); for simplicity, assume
      // png if not specified.
      const format = req.body.output_format || "png";

      if (b64_json) {
        return ``;
      } else {
        return ``;
      }
    })
    .join("\n\n");

  // Prepare the usage information - gpt-image-1 includes detailed token usage
  let usage = {
    prompt_tokens: 0,
    completion_tokens: req.outputTokens,
    total_tokens: req.outputTokens,
  };

  // If this is a gpt-image-1 response, it includes detailed usage info
  if (imageBody.usage) {
    usage = {
      prompt_tokens: imageBody.usage.input_tokens || 0,
      completion_tokens: imageBody.usage.output_tokens || 0,
      total_tokens: imageBody.usage.total_tokens || 0,
    };
  }

  return {
    id: req.body.model?.includes("gpt-image") ? "gptimage-" + req.id : "dalle-" + req.id,
    object: "chat.completion",
    created: Date.now(),
    model: req.body.model,
    usage,
    choices: [
      {
        message: { role: "assistant", content },
@@ -94,21 +108,82 @@ function transformResponseForChat(
  };
}

// Filter parameters based on the model being used, to avoid sending
// unsupported parameters upstream.
function filterModelParameters(manager: ProxyReqManager) {
  const req = manager.request;
  const originalBody = req.body;
  const modelName = originalBody?.model || "";

  // Skip if there is no body or it's not an object
  if (!originalBody || typeof originalBody !== 'object') return;

  // Create a shallow copy of the body to filter
  const filteredBody = { ...originalBody };

  // Define the allowed parameters for each model
  if (modelName.includes('dall-e-2')) {
    // DALL-E 2 parameters
    const allowedParams = [
      'model', 'prompt', 'n', 'size', 'response_format', 'user'
    ];

    // Remove any parameter not in the allowed list
    Object.keys(filteredBody).forEach(key => {
      if (!allowedParams.includes(key)) {
        delete filteredBody[key];
      }
    });

    req.log.info({ model: 'dall-e-2', params: Object.keys(filteredBody) }, "Filtered parameters for DALL-E 2");
  } else if (modelName.includes('dall-e-3')) {
    // DALL-E 3 parameters
    const allowedParams = [
      'model', 'prompt', 'n', 'quality', 'size', 'style', 'response_format', 'user'
    ];

    // Remove any parameter not in the allowed list
    Object.keys(filteredBody).forEach(key => {
      if (!allowedParams.includes(key)) {
        delete filteredBody[key];
      }
    });

    req.log.info({ model: 'dall-e-3', params: Object.keys(filteredBody) }, "Filtered parameters for DALL-E 3");
  } else if (modelName.includes('gpt-image')) {
    // Allowed parameters for gpt-image-1
    const allowedParams = [
      'model', 'prompt', 'background', 'moderation', 'n', 'output_compression',
      'output_format', 'quality', 'size', 'user', 'image', 'mask'
    ];

    // Remove any parameter not in the allowed list, especially 'style' which is only for DALL-E 3
    Object.keys(filteredBody).forEach(key => {
      if (!allowedParams.includes(key)) {
        req.log.info({ model: 'gpt-image-1', removedParam: key }, "Removing unsupported parameter for GPT Image");
        delete filteredBody[key];
      }
    });

    req.log.info({ model: 'gpt-image-1', params: Object.keys(filteredBody) }, "Filtered parameters for GPT Image");
  }

  // Use the proper method to update the body
  manager.setBody(filteredBody);
}
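
// Editor's note: an illustrative run (fabricated request body). For a
// gpt-image-1 request like
//   { model: "gpt-image-1", prompt: "a cat", style: "vivid", output_format: "webp" }
// filterModelParameters drops `style` (a DALL-E 3-only parameter) and keeps
// `output_format`, so the upstream API never sees unsupported parameters.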

function replacePath(manager: ProxyReqManager) {
  const req = manager.request;
  const pathname = req.url.split("?")[0];
  req.log.debug({ pathname }, "OpenAI image path filter");
  if (req.path.startsWith("/v1/chat/completions")) {
    manager.setPath("/v1/images/generations");
  }
}

const openaiImagesProxy = createQueuedProxyMiddleware({
  target: "https://api.openai.com",
  mutations: [replacePath, filterModelParameters, addKey, finalizeBody],
  blockingResponseHandler: openaiImagesResponseHandler,
});

const openaiImagesRouter = Router();
@@ -123,6 +198,17 @@ openaiImagesRouter.post(
  }),
  openaiImagesProxy
);
// Add support for the /v1/images/edits endpoint (used by gpt-image-1 for image editing)
openaiImagesRouter.post(
  "/v1/images/edits",
  ipLimiter,
  createPreprocessorMiddleware({
    inApi: "openai-image",
    outApi: "openai-image",
    service: "openai",
  }),
  openaiImagesProxy
);
openaiImagesRouter.post(
  "/v1/chat/completions",
  ipLimiter,

+367
-129
@@ -1,129 +1,81 @@
import { RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { Request, RequestHandler, Router } from "express";
import { config } from "../config";
import { keyPool, OpenAIKey } from "../shared/key-management";
import {
  getOpenAIModelFamily,
  ModelFamily,
  OpenAIModelFamily,
} from "../shared/models";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { BadRequestError } from "../shared/errors";
import { AzureOpenAIKey, keyPool, OpenAIKey } from "../shared/key-management";
import { getOpenAIModelFamily } from "../shared/models";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
  addKey,
  addKeyForEmbeddingsRequest,
  createEmbeddingsPreprocessorMiddleware,
  createOnProxyReqHandler,
  createPreprocessorMiddleware,
  finalizeBody,
  forceModel,
  RequestPreprocessor,
} from "./middleware/request";
import {
  createOnProxyResHandler,
  ProxyResHandlerWithBody,
} from "./middleware/response";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";

// https://platform.openai.com/docs/models/overview
export const KNOWN_OPENAI_MODELS = [
  // GPT4o
  "gpt-4o",
  "gpt-4o-2024-05-13",
  "gpt-4o-2024-08-06",
  // GPT4o Mini
  "gpt-4o-mini",
  "gpt-4o-mini-2024-07-18",
  // GPT4o (ChatGPT)
  "chatgpt-4o-latest",
  // GPT4 Turbo (superseded by GPT4o)
  "gpt-4-turbo",
  "gpt-4-turbo-2024-04-09", // gpt4-turbo stable, with vision
  "gpt-4-turbo-preview", // alias for latest turbo preview
  "gpt-4-0125-preview", // gpt4-turbo preview 2
  "gpt-4-1106-preview", // gpt4-turbo preview 1
  // Launch GPT4
  "gpt-4",
  "gpt-4-0613",
  "gpt-4-0314", // legacy
  // GPT3.5 Turbo (superseded by GPT4o Mini)
  "gpt-3.5-turbo",
  "gpt-3.5-turbo-0125", // latest turbo
  "gpt-3.5-turbo-1106", // older turbo
  // Text Completion
  "gpt-3.5-turbo-instruct",
  "gpt-3.5-turbo-instruct-0914",
  // Embeddings
  "text-embedding-ada-002",
  // Known deprecated models
  "gpt-4-32k", // alias for 0613
  "gpt-4-32k-0314", // EOL 2025-06-06
  "gpt-4-32k-0613", // EOL 2025-06-06
  "gpt-4-vision-preview", // EOL 2024-12-06
  "gpt-4-1106-vision-preview", // EOL 2024-12-06
  "gpt-3.5-turbo-0613", // EOL 2024-09-13
  "gpt-3.5-turbo-0301", // not on the website anymore, maybe unavailable
  "gpt-3.5-turbo-16k", // alias for 0613
  "gpt-3.5-turbo-16k-0613", // EOL 2024-09-13
];

let modelsCache: any = null;
let modelsCacheTime = 0;

export function generateModelList(models = KNOWN_OPENAI_MODELS) {
  // Get available families and snapshots
  let availableFamilies = new Set<OpenAIModelFamily>();
  const availableSnapshots = new Set<string>();
  for (const key of keyPool.list()) {
    if (key.isDisabled || key.service !== "openai") continue;
    const asOpenAIKey = key as OpenAIKey;
    asOpenAIKey.modelFamilies.forEach((f) => availableFamilies.add(f));
    asOpenAIKey.modelSnapshots.forEach((s) => availableSnapshots.add(s));
  }
export function generateModelList(service: "openai" | "azure") {
  const keys = keyPool
    .list()
    .filter((k) => k.service === service && !k.isDisabled) as
    | OpenAIKey[]
    | AzureOpenAIKey[];
  if (keys.length === 0) return [];

  // Remove disabled families
  const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
  availableFamilies = new Set(
    [...availableFamilies].filter((x) => allowed.has(x))
  const allowedModelFamilies = new Set(config.allowedModelFamilies);
  const modelFamilies = new Set(
    keys
      .flatMap((k) => k.modelFamilies)
      .filter((f) => allowedModelFamilies.has(f))
  );

  return models
    .map((id) => ({
      id,
      object: "model",
      created: new Date().getTime(),
      owned_by: "openai",
      permission: [
        {
          id: "modelperm-" + id,
          object: "model_permission",
          created: new Date().getTime(),
          organization: "*",
          group: null,
          is_blocking: false,
        },
      ],
      root: id,
      parent: null,
    }))
    .filter((model) => {
      // First check if the family is available
      const hasFamily = availableFamilies.has(getOpenAIModelFamily(model.id));
      if (!hasFamily) return false;
  const modelIds = new Set(
    keys
      .flatMap((k) => k.modelIds)
      .filter((id) => {
        const allowed = modelFamilies.has(getOpenAIModelFamily(id));
        const known = ["gpt", "o", "dall-e", "chatgpt", "text-embedding", "codex"].some(
          (prefix) => id.startsWith(prefix)
        );
        const isFinetune = id.includes("ft");
        return allowed && known && !isFinetune;
      })
  );

      // Then for snapshots, ensure the specific snapshot is available
      const isSnapshot = model.id.match(/-\d{4}(-preview)?$/);
      if (!isSnapshot) return true;
      return availableSnapshots.has(model.id);
    });
  return Array.from(modelIds).map((id) => ({
    id,
    object: "model",
    created: new Date().getTime(),
    owned_by: service,
    permission: [
      {
        id: "modelperm-" + id,
        object: "model_permission",
        created: new Date().getTime(),
        organization: "*",
        group: null,
        is_blocking: false,
      },
    ],
    root: id,
    parent: null,
  }));
}
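The rewritten generateModelList derives the list from key metadata rather than a hardcoded model array. A sketch of the expected call and result shape, inferred from the mapping above rather than verified against a running proxy:

```ts
// Sketch: result shape follows from the .map() above.
const models = generateModelList("openai");
// models[0] ≈ {
//   id: "gpt-4o",
//   object: "model",
//   created: 1700000000000,   // new Date().getTime() at call time
//   owned_by: "openai",       // the `service` argument
//   permission: [{ id: "modelperm-gpt-4o", object: "model_permission", /* ... */ }],
//   root: "gpt-4o",
//   parent: null,
// }
```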
const handleModelRequest: RequestHandler = (_req, res) => {
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return res.status(200).json(modelsCache);
  }
  const result = generateModelList();

  if (!config.openaiKey) return { object: "list", data: [] };

  const result = generateModelList("openai");

  modelsCache = { object: "list", data: result };
  modelsCacheTime = new Date().getTime();
  res.status(200).json(modelsCache);
@@ -158,16 +110,26 @@ const openaiResponseHandler: ProxyResHandlerWithBody = async (
    throw new Error("Expected body to be an object");
  }

  const interval = (req as any)._keepAliveInterval;
  if (interval) {
    clearInterval(interval);
    res.write(JSON.stringify(body));
    res.end();
    return;
  }

  let newBody = body;
  if (req.outboundApi === "openai-text" && req.inboundApi === "openai") {
    req.log.info("Transforming Turbo-Instruct response to Chat format");
    newBody = transformTurboInstructResponse(body);
  } else if (req.outboundApi === "openai-responses" && req.inboundApi === "openai") {
    req.log.info("Transforming Responses API response to Chat format");
    newBody = transformResponsesApiResponse(body);
  }

  res.status(200).json({ ...newBody, proxy: body.proxy });
};

/** Only used for non-streaming responses. */
function transformTurboInstructResponse(
  turboInstructBody: Record<string, any>
): Record<string, any> {
@@ -185,31 +147,151 @@ function transformTurboInstructResponse(
  return transformed;
}

const openaiProxy = createQueueMiddleware({
  proxyMiddleware: createProxyMiddleware({
    target: "https://api.openai.com",
    changeOrigin: true,
    selfHandleResponse: true,
    logger,
    on: {
      proxyReq: createOnProxyReqHandler({ pipeline: [addKey, finalizeBody] }),
      proxyRes: createOnProxyResHandler([openaiResponseHandler]),
      error: handleProxyError,
    },
  }),
function transformResponsesApiResponse(
  responsesBody: Record<string, any>
): Record<string, any> {
  // If the response is already in chat completion format, return it as is
  if (responsesBody.choices && responsesBody.choices[0]?.message) {
    return responsesBody;
  }

  // Create a compatible format for clients expecting chat completions format
  const transformed: Record<string, any> = {
    id: responsesBody.id || `chatcmpl-${Date.now()}`,
    object: "chat.completion",
    created: responsesBody.created_at || Math.floor(Date.now() / 1000),
    model: responsesBody.model || "o1-pro",
    choices: [],
    usage: responsesBody.usage || {
      prompt_tokens: 0,
      completion_tokens: 0,
      total_tokens: 0
    }
  };

  // Extract content from the Responses API format - multiple possible structures

  // Structure 1: output array with message objects
  if (responsesBody.output && Array.isArray(responsesBody.output)) {
    // Look for a message type in the output array
    let messageOutput = null;
    for (const output of responsesBody.output) {
      if (output.type === "message") {
        messageOutput = output;
        break;
      }
    }

    if (messageOutput) {
      if (messageOutput.content && Array.isArray(messageOutput.content) && messageOutput.content.length > 0) {
        // Handle text content
        let content = "";
        const toolCalls: any[] = [];

        for (const contentItem of messageOutput.content) {
          if (contentItem.type === "output_text") {
            content += contentItem.text;
          } else if (contentItem.type === "tool_calls" && Array.isArray(contentItem.tool_calls)) {
            toolCalls.push(...contentItem.tool_calls);
          }
        }

        const message: Record<string, any> = {
          role: messageOutput.role || "assistant",
          content: content
        };

        if (toolCalls.length > 0) {
          message.tool_calls = toolCalls;
        }

        transformed.choices.push({
          index: 0,
          message,
          finish_reason: "stop"
        });
      } else if (typeof messageOutput.content === 'string') {
        // Simple string content
        transformed.choices.push({
          index: 0,
          message: {
            role: messageOutput.role || "assistant",
            content: messageOutput.content
          },
          finish_reason: "stop"
        });
      }
    }
  }

  // Structure 2: response object with content
  else if (responsesBody.response && responsesBody.response.content) {
    transformed.choices.push({
      index: 0,
      message: {
        role: "assistant",
        content: typeof responsesBody.response.content === 'string'
          ? responsesBody.response.content
          : JSON.stringify(responsesBody.response.content)
      },
      finish_reason: responsesBody.response.finish_reason || "stop"
    });
  }

  // Structure 3: look for 'content' field directly
  else if (responsesBody.content) {
    transformed.choices.push({
      index: 0,
      message: {
        role: "assistant",
        content: typeof responsesBody.content === 'string'
          ? responsesBody.content
          : JSON.stringify(responsesBody.content)
      },
      finish_reason: "stop"
    });
  }

  // If we couldn't extract content, create a basic response
  if (transformed.choices.length === 0) {
    transformed.choices.push({
      index: 0,
      message: {
        role: "assistant",
        content: ""
      },
      finish_reason: "stop"
    });
  }

  // Copy usage information if available
  if (responsesBody.usage) {
    transformed.usage = {
      prompt_tokens: responsesBody.usage.input_tokens || 0,
      completion_tokens: responsesBody.usage.output_tokens || 0,
      total_tokens: responsesBody.usage.total_tokens || 0
    };
  }

  return transformed;
}
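A worked example of the Structure 1 path, using a fabricated Responses-style body rather than a captured API response:

```ts
// Illustrative input only — shapes follow the branches handled above.
const sample = {
  id: "resp_123",
  model: "o1-pro",
  created_at: 1700000000,
  output: [
    { type: "message", role: "assistant",
      content: [{ type: "output_text", text: "Hello!" }] },
  ],
  usage: { input_tokens: 10, output_tokens: 3, total_tokens: 13 },
};
const chat = transformResponsesApiResponse(sample);
// chat.object === "chat.completion"
// chat.choices[0].message.content === "Hello!"
// chat.usage === { prompt_tokens: 10, completion_tokens: 3, total_tokens: 13 }
```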
const openaiProxy = createQueuedProxyMiddleware({
  mutations: [addKey, finalizeBody],
  target: "https://api.openai.com",
  blockingResponseHandler: openaiResponseHandler,
});

const openaiEmbeddingsProxy = createProxyMiddleware({
const openaiEmbeddingsProxy = createQueuedProxyMiddleware({
  mutations: [addKeyForEmbeddingsRequest, finalizeBody],
  target: "https://api.openai.com",
  changeOrigin: true,
  selfHandleResponse: false,
  logger,
  on: {
    proxyReq: createOnProxyReqHandler({
      pipeline: [addKeyForEmbeddingsRequest, finalizeBody],
    }),
    error: handleProxyError,
  },
});

// New proxy middleware for the Responses API
const openaiResponsesProxy = createQueuedProxyMiddleware({
  mutations: [addKey, finalizeBody],
  target: "https://api.openai.com",
  blockingResponseHandler: openaiResponseHandler,
});

const openaiRouter = Router();
@@ -238,17 +320,120 @@ openaiRouter.post(
  ),
  openaiProxy
);

const setupChunkedTransfer: RequestHandler = (req, res, next) => {
  req.log.info("Setting chunked transfer for o1 to prevent Cloudflare timeouts");

  // Check if the user is trying to use streaming with codex-mini models
  if (req.body.model?.startsWith("codex-mini") && req.body.stream === true) {
    return res.status(400).json({
      error: {
        message: "The codex-mini models do not support streaming. Please set 'stream: false' in your request.",
        type: "invalid_request_error",
        param: "stream",
        code: "streaming_not_supported"
      }
    });
  }

  // Only o1 doesn't support streaming
  if (req.body.model === "o1" || req.body.model === "o1-2024-12-17") {
    req.isChunkedTransfer = true;
    res.writeHead(200, {
      'Content-Type': 'application/json',
      'Transfer-Encoding': 'chunked'
    });

    // Higher values are required - otherwise Cloudflare will buffer and not pass
    // the separate chunks, which means that a >100s response will get terminated anyway
    const keepAlive = setInterval(() => {
      res.write(' '.repeat(4096));
    }, 48_000);

    (req as any)._keepAliveInterval = keepAlive;
  }
  next();
};

// Functions to handle model-specific API routing
function shouldUseResponsesApi(model: string): boolean {
  return model === "o1-pro" || model.startsWith("o1-pro-") ||
    model === "o3-pro" || model.startsWith("o3-pro-") ||
    model === "codex-mini-latest" || model.startsWith("codex-mini-");
}

// Preprocessor to redirect requests to the Responses API
const routeToResponsesApi: RequestPreprocessor = (req) => {
  if (shouldUseResponsesApi(req.body.model)) {
    req.log.info(`Routing ${req.body.model} to OpenAI Responses API`);
    req.url = "/v1/responses";
    req.outboundApi = "openai-responses";
  }
};

// General chat completion endpoint. Turbo-instruct is not supported here.
openaiRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware({
    inApi: "openai",
    outApi: "openai",
    service: "openai",
  }),
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "openai" },
    {
      afterTransform: [
        fixupMaxTokens,
        filterGPT5UnsupportedParams,
        routeToResponsesApi
      ]
    }
  ),
  setupChunkedTransfer,
  (req, _res, next) => {
    // Route to the responses endpoint if needed
    if (req.outboundApi === "openai-responses") {
      // Ensure messages is moved to input properly
      req.log.info("Final check for Responses API format in chat completions");
      if (req.body.messages) {
        req.log.info("Moving 'messages' to 'input' for Responses API");
        req.body.input = req.body.messages;
        delete req.body.messages;
      } else if (req.body.input && req.body.input.messages) {
        req.log.info("Reformatting input.messages for Responses API");
        req.body.input = req.body.input.messages;
      }

      return openaiResponsesProxy(req, _res, next);
    }
    next();
  },
  openaiProxy
);

// New endpoint for the OpenAI Responses API
openaiRouter.post(
  "/v1/responses",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai-responses", service: "openai" },
    { afterTransform: [fixupMaxTokens, filterGPT5UnsupportedParams] }
  ),
  // Final check to ensure the body is in the correct format for the Responses API
  (req, _res, next) => {
    req.log.info("Final check for Responses API format");

    // Ensure messages is properly formatted for input
    if (req.body.messages) {
      req.log.info("Moving 'messages' to 'input' for Responses API");
      req.body.input = req.body.messages;
      delete req.body.messages;
    } else if (req.body.input && req.body.input.messages) {
      req.log.info("Reformatting input.messages for Responses API");
      req.body.input = req.body.input.messages;
    }

    next();
  },
  openaiResponsesProxy
);

// Embeddings endpoint.
openaiRouter.post(
  "/v1/embeddings",
@@ -257,4 +442,57 @@ openaiRouter.post(
  openaiEmbeddingsProxy
);

function forceModel(model: string): RequestPreprocessor {
  return (req: Request) => void (req.body.model = model);
}

function fixupMaxTokens(req: Request) {
  // For the Responses API, use max_output_tokens instead of max_completion_tokens
  if (req.outboundApi === "openai-responses") {
    if (!req.body.max_output_tokens) {
      req.body.max_output_tokens = req.body.max_tokens || req.body.max_completion_tokens;
    }
    // Remove the other token params to avoid API errors
    delete req.body.max_tokens;
    delete req.body.max_completion_tokens;

    // Remove other parameters not supported by the Responses API
    const unsupportedParams = ['frequency_penalty', 'presence_penalty'];
    for (const param of unsupportedParams) {
      if (req.body[param] !== undefined) {
        req.log.info(`Removing unsupported parameter for Responses API: ${param}`);
        delete req.body[param];
      }
    }
  } else {
    // Original behavior for other APIs
    if (!req.body.max_completion_tokens) {
      req.body.max_completion_tokens = req.body.max_tokens;
    }
    delete req.body.max_tokens;
  }
}
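The illustrative effect on a Responses-bound body, with fabricated values:

```ts
// before: { model: "o1-pro", max_tokens: 1024, frequency_penalty: 0.5 }
// after fixupMaxTokens (outboundApi === "openai-responses"):
//         { model: "o1-pro", max_output_tokens: 1024 }
// For any other outbound API, max_tokens is instead renamed to
// max_completion_tokens and the penalties are left alone.
```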
// GPT-5, GPT-5-mini, and GPT-5-nano don't support certain parameters.
// Remove them if present to prevent API errors.
function filterGPT5UnsupportedParams(req: Request) {
  const model = req.body.model;

  // Only apply filtering to these specific models (gpt-5-chat-latest supports all params)
  const restrictedModels = /^gpt-5(-mini|-nano)?(-\d{4}-\d{2}-\d{2})?$/;

  if (!restrictedModels.test(model)) {
    return; // Not a restricted model, no filtering needed
  }

  // Remove unsupported parameters if they exist
  const unsupportedParams = ['temperature', 'top_p', 'presence_penalty', 'frequency_penalty'];

  for (const param of unsupportedParams) {
    if (req.body[param] !== undefined) {
      delete req.body[param];
    }
  }
}
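The restricted-model regex in action; these assertions follow directly from the pattern above:

```ts
const restricted = /^gpt-5(-mini|-nano)?(-\d{4}-\d{2}-\d{2})?$/;
console.assert(restricted.test("gpt-5"));                  // filtered
console.assert(restricted.test("gpt-5-mini"));             // filtered
console.assert(restricted.test("gpt-5-nano-2025-08-07"));  // dated snapshot, filtered
console.assert(!restricted.test("gpt-5-chat-latest"));     // not filtered, keeps all params
```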
export const openai = openaiRouter;

+44 -20
@@ -13,6 +13,7 @@
import crypto from "crypto";
import { Handler, Request } from "express";
import { config } from "../config";
import { BadRequestError, TooManyRequestsError } from "../shared/errors";
import { keyPool } from "../shared/key-management";
import {
@@ -23,9 +24,10 @@ import {
import { initializeSseStream } from "../shared/streaming";
import { logger } from "../logger";
import { getUniqueIps } from "./rate-limit";
import { RequestPreprocessor } from "./middleware/request";
import { handleProxyError } from "./middleware/common";
import { ProxyReqMutator, RequestPreprocessor } from "./middleware/request";
import { sendErrorToClient } from "./middleware/response/error-generator";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { classifyErrorAndSend } from "./middleware/common";

const queue: Request[] = [];
const log = logger.child({ module: "request-queue" });
@@ -34,14 +36,12 @@ const log = logger.child({ module: "request-queue" });
const USER_CONCURRENCY_LIMIT = parseInt(
  process.env.USER_CONCURRENCY_LIMIT ?? "1"
);
/** Maximum number of queue slots for Agnai.chat requests. */
const AGNAI_CONCURRENCY_LIMIT = USER_CONCURRENCY_LIMIT * 5;
const MIN_HEARTBEAT_SIZE = parseInt(process.env.MIN_HEARTBEAT_SIZE_B ?? "512");
const MAX_HEARTBEAT_SIZE =
  1024 * parseInt(process.env.MAX_HEARTBEAT_SIZE_KB ?? "1024");
const HEARTBEAT_INTERVAL =
  1000 * parseInt(process.env.HEARTBEAT_INTERVAL_SEC ?? "5");
const LOAD_THRESHOLD = parseFloat(process.env.LOAD_THRESHOLD ?? "50");
const LOAD_THRESHOLD = parseFloat(process.env.LOAD_THRESHOLD ?? "150");
const PAYLOAD_SCALE_FACTOR = parseFloat(
  process.env.PAYLOAD_SCALE_FACTOR ?? "6"
);
@@ -68,9 +68,17 @@ const sharesIdentifierWith = (incoming: Request) => (queued: Request) =>
  getIdentifier(queued) === getIdentifier(incoming);

async function enqueue(req: Request) {
  const enqueuedRequestCount = queue.filter(sharesIdentifierWith(req)).length;
  if (req.socket.destroyed || req.res?.writableEnded) {
    // In rare cases, a request can be disconnected after it is dequeued for a
    // retry, but before it is re-enqueued. In this case we may miss the abort
    // and the request will loop in the queue forever.
    req.log.warn("Attempt to enqueue aborted request.");
    throw new Error("Attempt to enqueue aborted request.");
  }

  if (enqueuedRequestCount >= USER_CONCURRENCY_LIMIT) {
  const enqueuedRequestCount = queue.filter(sharesIdentifierWith(req)).length;
  // Do not apply the concurrency limit to "special" users
  if (enqueuedRequestCount >= USER_CONCURRENCY_LIMIT && req.user?.type !== "special") {
    throw new TooManyRequestsError(
      "Your IP or user token already has another request in the queue."
    );
@@ -140,7 +148,14 @@ export function dequeue(partition: ModelFamily): Request | undefined {
}

const req = modelQueue.reduce((prev, curr) =>
  prev.startTime < curr.startTime ? prev : curr
  prev.startTime +
    config.tokensPunishmentFactor *
      ((prev.promptTokens ?? 0) + (prev.outputTokens ?? 0)) <
  curr.startTime +
    config.tokensPunishmentFactor *
      ((curr.promptTokens ?? 0) + (curr.outputTokens ?? 0))
    ? prev
    : curr
);
queue.splice(queue.indexOf(req), 1);
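The new comparator is equivalent to dequeuing the request with the smallest effective start time, startTime + tokensPunishmentFactor × (promptTokens + outputTokens), so heavy token consumers sink in the queue. A sketch of that priority key:

```ts
// Equivalent priority key (sketch): the queued request with the lowest value wins.
const priority = (r: Request) =>
  r.startTime +
  config.tokensPunishmentFactor * ((r.promptTokens ?? 0) + (r.outputTokens ?? 0));
```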
@@ -307,26 +322,35 @@ export function getQueueLength(partition: ModelFamily | "all" = "all") {
}

export function createQueueMiddleware({
  beforeProxy,
  mutations = [],
  proxyMiddleware,
}: {
  beforeProxy?: RequestPreprocessor;
  mutations?: ProxyReqMutator[];
  proxyMiddleware: Handler;
}): Handler {
  return async (req, res, next) => {
    req.proceed = async () => {
      if (beforeProxy) {
        try {
          // Hack to let us run asynchronous middleware before the
          // http-proxy-middleware handler. This is used to sign AWS requests
          // before they are proxied, as the signing is asynchronous.
          // Unlike RequestPreprocessors, this runs every time the request is
          // dequeued, not just the first time.
          await beforeProxy(req);
        } catch (err) {
          return handleProxyError(err, req, res);
      // Canonicalize the stream field, which is set in a few places and not
      // always consistently.
      req.isStreaming = req.isStreaming || String(req.body.stream) === "true";
      req.body.stream = req.isStreaming;

      try {
        // Just before executing the proxyMiddleware, we will create a
        // ProxyReqManager to track modifications to the request. This allows
        // us to revert those changes if the proxied request fails with a
        // retryable error. That happens in proxyMiddleware's onProxyRes
        // handler.
        const changeManager = new ProxyReqManager(req);
        req.changeManager = changeManager;
        for (const mutator of mutations) {
          await mutator(changeManager);
        }
      } catch (err) {
        // Failure during request preparation is a fatal error.
        return classifyErrorAndSend(err, req, res);
      }

      proxyMiddleware(req, res, next);
    };
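Under the new signature, a mutator receives the ProxyReqManager rather than the raw request. A sketch, using only manager members that appear elsewhere in this diff (request and setPath); any other manager methods would be assumptions:

```ts
// Sketch of a mutator: path changes go through the manager so they can be
// reverted automatically if the proxied request fails with a retryable error.
const forceImagesPath: ProxyReqMutator = (manager) => {
  const req = manager.request;
  if (req.path.startsWith("/v1/chat/completions")) {
    manager.setPath("/v1/images/generations");
  }
};
```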
@@ -0,0 +1,361 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { QwenKey, keyPool } from "../shared/key-management";
import {
  isQwenModel,
  isQwenThinkingModel,
  normalizeMessages,
  isQwen3Model,
  isThinkingVariant,
  isNonThinkingVariant,
  getBaseModelName
} from "../shared/api-schemas/qwen";
import { logger } from "../logger";

const log = logger.child({ module: "proxy", service: "qwen" });
let modelsCache: any = null;
let modelsCacheTime = 0;

const qwenResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  res.status(200).json({ ...body, proxy: body.proxy });
};

const getModelsResponse = async () => {
  // Return the cache if it is less than one minute old
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return modelsCache;
  }

  try {
    // Get a Qwen key directly
    const modelToUse = "qwen-plus"; // Any Qwen model works here - just for key selection
    const qwenKey = keyPool.get(modelToUse, "qwen") as QwenKey;

    if (!qwenKey || !qwenKey.key) {
      log.warn("No valid Qwen key available for model listing");
      throw new Error("No valid Qwen API key available");
    }

    // Fetch models directly from the Qwen API
    const response = await axios.get("https://dashscope-intl.aliyuncs.com/compatible-mode/v1/models", {
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${qwenKey.key}`
      },
    });

    if (!response.data || !response.data.data) {
      throw new Error("Unexpected response format from Qwen API");
    }

    // Extract models
    const models = response.data;

    // Ensure we have all known Qwen models in the list
    const knownQwenModels = [
      "qwen-max",
      "qwen-max-latest",
      "qwen-max-2025-01-25",
      "qwen-plus",
      "qwen-plus-latest",
      "qwen-plus-2025-01-25",
      "qwen-turbo",
      "qwen-turbo-latest",
      "qwen-turbo-2024-11-01",
      "qwen3-235b-a22b",
      "qwen3-32b",
      "qwen3-30b-a3b"
    ];

    // Add the thinking capability flag to models that support it
    if (models.data && Array.isArray(models.data)) {
      // Create a set of existing model IDs for quick lookup
      const existingModelIds = new Set(models.data.map((model: any) => model.id));

      // Filter out base Qwen3 models since we'll add variants instead
      models.data = models.data.filter((model: any) => {
        return !isQwen3Model(model.id) || isThinkingVariant(model.id) || isNonThinkingVariant(model.id);
      });

      // Add any missing models from our known list
      knownQwenModels.forEach(modelId => {
        if (!existingModelIds.has(modelId)) {
          models.data.push({
            id: modelId,
            object: "model",
            created: Date.now(),
            owned_by: "qwen",
            capabilities: isQwenThinkingModel(modelId) ? { thinking: true } : {}
          });
        }
      });

      // Add the thinking capability flag to existing models
      const processedModelIds = new Set();
      const originalModelsData = [...models.data];

      models.data = originalModelsData.flatMap((model: any) => {
        const modelId = model.id;
        processedModelIds.add(modelId);

        // Apply capabilities to all models
        if (isQwenThinkingModel(modelId)) {
          model.capabilities = model.capabilities || {};
          model.capabilities.thinking = true;
        }

        // For Qwen3 models, add thinking and non-thinking variants, but not the original
        if (isQwen3Model(modelId) &&
            !isThinkingVariant(modelId) &&
            !isNonThinkingVariant(modelId)) {

          // Create the thinking variant
          const thinkingModel = {
            id: `${modelId}-thinking`,
            object: "model",
            created: model.created || Date.now(),
            owned_by: model.owned_by || "qwen",
            capabilities: { thinking: true },
            proxy_managed: true,
            display_name: `${model.display_name || modelId} (Thinking Mode)`
          };

          // Create the non-thinking variant
          const nonThinkingModel = {
            id: `${modelId}-nonthinking`,
            object: "model",
            created: model.created || Date.now(),
            owned_by: model.owned_by || "qwen",
            capabilities: { thinking: false },
            proxy_managed: true,
            display_name: `${model.display_name || modelId} (Standard Mode)`
          };

          // Only add variants, not the original model
          return [thinkingModel, nonThinkingModel];
        }

        return [model];
      });
    } else {
      // If the API response didn't include models, create our own list
      models.data = knownQwenModels.flatMap(modelId => {
        // For Qwen3 models, add only thinking and non-thinking variants (not the base model)
        if (isQwen3Model(modelId) &&
            !isThinkingVariant(modelId) &&
            !isNonThinkingVariant(modelId)) {

          return [
            {
              id: `${modelId}-thinking`,
              object: "model",
              created: Date.now(),
              owned_by: "qwen",
              capabilities: { thinking: true },
              proxy_managed: true,
              display_name: `${modelId} (Thinking Mode)`
            },
            {
              id: `${modelId}-nonthinking`,
              object: "model",
              created: Date.now(),
              owned_by: "qwen",
              capabilities: { thinking: false },
              proxy_managed: true,
              display_name: `${modelId} (Standard Mode)`
            }
          ];
        }

        // For non-Qwen3 models, return the base model
        const baseModel = {
          id: modelId,
          object: "model",
          created: Date.now(),
          owned_by: "qwen",
          capabilities: isQwenThinkingModel(modelId) ? { thinking: true } : {}
        };

        return [baseModel];
      });
    }

    log.debug({ modelCount: models.data?.length }, "Retrieved models from Qwen API");

    // Cache the response
    modelsCache = models;
    modelsCacheTime = new Date().getTime();
    return models;
  } catch (error) {
    // Provide detailed logging for better troubleshooting
    if (error instanceof Error) {
      log.error(
        { errorMessage: error.message, stack: error.stack },
        "Error fetching Qwen models"
      );
    } else {
      log.error({ error }, "Unknown error fetching Qwen models");
    }

    // Return an empty list as a fallback
    return {
      object: "list",
      data: [],
    };
  }
};
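The net effect of the variant expansion on one base entry, shown with illustrative shapes rather than captured API output:

```ts
// in:  { id: "qwen3-32b", object: "model", ... }
// out: { id: "qwen3-32b-thinking",    capabilities: { thinking: true },
//        proxy_managed: true, display_name: "qwen3-32b (Thinking Mode)" }
//      { id: "qwen3-32b-nonthinking", capabilities: { thinking: false },
//        proxy_managed: true, display_name: "qwen3-32b (Standard Mode)" }
// Non-Qwen3 models pass through unchanged, gaining only the capability flag.
```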
const handleModelRequest: RequestHandler = async (_req, res) => {
  try {
    const models = await getModelsResponse();
    res.status(200).json(models);
  } catch (error) {
    if (error instanceof Error) {
      log.error(
        { errorMessage: error.message, stack: error.stack },
        "Error handling model request"
      );
    } else {
      log.error({ error }, "Unknown error handling model request");
    }
    res.status(500).json({ error: "Failed to fetch models" });
  }
};

// Function to prepare messages for the Qwen API
function prepareMessages(req: Request) {
  if (req.body.messages && Array.isArray(req.body.messages)) {
    req.body.messages = normalizeMessages(req.body.messages);
  }
}

// Function to handle the thinking capability for Qwen models
function handleThinkingCapability(req: Request) {
  const model = req.body.model;

  // Special handling for our proxy-managed variants
  if (isThinkingVariant(model)) {
    // Set the base model name without the suffix
    req.body.model = getBaseModelName(model);
    // Force enable thinking for the -thinking variant
    req.body.enable_thinking = true;

    // Log the transformation
    log.debug(
      { originalModel: model, transformedModel: req.body.model, enableThinking: true },
      "Transformed request for thinking variant"
    );
    return;
  }

  if (isNonThinkingVariant(model)) {
    // Set the base model name without the suffix
    req.body.model = getBaseModelName(model);
    // Force disable thinking for the -nonthinking variant
    req.body.enable_thinking = false;

    // Log the transformation
    log.debug(
      { originalModel: model, transformedModel: req.body.model, enableThinking: false },
      "Transformed request for non-thinking variant"
    );
    return;
  }

  // For standard models with thinking capability
  if (isQwenThinkingModel(model) && req.body.stream === true) {
    // Only add enable_thinking if it's not already set
    if (req.body.enable_thinking === undefined) {
      req.body.enable_thinking = false; // Default to false, let users explicitly enable it
    }

    // If thinking_budget is provided but enable_thinking is false, enable thinking
    if (req.body.thinking_budget !== undefined && req.body.enable_thinking === false) {
      req.body.enable_thinking = true;
    }
  } else if (isQwenThinkingModel(model) && req.body.stream !== true) {
    // For non-streaming requests with thinking-capable models, always disable thinking
    req.body.enable_thinking = false;
  }
}
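Illustrative request rewrites, with fabricated bodies; these follow directly from the branches above:

```ts
// { model: "qwen3-32b-thinking", stream: true }
//   -> { model: "qwen3-32b", enable_thinking: true, stream: true }
// { model: "qwen3-32b-nonthinking" }
//   -> { model: "qwen3-32b", enable_thinking: false }
// { model: "qwen-plus", stream: false }   // thinking-capable, non-streaming
//   -> { model: "qwen-plus", enable_thinking: false, stream: false }
```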

// Function to remove parameters not supported by Qwen models
function removeUnsupportedParameters(req: Request) {
  // Remove parameters that Qwen doesn't support
  if (req.body.logit_bias !== undefined) {
    delete req.body.logit_bias;
  }

  if (req.body.top_logprobs !== undefined) {
    delete req.body.top_logprobs;
  }

  // Logging for debugging
  if (process.env.NODE_ENV !== 'production') {
    log.debug({ body: req.body }, "Request after parameter cleanup");
  }
}

// Token counting for Qwen models (simplified estimate)
function countQwenTokens(req: Request) {
  const model = req.body.model;

  if (isQwenModel(model)) {
    // Count tokens using prompt tokens (simplified)
    if (req.promptTokens) {
      req.log.debug(
        { tokens: req.promptTokens },
        "Estimated token count for Qwen prompt"
      );
    }
  }
}

const qwenProxy = createQueuedProxyMiddleware({
  mutations: [
    addKey,
    finalizeBody
  ],
  target: "https://dashscope-intl.aliyuncs.com/compatible-mode",
  blockingResponseHandler: qwenResponseHandler,
});

const qwenRouter = Router();

qwenRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "qwen" },
    { afterTransform: [ prepareMessages, handleThinkingCapability, removeUnsupportedParameters, countQwenTokens ] }
  ),
  qwenProxy
);

qwenRouter.post(
  "/v1/embeddings",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "qwen" },
    { afterTransform: [] }
  ),
  qwenProxy
);

qwenRouter.get("/v1/models", handleModelRequest);

export const qwen = qwenRouter;
@@ -10,6 +10,11 @@ import { googleAI } from "./google-ai";
import { mistralAI } from "./mistral-ai";
import { openai } from "./openai";
import { openaiImage } from "./openai-image";
import { deepseek } from "./deepseek";
import { xai } from "./xai";
import { cohere } from "./cohere";
import { qwen } from "./qwen";
import { moonshot } from "./moonshot";
import { sendErrorToClient } from "./middleware/response/error-generator";

const proxyRouter = express.Router();
@@ -49,6 +54,11 @@ proxyRouter.use("/mistral-ai", addV1, mistralAI);
proxyRouter.use("/aws", aws);
proxyRouter.use("/gcp/claude", addV1, gcp);
proxyRouter.use("/azure/openai", addV1, azure);
proxyRouter.use("/deepseek", addV1, deepseek);
proxyRouter.use("/xai", addV1, xai);
proxyRouter.use("/cohere", addV1, cohere);
proxyRouter.use("/qwen", addV1, qwen);
proxyRouter.use("/moonshot", addV1, moonshot);

// Redirect browser requests to the homepage.
proxyRouter.get("*", (req, res, next) => {
@@ -0,0 +1,394 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { XaiKey, keyPool } from "../shared/key-management";
import { isGrokVisionModel, isGrokImageGenModel, isGrokReasoningModel, isGrokReasoningEffortModel, isGrokReasoningContentModel } from "../shared/api-schemas/xai";

let modelsCache: any = null;
let modelsCacheTime = 0;

const xaiResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  // Preserve the original body (including potential reasoning_content) for
  // grok-3-mini models, which support the reasoning feature
  let newBody = body;

  // Check if this is an image generation response (data array with url or b64_json)
  if (body.data && Array.isArray(body.data)) {
    req.log.debug(
      { imageCount: body.data.length },
      "Grok image generation response detected"
    );

    // Transform the image generation response into a chat completion format
    // that SillyTavern can display
    const images = body.data;

    // Create a chat completion style response
    newBody = {
      id: `grok-image-${Date.now()}`,
      object: "chat.completion",
      created: Math.floor(Date.now() / 1000),
      model: req.body.model,
      choices: images.map((image, index) => {
        // Create markdown image content for each generated image
        let content = '';

        // Add the image using a data URL for b64_json
        if (image.b64_json) {
          // If it doesn't start with data:image/, add the prefix
          const imgData = image.b64_json.startsWith('data:image/')
            ? image.b64_json
            : `data:image/jpeg;base64,${image.b64_json}`;

          content = `![generated image](${imgData})`;
        }
        // Fall back to the URL if b64_json isn't available
        else if (image.url) {
          content = `![generated image](${image.url})`;
        }

        return {
          index,
          message: {
            role: "assistant",
            content
          },
          finish_reason: "stop"
        };
      }),
      usage: body.usage || { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 }
    };

    req.log.debug("Transformed image generation response to chat format");
  }
  // Check if this is a chat completion response with choices
  else if (body.choices && Array.isArray(body.choices) && body.choices.length > 0) {
    // Make sure each choice's message is preserved, especially reasoning_content
    // Only grok-3-mini models return reasoning_content
    const model = req.body.model;
    if (isGrokReasoningContentModel(model)) {
      body.choices.forEach(choice => {
        if (choice.message && choice.message.reasoning_content) {
          req.log.debug(
            { reasoning_length: choice.message.reasoning_content.length },
            "Grok reasoning content detected"
          );
        }
      });
    }
  }

  res.status(200).json({ ...newBody, proxy: body.proxy });
};

const getModelsResponse = async () => {
  // Return the cache if it is less than one minute old
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return modelsCache;
  }

  try {
    // Get an XAI key directly using keyPool.get()
    const modelToUse = "grok-3"; // Any XAI model works here - just for key selection
    const xaiKey = keyPool.get(modelToUse, "xai") as XaiKey;

    if (!xaiKey || !xaiKey.key) {
      throw new Error("Failed to get valid XAI key");
    }

    // Fetch models from the XAI API with authorization
    const response = await axios.get("https://api.x.ai/v1/models", {
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${xaiKey.key}`
      },
    });

    // If successful, update the cache
    if (response.data && response.data.data) {
      modelsCache = {
        object: "list",
        data: response.data.data.map((model: any) => ({
          id: model.id,
          object: "model",
          owned_by: "xai",
        })),
      };
    } else {
      throw new Error("Unexpected response format from XAI API");
    }
  } catch (error) {
    console.error("Error fetching XAI models:", error);
    throw error; // No fallback - the error is passed to the caller
  }

  modelsCacheTime = new Date().getTime();
  return modelsCache;
};

const handleModelRequest: RequestHandler = async (_req, res) => {
  try {
    const modelsResponse = await getModelsResponse();
    res.status(200).json(modelsResponse);
  } catch (error) {
    console.error("Error in handleModelRequest:", error);
    res.status(500).json({ error: "Failed to fetch models" });
  }
};

const xaiProxy = createQueuedProxyMiddleware({
  mutations: [addKey, finalizeBody],
  target: "https://api.x.ai",
  blockingResponseHandler: xaiResponseHandler,
});

const xaiRouter = Router();

// Combines all the assistant messages at the end of the context and adds the
// beta 'prefix' option, making prefills work the same way they do for Claude.
function enablePrefill(req: Request) {
  // Allow disabling prefill via the NO_XAI_PREFILL environment variable
  if (process.env.NO_XAI_PREFILL) return;

  // Skip if there are no messages (e.g., for image generation requests)
  if (!req.body.messages || !Array.isArray(req.body.messages)) return;

  const msgs = req.body.messages;
  if (msgs.length === 0 || msgs.at(-1)?.role !== 'assistant') return;

  let i = msgs.length - 1;
  let content = '';

  while (i >= 0 && msgs[i].role === 'assistant') {
    // maybe we should also add a newline between messages? no for now.
    content = msgs[i--].content + content;
  }

  msgs.splice(i + 1, msgs.length, { role: 'assistant', content, prefix: true });
}
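The illustrative merge, with fabricated messages; trailing assistant messages are collapsed into a single prefix message:

```ts
// in:  [{ role: "user", content: "Hi" },
//       { role: "assistant", content: "Sure, " },
//       { role: "assistant", content: "here goes:" }]
// out: [{ role: "user", content: "Hi" },
//       { role: "assistant", content: "Sure, here goes:", prefix: true }]
```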

// Function to redirect image model requests to the image generations endpoint
function redirectImageRequests(req: Request) {
  const model = req.body.model;

  // If this is an image generation model but the endpoint is chat/completions,
  // we need to transform the request to match the image generations endpoint format
  if (isGrokImageGenModel(model) && req.path === "/v1/chat/completions") {
    req.log.info(`Redirecting ${model} request to /v1/images/generations endpoint`);

    // Save the original URL and path for later
    const originalUrl = req.url;
    const originalPath = req.path;

    // Change the request URL and path to the images endpoint
    req.url = req.url.replace("/v1/chat/completions", "/v1/images/generations");
    Object.defineProperty(req, 'path', { value: "/v1/images/generations" });

    // Extract the prompt from the messages if present
    if (req.body.messages && Array.isArray(req.body.messages)) {
      // Find the last user message and use its content as the prompt
      for (let i = req.body.messages.length - 1; i >= 0; i--) {
        const msg = req.body.messages[i];
        if (msg.role === 'user') {
          // Extract text content
          let prompt = "";
          if (typeof msg.content === 'string') {
            prompt = msg.content;
          } else if (Array.isArray(msg.content)) {
            // Collect all text content items
            prompt = msg.content
              .filter((item: any) => item.type === 'text')
              .map((item: any) => item.text)
              .join(" ");
          }

          if (prompt) {
            // Create a new request body for image generation
            req.body = {
              model: model,
              prompt: prompt,
              n: req.body.n || 1,
              response_format: "b64_json", // Always use b64_json for better client compatibility
              user: req.body.user
            };
            req.log.debug({ newBody: req.body }, "Transformed request for image generation");
            break;
          }
        }
      }
    }

    // Log the transformation
    req.log.info(`Request transformed from ${originalUrl} to ${req.url}`);
  }
}
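An illustrative rewrite with a fabricated body: a chat-format request for an image model becomes an image generation request.

```ts
// POST /v1/chat/completions
//   { model: "grok-2-image", messages: [{ role: "user", content: "a red fox" }] }
// becomes POST /v1/images/generations with body
//   { model: "grok-2-image", prompt: "a red fox", n: 1,
//     response_format: "b64_json", user: undefined }
```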

// Function to remove parameters not supported by X.AI/Grok models and handle special cases
function removeUnsupportedParameters(req: Request) {
  const model = req.body.model;

  // Check if this is a reasoning model (grok-3-mini or grok-4-0709)
  const isReasoningModel = isGrokReasoningModel(model);
  const isReasoningEffortModel = isGrokReasoningEffortModel(model);

  if (isReasoningModel) {
    // List of parameters not supported by reasoning models
    const unsupportedParams = [
      'presence_penalty',
      'frequency_penalty',
      'stop' // the stop parameter is not supported by reasoning models
    ];

    for (const param of unsupportedParams) {
      if (req.body[param] !== undefined) {
        req.log.info(`Removing unsupported parameter for reasoning model ${model}: ${param}`);
        delete req.body[param];
      }
    }

    // Handle the reasoning_effort parameter - only supported by grok-3-mini
    if (isReasoningEffortModel) {
      // This is grok-3-mini, handle reasoning_effort
      if (req.body.reasoning_effort) {
        // If reasoning_effort is already present in the request, validate it
        if (!['low', 'medium', 'high'].includes(req.body.reasoning_effort)) {
          req.log.warn(`Invalid reasoning_effort value: ${req.body.reasoning_effort}, removing it`);
          delete req.body.reasoning_effort;
        }
      } else {
        // Default to low reasoning effort if not specified
        req.body.reasoning_effort = 'low';
        req.log.debug(`Setting default reasoning_effort=low for Grok-3-mini model`);
      }
    } else {
      // This is grok-4-0709 or another reasoning model that doesn't support reasoning_effort
      if (req.body.reasoning_effort !== undefined) {
        req.log.info(`Removing unsupported reasoning_effort parameter for model ${model}`);
        delete req.body.reasoning_effort;
      }
    }
  }

  // Special handling for vision models
  if (isGrokVisionModel(model)) {
    req.log.debug(`Detected Grok vision model: ${model}`);

    // Check that messages have the proper format for vision models
    if (req.body.messages && Array.isArray(req.body.messages)) {
      req.body.messages.forEach((msg: { content: string | any[] }) => {
        // If content is a string but the model is vision-capable,
        // convert it to an array with a single text item for consistency
        if (typeof msg.content === 'string') {
          req.log.debug('Converting string content to array format for vision model');
          msg.content = [{ type: 'text', text: msg.content }];
        }
      });
    }
  }

  // Image generation models are handled by the separate endpoint below
}

// Handler for image generation requests
const handleImageGenerationRequest: RequestHandler = async (req, res) => {
  try {
    // Get an XAI key directly for image generation
    const modelToUse = req.body.model || "grok-2-image"; // Default model
    const xaiKey = keyPool.get(modelToUse, "xai") as XaiKey;

    if (!xaiKey || !xaiKey.key) {
      throw new Error("Failed to get valid XAI key for image generation");
    }

    // Forward the request to the XAI API
    const response = await axios.post("https://api.x.ai/v1/images/generations", req.body, {
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${xaiKey.key}`
      },
    });

    // Return the response directly
    res.status(200).json(response.data);
  } catch (error: any) {
    req.log.error({ error }, "Error in image generation request");
    // Pass through the error response if available
    if (error.response && error.response.data) {
      res.status(error.response.status || 500).json(error.response.data);
    } else {
      res.status(500).json({ error: "Failed to generate image", message: error.message });
    }
  }
};

// Token counting for XAI models
function countXaiTokens(req: Request) {
  const model = req.body.model;

  // For vision models, estimate image token usage
  if (isGrokVisionModel(model) && req.body.messages && Array.isArray(req.body.messages)) {
    // Initialize the image count
    let imageCount = 0;

    // Count images in the request
    for (const msg of req.body.messages) {
      if (Array.isArray(msg.content)) {
        const imagesInMessage = msg.content.filter(
          (item: any) => item.type === "image_url"
        ).length;
        imageCount += imagesInMessage;
      }
    }

    // Apply token estimations for images
    // Each image is approximately 1500 tokens based on documentation
    const TOKENS_PER_IMAGE = 1500;
    const imageTokens = imageCount * TOKENS_PER_IMAGE;

    if (imageTokens > 0) {
      req.log.debug(
        { imageCount, tokenEstimate: imageTokens },
        "Estimated token count for Grok vision images"
      );

      // Add the image tokens to the existing token count if available
      if (req.promptTokens) {
        req.promptTokens += imageTokens;
      }
    }
  }
}

xaiRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "xai" },
    { afterTransform: [ redirectImageRequests, enablePrefill, removeUnsupportedParameters, countXaiTokens ] }
  ),
  xaiProxy
);

// Add an endpoint for image generation
xaiRouter.post(
  "/v1/images/generations",
  ipLimiter,
  handleImageGenerationRequest
);

xaiRouter.get("/v1/models", handleModelRequest);

export const xai = xaiRouter;
+9 -2
@@ -23,6 +23,7 @@ import { init as initTokenizers } from "./shared/tokenization";
import { checkOrigin } from "./proxy/check-origin";
import { sendErrorToClient } from "./proxy/middleware/response/error-generator";
import { initializeDatabase, getDatabase } from "./shared/database";
import { initializeFirebase } from "./shared/firebase";

const PORT = config.port;
const BIND_ADDRESS = config.bindAddress;
@@ -91,7 +92,7 @@ app.use("/admin", adminRouter);
app.use((req, _, next) => {
  // For whatever reason, SillyTavern ignores the path a user provides when
  // using Google AI with a reverse proxy. We fix it here.
  if (req.path.startsWith("/v1beta/models/")) {
  if (req.path.match(/^\/v1(alpha|beta)\/models(\/|$)/)) {
    req.url = `${config.proxyEndpointRoute}/google-ai${req.url}`;
    return next();
  }
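The paths the widened matcher now catches, versus the old startsWith check; these assertions follow from the regex above:

```ts
const re = /^\/v1(alpha|beta)\/models(\/|$)/;
console.assert(re.test("/v1beta/models/gemini-pro:generateContent")); // old behavior, kept
console.assert(re.test("/v1alpha/models"));                           // newly matched
console.assert(!re.test("/v1/models"));                               // still untouched
```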
@@ -137,6 +138,12 @@ async function start() {
  logger.info("Checking configs and external dependencies...");
  await assertConfigIsValid();

  if (config.gatekeeperStore.startsWith("firebase")) {
    logger.info("Testing Firebase connection...");
    await initializeFirebase();
    logger.info("Firebase connection successful.");
  }

  keyPool.init();

  await initTokenizers();
@@ -166,7 +173,7 @@ async function start() {
  app.listen(PORT, BIND_ADDRESS, () => {
    logger.info(
      { port: PORT, interface: BIND_ADDRESS },
      "Now listening for connections."
      "Server ready to accept connections."
    );
    registerUncaughtExceptionHandler();
  });
+328 -32
@@ -2,9 +2,14 @@ import { config, listConfig } from "./config";
import {
  AnthropicKey,
  AwsBedrockKey,
  DeepseekKey,
  GcpKey,
  keyPool,
  OpenAIKey,
  XaiKey,
  CohereKey,
  QwenKey,
  MoonshotKey,
} from "./shared/key-management";
import {
  AnthropicModelFamily,
@@ -19,6 +24,11 @@ import {
  MODEL_FAMILY_SERVICE,
  ModelFamily,
  OpenAIModelFamily,
  DeepseekModelFamily,
  XaiModelFamily,
  CohereModelFamily,
  QwenModelFamily,
  MoonshotModelFamily,
} from "./shared/models";
import { getCostSuffix, getTokenCostUsd, prettyTokens } from "./shared/stats";
import { getUniqueIps } from "./proxy/rate-limit";
@@ -27,6 +37,87 @@ import { getEstimatedWaitTime, getQueueLength } from "./proxy/queue";

const CACHE_TTL = 2000;

// Define the preferred order for model families in the service info display.
// This ensures logical grouping (GPT-4 models together, then GPT-4.1, then GPT-5, etc.)
const MODEL_FAMILY_ORDER: ModelFamily[] = [
  // OpenAI models in logical order
  "turbo",
  "gpt4",
  "gpt4-32k",
  "gpt4-turbo",
  "gpt4o",
  "gpt41",
  "gpt41-mini",
  "gpt41-nano",
  "gpt45",
  "gpt5",
  "gpt5-mini",
  "gpt5-nano",
  "gpt5-chat-latest",
  "o1",
  "o1-mini",
  "o1-pro",
  "o3",
  "o3-mini",
  "o3-pro",
  "o4-mini",
  "codex-mini",
  "dall-e",
  "gpt-image",
  // Azure OpenAI models (same order as OpenAI)
  "azure-turbo",
  "azure-gpt4",
  "azure-gpt4-32k",
  "azure-gpt4-turbo",
  "azure-gpt4o",
  "azure-gpt41",
  "azure-gpt41-mini",
  "azure-gpt41-nano",
  "azure-gpt45",
  "azure-gpt5",
  "azure-gpt5-mini",
  "azure-gpt5-nano",
  "azure-gpt5-chat-latest",
  "azure-o1",
  "azure-o1-mini",
  "azure-o1-pro",
  "azure-o3",
  "azure-o3-mini",
  "azure-o3-pro",
  "azure-o4-mini",
  "azure-codex-mini",
  "azure-dall-e",
  "azure-gpt-image",
  // Anthropic models
  "claude",
  "claude-opus",
  // Google AI models
  "gemini-flash",
  "gemini-pro",
  "gemini-ultra",
  // Mistral AI models
  "mistral-tiny",
  "mistral-small",
  "mistral-medium",
  "mistral-large",
  // AWS Bedrock models
  "aws-claude",
  "aws-claude-opus",
  "aws-mistral-tiny",
  "aws-mistral-small",
  "aws-mistral-medium",
  "aws-mistral-large",
  // GCP models
  "gcp-claude",
  "gcp-claude-opus",
  // Other services
  "deepseek",
  "xai",
  "cohere",
  "qwen",
  "moonshot"
];
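The code that consumes this ordering is outside the excerpt; a typical application would be an indexOf comparator, sketched here as an assumption rather than the project's actual sorting code:

```ts
// Hypothetical consumer: sort families by their position in MODEL_FAMILY_ORDER.
const byFamilyOrder = (a: ModelFamily, b: ModelFamily) =>
  MODEL_FAMILY_ORDER.indexOf(a) - MODEL_FAMILY_ORDER.indexOf(b);
// e.g. families.sort(byFamilyOrder) before rendering the service info page.
```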

type KeyPoolKey = ReturnType<typeof keyPool.list>[0];
const keyIsOpenAIKey = (k: KeyPoolKey): k is OpenAIKey =>
  k.service === "openai";
@@ -34,6 +125,16 @@ const keyIsAnthropicKey = (k: KeyPoolKey): k is AnthropicKey =>
  k.service === "anthropic";
const keyIsAwsKey = (k: KeyPoolKey): k is AwsBedrockKey => k.service === "aws";
const keyIsGcpKey = (k: KeyPoolKey): k is GcpKey => k.service === "gcp";
const keyIsDeepseekKey = (k: KeyPoolKey): k is DeepseekKey =>
  k.service === "deepseek";
const keyIsXaiKey = (k: KeyPoolKey): k is XaiKey =>
  k.service === "xai";
const keyIsCohereKey = (k: KeyPoolKey): k is CohereKey =>
  k.service === "cohere";
const keyIsQwenKey = (k: KeyPoolKey): k is QwenKey =>
  k.service === "qwen";
const keyIsMoonshotKey = (k: KeyPoolKey): k is MoonshotKey =>
  k.service === "moonshot";

/** Stats aggregated across all keys for a given service. */
type ServiceAggregate = "keys" | "uncheckedKeys" | "orgs";
@@ -49,19 +150,27 @@ type ModelAggregates = {
  awsClaude2?: number;
  awsSonnet3?: number;
  awsSonnet3_5?: number;
  awsSonnet3_7?: number;
  awsSonnet4?: number;
  awsOpus3?: number;
  awsOpus4?: number;
  awsHaiku: number;
  gcpSonnet?: number;
  gcpSonnet35?: number;
  gcpHaiku?: number;
  queued: number;
  tokens: number;
  inputTokens: number; // Changed from tokens
  outputTokens: number; // Added
  legacyTokens?: number; // Added for migrated totals
};
/** All possible combinations of model family and aggregate type. */
type ModelAggregateKey = `${ModelFamily}__${keyof ModelAggregates}`;

type AllStats = {
  proompts: number;
  tokens: number;
  inputTokens: number; // Changed from tokens
  outputTokens: number; // Added
  legacyTokens?: number; // Added
  tokenCost: number;
} & { [modelFamily in ModelFamily]?: ModelAggregates } & {
  [service in LLMService as `${service}__${ServiceAggregate}`]?: number;
@@ -96,6 +205,8 @@ export type ServiceInfo = {
  uptime: number;
  endpoints: {
    openai?: string;
    deepseek?: string;
    xai?: string;
    anthropic?: string;
    "google-ai"?: string;
    "mistral-ai"?: string;
@@ -116,8 +227,13 @@ export type ServiceInfo = {
  & { [f in AwsBedrockModelFamily]?: AwsInfo }
  & { [f in GcpModelFamily]?: GcpInfo }
  & { [f in AzureOpenAIModelFamily]?: BaseFamilyInfo; }
  & { [f in GoogleAIModelFamily]?: BaseFamilyInfo }
  & { [f in MistralAIModelFamily]?: BaseFamilyInfo };
  & { [f in GoogleAIModelFamily]?: BaseFamilyInfo & { overQuotaKeys?: number } }
  & { [f in MistralAIModelFamily]?: BaseFamilyInfo }
  & { [f in DeepseekModelFamily]?: BaseFamilyInfo }
  & { [f in XaiModelFamily]?: BaseFamilyInfo }
  & { [f in CohereModelFamily]?: BaseFamilyInfo }
  & { [f in QwenModelFamily]?: BaseFamilyInfo }
  & { [f in MoonshotModelFamily]?: BaseFamilyInfo };

// https://stackoverflow.com/a/66661477
// type DeepKeyOf<T> = (
@@ -159,6 +275,21 @@ const SERVICE_ENDPOINTS: { [s in LLMService]: Record<string, string> } = {
    azure: `%BASE%/azure/openai`,
    "azure-image": `%BASE%/azure/openai`,
  },
  deepseek: {
    deepseek: `%BASE%/deepseek`,
  },
  xai: {
    xai: `%BASE%/xai`,
  },
  cohere: {
    cohere: `%BASE%/cohere`,
  },
  qwen: {
    qwen: `%BASE%/qwen`,
  },
  moonshot: {
    moonshot: `%BASE%/moonshot`,
  },
};

const familyStats = new Map<ModelAggregateKey, number>();
@@ -250,11 +381,14 @@ function getEndpoints(baseUrl: string, accessibleFamilies: Set<ModelFamily>) {
type TrafficStats = Pick<ServiceInfo, "proompts" | "tookens" | "proomptersNow">;

function getTrafficStats(): TrafficStats {
  const tokens = serviceStats.get("tokens") || 0;
  const inputTokens = serviceStats.get("inputTokens") || 0;
  const outputTokens = serviceStats.get("outputTokens") || 0;
  // const legacyTokens = serviceStats.get("legacyTokens") || 0; // Optional: include in total if desired
  const totalTokens = inputTokens + outputTokens; // + legacyTokens;
  const tokenCost = serviceStats.get("tokenCost") || 0;
  return {
    proompts: serviceStats.get("proompts") || 0,
    tookens: `${prettyTokens(tokens)}${getCostSuffix(tokenCost)}`,
    tookens: `${prettyTokens(totalTokens)}${getCostSuffix(tokenCost)}`, // Simplified to show aggregate and cost
    ...(config.textModelRateLimit ? { proomptersNow: getUniqueIps() } : {}),
  };
}
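
// Illustrative sketch of the aggregation above: the displayed total is simply
// the sum of the two new counters, so counters recorded as inputTokens = 1200
// and outputTokens = 300 render as a 1500-token "tookens" figure. Legacy
// totals are intentionally excluded unless the commented line is re-enabled.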

@@ -270,16 +404,18 @@ function getServiceModelStats(accessibleFamilies: Set<ModelFamily>) {
    if (!hasKeys) continue;

    serviceInfo[`${service}Keys`] = hasKeys;
    accessibleFamilies.forEach((f) => {
      if (MODEL_FAMILY_SERVICE[f] === service) {
        modelFamilyInfo[f] = getInfoForFamily(f);
      }
    });

    if (service === "openai" && config.checkKeys) {
      serviceInfo.openaiOrgs = getUniqueOpenAIOrgs(keyPool.list());
    }
  }

  // Build model family info in the defined order for logical grouping
  for (const family of MODEL_FAMILY_ORDER) {
    if (accessibleFamilies.has(family)) {
      modelFamilyInfo[family] = getInfoForFamily(family);
    }
  }
  return { serviceInfo, modelFamilyInfo };
}

@@ -309,15 +445,45 @@ function addKeyToAggregates(k: KeyPoolKey) {
  addToService("aws__keys", k.service === "aws" ? 1 : 0);
  addToService("gcp__keys", k.service === "gcp" ? 1 : 0);
  addToService("azure__keys", k.service === "azure" ? 1 : 0);
  addToService("deepseek__keys", k.service === "deepseek" ? 1 : 0);
  addToService("xai__keys", k.service === "xai" ? 1 : 0);
  addToService("cohere__keys", k.service === "cohere" ? 1 : 0);
  addToService("qwen__keys", k.service === "qwen" ? 1 : 0);
  addToService("moonshot__keys", k.service === "moonshot" ? 1 : 0);

  let sumTokens = 0;
  let sumInputTokens = 0;
  let sumOutputTokens = 0;
  let sumLegacyTokens = 0; // Optional
  let sumCost = 0;

  const incrementGenericFamilyStats = (f: ModelFamily) => {
    const tokens = (k as any)[`${f}Tokens`];
    sumTokens += tokens;
    sumCost += getTokenCostUsd(f, tokens);
    addToFamily(`${f}__tokens`, tokens);
    const usage = k.tokenUsage?.[f];
    let familyInputTokens = 0;
    let familyOutputTokens = 0;
    let familyLegacyTokens = 0;

    if (usage) {
      familyInputTokens = usage.input || 0;
      familyOutputTokens = usage.output || 0;
      if (usage.legacy_total && familyInputTokens === 0 && familyOutputTokens === 0) {
        // This is a migrated key with no new usage, use legacy_total as input for cost
        familyLegacyTokens = usage.legacy_total;
        sumCost += getTokenCostUsd(f, usage.legacy_total, 0);
      } else {
        sumCost += getTokenCostUsd(f, familyInputTokens, familyOutputTokens);
      }
    }
    // If no k.tokenUsage[f], tokens are 0, cost is 0.

    sumInputTokens += familyInputTokens;
    sumOutputTokens += familyOutputTokens;
    sumLegacyTokens += familyLegacyTokens; // Optional

    addToFamily(`${f}__inputTokens`, familyInputTokens);
    addToFamily(`${f}__outputTokens`, familyOutputTokens);
    if (familyLegacyTokens > 0) {
      addToFamily(`${f}__legacyTokens`, familyLegacyTokens); // Optional
    }
    addToFamily(`${f}__revoked`, k.isRevoked ? 1 : 0);
    addToFamily(`${f}__active`, k.isDisabled ? 0 : 1);
  };
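
  // Illustrative sketch of the per-family usage shapes handled above (the
  // concrete TokenUsage type is assumed; field names taken from the code):
  //   { input: 5000, output: 1200 }                -> costed as input + output
  //   { input: 0, output: 0, legacy_total: 80000 } -> costed as 80000 input-priced tokens
  //   undefined                                    -> counts and cost remain 0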
@@ -351,10 +517,21 @@ function addKeyToAggregates(k: KeyPoolKey) {
      k.modelIds.forEach((id) => {
        if (id.includes("claude-3-sonnet")) {
          addToFamily(`aws-claude__awsSonnet3`, 1);
          // not ideal but whatever
        } else if (id.includes("claude-3-5-sonnet")) {
          addToFamily(`aws-claude__awsSonnet3_5`, 1);
        } else if (id.includes("claude-3-7-sonnet")) {
          addToFamily(`aws-claude__awsSonnet3_7`, 1);
        } else if (id.includes("claude-3-haiku")) {
          addToFamily(`aws-claude__awsHaiku`, 1);
        } else if (id.includes("sonnet-4")) {
          addToFamily(`aws-claude__awsSonnet4`, 1);
        } else if (id.includes("claude-3-opus")) {
          addToFamily(`aws-claude__awsOpus3`, 1);
          addToFamily(`aws-claude-opus__awsOpus3`, 1);
        } else if (id.includes("opus-4")) {
          addToFamily(`aws-claude__awsOpus4`, 1);
          addToFamily(`aws-claude-opus__awsOpus4`, 1);
        } else if (id.includes("claude-v2")) {
          addToFamily(`aws-claude__awsClaude2`, 1);
        }
@@ -372,25 +549,111 @@ function addKeyToAggregates(k: KeyPoolKey) {
      k.modelFamilies.forEach(incrementGenericFamilyStats);
      // TODO: add modelIds to GcpKey
      break;
    case "deepseek":
      if (!keyIsDeepseekKey(k)) throw new Error("Invalid key type");
      k.modelFamilies.forEach((f) => {
        incrementGenericFamilyStats(f);
        addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
      });
      break;
    case "xai":
      if (!keyIsXaiKey(k)) throw new Error("Invalid key type");
      k.modelFamilies.forEach((f) => {
        incrementGenericFamilyStats(f);
        if ('isOverQuota' in k) {
          addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
        }
      });
      break;
    case "cohere":
      if (!keyIsCohereKey(k)) throw new Error("Invalid key type");
      k.modelFamilies.forEach((f) => {
        incrementGenericFamilyStats(f);
        if ('isOverQuota' in k) {
          addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
        }
      });
      break;
    // These services don't have any additional stats to track.
    case "azure":
    case "google-ai":
    case "mistral-ai":
      k.modelFamilies.forEach(incrementGenericFamilyStats);
      break;
    case "google-ai":
      // Cast to GoogleAIKey to access GoogleAI-specific properties
      const googleKey = k as unknown as { overQuotaFamilies?: string[] };

      // First handle general stats for all model families
      k.modelFamilies.forEach((f) => {
        incrementGenericFamilyStats(f);
      });

      // Create a set of model families that are over quota for this key
      let overQuotaModelFamilies = new Set<string>();

      // Add any model family that's listed in overQuotaFamilies
      if (googleKey.overQuotaFamilies && Array.isArray(googleKey.overQuotaFamilies)) {
        googleKey.overQuotaFamilies.forEach(family => {
          overQuotaModelFamilies.add(family);
        });
      }
      // If key is generally over quota and we don't have specific families, add all families
      else if ('isOverQuota' in k && k.isOverQuota) {
        k.modelFamilies.forEach(family => {
          overQuotaModelFamilies.add(family);
        });
      }

      // Now increment the over-quota counter for each affected family
      // These model families are valid and already defined in the enum
      overQuotaModelFamilies.forEach(family => {
        if (family === 'gemini-pro' || family === 'gemini-flash' || family === 'gemini-ultra') {
          addToFamily(`${family}__overQuota` as any, 1);
        }
      });
      break;
    case "qwen":
      k.modelFamilies.forEach(incrementGenericFamilyStats);
      break;
    case "moonshot":
      k.modelFamilies.forEach(incrementGenericFamilyStats);
      break;
    default:
      assertNever(k.service);
  }

  addToService("tokens", sumTokens);
  addToService("inputTokens", sumInputTokens);
  addToService("outputTokens", sumOutputTokens);
  if (sumLegacyTokens > 0) { // Optional
    addToService("legacyTokens", sumLegacyTokens);
  }
  addToService("tokenCost", sumCost);
}

function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
  const tokens = familyStats.get(`${family}__tokens`) || 0;
  const cost = getTokenCostUsd(family, tokens);
  const inputTokens = familyStats.get(`${family}__inputTokens`) || 0;
  const outputTokens = familyStats.get(`${family}__outputTokens`) || 0;
  const legacyTokens = familyStats.get(`${family}__legacyTokens`) || 0; // Optional

  let cost = 0;
  let displayTokens = 0;
  let usageString = "";

  if (inputTokens > 0 || outputTokens > 0) {
    cost = getTokenCostUsd(family, inputTokens, outputTokens);
    displayTokens = inputTokens + outputTokens;
    usageString = `${prettyTokens(displayTokens)} (In: ${prettyTokens(inputTokens)}, Out: ${prettyTokens(outputTokens)})${getCostSuffix(cost)}`;
  } else if (legacyTokens > 0) {
    // Only show legacy if no new input/output has been recorded for this family aggregate
    cost = getTokenCostUsd(family, legacyTokens, 0); // Cost legacy as all input
    displayTokens = legacyTokens;
    usageString = `${prettyTokens(displayTokens)} tokens (legacy total)${getCostSuffix(cost)}`;
  } else {
    usageString = `${prettyTokens(0)} tokens${getCostSuffix(0)}`;
  }

  let info: BaseFamilyInfo & OpenAIInfo & AnthropicInfo & AwsInfo & GcpInfo = {
    usage: `${prettyTokens(tokens)} tokens${getCostSuffix(cost)}`,
    usage: usageString,
    activeKeys: familyStats.get(`${family}__active`) || 0,
    revokedKeys: familyStats.get(`${family}__revoked`) || 0,
  };
@@ -418,25 +681,40 @@ function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
      break;
    case "aws":
      if (family === "aws-claude") {
        // Original behavior: get logged count from the same family
        const logged = familyStats.get(`${family}__awsLogged`) || 0;
        const variants = new Set<string>();
        if (familyStats.get(`${family}__awsClaude2`) || 0)
          variants.add("claude2");
        if (familyStats.get(`${family}__awsSonnet3`) || 0)
          variants.add("sonnet3");
        if (familyStats.get(`${family}__awsSonnet3_5`) || 0)
          variants.add("sonnet3.5");
        if (familyStats.get(`${family}__awsHaiku`) || 0)
          variants.add("haiku");
        info.enabledVariants = variants.size
          ? `${Array.from(variants).join(",")}`
          : undefined;
        if (familyStats.get(`${family}__awsClaude2`) || 0) variants.add("claude2");
        if (familyStats.get(`${family}__awsSonnet3`) || 0) variants.add("sonnet3");
        if (familyStats.get(`${family}__awsSonnet3_5`) || 0) variants.add("sonnet3.5");
        if (familyStats.get(`${family}__awsSonnet3_7`) || 0) variants.add("sonnet3.7");
        if (familyStats.get(`${family}__awsHaiku`) || 0) variants.add("haiku");
        if (familyStats.get(`${family}__awsSonnet4`) || 0) variants.add("sonnet4");

        info.enabledVariants = variants.size ? Array.from(variants).join(",") : undefined;

        if (logged > 0) {
          info.privacy = config.allowAwsLogging
            ? `AWS logging verification inactive. Prompts could be logged.`
            : `${logged} active keys are potentially logged and can't be used. Set ALLOW_AWS_LOGGING=true to override.`;
        }
      } else if (family === "aws-claude-opus") {
        // Get logging info from aws-claude family since that's where it's collected
        const awsLogged = familyStats.get(`aws-claude__awsLogged`) || 0;
        const variants = new Set<string>();
        if (familyStats.get(`${family}__awsOpus3`) || 0) variants.add("opus3");
        if (familyStats.get(`${family}__awsOpus4`) || 0) variants.add("opus4");

        info.enabledVariants = variants.size ? Array.from(variants).join(",") : undefined;

        // Show privacy warning for Opus if there are active Opus keys AND some AWS keys are logged
        if (awsLogged > 0 && info.activeKeys > 0) {
          info.privacy = config.allowAwsLogging
            ? `AWS logging verification inactive. Prompts could be logged.`
            : `Some AWS keys are potentially logged. Set ALLOW_AWS_LOGGING=true to override.`;
        }
      }
      // TODO: Consider if aws-mistral-* families need similar enabledVariant listings
      break;
    case "gcp":
      if (family === "gcp-claude") {
@@ -444,6 +722,24 @@ function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
        info.enabledVariants = "not implemented";
      }
      break;
    case "deepseek":
      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
      break;
    case "xai":
      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
      break;
    case "cohere":
      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
      break;
    case "google-ai":
      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
      break;
    case "qwen":
      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
      break;
    case "moonshot":
      info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
      break;
  }
}
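
// Illustrative sketch of the three usage-string shapes getInfoForFamily now
// produces (numbers hypothetical; prettyTokens output shown schematically):
//   new counters:  "1.5m (In: 1.2m, Out: 300k)" plus cost suffix
//   legacy only:   "80k tokens (legacy total)" plus cost suffix
//   no usage:      "0 tokens"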

@@ -19,6 +19,13 @@ const AnthropicV1BaseSchema = z
    top_k: z.coerce.number().optional(),
    top_p: z.coerce.number().optional(),
    metadata: z.object({ user_id: z.string().optional() }).optional(),
    tools: z.array(z.any()).optional(),
    tool_choice: z.any().optional(),
    service_tier: z.enum(["auto", "standard_only"]).optional(),
    cache_control: z.object({
      type: z.literal("ephemeral"),
      ttl: z.enum(["5m", "1h"]).optional()
    }).optional(),
  })
  .strip();
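
// Illustrative sketch (required base fields elided): request fragments the new
// optional fields accept; unknown keys are dropped by .strip().
const exampleBaseFields = {
  service_tier: "auto" as const,
  cache_control: { type: "ephemeral" as const, ttl: "5m" as const },
};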

@@ -33,16 +40,35 @@ export const AnthropicV1TextSchema = AnthropicV1BaseSchema.merge(
  })
);

const AnthropicV1BaseContentSchema = z.union([
  z.object({ type: z.literal("text"), text: z.string() }),
  z.object({
    type: z.literal("image"),
    source: z.object({
      type: z.literal("base64"),
      media_type: z.string().max(100),
      data: z.string(),
    }),
  })
]);

const AnthropicV1MessageMultimodalContentSchema = z.array(
  z.union([
    z.object({ type: z.literal("text"), text: z.string() }),
    AnthropicV1BaseContentSchema,
    z.object({
      type: z.literal("image"),
      source: z.object({
        type: z.literal("base64"),
        media_type: z.string().max(100),
        data: z.string(),
      }),
      type: z.literal("tool_use"),
      id: z.string(),
      name: z.string(),
      input: z.object({}).passthrough(),
    }),
    z.object({
      type: z.literal("tool_result"),
      tool_use_id: z.string(),
      is_error: z.boolean().optional(),
      content: z.union([
        z.string(),
        z.array(AnthropicV1BaseContentSchema)
      ]).optional(),
    }),
  ])
);
@@ -69,6 +95,10 @@ export const AnthropicV1MessagesSchema = AnthropicV1BaseSchema.merge(
        z.array(z.object({ type: z.literal("text"), text: z.string() })),
      ])
      .optional(),
    thinking: z.object({
      type: z.literal("enabled"),
      budget_tokens: z.number().min(1024),
    }).optional(),
  })
);
export type AnthropicChatMessage = z.infer<
@@ -82,7 +112,7 @@ function openAIMessagesToClaudeTextPrompt(messages: OpenAIChatMessage[]) {
    let role: string = m.role;
    if (role === "assistant") {
      role = "Assistant";
    } else if (role === "system") {
    } else if (role === "system" || role === "developer") {
      role = "System";
    } else if (role === "user") {
      role = "Human";
@@ -109,8 +139,10 @@ export const transformOpenAIToAnthropicChat: APIFormatTransformer<
    );
    throw result.error;
  }

  req.headers["anthropic-version"] = "2023-06-01";
  if (result.data.max_tokens > 8192) {
    result.data.max_tokens = 4096;
  }

  const { messages, ...rest } = result.data;
  const { messages: newMessages, system } =
@@ -146,8 +178,6 @@ export const transformOpenAIToAnthropicText: APIFormatTransformer<
    throw result.error;
  }

  req.headers["anthropic-version"] = "2023-06-01";

  const { messages, ...rest } = result.data;
  const prompt = openAIMessagesToClaudeTextPrompt(messages);

@@ -192,8 +222,6 @@ export const transformAnthropicTextToAnthropicChat: APIFormatTransformer<
    throw result.error;
  }

  req.headers["anthropic-version"] = "2023-06-01";

  const { model, max_tokens_to_sample, prompt, ...rest } = result.data;
  validateAnthropicTextPrompt(prompt);

@@ -371,7 +399,7 @@ function openAIMessagesToClaudeChatPrompt(messages: OpenAIChatMessage[]): {
  // Here we will lose the original name if it was a system message, but that
  // is generally okay because the system message is usually a prompt and not
  // a character in the chat.
  const name = msg.role === "system" ? "System" : msg.name?.trim();
  const name = (msg.role === "system" || msg.role === "developer") ? "System" : msg.name?.trim();
  const content = convertOpenAIContent(msg.content);

  // Prepend the display name to the first text content in the current message
@@ -401,8 +429,8 @@ function openAIMessagesToClaudeChatPrompt(messages: OpenAIChatMessage[]): {

function isSystemOpenAIRole(
  role: OpenAIChatMessage["role"]
): role is "system" | "function" | "tool" {
  return ["system", "function", "tool"].includes(role);
): role is "developer" | "system" | "function" | "tool" {
  return ["developer", "system", "function", "tool"].includes(role);
}

function getFirstTextContent(content: OpenAIChatMessage["content"]) {
@@ -445,9 +473,25 @@ function convertOpenAIContent(
  });
}

export function containsImageContent(messages: AnthropicChatMessage[]) {
  return messages.some(
    ({ content }) =>
      typeof content !== "string" && content.some((c) => c.type === "image")
  );
export function containsImageContent(messages: AnthropicChatMessage[]): boolean {
  const isImage = (item: any) => item?.type === 'image';

  return messages.some(msg => {
    if (typeof msg.content === 'string') return false;

    return msg.content.some(item => {
      if (isImage(item)) return true;

      if (item.type === 'tool_result') {
        const content = item.content;
        if (!content) return false;

        if (typeof content === 'string') return false;
        if (Array.isArray(content)) return content.some(isImage);
        return isImage(content);
      }

      return false;
    });
  });
}
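
// Illustrative usage sketch: an image nested inside a tool_result block now
// counts as image content, which a top-level-only check would miss.
const exampleHasImage = containsImageContent([
  {
    role: "user",
    content: [
      {
        type: "tool_result",
        tool_use_id: "toolu_example",
        content: [
          { type: "image", source: { type: "base64", media_type: "image/png", data: "<base64>" } },
        ],
      },
    ],
  } as AnthropicChatMessage,
]); // -> true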

@@ -0,0 +1,69 @@
import { z } from "zod";
import { OPENAI_OUTPUT_MAX } from "./openai";

/**
 * Helper function to check if a model is from Cohere
 */
export function isCohereModel(model: string): boolean {
  // Cohere's command model family
  return model.includes("command") || model.includes("cohere");
}

// Basic chat message schema
const CohereChatMessageSchema = z.object({
  role: z.enum(["user", "assistant", "system", "developer"]),
  content: z.string().nullable(),
  name: z.string().optional(),
});

const CohereMessagesSchema = z.array(CohereChatMessageSchema);

// Schema for Cohere chat completions
export const CohereV1ChatCompletionsSchema = z.object({
  model: z.string(),
  messages: CohereMessagesSchema,
  temperature: z.number().optional().default(1),
  top_p: z.number().optional().default(1),
  max_tokens: z.coerce
    .number()
    .int()
    .nullish()
    .transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
  stream: z.boolean().optional().default(false),
  stop: z
    .union([z.string(), z.array(z.string())])
    .optional()
    .default([])
    .transform((v) => (Array.isArray(v) ? v : [v])),
  seed: z.number().int().min(0).optional(),
  response_format: z
    .object({
      type: z.enum(["text", "json_object"]),
      schema: z.any().optional()
    })
    .optional(),
  // Structured output with schema
  tools: z.array(z.any()).optional(),
  frequency_penalty: z.number().optional().default(0),
  presence_penalty: z.number().optional().default(0),
});

// Schema for Cohere embeddings
export const CohereV1EmbeddingsSchema = z.object({
  model: z.string(),
  input: z.union([z.string(), z.array(z.string())]),
  encoding_format: z.enum(["float", "base64"]).optional()
});

// Helper function to convert between different message formats if needed
export function normalizeMessages(messages: any[]): any[] {
  // From documentation, Cohere supports roles: developer, user, assistant
  // The 'developer' role is equivalent to 'system' in OpenAI API
  return messages.map((msg) => {
    // Convert system role to developer role for Cohere compatibility
    if (msg.role === "system") {
      return { ...msg, role: "developer" };
    }
    return msg;
  });
}
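
// Illustrative usage sketch:
//   normalizeMessages([{ role: "system", content: "Be terse." }])
//   -> [{ role: "developer", content: "Be terse." }]
// Other roles pass through unchanged.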

@@ -5,33 +5,92 @@ import {
} from "./openai";
import { APIFormatTransformer } from "./index";

const TextPartSchema = z.object({
  text: z.string(),
  thought: z.boolean().optional()
});

const InlineDataPartSchema = z.object({
  inlineData: z.object({
    mimeType: z.string(),
    data: z.string(),
  }),
});

const PartSchema = z.union([TextPartSchema, InlineDataPartSchema]);

const GoogleAIV1ContentSchema = z.object({
  parts: z.array(z.object({ text: z.string() })), // TODO: add other media types
  parts: z
    .union([PartSchema, z.array(PartSchema)])
    .transform((val) => (Array.isArray(val) ? val : [val])),
  role: z.enum(["user", "model"]).optional(),
});

// https://developers.generativeai.google/api/rest/generativelanguage/models/generateContent

const SafetySettingsSchema = z
  .array(
    z.object({
      category: z.enum([
        "HARM_CATEGORY_HARASSMENT",
        "HARM_CATEGORY_HATE_SPEECH",
        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "HARM_CATEGORY_DANGEROUS_CONTENT",
        "HARM_CATEGORY_CIVIC_INTEGRITY",
      ]),
      threshold: z.enum([
        "OFF",
        "BLOCK_NONE",
        "BLOCK_ONLY_HIGH",
        "BLOCK_MEDIUM_AND_ABOVE",
        "BLOCK_LOW_AND_ABOVE",
        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
      ]),
    })
  )
  .optional();

const GoogleSearchToolSchema = z.object({
  googleSearch: z.object({}),
});

// Corrected: Directly assign the schema since there's only one tool type for now
const ToolSchema = GoogleSearchToolSchema;

export const GoogleAIV1GenerateContentSchema = z
  .object({
    model: z.string().max(100), //actually specified in path but we need it for the router
    stream: z.boolean().optional().default(false), // also used for router
    model: z.string().max(100),
    stream: z.boolean().optional().default(false),
    contents: z.array(GoogleAIV1ContentSchema),
    tools: z.array(z.object({})).max(0).optional(),
    safetySettings: z.array(z.object({})).optional(),
    tools: z.array(ToolSchema).optional(), // Uses the corrected ToolSchema
    safetySettings: SafetySettingsSchema,
    systemInstruction: GoogleAIV1ContentSchema.optional(),
    generationConfig: z.object({
      temperature: z.number().optional(),
      maxOutputTokens: z.coerce
        .number()
        .int()
        .optional()
        .default(16)
        .transform((v) => Math.min(v, 4096)), // TODO: Add config
      candidateCount: z.literal(1).optional(),
      topP: z.number().optional(),
      topK: z.number().optional(),
      stopSequences: z.array(z.string().max(500)).max(5).optional(),
    }).default({}),
    system_instruction: GoogleAIV1ContentSchema.optional(),
    generationConfig: z
      .object({
        temperature: z.number().min(0).max(2).optional(),
        maxOutputTokens: z.coerce
          .number()
          .int()
          .optional()
          .default(16)
          .transform((v) => Math.min(v, 65536)),
        candidateCount: z.literal(1).optional(),
        topP: z.number().min(0).max(1).optional(),
        topK: z.number().min(0).max(500).optional(),
        stopSequences: z.array(z.string().max(500)).max(5).optional(),
        seed: z.number().int().optional(),
        frequencyPenalty: z.number().optional().default(0),
        presencePenalty: z.number().optional().default(0),
        thinkingConfig: z.object({
          includeThoughts: z.boolean().optional(),
          thinkingBudget: z.union([
            z.literal("auto"),
            z.number().int()
          ]).optional()
        }).optional(),
        responseModalities: z.any().optional(), // responseModalities: z.array(z.enum(["TEXT"])).optional()
      })
      .default({}),
  })
  .strip();
export type GoogleAIChatMessage = z.infer<
@@ -55,15 +114,11 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
  }

  const { messages, ...rest } = result.data;

  const foundNames = new Set<string>();
  const contents = messages
    .map((m) => {
      const role = m.role === "assistant" ? "model" : "user";
      // Detects character names so we can set stop sequences for them as Gemini
      // is prone to continuing as the next character.
      // If names are not available, we'll still try to prefix the message
      // with generic names so we can set stops for them but they don't work
      // as well as real names.
      const text = flattenOpenAIMessageContent(m.content);
      const propName = m.name?.trim();
      const textName =
@@ -73,12 +128,6 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<

      foundNames.add(name);

      // Prefixing messages with their character name seems to help avoid
      // Gemini trying to continue as the next character, or at the very least
      // ensures it will hit the stop sequence. Otherwise it will start a new
      // paragraph and switch perspectives.
      // The response will be very likely to include this prefix so frontends
      // will need to strip it out.
      const textPrefix = textName ? "" : `${name}: `;
      return {
        parts: [{ text: textPrefix + text }],
@@ -87,7 +136,7 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
    })
    .reduce<GoogleAIChatMessage[]>((acc, msg) => {
      const last = acc[acc.length - 1];
      if (last?.role === msg.role) {
      if (last?.role === msg.role && 'text' in last.parts[0] && 'text' in msg.parts[0]) {
        last.parts[0].text += "\n\n" + msg.parts[0].text;
      } else {
        acc.push(msg);
@@ -103,23 +152,52 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
  stops.push(...Array.from(foundNames).map((name) => `\n${name}:`));
  stops = [...new Set(stops)].slice(0, 5);

  let tools: z.infer<typeof ToolSchema>[] | undefined = undefined;
  let responseModalities: string[] | undefined = undefined;

  if (req.body.use_google_search === true) {
    req.log.info("Google Search tool requested.");
    tools = [{ googleSearch: {} }];
    responseModalities = ["TEXT"];
  }

  let thinkingConfig = undefined;
  if (body.generationConfig?.thinkingConfig || body.thinkingConfig) {
    thinkingConfig = body.generationConfig?.thinkingConfig || body.thinkingConfig;
  }

  return {
    model: req.body.model,
    stream: rest.stream,
    contents,
    tools: [],
    tools: tools,
    generationConfig: {
      maxOutputTokens: rest.max_tokens,
      stopSequences: stops,
      topP: rest.top_p,
      topK: 40, // openai schema doesn't have this, google ai defaults to 40
      topK: 40,
      temperature: rest.temperature,
      seed: rest.seed,
      frequencyPenalty: rest.frequency_penalty,
      presencePenalty: rest.presence_penalty,
      responseModalities: responseModalities,
      ...(thinkingConfig ? { thinkingConfig } : {})
    },
    safetySettings: [
      { category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_NONE" },
      { category: "HARM_CATEGORY_HATE_SPEECH", threshold: "BLOCK_NONE" },
      { category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold: "BLOCK_NONE" },
      { category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "BLOCK_NONE" },
      { category: "HARM_CATEGORY_CIVIC_INTEGRITY", threshold: "BLOCK_NONE" },
    ],
    ...(req.body.system_instruction && { system_instruction: req.body.system_instruction }),
    ...(req.body.systemInstruction && { systemInstruction: req.body.systemInstruction }),
  };
};

export function containsImageContent(contents: GoogleAIChatMessage[]): boolean {
  return contents.some(content => {
    const parts = Array.isArray(content.parts) ? content.parts : [content.parts];
    return parts.some(part => 'inlineData' in part);
  });
}
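
// Illustrative usage sketch: works whether `parts` arrived as a single part or
// an array, since the check normalizes to an array first.
containsImageContent([
  { role: "user", parts: [{ inlineData: { mimeType: "image/png", data: "<base64>" } }] },
]); // -> true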

@@ -17,6 +17,10 @@ import {
  OpenAIV1ImagesGenerationSchema,
  transformOpenAIToOpenAIImage,
} from "./openai-image";
import {
  OpenAIV1ResponsesSchema,
  transformOpenAIToOpenAIResponses,
} from "./openai-responses";
import {
  GoogleAIV1GenerateContentSchema,
  transformOpenAIToGoogleAI,
@@ -52,6 +56,7 @@ export const API_REQUEST_TRANSFORMERS: TransformerMap = {
  "openai->anthropic-text": transformOpenAIToAnthropicText,
  "openai->openai-text": transformOpenAIToOpenAIText,
  "openai->openai-image": transformOpenAIToOpenAIImage,
  "openai->openai-responses": transformOpenAIToOpenAIResponses,
  "openai->google-ai": transformOpenAIToGoogleAI,
  "mistral-ai->mistral-text": transformMistralChatToText,
};
@@ -62,6 +67,7 @@ export const API_REQUEST_VALIDATORS: Record<APIFormat, z.ZodSchema<any>> = {
  openai: OpenAIV1ChatCompletionSchema,
  "openai-text": OpenAIV1TextCompletionSchema,
  "openai-image": OpenAIV1ImagesGenerationSchema,
  "openai-responses": OpenAIV1ResponsesSchema,
  "google-ai": GoogleAIV1GenerateContentSchema,
  "mistral-ai": MistralAIV1ChatCompletionsSchema,
  "mistral-text": MistralAIV1TextCompletionsSchema,

@@ -4,9 +4,61 @@ import { Template } from "@huggingface/jinja";
import { APIFormatTransformer } from "./index";
import { logger } from "../../logger";

// Define the content types for multimodal messages
export const TextContentSchema = z.object({
  type: z.literal("text"),
  text: z.string()
});

export const ImageUrlContentSchema = z.object({
  type: z.literal("image_url"),
  image_url: z.union([
    // URL format (https://...)
    z.string().url(),
    // Base64 format (data:image/jpeg;base64,...)
    z.string().regex(/^data:image\/(jpeg|png|gif|webp);base64,/),
    // Object format (might contain detail or url properties)
    z.record(z.any()),
    // Allow any string for maximum compatibility
    z.string()
  ])
});

export const ContentItemSchema = z.union([TextContentSchema, ImageUrlContentSchema]);

// Export types for the content schemas
export type TextContent = z.infer<typeof TextContentSchema>;
export type ImageUrlContent = z.infer<typeof ImageUrlContentSchema>;
export type ContentItem = z.infer<typeof ContentItemSchema>;

// List of Mistral models with vision capabilities
export const MISTRAL_VISION_MODELS = [
  "pixtral-12b-2409",
  "pixtral-12b-latest",
  "pixtral-large-2411",
  "pixtral-large-latest",
  "mistral-small-2503",
  "mistral-small-latest",
  "mistral-medium-latest",
  "mistral-medium-2505"
];

// Helper function to check if a model supports vision
export function isMistralVisionModel(model: string): boolean {
  return MISTRAL_VISION_MODELS.some(visionModel =>
    model === visionModel ||
    model.startsWith(`${visionModel}-`)
  );
}
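
// Illustrative usage sketch (the dated-suffix id is hypothetical):
//   isMistralVisionModel("pixtral-large-latest")   -> true  (exact match)
//   isMistralVisionModel("mistral-small-2503-rc1") -> true  (prefix match)
//   isMistralVisionModel("mistral-tiny")           -> false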

// Main Mistral chat message schema
const MistralChatMessageSchema = z.object({
  role: z.enum(["system", "user", "assistant", "tool"]), // TODO: implement tools
  content: z.string(),
  // Support both string content (for backwards compatibility) and array of content items (for multimodal)
  content: z.union([
    z.string(),
    z.array(ContentItemSchema)
  ]),
  prefix: z.boolean().optional(),
});

@@ -107,7 +159,26 @@ export function fixMistralPrompt(
  // Consolidate multiple messages from the same role
  const last = acc[acc.length - 1];
  if (last.role === copy.role) {
    last.content += "\n\n" + copy.content;
    // Handle different content types for consolidation
    if (typeof last.content === "string" && typeof copy.content === "string") {
      // Both are strings, concatenate them
      last.content += "\n\n" + copy.content;
    } else if (Array.isArray(last.content) && typeof copy.content === "string") {
      // Add the string content as a new text content item
      last.content.push({
        type: "text",
        text: copy.content
      });
    } else if (typeof last.content === "string" && Array.isArray(copy.content)) {
      // Convert last.content to array and append copy.content items
      last.content = [
        { type: "text", text: last.content },
        ...copy.content
      ];
    } else if (Array.isArray(last.content) && Array.isArray(copy.content)) {
      // Both are arrays, concatenate them
      last.content = [...last.content, ...copy.content];
    }
  } else {
    acc.push(copy);
  }
@@ -125,18 +196,41 @@ export function fixMistralPrompt(

let jinjaTemplate: Template;
let renderTemplate: (messages: MistralAIChatMessage[]) => string;

// Helper function to convert multimodal content to string format for text-only models
function contentToString(content: string | any[]): string {
  if (typeof content === "string") {
    return content;
  } else if (Array.isArray(content)) {
    // For multimodal content, extract only the text parts
    // Images are not supported in text-only templates
    return content
      .filter(item => item.type === "text")
      .map(item => (item as any).text)
      .join("\n\n");
  }
  return "";
}
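
// Illustrative usage sketch: image parts are dropped, text parts are joined.
contentToString([
  { type: "text", text: "Describe this:" },
  { type: "image_url", image_url: "data:image/png;base64,<data>" },
  { type: "text", text: "Keep it short." },
]); // -> "Describe this:\n\nKeep it short."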

function renderMistralPrompt(messages: MistralAIChatMessage[]) {
  if (!jinjaTemplate) {
    logger.warn("Lazy loading mistral chat template...");
    const { chatTemplate, bosToken, eosToken } =
      require("./templates/mistral-template").MISTRAL_TEMPLATE;
    jinjaTemplate = new Template(chatTemplate);
    renderTemplate = (messages) =>
      jinjaTemplate.render({
        messages,
    renderTemplate = (messages) => {
      // We need to convert any multimodal content to string format for the template
      const textOnlyMessages = messages.map(msg => ({
        ...msg,
        content: contentToString(msg.content)
      }));

      return jinjaTemplate.render({
        messages: textOnlyMessages,
        bos_token: bosToken,
        eos_token: eosToken,
      });
    };
  }

  return renderTemplate(messages);
@@ -145,6 +239,9 @@ function renderMistralPrompt(messages: MistralAIChatMessage[]) {
/**
 * Attempts to convert a Mistral chat completions request to a text completions,
 * using the official prompt template published by Mistral.
 *
 * Note: This transformation is only applicable for text-only models.
 * Multimodal/vision models (Pixtral, etc.) cannot use this transformation.
 */
export const transformMistralChatToText: APIFormatTransformer<
  typeof MistralAIV1TextCompletionsSchema
@@ -159,8 +256,24 @@ export const transformMistralChatToText: APIFormatTransformer<
    throw result.error;
  }

  const { messages, ...rest } = result.data;
  const prompt = renderMistralPrompt(messages);
  // Check if this is a vision request (contains any image_url content items)
  const { messages, model, ...rest } = result.data;
  const hasVisionContent = messages.some(msg =>
    Array.isArray(msg.content) &&
    msg.content.some(item => item.type === "image_url")
  );

  return { ...rest, prompt, messages: undefined };
  // Cannot transform vision requests to text completions
  if (hasVisionContent) {
    req.log.warn(
      { model },
      "Cannot transform Mistral vision request to text completions format"
    );
    throw new Error(
      "Vision requests (with image_url content) cannot be transformed to text completions format"
    );
  }

  const prompt = renderMistralPrompt(messages);
  return { ...rest, model, prompt, messages: undefined };
};

@@ -0,0 +1,87 @@
import { z } from "zod";
import { OPENAI_OUTPUT_MAX } from "./openai";

/**
 * Helper function to check if a model is from Moonshot
 */
export function isMoonshotModel(model: string): boolean {
  return model.includes("moonshot");
}

/**
 * Helper function to check if a model is a Moonshot vision model
 */
export function isMoonshotVisionModel(model: string): boolean {
  return model.includes("moonshot") && model.includes("vision");
}

// Content schema for vision models
const MoonshotVisionContentSchema = z.union([
  z.string(),
  z.array(
    z.union([
      z.object({
        type: z.literal("text"),
        text: z.string(),
      }),
      z.object({
        type: z.literal("image_url"),
        image_url: z.object({
          url: z.string(),
          detail: z.enum(["low", "high", "auto"]).optional(),
        }),
      }),
    ])
  ),
]);

// Basic chat message schema
const MoonshotChatMessageSchema = z.object({
  role: z.enum(["user", "assistant", "system"]),
  content: z.union([z.string(), MoonshotVisionContentSchema]).nullable(),
  name: z.string().optional(),
  // Support for partial mode
  partial: z.boolean().optional(),
});

const MoonshotMessagesSchema = z.array(MoonshotChatMessageSchema);

// Schema for Moonshot chat completions
export const MoonshotV1ChatCompletionsSchema = z.object({
  model: z.string(),
  messages: MoonshotMessagesSchema,
  temperature: z.number().optional().default(0.3),
  top_p: z.number().optional().default(1),
  max_tokens: z.coerce
    .number()
    .int()
    .nullish()
    .transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
  stream: z.boolean().optional().default(false),
  stop: z
    .union([z.string(), z.array(z.string()).max(5)])
    .optional()
    .default([])
    .transform((v) => (Array.isArray(v) ? v : [v])),
  seed: z.number().int().min(0).optional(),
  response_format: z
    .object({
      type: z.enum(["text", "json_object"])
    })
    .optional(),
  tools: z.array(z.any()).optional(),
  tool_choice: z.any().optional(),
  frequency_penalty: z.number().min(-2).max(2).optional().default(0),
  presence_penalty: z.number().min(-2).max(2).optional().default(0),
  n: z.number().int().min(1).max(5).optional().default(1),
});
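
// Illustrative parse sketch: defaults and clamping applied by the schema above.
const moonshotExample = MoonshotV1ChatCompletionsSchema.parse({
  model: "moonshot-v1-8k",
  messages: [{ role: "user", content: "hello" }],
  max_tokens: 999999999, // clamped to OPENAI_OUTPUT_MAX
  stop: "###",           // normalized to ["###"]
});
// moonshotExample.temperature === 0.3, moonshotExample.n === 1, stream === false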

// Schema for Moonshot embeddings
export const MoonshotV1EmbeddingsSchema = z.object({
  model: z.string(),
  input: z.union([z.string(), z.array(z.string())]),
  encoding_format: z.enum(["float", "base64"]).optional()
});

// Note: Partial mode handling is implemented directly in the proxy middleware
// to follow the Deepseek-style consolidation pattern
@@ -1,20 +1,58 @@
import { z } from "zod";
import { Request } from "express";
import { OpenAIV1ChatCompletionSchema } from "./openai";
import { APIFormatTransformer } from "./index";

// Extend the Express Request type to include multimodal content
declare global {
  namespace Express {
    interface Request {
      multimodalContent?: {
        prompt?: string;
        images?: string[];
      };
    }
  }
}

// https://platform.openai.com/docs/api-reference/images/create
export const OpenAIV1ImagesGenerationSchema = z
  .object({
    prompt: z.string().max(4000),
    prompt: z.string().max(32000), // gpt-image-1 supports up to 32000 chars
    model: z.string().max(100).optional(),
    quality: z.enum(["standard", "hd"]).optional().default("standard"),
    n: z.number().int().min(1).max(4).optional().default(1),
    response_format: z.enum(["url", "b64_json"]).optional(),
    // Support for image inputs (multimodal capability of gpt-image-1)
    image: z.union([
      z.string(), // single image (base64 or URL)
      z.array(z.string()) // array of images
    ]).optional(),
    mask: z.string().optional(), // mask image for editing
    // Different quality options based on model
    quality: z
      .union([
        z.enum(["standard", "hd"]), // dall-e-3 options
        z.enum(["high", "medium", "low"]), // gpt-image-1 options
        z.literal("auto") // default for gpt-image-1
      ])
      .optional()
      .default("standard"),
    n: z.number().int().min(1).max(10).optional().default(1), // gpt-image-1 supports up to 10
    response_format: z.enum(["url", "b64_json"]).optional(), // Note: gpt-image-1 always returns b64_json
    // Enhanced size options for gpt-image-1
    size: z
      .enum(["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"])
      .union([
        // dalle models
        z.enum(["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]),
        // gpt-image-1 models (adds landscape, portrait, auto)
        z.enum(["1024x1024", "1536x1024", "1024x1536", "auto"])
      ])
      .optional()
      .default("1024x1024"),
    style: z.enum(["vivid", "natural"]).optional().default("vivid"),
    style: z.enum(["vivid", "natural"]).optional().default("vivid"), // dall-e-3 only
    // New gpt-image-1 specific parameters
    background: z.enum(["transparent", "opaque", "auto"]).optional(), // gpt-image-1 only
    moderation: z.enum(["low", "auto"]).optional(), // gpt-image-1 only
    output_compression: z.number().int().min(0).max(100).optional(), // gpt-image-1 only
    output_format: z.enum(["png", "jpeg", "webp"]).optional(), // gpt-image-1 only
    user: z.string().max(500).optional(),
  })
  .strip();
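
// Illustrative summary sketch of the per-model parameter split encoded above
// (values taken from the schema comments):
//   dall-e-3:    quality "standard" | "hd", style "vivid" | "natural",
//                sizes 1024x1024 / 1792x1024 / 1024x1792
//   gpt-image-1: quality "high" | "medium" | "low" | "auto",
//                sizes 1024x1024 / 1536x1024 / 1024x1536 / "auto",
//                plus background, moderation, output_compression, output_format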

@@ -34,9 +72,41 @@ export const transformOpenAIToOpenAIImage: APIFormatTransformer<
  }

  const { messages } = result.data;
  const prompt = messages.filter((m) => m.role === "user").pop()?.content;
  if (Array.isArray(prompt)) {
    throw new Error("Image generation prompt must be a text message.");
  const userMessage = messages.filter((m) => m.role === "user").pop();
  if (!userMessage) {
    throw new Error("No user message found in the request.");
  }

  const content = userMessage.content;

  // Handle array content (multimodal content with text and images)
  if (Array.isArray(content)) {
    const textParts: string[] = [];
    const imageParts: string[] = [];

    // Process content parts, extracting text and images
    content.forEach(part => {
      if (typeof part === 'string') {
        textParts.push(part);
      } else if (part.type === 'image_url') {
        // Extract image URL or base64 data from the content
        const imageUrl = typeof part.image_url === 'string'
          ? part.image_url
          : part.image_url.url;
        imageParts.push(imageUrl);
      }
    });

    // Join all text parts to form the prompt
    const prompt = textParts.join('\n');

    // For gpt-image-1, we'll pass both the text prompt and image(s)
    req.multimodalContent = {
      prompt,
      images: imageParts
    };
  } else if (typeof content !== 'string') {
    throw new Error("Image generation prompt must be a text message or multimodal content.");
  }

  if (body.stream) {
@@ -49,20 +119,206 @@ export const transformOpenAIToOpenAIImage: APIFormatTransformer<
  // character name or wrapping the entire thing in quotes. We will look for
  // the index of "Image:" and use everything after that as the prompt.

  const index = prompt?.toLowerCase().indexOf("image:");
  if (index === -1 || !prompt) {
    throw new Error(
      `Start your prompt with 'Image:' followed by a description of the image you want to generate (received: ${prompt}).`
    );
  // Determine if this is a multimodal request (with images)
  const isMultimodalRequest = Array.isArray(content) && req.multimodalContent?.images && req.multimodalContent.images.length > 0;

  // Check if this is a request for gpt-image-1
  const isGptImageRequest = body.model?.includes("gpt-image") || false;

  // Only enforce the "Image:" prefix for non-multimodal, non-gpt-image-1 requests
  if (!isMultimodalRequest && !isGptImageRequest && typeof content === 'string') {
    const textIndex = content.toLowerCase().indexOf("image:");
    if (textIndex === -1) {
      throw new Error(
        `Start your prompt with 'Image:' followed by a description of the image you want to generate (received: ${content}).`
      );
    }
  }

  // TODO: Add some way to specify parameters via chat message
  // Determine which model to use (gpt-image-1 or dall-e-3)
  const isGptImage = body.model?.includes("gpt-image") || false;

  // For gpt-image-1, add the 'Image:' prefix if it's missing but only for string content
  let modifiedStringContent = typeof content === 'string' ? content : '';
  if (isGptImageRequest && typeof content === 'string' && !content.toLowerCase().includes("image:")) {
    req.log.info("Adding 'Image:' prefix to gpt-image-1 prompt");
    modifiedStringContent = `Image: ${content}`;
    // Store this in the request object for later use
    req.multimodalContent = req.multimodalContent || {};
    req.multimodalContent.prompt = modifiedStringContent;
  }

  // TODO: Add some way to specify parameters via chat message
  const transformed = {
    model: body.model.includes("dall-e") ? body.model : "dall-e-3",
    quality: "standard",
    size: "1024x1024",
    response_format: "url",
    prompt: prompt.slice(index! + 6).trim(),
  // Get the correct text prompt either from multimodal content or plain string content
  let textPrompt: string | undefined;
  let index = -1;

  if (Array.isArray(content)) {
    // For array content, use the prompt from multimodal content if available
    textPrompt = req.multimodalContent?.prompt;
  } else if (typeof content === 'string') {
    // For string content, use the modified content which might have the Image: prefix for gpt-image-1
    const contentToProcess = isGptImageRequest ? modifiedStringContent : content;

    // Find the "Image:" prefix in the content
    index = contentToProcess.toLowerCase().indexOf("image:");

    // For gpt-image-1, we might have just added the prefix, so we need to handle both cases
    if (index !== -1) {
      textPrompt = contentToProcess.slice(index + 6).trim();
    } else if (isGptImageRequest) {
      // For gpt-image-1, use the whole content if no prefix is found
      textPrompt = content; // Use the original content without prefix
    } else {
      // For other models, default to the content as-is
      textPrompt = contentToProcess;
    }
  }

  // Validate that we have a text prompt
  if (!textPrompt) {
    throw new Error("No text prompt found in the request.");
  }

  // Determine the exact model being used
  let modelName = "dall-e-2"; // Default

  if (isGptImage) {
    modelName = "gpt-image-1";
  } else if (body.model?.includes("dall-e-3")) {
    modelName = "dall-e-3";
  } else if (body.model?.includes("dall-e-2")) {
    modelName = "dall-e-2";
  } else {
    // If no specific model requested, default to dall-e-3
    modelName = "dall-e-3";
  }

  // Start with basic parameters common to all models
  const transformed: any = {
    model: modelName,
    prompt: textPrompt,
  };

  // Add model-specific parameters
  if (modelName === "gpt-image-1") {
    // GPT Image specific parameters - Ensure we only include parameters that are valid for gpt-image-1
    transformed.quality = "auto"; // Default quality for gpt-image-1
    transformed.size = "1024x1024"; // Default size (square)
    transformed.moderation = "low"; // Always set moderation to low for gpt-image-1

    // Optional GPT Image parameters
    if (body.background) transformed.background = body.background;
    if (body.output_format) transformed.output_format = body.output_format;
    if (body.output_compression) transformed.output_compression = body.output_compression;

    // Handle specific quality settings for gpt-image-1
    if (body.quality && ["high", "medium", "low", "auto"].includes(body.quality)) {
      transformed.quality = body.quality;
    }

    // Handle specific size settings for gpt-image-1
    if (body.size && ["1024x1024", "1536x1024", "1024x1536", "auto"].includes(body.size)) {
      transformed.size = body.size;
    }

    // IMPORTANT: Remove any style parameter as it's not supported by gpt-image-1
    delete transformed.style;

    // Log what we're sending for debugging
    req.log.info({ model: "gpt-image-1", allowedParams: Object.keys(transformed) }, "Filtered parameters for gpt-image-1");

    // No response_format for gpt-image-1 as it always returns b64_json
  } else if (modelName === "dall-e-3") {
    // DALL-E 3 specific parameters
    transformed.size = "1024x1024"; // Default size
    transformed.response_format = "url"; // Default format
    transformed.quality = "standard"; // Default quality

    // Handle DALL-E 3 style parameter
    if (body.style && ["vivid", "natural"].includes(body.style)) {
      transformed.style = body.style;
    } else {
      transformed.style = "vivid"; // Default style
    }

    // Handle specific quality settings for dall-e-3
    if (body.quality && ["standard", "hd"].includes(body.quality)) {
      transformed.quality = body.quality;
    }

    // Handle specific size settings for dall-e-3
    if (body.size && ["1024x1024", "1792x1024", "1024x1792"].includes(body.size)) {
      transformed.size = body.size;
    }
  } else {
    // DALL-E 2 specific parameters
    transformed.size = "1024x1024"; // Default size
    transformed.response_format = "url"; // Default format

    // NO quality parameter for dall-e-2
    // Explicitly remove the quality parameter before sending
    delete transformed.quality;

    // Handle specific size settings for dall-e-2
    if (body.size && ["256x256", "512x512", "1024x1024"].includes(body.size)) {
      transformed.size = body.size;
    }
  }

  // Handle common parameters
  if (body.n && !isNaN(parseInt(body.n))) {
    // For dall-e-3, only n=1 is supported
    if (modelName === "dall-e-3" && parseInt(body.n) > 1) {
      transformed.n = 1;
    } else {
      transformed.n = parseInt(body.n);
    }
  }

  // Handle response_format for non-gpt-image models
  if (!isGptImage && body.response_format && ["url", "b64_json"].includes(body.response_format)) {
    transformed.response_format = body.response_format;
  }

  // If this is gpt-image-1 and we have image content, add it to the transformed request
  if (isGptImage && req.multimodalContent?.images && req.multimodalContent.images.length > 0) {
    // For the edit endpoint, we need to format the images properly
    transformed.image = req.multimodalContent.images.length === 1
      ? req.multimodalContent.images[0]
      : req.multimodalContent.images;

    // Any request with images for gpt-image-1 should use the edits endpoint
    req.log.info(`${req.multimodalContent.images.length} image(s) detected for gpt-image-1, using images/edits endpoint`);
    if (req.path.startsWith("/v1/chat/completions")) {
      req.url = req.url.replace("/v1/chat/completions", "/v1/images/edits");
    }
  }
  // For dall-e-2, we need to make sure we don't introduce unsupported parameters
  // due to default values in the schema. Let's bypass Zod schema validation here
  // for dall-e-2 and only include the supported parameters.
  if (modelName === "dall-e-2") {
    // Only include parameters that dall-e-2 supports
    const filteredTransformed: any = {};

    // List of parameters supported by dall-e-2
    const supportedParams = [
      "model", "prompt", "n", "size", "response_format", "user"
    ];

    // Copy only supported parameters
    for (const param of supportedParams) {
      if (transformed[param] !== undefined) {
        filteredTransformed[param] = transformed[param];
      }
    }

    // Log what we're sending
    req.log.info({ model: "dall-e-2", params: Object.keys(filteredTransformed) }, "Filtered parameters for dall-e-2");

    return filteredTransformed;
  }

  // For other models, use the schema as normal
  return OpenAIV1ImagesGenerationSchema.parse(transformed);
};
|
||||
|
||||
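The per-model branches above boil down to an allow-list per model. A minimal standalone sketch of that pattern follows; the `pickSize` helper and sample calls are illustrative and not part of the proxy, but the size lists come from the branches above.

```ts
const SUPPORTED_SIZES: Record<string, string[]> = {
  "gpt-image-1": ["1024x1024", "1536x1024", "1024x1536", "auto"],
  "dall-e-3": ["1024x1024", "1792x1024", "1024x1792"],
  "dall-e-2": ["256x256", "512x512", "1024x1024"],
};

// Keep a requested size only if the target model supports it, else fall back
// to the shared 1024x1024 default.
function pickSize(model: string, requested?: string): string {
  const allowed = SUPPORTED_SIZES[model] ?? [];
  return requested && allowed.includes(requested) ? requested : "1024x1024";
}

console.log(pickSize("dall-e-2", "1792x1024")); // "1024x1024" (unsupported, falls back)
console.log(pickSize("dall-e-3", "1792x1024")); // "1792x1024"
```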
@@ -0,0 +1,61 @@
import { z } from "zod";
import { Request } from "express";
import { OpenAIChatMessage, OpenAIV1ChatCompletionSchema } from "./openai";

// Schema for the OpenAI Responses API based on the chat completion schema
// with some additional fields specific to the Responses API
export const OpenAIV1ResponsesSchema = z.object({
  model: z.string(),
  input: z.object({
    messages: z.array(z.any())
  }).optional(),
  previousResponseId: z.string().optional(),
  max_output_tokens: z.number().int().positive().optional(),
  temperature: z.number().min(0).max(2).optional(),
  top_p: z.number().min(0).max(1).optional(),
  n: z.number().int().positive().optional(),
  stream: z.boolean().optional(),
  stop: z.union([z.string(), z.array(z.string())]).optional(),
  presence_penalty: z.number().min(-2).max(2).optional(),
  frequency_penalty: z.number().min(-2).max(2).optional(),
  user: z.string().optional(),
  tools: z.array(z.any()).optional(),
  reasoning_effort: z.enum(["low", "medium", "high"]).optional(),
});

// Allow transforming from OpenAI Chat to Responses format
export async function transformOpenAIToOpenAIResponses(
  req: Request
): Promise<z.infer<typeof OpenAIV1ResponsesSchema>> {
  const body = { ...req.body };

  // Move 'messages' to 'input.messages' as required by the Responses API
  if (body.messages && !body.input) {
    body.input = {
      messages: body.messages
    };
    delete body.messages;
  }

  // Convert max_tokens to max_output_tokens if present and not set
  if (body.max_tokens && !body.max_output_tokens) {
    body.max_output_tokens = body.max_tokens;
    delete body.max_tokens;
  }

  // Map conversation_id to previousResponseId if present
  if (body.conversation_id && !body.previousResponseId) {
    body.previousResponseId = body.conversation_id;
    delete body.conversation_id;
  }

  // Ensure tools have the right format if present
  if (body.tools) {
    body.tools = body.tools.map((tool: any) => ({
      ...tool,
      type: tool.type || "function"
    }));
  }

  return body;
}
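A rough before/after of what this transform does to a chat-completions body. The mock request, model name, and conversation id below are illustrative; the cast stands in for a real Express request.

```ts
import { Request } from "express";

const mockReq = {
  body: {
    model: "gpt-4o",
    messages: [{ role: "user", content: "Hello" }],
    max_tokens: 256,
    conversation_id: "resp_abc123",
  },
} as unknown as Request;

const transformed = await transformOpenAIToOpenAIResponses(mockReq);
// transformed:
// {
//   model: "gpt-4o",
//   input: { messages: [{ role: "user", content: "Hello" }] },
//   max_output_tokens: 256,
//   previousResponseId: "resp_abc123",
// }
```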
@@ -21,11 +21,11 @@ export const OpenAIV1ChatCompletionSchema = z
     model: z.string().max(100),
     messages: z.array(
       z.object({
-        role: z.enum(["system", "user", "assistant", "tool", "function"]),
+        role: z.enum(["system", "developer", "user", "assistant", "tool", "function"]),
         content: z.union([z.string(), OpenAIV1ChatContentArraySchema]),
         name: z.string().optional(),
         tool_calls: z.array(z.any()).optional(),
-        function_call: z.array(z.any()).optional(),
+        function_call: z.any().optional(),
         tool_call_id: z.string().optional(),
       }),
       {
@@ -54,6 +54,13 @@ export const OpenAIV1ChatCompletionSchema = z
       .nullish()
       .default(Math.min(OPENAI_OUTPUT_MAX, 16384))
       .transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
+    // max_completion_tokens replaces max_tokens in the OpenAI API.
+    // for backwards compatibility, we accept both and move the value in
+    // max_tokens to max_completion_tokens in proxy middleware.
+    max_completion_tokens: z.coerce
+      .number()
+      .int()
+      .optional(),
     frequency_penalty: z.number().optional().default(0),
     presence_penalty: z.number().optional().default(0),
     logit_bias: z.any().optional(),
@@ -70,12 +77,14 @@ export const OpenAIV1ChatCompletionSchema = z
     functions: z.array(z.any()).optional(),
     tool_choice: z.any().optional(),
     function_choice: z.any().optional(),
+    reasoning_effort: z.enum(["minimal", "low", "medium", "high"]).optional(),
+    verbosity: z.enum(["low", "medium", "high"]).optional(),
     response_format: z.any(),
   })
   // Tool usage must be enabled via config because we currently have no way to
   // track quota usage for them or enforce limits.
   .omit(
-    Boolean(config.allowOpenAIToolUsage) ? {} : { tools: true, functions: true }
+    !Boolean(config.allowOpenAIToolUsage) ? { tools: true, functions: true } : {}
   )
   .strip();
 export type OpenAIChatMessage = z.infer<
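Note that the two `.omit()` expressions are logically equivalent (`cond ? {} : mask` is the same as `!cond ? mask : {}`); the rewrite just puts the tools-disabled case first. A minimal sketch of the conditional-omit pattern, with a made-up schema and flag:

```ts
import { z } from "zod";

const allowTools = false; // stand-in for config.allowOpenAIToolUsage

const schema = z
  .object({ model: z.string(), tools: z.array(z.any()).optional() })
  // When tool usage is disabled, drop `tools` from the schema entirely...
  .omit(!allowTools ? { tools: true } : {})
  // ...and strip() then discards it as an unknown key at parse time.
  .strip();

console.log(schema.parse({ model: "x", tools: [] })); // { model: "x" }
```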
@@ -0,0 +1,118 @@
import { z } from "zod";
import { OPENAI_OUTPUT_MAX } from "./openai";

/**
 * Helper function to check if a model is from Qwen
 */
export function isQwenModel(model: string): boolean {
  // Remove any suffix like -thinking or -nonthinking for checking
  const baseModel = model.replace(/-thinking$|-nonthinking$/, '');
  return baseModel.startsWith("qwen") || baseModel.includes("qwen");
}

/**
 * Helper function to check if a model supports thinking capability
 */
export function isQwenThinkingModel(model: string): boolean {
  // Remove any suffix like -thinking or -nonthinking for checking
  const baseModel = model.replace(/-thinking$|-nonthinking$/, '');

  // All Qwen3 models support thinking
  if (baseModel.startsWith("qwen3")) {
    return true;
  }

  // Other models that support thinking
  return (
    baseModel === "qwen-plus-latest" ||
    baseModel === "qwen-plus-2025-04-28" ||
    baseModel === "qwen-turbo-latest" ||
    baseModel === "qwen-turbo-2025-04-28"
  );
}

// Basic chat message schema
const QwenChatMessageSchema = z.object({
  role: z.enum(["user", "assistant", "system"]),
  content: z.string().nullable(),
  name: z.string().optional(),
});

const QwenMessagesSchema = z.array(QwenChatMessageSchema);

// Schema for Qwen chat completions
export const QwenV1ChatCompletionsSchema = z.object({
  model: z.string(),
  messages: QwenMessagesSchema,
  temperature: z.number().optional().default(1),
  top_p: z.number().optional().default(1),
  max_tokens: z.coerce
    .number()
    .int()
    .nullish()
    .transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
  stream: z.boolean().optional().default(false),
  stop: z
    .union([z.string(), z.array(z.string())])
    .optional()
    .default([])
    .transform((v) => (Array.isArray(v) ? v : [v])),
  seed: z.number().int().min(0).optional(),
  response_format: z
    .object({
      type: z.enum(["text", "json_object"]),
      schema: z.any().optional()
    })
    .optional(),
  tools: z.array(z.any()).optional(),
  frequency_penalty: z.number().optional().default(0),
  presence_penalty: z.number().optional().default(0),
  // Qwen-specific parameters
  enable_thinking: z.boolean().optional(),
  thinking_budget: z.number().optional(),
});

// Schema for Qwen embeddings
export const QwenV1EmbeddingsSchema = z.object({
  model: z.string(),
  input: z.union([z.string(), z.array(z.string())]),
  encoding_format: z.enum(["float", "base64"]).optional()
});

/**
 * Helper function to normalize messages for the Qwen API.
 * Qwen uses the standard OpenAI message format, so no transformation is needed.
 */
export function normalizeMessages(messages: any[]): any[] {
  return messages;
}

/**
 * Helper function to check if a model is a Qwen3 model
 */
export function isQwen3Model(model: string): boolean {
  // Remove any suffix like -thinking or -nonthinking for checking
  const baseModel = model.replace(/-thinking$|-nonthinking$/, '');
  return baseModel.startsWith("qwen3");
}

/**
 * Helper function to check if a model name has the thinking variant suffix
 */
export function isThinkingVariant(model: string): boolean {
  return model.endsWith("-thinking");
}

/**
 * Helper function to check if a model name has the non-thinking variant suffix
 */
export function isNonThinkingVariant(model: string): boolean {
  return model.endsWith("-nonthinking");
}

/**
 * Get the base model name without any thinking/nonthinking suffix
 */
export function getBaseModelName(model: string): string {
  return model.replace(/-thinking$|-nonthinking$/, '');
}
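Taken together, the helpers strip the variant suffix before matching, so thinking detection works on both the base name and the suffixed variants. For example (model names are illustrative):

```ts
isQwenThinkingModel("qwen3-32b-thinking");        // true  (qwen3 prefix after stripping the suffix)
isThinkingVariant("qwen3-32b-thinking");          // true
isNonThinkingVariant("qwen3-32b");                // false
isQwenThinkingModel("qwen-turbo-latest");         // true  (explicitly listed)
getBaseModelName("qwen-plus-latest-nonthinking"); // "qwen-plus-latest"
```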
@@ -0,0 +1,167 @@
import { z } from "zod";
import { OPENAI_OUTPUT_MAX } from "./openai";

// Define the content types for multimodal messages
export const TextContentSchema = z.object({
  type: z.literal("text"),
  text: z.string()
});

export const ImageUrlContentSchema = z.object({
  type: z.literal("image_url"),
  image_url: z.union([
    // URL format (https://...)
    z.string().url(),
    // Base64 format (data:image/jpeg;base64,...)
    z.string().regex(/^data:image\/(jpeg|png|gif|webp);base64,/),
    // Object format (might contain detail or url properties)
    z.object({
      url: z.string(),
      detail: z.enum(["low", "high"]).optional()
    }),
    // Allow any string for maximum compatibility
    z.string()
  ])
});

export const ContentItemSchema = z.union([TextContentSchema, ImageUrlContentSchema]);

// Export types for the content schemas
export type TextContent = z.infer<typeof TextContentSchema>;
export type ImageUrlContent = z.infer<typeof ImageUrlContentSchema>;
export type ContentItem = z.infer<typeof ContentItemSchema>;

// Helper function to check if a model supports vision
export function isGrokVisionModel(model: string): boolean {
  // Check if the model name contains '-vision' anywhere in the name.
  // This makes it future-proof for new vision models.
  return model.toLowerCase().includes("-vision");
}

// Helper function to check if a model supports image generation
export function isGrokImageGenModel(model: string): boolean {
  // Check if the model name contains '-image' anywhere in the name.
  // This makes it future-proof for new image generation models.
  return model.toLowerCase().includes("-image");
}

// Helper function to check if a model supports reasoning
export function isGrokReasoningModel(model: string): boolean {
  // grok-3-mini variants and grok-4-0709 support reasoning
  const modelLower = model.toLowerCase();
  return (modelLower.includes("-mini") && modelLower.includes("grok-3")) ||
    modelLower.includes("grok-4");
}

// Helper function to check if a model supports the reasoning_effort parameter
export function isGrokReasoningEffortModel(model: string): boolean {
  // Only grok-3-mini variants support the reasoning_effort parameter;
  // grok-4-0709 does NOT support reasoning_effort.
  const modelLower = model.toLowerCase();
  return modelLower.includes("-mini") && modelLower.includes("grok-3");
}

// Helper function to check if a model returns reasoning_content
export function isGrokReasoningContentModel(model: string): boolean {
  // Only grok-3-mini variants return reasoning_content;
  // grok-4-0709 does NOT return reasoning_content.
  const modelLower = model.toLowerCase();
  return modelLower.includes("-mini") && modelLower.includes("grok-3");
}

// Main Grok chat message schema
const XaiChatMessageSchema = z.object({
  role: z.enum(["system", "user", "assistant", "tool", "function"]),
  // Support both string content (for backwards compatibility) and array of content items (for multimodal)
  content: z.union([
    z.string().nullable(),
    z.array(ContentItemSchema)
  ]),
  // Reasoning content field (for grok-3-mini models)
  reasoning_content: z.string().optional(),
  // Tool call fields
  tool_call_id: z.string().optional(),
  name: z.string().optional(),
  tool_calls: z.array(z.any()).optional(),
});

const XaiMessagesSchema = z.array(XaiChatMessageSchema);

// Basic chat completions schema
export const XaiV1ChatCompletionsSchema = z.object({
  model: z.string(),
  messages: XaiMessagesSchema,
  temperature: z.number().optional().default(1),
  top_p: z.number().optional().default(1),
  max_completion_tokens: z.coerce
    .number()
    .int()
    .nullish()
    .transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
  max_tokens: z.coerce // Deprecated parameter, but kept for backward compatibility
    .number()
    .int()
    .nullish()
    .transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
  stream: z.boolean().optional().default(false),
  // Grok docs say that `stop` can be a string or array
  stop: z
    .union([z.string(), z.array(z.string())])
    .optional()
    .default([])
    .transform((v) => (Array.isArray(v) ? v : [v])),
  seed: z.number().int().min(0).optional(),
  response_format: z
    .object({ type: z.enum(["text", "json_object", "json_schema"]), json_schema: z.any().optional() })
    .optional(),
  // reasoning_effort parameter for grok-3-mini models
  reasoning_effort: z.enum(["low", "medium", "high"]).optional().default("low"),
  stream_options: z.object({
    include_usage: z.boolean()
  }).optional(),
  user: z.string().optional(),
  // Fields to support function calling
  tools: z.array(z.any()).optional(),
  tool_choice: z.union([
    z.string(),
    z.object({
      type: z.literal("function"),
      function: z.object({
        name: z.string()
      })
    })
  ]).optional(),
  // Advanced parameters
  frequency_penalty: z.number().optional().default(0),
  presence_penalty: z.number().optional().default(0),
  logprobs: z.boolean().optional().default(false),
  top_logprobs: z.number().int().min(0).max(8).optional(),
});

// Image generation schema
export const XaiV1ImageGenerationsSchema = z.object({
  model: z.string().optional(),
  prompt: z.string(),
  n: z.number().int().min(1).max(10).optional().default(1),
  response_format: z.enum(["url", "b64_json"]).optional().default("url"),
  user: z.string().optional(),
  // These are marked as not supported in the documentation but included for compatibility
  quality: z.string().optional(),
  size: z.string().optional(),
  style: z.string().optional(),
});

// Helper function to convert multimodal content to string format for text-only models
export function contentToString(content: string | any[] | null): string {
  if (typeof content === "string") {
    return content || "";
  } else if (Array.isArray(content)) {
    // For multimodal content, extract only the text parts;
    // images are not supported in text-only templates.
    return content
      .filter(item => item.type === "text")
      .map(item => (item as any).text)
      .join("\n\n");
  }
  return "";
}
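For example, `contentToString` flattens a multimodal message like this (the sample data is made up):

```ts
const content = [
  { type: "text", text: "What is in this image?" },
  { type: "image_url", image_url: "https://example.com/cat.png" },
  { type: "text", text: "Answer briefly." },
];

contentToString(content);
// => "What is in this image?\n\nAnswer briefly."  (image parts are dropped)
```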
@@ -0,0 +1,82 @@
import { Request } from "express";

/**
 * Claude Opus 4.1 has stricter API validation that doesn't allow both temperature
 * and top_p parameters to be specified simultaneously. This function validates and
 * adjusts the request parameters for Claude Opus 4.1 models ONLY.
 *
 * Rules:
 * - If both parameters are at default values (1.0), omit top_p
 * - If only one parameter is at default, omit the default one
 * - If both are non-default, throw an error
 */
export function validateClaude41OpusParameters(req: Request): void {
  const model = req.body.model;

  // Only apply this validation to Claude Opus 4.1 models
  if (!isClaude41OpusModel(model)) {
    return;
  }

  const temperature = req.body.temperature;
  const topP = req.body.top_p;

  // If neither parameter is specified, no validation needed
  if (temperature === undefined && topP === undefined) {
    return;
  }

  // Default values for Claude API
  const DEFAULT_TEMPERATURE = 1.0;
  const DEFAULT_TOP_P = 1.0;

  const tempIsDefault = temperature === undefined || temperature === DEFAULT_TEMPERATURE;
  const topPIsDefault = topP === undefined || topP === DEFAULT_TOP_P;

  // If both are at default values, omit top_p (keep temperature)
  if (tempIsDefault && topPIsDefault) {
    delete req.body.top_p;
    req.log?.info("Claude Opus 4.1: Both temperature and top_p at default, omitting top_p");
    return;
  }

  // If only one is at default, omit the default one
  if (tempIsDefault && !topPIsDefault) {
    delete req.body.temperature;
    req.log?.info("Claude Opus 4.1: Temperature at default, omitting temperature");
    return;
  }

  if (!tempIsDefault && topPIsDefault) {
    delete req.body.top_p;
    req.log?.info("Claude Opus 4.1: top_p at default, omitting top_p");
    return;
  }

  // If both are non-default, throw an error
  if (!tempIsDefault && !topPIsDefault) {
    throw new Error(
      "Claude Opus 4.1 does not support both temperature and top_p parameters being set to non-default values simultaneously. " +
      "Please specify only one of these parameters or set one to its default value (1.0)."
    );
  }
}

/**
 * Checks if the given model is a Claude Opus 4.1 model.
 * This includes all provider formats for Claude Opus 4.1 ONLY.
 */
function isClaude41OpusModel(model: string): boolean {
  if (!model) return false;

  // Anthropic API format
  if (model.includes("claude-opus-4-1")) return true;

  // AWS Bedrock format
  if (model.includes("anthropic.claude-opus-4-1")) return true;

  // GCP Vertex AI format
  if (model.includes("claude-opus-4-1@")) return true;

  return false;
}
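A sketch of the outcomes, using a mocked Express request; the `mockReq` helper is illustrative, not part of the codebase.

```ts
import { Request } from "express";

function mockReq(body: Record<string, unknown>): Request {
  return { body, log: { info: console.log } } as unknown as Request;
}

const req = mockReq({ model: "claude-opus-4-1-20250805", temperature: 0.7, top_p: 1.0 });
validateClaude41OpusParameters(req);
console.log(req.body); // { model: "...", temperature: 0.7 } (default top_p was omitted)

// Setting both to non-default values throws:
// validateClaude41OpusParameters(
//   mockReq({ model: "claude-opus-4-1-20250805", temperature: 0.7, top_p: 0.9 })
// );
```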
@@ -0,0 +1,40 @@
export interface ClaudeModelMapping {
  awsId: string;
  anthropicId: string;
  displayName: string;
}

export const claudeModels: ClaudeModelMapping[] = [
  { awsId: "anthropic.claude-v2", anthropicId: "claude-2", displayName: "Claude 2" },
  { awsId: "anthropic.claude-v2:1", anthropicId: "claude-2.1", displayName: "Claude 2.1" },
  { awsId: "anthropic.claude-3-haiku-20240307-v1:0", anthropicId: "claude-3-haiku-20240307", displayName: "Claude 3 Haiku" },
  { awsId: "anthropic.claude-3-5-haiku-20241022-v1:0", anthropicId: "claude-3-5-haiku-20241022", displayName: "Claude 3.5 Haiku" },
  { awsId: "anthropic.claude-3-sonnet-20240229-v1:0", anthropicId: "claude-3-sonnet-20240229", displayName: "Claude 3 Sonnet" },
  { awsId: "anthropic.claude-3-5-sonnet-20240620-v1:0", anthropicId: "claude-3-5-sonnet-20240620", displayName: "Claude 3.5 Sonnet (Old)" },
  { awsId: "anthropic.claude-3-5-sonnet-20241022-v2:0", anthropicId: "claude-3-5-sonnet-20241022", displayName: "Claude 3.5 Sonnet (New)" },
  { awsId: "anthropic.claude-3-5-sonnet-20241022-v2:0", anthropicId: "claude-3-5-sonnet-latest", displayName: "Claude 3.5 Sonnet (Latest)" },
  { awsId: "anthropic.claude-3-7-sonnet-20250219-v1:0", anthropicId: "claude-3-7-sonnet-20250219", displayName: "Claude 3.7 Sonnet" },
  { awsId: "anthropic.claude-3-7-sonnet-20250219-v1:0", anthropicId: "claude-3-7-sonnet-latest", displayName: "Claude 3.7 Sonnet (Latest)" },
  { awsId: "anthropic.claude-3-opus-20240229-v1:0", anthropicId: "claude-3-opus-20240229", displayName: "Claude 3 Opus" },
  { awsId: "anthropic.claude-3-opus-20240229-v1:0", anthropicId: "claude-3-opus-latest", displayName: "Claude 3 Opus (Latest)" },
  { awsId: "anthropic.claude-sonnet-4-20250514-v1:0", anthropicId: "claude-sonnet-4-20250514", displayName: "Claude 4 Sonnet" },
  { awsId: "anthropic.claude-sonnet-4-20250514-v1:0", anthropicId: "claude-sonnet-4-latest", displayName: "Claude 4 Sonnet (Latest)" },
  { awsId: "anthropic.claude-opus-4-20250514-v1:0", anthropicId: "claude-opus-4-20250514", displayName: "Claude 4.0 Opus" },
  { awsId: "anthropic.claude-opus-4-1-20250805-v1:0", anthropicId: "claude-opus-4-1-20250805", displayName: "Claude 4.1 Opus" },
  { awsId: "anthropic.claude-opus-4-1-20250805-v1:0", anthropicId: "claude-opus-4-latest", displayName: "Claude 4 Opus (Latest)" },
  { awsId: "anthropic.claude-opus-4-1-20250805-v1:0", anthropicId: "claude-opus-4-1", displayName: "Claude 4.1 Opus" },
  { awsId: "anthropic.claude-sonnet-4-20250514-v1:0", anthropicId: "claude-sonnet-4-0", displayName: "Claude 4 Sonnet" },
  { awsId: "anthropic.claude-opus-4-20250514-v1:0", anthropicId: "claude-opus-4-0", displayName: "Claude 4.0 Opus" },
];

export function findByAwsId(awsId: string): ClaudeModelMapping | undefined {
  return claudeModels.find(model => model.awsId === awsId);
}

export function findByAnthropicId(anthropicId: string): ClaudeModelMapping | undefined {
  return claudeModels.find(model => model.anthropicId === anthropicId);
}

export function getAllClaudeModels(): ClaudeModelMapping[] {
  return claudeModels;
}
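Lookup works in both directions. Note that several aliases share one `awsId`, so `findByAwsId` returns the first matching entry in the list:

```ts
const mapping = findByAnthropicId("claude-opus-4-1");
console.log(mapping?.awsId); // "anthropic.claude-opus-4-1-20250805-v1:0"

console.log(findByAwsId("anthropic.claude-opus-4-1-20250805-v1:0")?.anthropicId);
// "claude-opus-4-1-20250805" (first entry with that awsId wins)
```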
@@ -5,6 +5,7 @@ import { Express } from "express-serve-static-core";
 import { APIFormat, Key } from "./key-management";
 import { User } from "./users/schema";
 import { LLMService, ModelFamily } from "./models";
+import { ProxyReqManager } from "../proxy/middleware/request/proxy-req-manager";

 declare global {
   namespace Express {
@@ -24,6 +25,7 @@ declare global {
       queueOutTime?: number;
       onAborted?: () => void;
       proceed: () => void;
+      changeManager?: ProxyReqManager;
       heartbeatInterval?: NodeJS.Timeout;
       monitorInterval?: NodeJS.Timeout;
       promptTokens?: number;
@@ -31,6 +33,7 @@ declare global {
       tokenizerInfo: Record<string, any>;
       signedRequest: HttpRequest;
       modelFamily?: ModelFamily;
+      isChunkedTransfer?: boolean;
     }
   }
 }
@@ -1,19 +1,31 @@
-import axios from "axios";
 import express from "express";
 import { promises as fs } from "fs";
 import path from "path";
 import { v4 } from "uuid";
 import { USER_ASSETS_DIR } from "../../config";
+import { getAxiosInstance } from "../network";
 import { addToImageHistory } from "./image-history";
 import { libSharp } from "./index";

+const axios = getAxiosInstance();
+
 export type OpenAIImageGenerationResult = {
   created: number;
   data: {
     revised_prompt?: string;
-    url: string;
-    b64_json: string;
+    url?: string; // gpt-image-1 doesn't return URLs, only b64_json
+    b64_json?: string;
   }[];
+  // Added for gpt-image-1 responses
+  usage?: {
+    total_tokens: number;
+    input_tokens: number;
+    output_tokens: number;
+    input_tokens_details?: {
+      text_tokens: number;
+      image_tokens: number;
+    };
+  };
 };

 async function downloadImage(url: string) {
@@ -63,11 +75,16 @@ export async function mirrorGeneratedImage(
     let mirror: string;
     if (item.b64_json) {
       mirror = await saveB64Image(item.b64_json);
-    } else {
+    } else if (item.url) {
       mirror = await downloadImage(item.url);
+    } else {
+      req.log.warn("No image data found in response");
+      continue;
     }
     // Set the URL to our mirrored version
     item.url = `${host}/user_content/${path.basename(mirror)}`;
     await createThumbnail(mirror);
     // Add to image history with the local URL
     addToImageHistory({
       url: item.url,
       prompt,
@@ -0,0 +1,28 @@
import type firebase from "firebase-admin";
import { config } from "../config";
import { getHttpAgents } from "./network";

let firebaseApp: firebase.app.App | undefined;

export async function initializeFirebase() {
  const firebase = await import("firebase-admin");
  const firebaseKey = Buffer.from(config.firebaseKey!, "base64").toString();
  const app = firebase.initializeApp({
    // RTDB doesn't actually seem to use this but respects `WS_PROXY` if set,
    // so we do that in the network module.
    httpAgent: getHttpAgents()[0],
    credential: firebase.credential.cert(JSON.parse(firebaseKey)),
    databaseURL: config.firebaseRtdbUrl,
  });

  await app.database().ref("connection-test").set(Date.now());

  firebaseApp = app;
}

export function getFirebaseApp(): firebase.app.App {
  if (!firebaseApp) {
    throw new Error("Firebase app not initialized.");
  }
  return firebaseApp;
}
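A sketch of the intended call order: initialize once at startup, then grab the app wherever it's needed. The RTDB path in the second write is made up for illustration.

```ts
// Initialize once; this also verifies RTDB connectivity via the
// "connection-test" write above.
await initializeFirebase();

// Afterwards the app is available anywhere in the process.
const db = getFirebaseApp().database();
await db.ref("users/1234/lastSeen").set(Date.now());
```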
@@ -1,6 +1,6 @@
 import { RequestHandler } from "express";
 import { config } from "../config";
-import { getTokenCostUsd, prettyTokens } from "./stats";
+import { getTokenCostUsd, getTokenCostDetailsUsd, prettyTokens } from "./stats"; // Added getTokenCostDetailsUsd
 import { redactIp } from "./utils";
 import * as userStore from "./users/user-store";

@@ -30,7 +30,8 @@ export const injectLocals: RequestHandler = (req, res, next) => {

   // view helpers
   res.locals.prettyTokens = prettyTokens;
-  res.locals.tokenCost = getTokenCostUsd;
+  res.locals.tokenCost = getTokenCostUsd; // Returns total cost as a number
+  res.locals.tokenCostDetails = getTokenCostDetailsUsd; // Returns { inputCost, outputCost, totalCost }
   res.locals.redactIp = redactIp;

   next();
@@ -1,11 +1,14 @@
-import axios, { AxiosError, AxiosResponse } from "axios";
+import { AxiosError, AxiosResponse } from "axios";
+import { getAxiosInstance } from "../../network";
 import { KeyCheckerBase } from "../key-checker-base";
 import type { AnthropicKey, AnthropicKeyProvider } from "./provider";

+const axios = getAxiosInstance();
+
 const MIN_CHECK_INTERVAL = 3 * 1000; // 3 seconds
-const KEY_CHECK_PERIOD = 1000 * 60 * 60 * 6; // 6 hours
+const KEY_CHECK_PERIOD = 1000 * 60 * 60 * 24; // 24 hours (no reason to do it every 6 hours)
 const POST_MESSAGES_URL = "https://api.anthropic.com/v1/messages";
-const TEST_MODEL = "claude-3-sonnet-20240229";
+const TEST_MODEL = "claude-3-7-sonnet-latest";
 const SYSTEM = "Obey all instructions from the user.";
 const DETECTION_PROMPT = [
   {
@@ -68,10 +71,13 @@ export class AnthropicKeyChecker extends KeyCheckerBase<AnthropicKey> {
     // The type is always invalid_request_error, so we have to check the text.
     const isOverQuota =
       data.error?.message?.match(/usage blocked until/i) ||
-      data.error?.message?.match(/credit balance is too low/i);
+      data.error?.message?.match(/credit balance is too low/i) ||
+      data.error?.message?.match(/reached your specified API usage limits/i) ||
+      data.error?.message?.match(/You will regain access on/i);
     const isDisabled = data.error?.message?.match(
       /organization has been disabled/i
-    );
+    ) ||
+      data.error?.message?.match(/credential is only authorized for use with Claude Code/i);
     if (status === 400 && isOverQuota) {
       this.log.warn(
         { key: key.hash, error: data },
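Since the over-quota classification is purely regex-based, it can be exercised in isolation. The sample messages below are invented but match the patterns added above:

```ts
const OVER_QUOTA_PATTERNS = [
  /usage blocked until/i,
  /credit balance is too low/i,
  /reached your specified API usage limits/i,
  /You will regain access on/i,
];

function isOverQuotaMessage(message: string): boolean {
  return OVER_QUOTA_PATTERNS.some((re) => re.test(message));
}

isOverQuotaMessage("Your credit balance is too low to access the API."); // true
isOverQuotaMessage("Invalid request: missing model parameter.");         // false
```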
@@ -16,11 +16,8 @@ export type AnthropicKeyUpdate = Omit<
   | "rateLimitedUntil"
 >;

-type AnthropicKeyUsage = {
-  [K in AnthropicModelFamily as `${K}Tokens`]: number;
-};
-
-export interface AnthropicKey extends Key, AnthropicKeyUsage {
+// AnthropicKeyUsage is removed, tokenUsage from base Key interface will be used.
+export interface AnthropicKey extends Key {
   readonly service: "anthropic";
   readonly modelFamilies: AnthropicModelFamily[];
   /**
@@ -120,8 +117,7 @@ export class AnthropicKeyProvider implements KeyProvider<AnthropicKey> {
         .digest("hex")
         .slice(0, 8)}`,
       lastChecked: 0,
-      claudeTokens: 0,
-      "claude-opusTokens": 0,
+      tokenUsage: {}, // Initialize new tokenUsage field
       tier: "unknown",
     };
     this.keys.push(newKey);
@@ -206,11 +202,23 @@ export class AnthropicKeyProvider implements KeyProvider<AnthropicKey> {
     return this.keys.filter((k) => !k.isDisabled).length;
   }

-  public incrementUsage(hash: string, model: string, tokens: number) {
-    const key = this.keys.find((k) => k.hash === hash);
+  public incrementUsage(keyHash: string, modelFamily: AnthropicModelFamily, usage: { input: number; output: number }) {
+    const key = this.keys.find((k) => k.hash === keyHash);
     if (!key) return;

     key.promptCount++;
-    key[`${getClaudeModelFamily(model)}Tokens`] += tokens;
+
+    if (!key.tokenUsage) {
+      key.tokenUsage = {};
+    }
+    // Ensure the specific family object exists
+    if (!key.tokenUsage[modelFamily]) {
+      key.tokenUsage[modelFamily] = { input: 0, output: 0 };
+    }
+
+    const currentFamilyUsage = key.tokenUsage[modelFamily]!;
+    currentFamilyUsage.input += usage.input;
+    currentFamilyUsage.output += usage.output;
   }

   getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
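Both key providers in this change share this accumulate-by-family shape, so the core step can be sketched independently. The `TokenUsage` type below is an assumption about the field on the base `Key` interface, not its actual declaration:

```ts
// Assumed shape of the per-family usage map on the base Key interface.
type TokenUsage = Partial<Record<string, { input: number; output: number }>>;

function accumulate(
  usageMap: TokenUsage,
  family: string,
  usage: { input: number; output: number }
) {
  // Lazily create the per-family bucket, then track input and output
  // separately so prompt and completion tokens can be priced independently.
  const bucket = (usageMap[family] ??= { input: 0, output: 0 });
  bucket.input += usage.input;
  bucket.output += usage.output;
}

const tokenUsage: TokenUsage = {};
accumulate(tokenUsage, "claude-opus", { input: 1200, output: 300 });
accumulate(tokenUsage, "claude-opus", { input: 800, output: 150 });
console.log(tokenUsage); // { "claude-opus": { input: 2000, output: 450 } }
```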
@@ -1,23 +1,33 @@
 import { Sha256 } from "@aws-crypto/sha256-js";
 import { SignatureV4 } from "@smithy/signature-v4";
 import { HttpRequest } from "@smithy/protocol-http";
-import axios, { AxiosError, AxiosHeaders, AxiosRequestConfig } from "axios";
+import { AxiosError, AxiosHeaders, AxiosRequestConfig } from "axios";
 import { URL } from "url";
 import { config } from "../../../config";
 import { getAwsBedrockModelFamily } from "../../models";
+import { getAxiosInstance } from "../../network";
 import { KeyCheckerBase } from "../key-checker-base";
 import type { AwsBedrockKey, AwsBedrockKeyProvider } from "./provider";

+const axios = getAxiosInstance();
+
 type ParentModelId = string;
 type AliasModelId = string;
 type ModuleAliasTuple = [ParentModelId, ...AliasModelId[]];

 const KNOWN_MODEL_IDS: ModuleAliasTuple[] = [
   ["anthropic.claude-instant-v1"],
   ["anthropic.claude-v2", "anthropic.claude-v2:1"],
   ["anthropic.claude-3-sonnet-20240229-v1:0"],
   ["anthropic.claude-3-haiku-20240307-v1:0"],
   ["anthropic.claude-3-5-haiku-20241022-v1:0"],
   ["anthropic.claude-3-opus-20240229-v1:0"],
   ["anthropic.claude-3-5-sonnet-20240620-v1:0"],
   ["anthropic.claude-3-5-sonnet-20241022-v2:0"],
   ["anthropic.claude-3-7-sonnet-20250219-v1:0"],
   ["anthropic.claude-sonnet-4-20250514-v1:0"],
   ["anthropic.claude-opus-4-20250514-v1:0"],
   ["anthropic.claude-opus-4-1-20250805-v1:0"],
   ["mistral.mistral-7b-instruct-v0:2"],
   ["mistral.mixtral-8x7b-instruct-v0:1"],
   ["mistral.mistral-large-2402-v1:0"],
@@ -86,6 +96,8 @@ export class AwsKeyChecker extends KeyCheckerBase<AwsBedrockKey> {
   protected async testKeyOrFail(key: AwsBedrockKey) {
     const isInitialCheck = !key.lastChecked;

+    // Keys with logging enabled will get rejected in the provider
+    await this.checkLoggingConfiguration(key);
     if (isInitialCheck) {
       try {
         await this.checkInferenceProfiles(key);
@@ -103,6 +115,7 @@ See https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-
     }

     // Perform checks for all parent model IDs
+    // TODO: use allsettled
     const results = await Promise.all(
       KNOWN_MODEL_IDS.filter(([model]) =>
         // Skip checks for models that are disabled anyway
@@ -176,9 +189,9 @@ See https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-
         // not necessarily disabled. Retry in 10 seconds.
         this.log.warn(
           { key: key.hash, errorType, error: error.response.data },
-          "Key is rate limited. Rechecking in 10 seconds."
+          "Key is rate limited. Rechecking in 30 seconds."
         );
-        const next = Date.now() - (KEY_CHECK_PERIOD - 10 * 1000);
+        const next = Date.now() - (KEY_CHECK_PERIOD - 30 * 1000);
         return this.updateKey(key.hash, { lastChecked: next });
       case "ValidationException":
       default:
@@ -234,7 +247,7 @@ See https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-
       } catch (e) {
         this.log.error(
           { key: key.hash, model, profile, error: e.message },
-          "Error testing model with inference profile; trying model ID directly."
+          "InvokeModel via inference profile returned an error; trying model ID directly."
         );
         result = false;
       }
@@ -244,6 +257,7 @@ See https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-
       // profile will be used when the key is used for inference.
       if (result) return true;
     }
+    this.log.debug({ key: key.hash, model }, "Testing model via model ID.");
     return this.testClaudeModel(key, model);
   } else if (model.includes("mistral")) {
     return this.testMistralModel(key, model);
@@ -269,7 +283,7 @@ See https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-
       method: "POST",
       url: POST_INVOKE_MODEL_URL(creds.region, model),
       data: payload,
-      validateStatus: (status) => [400, 403, 404].includes(status),
+      validateStatus: (status) => [400, 403, 404, 429, 503].includes(status),
     };
     config.headers = new AxiosHeaders({
       "content-type": "application/json",
@@ -281,6 +295,39 @@ See https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-
     const errorType = (headers["x-amzn-errortype"] as string).split(":")[0];
     const errorMessage = data?.message;

+    // 503 ServiceUnavailableException errors are usually due to temporary
+    // outages in the AWS infrastructure. However, because a 503 response also
+    // indicates that the key can invoke the model, we can treat this as a
+    // successful response.
+    if (status === 503 && errorType.match(/ServiceUnavailableException/i)) {
+      this.log.warn(
+        { key: key.hash, model, errorType, data, status, headers },
+        "Model is accessible, but may be temporarily unavailable."
+      );
+      return true;
+    }
+
+    // 429 ThrottlingException can suggest the model is available but the key
+    // is being rate limited. I think if a key does not have access to the
+    // model, it cannot receive a 429 response, so this should be a success.
+    if (status === 429) {
+      if (errorType.match(/ThrottlingException/i)) {
+        this.log.debug(
+          { key: key.hash, model, errorType, data, status, headers },
+          "Model is available but key is rate limited."
+        );
+        return true;
+      } else {
+        throw new AxiosError(
+          `InvokeModel returned 429 of type ${errorType}`,
+          `AWS_INVOKE_MODEL_RATE_LIMITED`,
+          response.config,
+          response.request,
+          response
+        );
+      }
+    }
+
     // This message indicates the key is valid but this particular model is not
     // accessible. Other 403s may indicate the key is not usable.
     if (
@@ -3,15 +3,13 @@ import { config } from "../../../config";
 import { logger } from "../../../logger";
 import { PaymentRequiredError } from "../../errors";
 import { AwsBedrockModelFamily, getAwsBedrockModelFamily } from "../../models";
+import { findByAnthropicId } from "../../claude-models";
 import { createGenericGetLockoutPeriod, Key, KeyProvider } from "..";
 import { prioritizeKeys } from "../prioritize-keys";
 import { AwsKeyChecker } from "./checker";

-type AwsBedrockKeyUsage = {
-  [K in AwsBedrockModelFamily as `${K}Tokens`]: number;
-};
-
-export interface AwsBedrockKey extends Key, AwsBedrockKeyUsage {
+// AwsBedrockKeyUsage is removed, tokenUsage from base Key interface will be used.
+export interface AwsBedrockKey extends Key {
   readonly service: "aws";
   readonly modelFamilies: AwsBedrockModelFamily[];
   /**
@@ -29,13 +27,13 @@ export interface AwsBedrockKey extends Key, AwsBedrockKeyUsage {
  * Upon being rate limited, a key will be locked out for this many milliseconds
  * while we wait for other concurrent requests to finish.
  */
-const RATE_LIMIT_LOCKOUT = 4000;
+const RATE_LIMIT_LOCKOUT = 5000;
 /**
  * Upon assigning a key, we will wait this many milliseconds before allowing it
  * to be used again. This is to prevent the queue from flooding a key with too
  * many requests while we wait to learn whether previous ones succeeded.
  */
-const KEY_REUSE_DELAY = 500;
+const KEY_REUSE_DELAY = 250;

 export class AwsBedrockKeyProvider implements KeyProvider<AwsBedrockKey> {
   readonly service = "aws";
@@ -74,12 +72,7 @@ export class AwsBedrockKeyProvider implements KeyProvider<AwsBedrockKey> {
       lastChecked: 0,
       modelIds: ["anthropic.claude-3-sonnet-20240229-v1:0"],
       inferenceProfileIds: [],
-      ["aws-claudeTokens"]: 0,
-      ["aws-claude-opusTokens"]: 0,
-      ["aws-mistral-tinyTokens"]: 0,
-      ["aws-mistral-smallTokens"]: 0,
-      ["aws-mistral-mediumTokens"]: 0,
-      ["aws-mistral-largeTokens"]: 0,
+      tokenUsage: {}, // Initialize new tokenUsage field
     };
     this.keys.push(newKey);
   }
@@ -104,6 +97,15 @@ export class AwsBedrockKeyProvider implements KeyProvider<AwsBedrockKey> {
     // Claude 2 is the only model that breaks this convention; Anthropic calls
     // it claude-2 but AWS calls it claude-v2.
     if (model.includes("claude-2")) neededVariantId = "claude-v2";

+    // For Claude models, try to resolve aliases to AWS model IDs
+    if (model.includes("claude") && !model.includes("anthropic.")) {
+      const claudeMapping = findByAnthropicId(model);
+      if (claudeMapping) {
+        neededVariantId = claudeMapping.awsId;
+      }
+    }
+
     const neededFamily = getAwsBedrockModelFamily(model);

     const availableKeys = this.keys.filter((k) => {
@@ -173,11 +175,22 @@ export class AwsBedrockKeyProvider implements KeyProvider<AwsBedrockKey> {
     return this.keys.filter((k) => !k.isDisabled).length;
   }

-  public incrementUsage(hash: string, model: string, tokens: number) {
-    const key = this.keys.find((k) => k.hash === hash);
+  public incrementUsage(keyHash: string, modelFamily: AwsBedrockModelFamily, usage: { input: number; output: number }) {
+    const key = this.keys.find((k) => k.hash === keyHash);
     if (!key) return;

     key.promptCount++;
-    key[`${getAwsBedrockModelFamily(model)}Tokens`] += tokens;
+
+    if (!key.tokenUsage) {
+      key.tokenUsage = {};
+    }
+    if (!key.tokenUsage[modelFamily]) {
+      key.tokenUsage[modelFamily] = { input: 0, output: 0 };
+    }
+
+    const currentFamilyUsage = key.tokenUsage[modelFamily]!;
+    currentFamilyUsage.input += usage.input;
+    currentFamilyUsage.output += usage.output;
   }

   getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
Some files were not shown because too many files have changed in this diff.