Compare commits

1 commit

| Author | SHA1 | Date |
|---|---|---|
|  | 8d5059534a |  |

+51 -79
@@ -8,9 +8,6 @@
# Use production mode unless you are developing locally.
NODE_ENV=production

# Detail level of diagnostic logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info

# ------------------------------------------------------------------------------
# General settings:

@@ -27,77 +24,36 @@ NODE_ENV=production

# Max number of context tokens a user can request at once.
# Increase this if your proxy allows GPT 32k or 128k context.
# MAX_CONTEXT_TOKENS_OPENAI=32768
# MAX_CONTEXT_TOKENS_ANTHROPIC=32768
# MAX_CONTEXT_TOKENS_OPENAI=16384

# Max number of output tokens a user can request at once.
# MAX_OUTPUT_TOKENS_OPENAI=1024
# MAX_OUTPUT_TOKENS_ANTHROPIC=1024
# MAX_OUTPUT_TOKENS_OPENAI=400
# MAX_OUTPUT_TOKENS_ANTHROPIC=400

# Whether to show the estimated cost of consumed tokens on the info page.
# SHOW_TOKEN_COSTS=false

# Whether to automatically check API keys for validity.
# Disabled by default in local development mode, but enabled in production.
# Note: CHECK_KEYS is disabled by default in local development mode, but enabled
# by default in production mode.
# CHECK_KEYS=true

# Which model types users are allowed to access.
# The following model families are recognized:
# turbo | gpt4 | gpt4-32k | gpt4-turbo | gpt4o | o1 | dall-e | claude
# | claude-opus | gemini-flash | gemini-pro | gemini-ultra | mistral-tiny |
# | mistral-small | mistral-medium | mistral-large | aws-claude |
# | aws-claude-opus | gcp-claude | gcp-claude-opus | azure-turbo | azure-gpt4
# | azure-gpt4-32k | azure-gpt4-turbo | azure-gpt4o | azure-o1 | azure-dall-e

# By default, all models are allowed except for dall-e and o1.
# turbo | gpt4 | gpt4-32k | gpt4-turbo | gpt4o | dall-e | claude | claude-opus | gemini-pro | mistral-tiny | mistral-small | mistral-medium | mistral-large | aws-claude | aws-claude-opus | azure-turbo | azure-gpt4 | azure-gpt4-32k | azure-gpt4-turbo | azure-gpt4o | azure-dall-e
# By default, all models are allowed except for 'dall-e' / 'azure-dall-e'.
# To allow DALL-E image generation, uncomment the line below and add 'dall-e' or
# 'azure-dall-e' to the list of allowed model families.
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,gpt4o,claude,claude-opus,gemini-flash,gemini-pro,gemini-ultra,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,gcp-claude,gcp-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,gpt4o,claude,claude-opus,gemini-pro,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o

# Which services can be used to process prompts containing images via multimodal
# models. The following services are recognized:
# openai | anthropic | aws | gcp | azure | google-ai | mistral-ai
# openai | anthropic | aws | azure | google-ai | mistral-ai
# Do not enable this feature unless all users are trusted, as you will be liable
# for any user-submitted images containing illegal content.
# By default, no image services are allowed and image prompts are rejected.
# ALLOWED_VISION_SERVICES=

# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
# PROMPT_LOGGING=false

# Specifies the number of proxies or load balancers in front of the server.
# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
# For any other deployments, please see config.ts as the correct configuration
# depends on your setup. Misconfiguring this value can result in problems
# accurately tracking IP addresses and enforcing rate limits.
# TRUSTED_PROXIES=1

# Whether cookies should be set without the Secure flag, for hosts that don't
# support SSL. True by default in development, false in production.
# USE_INSECURE_COOKIES=false

# Reorganizes requests in the queue according to their token count, placing
# larger prompts further back. The penalty is determined by (promptTokens *
# TOKENS_PUNISHMENT_FACTOR). A value of 1.0 adds one second per 1000 tokens.
# When there is no queue or it is very short, the effect is negligible (this
# setting only reorders the queue, it does not artificially delay requests).
# TOKENS_PUNISHMENT_FACTOR=0.0
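# (Illustrative arithmetic, not part of the original file: applying the rule
# above, with a factor of 1.0 a 4000-token prompt entering a busy queue is
# pushed back by roughly 4 seconds' worth of priority; with 0.5, roughly 2.)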

# Captcha verification settings. Refer to docs/pow-captcha.md for guidance.
# CAPTCHA_MODE=none
# POW_TOKEN_HOURS=24
# POW_TOKEN_MAX_IPS=2
# POW_DIFFICULTY_LEVEL=low
# POW_CHALLENGE_TIMEOUT=30

# -------------------------------------------------------------------------------
# Blocking settings:
# Allows blocking requests depending on content, referers, or IP addresses.
# This is a convenience feature; if you need more robust functionality it is
# highly recommended to put this application behind nginx or Cloudflare, as they
# will have better performance.

# IP addresses or CIDR blocks from which requests will be blocked.
# IP_BLACKLIST=10.0.0.1/24
# URLs from which requests will be blocked.
@@ -106,13 +62,35 @@ NODE_ENV=production
# BLOCK_MESSAGE="You must be over the age of majority in your country to use this service."
# Destination to redirect blocked requests to.
# BLOCK_REDIRECT="https://roblox.com/"
# Comma-separated list of phrases that will be rejected. Surround phrases with
# quotes if they contain commas. You can use regular expression tokens.
# Avoid overly broad phrases, as they will trigger on any match in the entire prompt.

# Comma-separated list of phrases that will be rejected. Only whole words are matched.
# Surround phrases with quotes if they contain commas.
# Avoid short or common phrases as this tests the entire prompt.
# REJECT_PHRASES="phrase one,phrase two,"phrase three, which has a comma",phrase four"
# Message to show when requests are rejected.
# REJECT_MESSAGE="You can't say that here."

# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
# PROMPT_LOGGING=false

# The port and network interface to listen on.
# PORT=7860
# BIND_ADDRESS=0.0.0.0

# Whether cookies should be set without the Secure flag, for hosts that don't support SSL.
# USE_INSECURE_COOKIES=false

# Detail level of logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info

# Captcha verification settings. Refer to docs/pow-captcha.md for guidance.
# CAPTCHA_MODE=none
# POW_TOKEN_HOURS=24
# POW_TOKEN_MAX_IPS=2
# POW_DIFFICULTY_LEVEL=low
# POW_CHALLENGE_TIMEOUT=30

# ------------------------------------------------------------------------------
# Optional settings for user management, access control, and quota enforcement:
# See `docs/user-management.md` for more information and setup instructions.
@@ -132,8 +110,14 @@ NODE_ENV=production
# ALLOW_NICKNAME_CHANGES=true

# Default token quotas for each model family. (0 for unlimited)
# Specify as TOKEN_QUOTA_MODEL_FAMILY=value (replacing dashes with underscores).
# e.g. TOKEN_QUOTA_TURBO=0, TOKEN_QUOTA_GPT4=1000000, TOKEN_QUOTA_GPT4_32K=100000
# Specify as TOKEN_QUOTA_MODEL_FAMILY=value, replacing dashes with underscores.
# TOKEN_QUOTA_TURBO=0
# TOKEN_QUOTA_GPT4=0
# TOKEN_QUOTA_GPT4_32K=0
# TOKEN_QUOTA_GPT4_TURBO=0
# TOKEN_QUOTA_CLAUDE=0
# TOKEN_QUOTA_GEMINI_PRO=0
# TOKEN_QUOTA_AWS_CLAUDE=0
# "Tokens" for image-generation models are counted at a rate of 100000 tokens
# per US$1.00 generated, which is similar to the cost of GPT-4 Turbo.
# DALL-E 3 costs around US$0.10 per image (10000 tokens).
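# (Illustrative example, not in the original file: following the naming
# pattern above, TOKEN_QUOTA_DALL_E=100000 would correspond to roughly
# US$1.00 of generation, i.e. about ten DALL-E 3 images.)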
@@ -144,22 +128,12 @@ NODE_ENV=production
# Leave unset to never automatically refresh quotas.
# QUOTA_REFRESH_PERIOD=daily

# -------------------------------------------------------------------------------
# HTTP agent settings:
# If you need to change how the proxy makes requests to other servers, such
# as when checking keys or forwarding users' requests to external services,
# you can configure an alternative HTTP agent. Otherwise the default OS settings
# will be used.

# The name of the network interface to use. The first external IPv4 address
# belonging to this interface will be used for outgoing requests.
# HTTP_AGENT_INTERFACE=enp0s3

# The URL of a proxy server to use. Supports SOCKS4, SOCKS5, HTTP, and HTTPS.
# Note that if your proxy server issues a self-signed certificate, you may need
# NODE_EXTRA_CA_CERTS set to the path to your certificate. You will need to set
# that variable in your environment, not in this file.
# HTTP_AGENT_PROXY_URL=http://test:test@127.0.0.1:8000
# Specifies the number of proxies or load balancers in front of the server.
# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
# For any other deployments, please see config.ts as the correct configuration
# depends on your setup. Misconfiguring this value can result in problems
# accurately tracking IP addresses and enforcing rate limits.
# TRUSTED_PROXIES=1

# ------------------------------------------------------------------------------
# Secrets and keys:
@@ -168,25 +142,23 @@ NODE_ENV=production

# You can add multiple API keys by separating them with a comma.
# For AWS credentials, separate the access key ID, secret key, and region with a colon.
# For GCP credentials, separate the project ID, client email, region, and private key with a colon.
OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
ANTHROPIC_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
GOOGLE_AI_KEY=AIzaxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# See `docs/aws-configuration.md` for more information; there may be additional steps required to set up AWS.
AWS_CREDENTIALS=myaccesskeyid:mysecretkey:us-east-1,anotheraccesskeyid:anothersecretkey:us-west-2
# See `docs/azure-configuration.md` for more information; there may be additional steps required to set up Azure.
AZURE_CREDENTIALS=azure-resource-name:deployment-id:api-key,another-azure-resource-name:another-deployment-id:another-api-key
GCP_CREDENTIALS=project-id:client-email:region:private-key

# With proxy_key gatekeeper, the password users must provide to access the API.
# PROXY_KEY=your-secret-key

# With user_token gatekeeper, the admin password used to manage users.
# ADMIN_KEY=your-very-secret-key
# Restrict access to the admin interface to specific IP addresses, specified
# as a comma-separated list of CIDR ranges.
# To restrict access to the admin interface to specific IP addresses, set the
# ADMIN_WHITELIST environment variable to a comma-separated list of CIDR blocks.
# ADMIN_WHITELIST=0.0.0.0/0

# With firebase_rtdb gatekeeper storage, the Firebase project credentials.
# FIREBASE_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# FIREBASE_RTDB_URL=https://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.firebaseio.com

@@ -1,20 +1,16 @@
# OAI Reverse Proxy

Reverse proxy server for various LLM APIs.

### Table of Contents
<!-- TOC -->
* [OAI Reverse Proxy](#oai-reverse-proxy)
  * [Table of Contents](#table-of-contents)
  * [What is this?](#what-is-this)
  * [Features](#features)
  * [Usage Instructions](#usage-instructions)
    * [Personal Use (single-user)](#personal-use-single-user)
      * [Updating](#updating)
      * [Local Development](#local-development)
    * [Self-hosting](#self-hosting)
  * [Building](#building)
  * [Forking](#forking)
<!-- TOC -->
- [What is this?](#what-is-this)
- [Features](#features)
- [Usage Instructions](#usage-instructions)
  - [Self-hosting](#self-hosting)
  - [Alternatives](#alternatives)
    - [Huggingface (outdated, not advised)](#huggingface-outdated-not-advised)
    - [Render (outdated, not advised)](#render-outdated-not-advised)
- [Local Development](#local-development)

## What is this?
This project allows you to run a reverse proxy server for various LLM APIs.
@@ -24,7 +20,6 @@ This project allows you to run a reverse proxy server for various LLM APIs.
- [x] [OpenAI](https://openai.com/)
- [x] [Anthropic](https://www.anthropic.com/)
- [x] [AWS Bedrock](https://aws.amazon.com/bedrock/)
- [x] [Vertex AI (GCP)](https://cloud.google.com/vertex-ai/)
- [x] [Google MakerSuite/Gemini API](https://ai.google.dev/)
- [x] [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
- [x] Translation from OpenAI-formatted prompts to any other API, including streaming responses
@@ -33,40 +28,43 @@ This project allows you to run a reverse proxy server for various LLM APIs.
- [x] Simple role-based permissions
- [x] Per-model token quotas
- [x] Temporary user accounts
- [x] Event audit logging
- [x] Optional full logging of prompts and completions
- [x] Prompt and completion logging
- [x] Abuse detection and prevention
- [x] IP address and user token model invocation rate limits
- [x] IP blacklists
- [x] Proof-of-work challenge for access by anonymous users

---

## Usage Instructions
If you'd like to run your own instance of this server, you'll need to deploy it somewhere and configure it with your API keys. A few easy options are provided below, though you can also deploy it to any other service you'd like if you know what you're doing and the service supports Node.js.

### Personal Use (single-user)
If you just want to run the proxy server to use yourself without hosting it for others:
1. Install [Node.js](https://nodejs.org/en/download/) >= 18.0.0
2. Clone this repository
3. Create a `.env` file in the root of the project and add your API keys. See the [.env.example](./.env.example) file for an example.
4. Install dependencies with `npm install`
5. Run `npm run build`
6. Run `npm start`

#### Updating
You must re-run `npm install` and `npm run build` whenever you pull new changes from the repository.
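
A typical update sequence looks like the following (a sketch; it assumes you originally cloned the repository with git):

```
git pull
npm install
npm run build
```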

#### Local Development
Use `npm run start:dev` to run the proxy in development mode with watch mode enabled. Use `npm run type-check` to run the type checker across the project.

### Self-hosting
[See here for instructions on how to self-host the application on your own VPS or local machine and expose it to the internet for others to use.](./docs/self-hosting.md)
[See here for instructions on how to self-host the application on your own VPS or local machine.](./docs/self-hosting.md)

**Ensure you set the `TRUSTED_PROXIES` environment variable according to your deployment.** Refer to [.env.example](./.env.example) and [config.ts](./src/config.ts) for more information.

### Alternatives
Fiz and Sekrit are working on some alternative ways to deploy this conveniently. While I'm not involved in this effort beyond providing technical advice regarding my code, I'll link to their work here for convenience: [Sekrit's rentry](https://rentry.org/sekrit)

### Huggingface (outdated, not advised)
[See here for instructions on how to deploy to a Huggingface Space.](./docs/deploy-huggingface.md)

### Render (outdated, not advised)
[See here for instructions on how to deploy to Render.com.](./docs/deploy-render.md)

## Local Development
To run the proxy locally for development or testing, install Node.js >= 18.0.0 and follow the steps below.

1. Clone the repo
2. Install dependencies with `npm install`
3. Create a `.env` file in the root of the project and add your API keys. See the [.env.example](./.env.example) file for an example.
4. Start the server in development mode with `npm run start:dev`.

You can also use `npm run start:dev:tsc` to enable project-wide type checking at the cost of slower startup times. `npm run type-check` can be used to run type checking without starting the server.

## Building
To build the project, run `npm run build`. This will compile the TypeScript code to JavaScript and output it to the `build` directory. You should run this whenever you pull new changes from the repository.
To build the project, run `npm run build`. This will compile the TypeScript code to JavaScript and output it to the `build` directory.

Note that if you are trying to build the server on a very memory-constrained (<= 1GB) VPS, you may need to run the build with `NODE_OPTIONS=--max_old_space_size=2048 npm run build` to avoid running out of memory during the build process, assuming you have swap enabled. The application itself should run fine on a 512MB VPS for most reasonable traffic levels.

## Forking

If you are forking the repository on GitGud, you may wish to disable GitLab CI/CD, or you will be spammed with emails about failed builds due to not having any CI runners. You can do this by going to *Settings > General > Visibility, project features, permissions* and then disabling the "CI/CD" feature.

@@ -17,8 +17,9 @@ ARG GREETING_URL
RUN if [ -n "$GREETING_URL" ]; then \
      curl -sL "$GREETING_URL" > greeting.md; \
    fi
COPY . .
COPY package*.json greeting.md* ./
RUN npm install
COPY . .
RUN npm run build
RUN --mount=type=secret,id=_env,dst=/etc/secrets/.env cat /etc/secrets/.env >> .env
EXPOSE 10000

@@ -1,6 +1,6 @@
# Deploy to Render.com

**⚠️ This method is no longer supported or recommended and may not work. Please use the [self-hosting instructions](./self-hosting.md) instead.**
**⚠️ This method is no longer recommended. Please use the [self-hosting instructions](./self-hosting.md) instead.**

Render.com offers a free tier that includes 750 hours of compute time per month. This is enough to run a single proxy instance 24/7. Instances shut down after 15 minutes without traffic but start up again automatically when a request is received. You can use something like https://app.checklyhq.com/ to ping your proxy every 15 minutes to keep it alive.

@@ -1,35 +0,0 @@
# Configuring the proxy for Vertex AI (GCP)

The proxy supports GCP models via the `/proxy/gcp/claude` endpoint. There are a few extra steps necessary to use GCP compared to the other supported APIs.

- [Setting keys](#setting-keys)
- [Setup Vertex AI](#setup-vertex-ai)
- [Supported model IDs](#supported-model-ids)

## Setting keys

Use the `GCP_CREDENTIALS` environment variable to set the GCP API keys.

Like other APIs, you can provide multiple keys separated by commas. Each GCP key, however, is a set of credentials including the project ID, client email, region, and private key. These are separated by a colon (`:`).

For example:

```
GCP_CREDENTIALS=my-first-project:xxx@yyy.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----,my-first-project2:xxx2@yyy.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----
```

## Setup Vertex AI
1. Go to [https://cloud.google.com/vertex-ai](https://cloud.google.com/vertex-ai) and sign up for a GCP account. ($150 free credits without credit card or $300 free credits with credit card; credits expire in 90 days)
2. Go to [https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com](https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com) to enable the Vertex AI API.
3. Go to [https://console.cloud.google.com/vertex-ai](https://console.cloud.google.com/vertex-ai) and navigate to Model Garden to apply for access to the Claude models.
4. Create a [Service Account](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts/create?walkthrough_id=iam--create-service-account#step_index=1), and make sure to grant the role of "Vertex AI User" or "Vertex AI Administrator".
5. On the service account page you just created, create a new key and select "JSON". The JSON file will be downloaded automatically.
6. The required credentials are in the JSON file you just downloaded.
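
For reference, a sketch of how the downloaded JSON maps onto `GCP_CREDENTIALS` (field names follow the standard service-account key layout; the region is whichever one you enabled, not a field in the file):

```
# project_id : client_email : region : private_key
GCP_CREDENTIALS=<project_id>:<client_email>:<region>:<private_key>
```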

## Supported model IDs
Users can send these model IDs to the proxy to invoke the corresponding models.
- **Claude**
  - `claude-3-haiku@20240307`
  - `claude-3-sonnet@20240229`
  - `claude-3-opus@20240229`
  - `claude-3-5-sonnet@20240620`
+1 -1
@@ -129,7 +129,7 @@ also significantly reduce hash rates on mobile devices.
- Intel Core i9-13900K (Chrome, in VM limited to 4 cores): 12.2 - 13.0 H/s
- iPad Pro (M2) (Safari, 6 workers): 8.0 - 10 H/s
  - Thermal throttles early. 8 cores is normal concurrency, but unstable.
- iPhone 15 Pro Max (Safari): 4.0 - 4.6 H/s
- iPhone 13 Pro (Safari): 4.0 - 4.6 H/s
- Samsung Galaxy S10e (Chrome): 3.6 - 3.8 H/s
  - This is a 2019 phone almost matching an iPhone five years newer because of
    bad Safari performance.

Generated file: +262 -1173 (diff suppressed because it is too large)
+10 -17
@@ -5,11 +5,10 @@
  "scripts": {
    "build": "tsc && copyfiles -u 1 src/**/*.ejs build",
    "database:migrate": "ts-node scripts/migrate.ts",
    "postinstall": "patch-package",
    "prepare": "husky install",
    "start": "node --trace-deprecation --trace-warnings build/server.js",
    "start": "node build/server.js",
    "start:dev": "nodemon --watch src --exec ts-node --transpile-only src/server.ts",
    "start:debug": "ts-node --inspect --transpile-only src/server.ts",
    "start:replit": "tsc && node build/server.js",
    "start:watch": "nodemon --require source-map-support/register build/server.js",
    "type-check": "tsc --noEmit"
  },
@@ -21,14 +20,14 @@
  "dependencies": {
    "@anthropic-ai/tokenizer": "^0.0.4",
    "@aws-crypto/sha256-js": "^5.2.0",
    "@huggingface/jinja": "^0.3.0",
    "@node-rs/argon2": "^1.8.3",
    "@smithy/eventstream-codec": "^2.1.3",
    "@smithy/eventstream-serde-node": "^2.1.3",
    "@smithy/protocol-http": "^3.2.1",
    "@smithy/signature-v4": "^2.1.3",
    "@smithy/types": "^2.10.1",
    "@smithy/util-utf8": "^2.1.1",
    "axios": "^1.7.4",
    "axios": "^1.3.5",
    "better-sqlite3": "^10.0.0",
    "check-disk-space": "^3.4.0",
    "cookie-parser": "^1.4.6",
@@ -37,35 +36,30 @@
    "csrf-csrf": "^2.3.0",
    "dotenv": "^16.3.1",
    "ejs": "^3.1.10",
    "express": "^4.19.3",
    "express": "^4.18.2",
    "express-session": "^1.17.3",
    "firebase-admin": "^12.5.0",
    "firebase-admin": "^12.1.0",
    "glob": "^10.3.12",
    "googleapis": "^122.0.0",
    "http-proxy": "1.18.1",
    "http-proxy-middleware": "^3.0.2",
    "http-proxy-middleware": "^3.0.0-beta.1",
    "ipaddr.js": "^2.1.0",
    "memorystore": "^1.6.7",
    "multer": "^1.4.5-lts.1",
    "node-schedule": "^2.1.1",
    "patch-package": "^8.0.0",
    "pino": "^8.11.0",
    "pino-http": "^8.3.3",
    "proxy-agent": "^6.4.0",
    "sanitize-html": "^2.13.0",
    "sanitize-html": "2.12.1",
    "sharp": "^0.32.6",
    "showdown": "^2.1.0",
    "source-map-support": "^0.5.21",
    "stream-json": "^1.8.0",
    "tiktoken": "^1.0.10",
    "tinyws": "^0.1.0",
    "uuid": "^9.0.0",
    "zlib": "^1.0.5",
    "zod": "^3.22.3",
    "zod-error": "^1.5.0"
  },
  "devDependencies": {
    "@smithy/types": "^3.3.0",
    "@types/better-sqlite3": "^7.6.10",
    "@types/cookie-parser": "^1.4.3",
    "@types/cors": "^2.8.13",
@@ -89,8 +83,7 @@
    "typescript": "^5.4.2"
  },
  "overrides": {
    "node-fetch@2.x": {
      "whatwg-url": "14.x"
    }
    "postcss": "^8.4.31",
    "follow-redirects": "^1.15.4"
  }
}

@@ -1,23 +0,0 @@
# Patches
Contains monkey patches for certain packages, applied using `patch-package`.

## `http-proxy+1.18.1.patch`
Modifies the `http-proxy` package to work around an incompatibility with
body-parser and SOCKS5 proxies due to some esoteric stream handling behavior
when `socks-proxy-agent` is used instead of a generic http.Agent.

Modification involves adjusting the `buffer` property on ProxyServer's `options`
object to be a function that returns a stream instead of a stream itself. This
allows us to give it a function which produces a new Readable from the already-
parsed request body.

With the old implementation we would need to create an entirely new ProxyServer
instance for each request, which is not ideal under heavy load.
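
A minimal sketch of what the patched option shape enables (illustrative only; everything other than the `buffer` option is an assumption, not from the patch):

```
const httpProxy = require("http-proxy");
const { Readable } = require("stream");

// With the patch, `buffer` may be a function of the incoming request, so one
// shared ProxyServer can rebuild a body stream per request instead of a new
// ProxyServer being constructed for every request.
const proxy = httpProxy.createProxyServer({
  buffer: (req) => Readable.from([JSON.stringify(req.body)]),
});
```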

`http-proxy` hasn't been updated in six years so it's unlikely that this patch
will be broken by future updates, but it's still pinned to 1.18.1 for now.

### See also
https://github.com/chimurai/http-proxy-middleware/issues/40
https://github.com/chimurai/http-proxy-middleware/issues/299
https://github.com/http-party/node-http-proxy/pull/1027
@@ -1,13 +0,0 @@
diff --git a/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js b/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
index 7ae7355..c825c27 100644
--- a/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
+++ b/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
@@ -167,7 +167,7 @@ module.exports = {
      }
    }

-    (options.buffer || req).pipe(proxyReq);
+    (options.buffer(req) || req).pipe(proxyReq);

    proxyReq.on('response', function(proxyRes) {
      if(server) { server.emit('proxyRes', proxyRes, req, res); }
@@ -30,6 +30,7 @@ self.onmessage = async (event) => {
  nonce = data.nonce;

  const c = data.challenge;
  // decode salt to Uint8Array
  const salt = new Uint8Array(c.s.length / 2);
  for (let i = 0; i < c.s.length; i += 2) {
    salt[i / 2] = parseInt(c.s.slice(i, i + 2), 16);
@@ -98,7 +99,7 @@ const solve = async () => {
    self.postMessage({ type: "solved", nonce: solution.nonce });
    active = false;
  } else {
    if (Date.now() - lastNotify >= 500) {
    if (Date.now() - lastNotify > 1000) {
      console.log("Last nonce", nonce, "Hashes", hashesSinceLastNotify);
      self.postMessage({ type: "progress", hashes: hashesSinceLastNotify });
      lastNotify = Date.now();

@@ -230,39 +230,6 @@ Content-Type: application/json
  ]
}

###
# @name Proxy / GCP Claude -- Native Completion
POST {{proxy-host}}/proxy/gcp/claude/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-01-01
Content-Type: application/json

{
  "model": "claude-v2",
  "max_tokens_to_sample": 10,
  "temperature": 0,
  "stream": true,
  "prompt": "What is genshin impact\n\n:Assistant:"
}

###
# @name Proxy / GCP Claude -- OpenAI-to-Anthropic API Translation
POST {{proxy-host}}/proxy/gcp/claude/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json

{
  "model": "gpt-3.5-turbo",
  "max_tokens": 50,
  "stream": true,
  "messages": [
    {
      "role": "user",
      "content": "What is genshin impact?"
    }
  ]
}

###
# @name Proxy / Azure OpenAI -- Native Chat Completions
POST {{proxy-host}}/proxy/azure/openai/chat/completions

@@ -51,8 +51,6 @@ function getRandomModelFamily() {
    "mistral-large",
    "aws-claude",
    "aws-claude-opus",
    "gcp-claude",
    "gcp-claude-opus",
    "azure-turbo",
    "azure-gpt4",
    "azure-gpt4-32k",

@@ -1,118 +0,0 @@
// uses the aws sdk to sign a request, then uses axios to send it to the bedrock REST API manually
import axios from "axios";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";

const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID!;
const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY!;

// Copied from amazon bedrock docs

// List models
// ListFoundationModels
// Service: Amazon Bedrock
// List of Bedrock foundation models that you can use. For more information, see Foundation models in the
// Bedrock User Guide.
// Request Syntax
// GET /foundation-models?
// byCustomizationType=byCustomizationType&byInferenceType=byInferenceType&byOutputModality=byOutputModality&byProvider=byProvider
// HTTP/1.1
// URI Request Parameters
// The request uses the following URI parameters.
// byCustomizationType (p. 38)
// List by customization type.
// Valid Values: FINE_TUNING
// byInferenceType (p. 38)
// List by inference type.
// Valid Values: ON_DEMAND | PROVISIONED
// byOutputModality (p. 38)
// List by output modality type.
// Valid Values: TEXT | IMAGE | EMBEDDING
// byProvider (p. 38)
// A Bedrock model provider.
// Pattern: ^[a-z0-9-]{1,63}$
// Request Body
// The request does not have a request body

// Run inference on a text model
// Send an invoke request to run inference on a Titan Text G1 - Express model. We set the accept
// parameter to accept any content type in the response.
// POST https://bedrock.us-east-1.amazonaws.com/model/amazon.titan-text-express-v1/invoke
// -H accept: */*
// -H content-type: application/json
// Payload
// {"inputText": "Hello world"}
// Example response
// Response for the above request.
// -H content-type: application/json
// Payload
// <the model response>

const AMZ_REGION = "us-east-1";
const AMZ_HOST = "invoke-bedrock.us-east-1.amazonaws.com";

async function listModels() {
  const httpRequest = new HttpRequest({
    method: "GET",
    protocol: "https:",
    hostname: AMZ_HOST,
    path: "/foundation-models",
    headers: { ["Host"]: AMZ_HOST },
  });

  const signedRequest = await signRequest(httpRequest);
  const response = await axios.get(
    `https://${signedRequest.hostname}${signedRequest.path}`,
    { headers: signedRequest.headers }
  );
  console.log(response.data);
}

async function invokeModel() {
  const model = "anthropic.claude-v1";
  const httpRequest = new HttpRequest({
    method: "POST",
    protocol: "https:",
    hostname: AMZ_HOST,
    path: `/model/${model}/invoke`,
    headers: {
      ["Host"]: AMZ_HOST,
      ["accept"]: "*/*",
      ["content-type"]: "application/json",
    },
    body: JSON.stringify({
      temperature: 0.5,
      prompt: "\n\nHuman:Hello world\n\nAssistant:",
      max_tokens_to_sample: 10,
    }),
  });
  console.log("httpRequest", httpRequest);

  const signedRequest = await signRequest(httpRequest);
  const response = await axios.post(
    `https://${signedRequest.hostname}${signedRequest.path}`,
    signedRequest.body,
    { headers: signedRequest.headers }
  );
  console.log(response.status);
  console.log(response.headers);
  console.log(response.data);
  console.log("full url", response.request.res.responseUrl);
}

async function signRequest(request: HttpRequest) {
  const signer = new SignatureV4({
    sha256: Sha256,
    credentials: {
      accessKeyId: AWS_ACCESS_KEY_ID,
      secretAccessKey: AWS_SECRET_ACCESS_KEY,
    },
    region: AMZ_REGION,
    service: "bedrock",
  });
  return await signer.sign(request, { signingDate: new Date() });
}

// listModels();
// invokeModel();
@@ -1,53 +0,0 @@
const axios = require("axios");

function randomInteger(max) {
  return Math.floor(Math.random() * max + 1);
}

async function testQueue() {
  const requests = Array(10).fill(undefined).map(async function() {
    const maxTokens = randomInteger(2000);

    const headers = {
      "Authorization": "Bearer test",
      "Content-Type": "application/json",
      "X-Forwarded-For": `${randomInteger(255)}.${randomInteger(255)}.${randomInteger(255)}.${randomInteger(255)}`,
    };

    const payload = {
      model: "gpt-4o-mini-2024-07-18",
      max_tokens: 20 + maxTokens,
      stream: false,
      messages: [{role: "user", content: "You are being benchmarked regarding your reliability at outputting exact, machine-comprehensible data. Output the sentence \"The quick brown fox jumps over the lazy dog.\" Do not precede it with quotemarks or any form of preamble, and do not output anything after the sentence."}],
      temperature: 0,
    };

    try {
      const response = await axios.post(
        "http://localhost:7860/proxy/openai/v1/chat/completions",
        payload,
        { headers }
      );

      if (response.status !== 200) {
        console.error(`Request ${maxTokens} finished with status code ${response.status} and response`, response.data);
        return;
      }

      const content = response.data.choices[0].message.content;

      console.log(
        `Request ${maxTokens} `,
        content === "The quick brown fox jumps over the lazy dog." ? "OK" : `mangled: ${content}`
      );
    } catch (error) {
      const msg = error.response;
      console.error(`Error in req ${maxTokens}:`, error.message, msg || "");
    }
  });

  await Promise.all(requests);
  console.log("All requests finished");
}

testQueue();
+16 -19
@@ -17,7 +17,7 @@ import {
} from "../../shared/users/schema";
import { getLastNImages } from "../../shared/file-storage/image-history";
import { blacklists, parseCidrs, whitelists } from "../../shared/cidr";
import { invalidatePowChallenges } from "../../user/web/pow-captcha";
import { invalidatePowHmacKey } from "../../user/web/pow-captcha";

const router = Router();

@@ -268,14 +268,7 @@ router.post("/maintenance", (req, res) => {
  let flash = { type: "", message: "" };
  switch (action) {
    case "recheck": {
      const checkable: LLMService[] = [
        "openai",
        "anthropic",
        "aws",
        "gcp",
        "azure",
        "google-ai"
      ];
      const checkable: LLMService[] = ["openai", "anthropic", "aws", "azure"];
      checkable.forEach((s) => keyPool.recheck(s));
      const keyCount = keyPool
        .list()
@@ -324,7 +317,7 @@ router.post("/maintenance", (req, res) => {
        user.disabledReason = "Admin forced expiration.";
        userStore.upsertUser(user);
      });
      invalidatePowChallenges();
      invalidatePowHmacKey();
      flash.type = "success";
      flash.message = `${temps.length} temporary users marked for expiration.`;
      break;
@@ -345,20 +338,24 @@ router.post("/maintenance", (req, res) => {
    case "setDifficulty": {
      const selected = req.body["pow-difficulty"];
      const valid = ["low", "medium", "high", "extreme"];
      const isNumber = Number.isInteger(Number(selected));
      if (!selected || !valid.includes(selected) && !isNumber) {
        throw new HttpError(400, "Invalid difficulty " + selected);
      if (!selected || !valid.includes(selected)) {
        throw new HttpError(400, "Invalid difficulty" + selected);
      }
      config.powDifficultyLevel = isNumber ? Number(selected) : selected;
      invalidatePowChallenges();
      config.powDifficultyLevel = selected;
      break;
    }
    case "generateTempIpReport": {
      const tempUsers = userStore
        .getUsers()
        .filter((u) => u.type === "temporary");
      const ipv4RangeMap = new Map<string, Set<string>>();
      const ipv6RangeMap = new Map<string, Set<string>>();
      const ipv4RangeMap: Map<string, Set<string>> = new Map<
        string,
        Set<string>
      >();
      const ipv6RangeMap: Map<string, Set<string>> = new Map<
        string,
        Set<string>
      >();

      tempUsers.forEach((u) => {
        u.ip.forEach((ip) => {
@@ -368,14 +365,14 @@ router.post("/maintenance", (req, res) => {
          const subnet =
            parsed.toNormalizedString().split(".").slice(0, 3).join(".") +
            ".0/24";
          const userSet = ipv4RangeMap.get(subnet) || new Set();
          const userSet = ipv4RangeMap.get(subnet) || new Set<string>();
          userSet.add(u.token);
          ipv4RangeMap.set(subnet, userSet);
        } else if (parsed.kind() === "ipv6") {
          const subnet =
            parsed.toNormalizedString().split(":").slice(0, 4).join(":") +
            "::/48";
          const userSet = ipv6RangeMap.get(subnet) || new Set();
          const userSet = ipv6RangeMap.get(subnet) || new Set<string>();
          userSet.add(u.token);
          ipv6RangeMap.set(subnet, userSet);
        }

@@ -38,20 +38,15 @@
<h3>Difficulty Level</h3>
<div>
  <label for="difficulty">Difficulty Level:</label>
  <select name="difficulty" id="difficulty" onchange="difficultyChanged(event)">
  <span id="currentDifficulty">Current: <%= difficulty %></span>
  <select name="difficulty" id="difficulty">
    <option value="low">Low</option>
    <option value="medium">Medium</option>
    <option value="high">High</option>
    <option value="extreme">Extreme</option>
    <option value="custom">Custom</option>
  </select>
  <div id="custom-difficulty-container" style="display: none">
    <label for="customDifficulty">Hashes required (average):</label>
    <input type="number" id="customDifficulty" value="0" min="1" max="1000000000" />
  </div>
  <button onclick='doAction("setDifficulty")'>Update Difficulty</button>
</div>
<div><span id="currentDifficulty">Current Difficulty: <%= difficulty %></span></div>
<% } %>
<form id="maintenanceForm" action="/admin/manage/maintenance" method="post">
  <input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
@@ -68,14 +63,14 @@
<div>
  <h2>IP Whitelists and Blacklists</h2>
  <p>
    You can specify IP ranges to whitelist or blacklist from accessing the proxy. Entries can be specified as single
    addresses or
    <a href="https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation">CIDR notation</a>. IPv6 is
    supported but not recommended for use with the current version of the proxy.
    You can specify IP ranges to whitelist or blacklist from accessing the proxy. Note that changes here are not
    persisted across server restarts. If you want to make changes permanent, you can copy the values to your deployment
    configuration.
  </p>
  <p>
    <strong>Note:</strong> Changes here are not persisted across server restarts. If you want to make changes permanent,
    you can copy the values to your deployment configuration.
    Entries can be specified as single addresses or
    <a href="https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation">CIDR notation</a>. IPv6 is
    supported but not recommended for use with the current version of the proxy.
  </p>
  <% for (let i = 0; i < whitelists.length; i++) { %>
    <%- include("partials/admin-cidr-widget", { list: whitelists[i] }) %>
@@ -104,25 +99,10 @@
</div>

<script>
  function difficultyChanged(event) {
    const value = event.target.value;
    if (value === "custom") {
      document.getElementById("custom-difficulty-container").style.display = "block";
    } else {
      document.getElementById("custom-difficulty-container").style.display = "none";
    }
  }

  function doAction(action) {
    document.getElementById("hiddenAction").value = action;
    if (action === "setDifficulty") {
      const selected = document.getElementById("difficulty").value;
      const hiddenDifficulty = document.getElementById("hiddenDifficulty");
      if (selected === "custom") {
        hiddenDifficulty.value = document.getElementById("customDifficulty").value;
      } else {
        hiddenDifficulty.value = selected;
      }
      document.getElementById("hiddenDifficulty").value = document.getElementById("difficulty").value;
    }
    document.getElementById("maintenanceForm").submit();
  }

@@ -43,7 +43,7 @@
<legend>Bulk Quota Management</legend>
<p>
  <button id="refresh-quotas" type="button" onclick="submitForm('resetQuotas')">Refresh All Quotas</button>
  Immediately refreshes all users' quotas by the configured amounts.
  Resets all users' quotas to the values set in the <code>TOKEN_QUOTA_*</code> environment variables.
</p>
<p>
  <button id="clear-token-counts" type="button" onclick="submitForm('resetCounts')">Clear All Token Counts</button>

@@ -101,10 +101,6 @@
<% ["nickname", "type", "disabledAt", "disabledReason", "maxIps", "adminNote"].forEach(function (key) { %>
  <input type="hidden" name="<%- key %>" value="<%- user[key] %>" />
<% }); %>
<!-- tokenRefresh_ keys are dynamically generated -->
<% Object.entries(quota).forEach(([family]) => { %>
  <input type="hidden" name="tokenRefresh_<%- family %>" value="<%- user.tokenRefresh[family] || quota[family] %>" />
<% }); %>
</form>

<h3>Quota Information</h3>
@@ -115,7 +111,7 @@
  <button type="submit" class="btn btn-primary">Refresh Quotas for User</button>
</form>
<% } %>
<%- include("partials/shared_quota-info", { quota, user, showRefreshEdit: true }) %>
<%- include("partials/shared_quota-info", { quota, user }) %>

<p><a href="/admin/manage/list-users">Back to User List</a></p>

@@ -126,25 +122,18 @@
const token = a.dataset.token;
const field = a.dataset.field;
const existingValue = document.querySelector(`#current-values input[name=${field}]`).value;

let value = prompt(`Enter new value for '${field}':`, existingValue);
let value = prompt(`Enter new value for '${field}'':`, existingValue);
if (value !== null) {
  if (value === "") {
    value = null;
  }

  const payload = { _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") };
  if (field.startsWith("tokenRefresh_")) {
    const family = field.slice("tokenRefresh_".length);
    payload.tokenRefresh = { [family]: Number(value) };
  } else {
    payload[field] = value;
  }

  fetch(`/admin/manage/edit-user/${token}`, {
    method: "POST",
    credentials: "same-origin",
    body: JSON.stringify(payload),
    body: JSON.stringify({
      [field]: value,
      _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content"),
    }),
    headers: { "Content-Type": "application/json", Accept: "application/json" },
  })
    .then((res) => Promise.all([res.ok, res.json()]))
@@ -152,7 +141,9 @@
  const url = new URL(window.location.href);
  const params = new URLSearchParams();
  if (!ok) {
    alert(`Failed to edit user: ${json.message}`);
    params.set("flash", `error: ${json.error.message}`);
  } else {
    params.set("flash", `success: User's ${field} updated.`);
  }
  url.search = params.toString();
  window.location.assign(url);

+56 -83
@@ -45,13 +45,6 @@ type Config = {
   * @example `AWS_CREDENTIALS=access_key_1:secret_key_1:us-east-1,access_key_2:secret_key_2:us-west-2`
   */
  awsCredentials?: string;
  /**
   * Comma-delimited list of GCP credentials. Each credential item should be a
   * colon-delimited list of project ID, client email, region, and private key.
   *
   * @example `GCP_CREDENTIALS=project1:1@1.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----,project2:2@2.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----`
   */
  gcpCredentials?: string;
  /**
   * Comma-delimited list of Azure OpenAI credentials. Each credential item
   * should be a colon-delimited list of Azure resource name, deployment ID, and
@@ -356,7 +349,7 @@ type Config = {
   *
   * Defaults to no services, meaning image prompts are disabled. Use a comma-
   * separated list. Available services are:
   * openai,anthropic,google-ai,mistral-ai,aws,gcp,azure
   * openai,anthropic,google-ai,mistral-ai,aws,azure
   */
  allowedVisionServices: LLMService[];
  /**
@@ -378,43 +371,6 @@ type Config = {
   * Takes precedence over the adminWhitelist.
   */
  ipBlacklist: string[];
  /**
   * If set, pushes requests further back into the queue according to their
   * token costs by factor*tokens*milliseconds (or more intuitively
   * factor*thousands_of_tokens*seconds).
   * Accepts floats.
   */
  tokensPunishmentFactor: number;
  /**
   * Configuration for HTTP requests made by the proxy to other servers, such
   * as when checking keys or forwarding users' requests to external services.
   * If not set, all requests will be made using the default agent.
   *
   * If set, the proxy may make requests to other servers using the specified
   * settings. This is useful if you wish to route users' requests through
   * another proxy or VPN, or if you have multiple network interfaces and want
   * to use a specific one for outgoing requests.
   */
  httpAgent?: {
    /**
     * The name of the network interface to use. The first external IPv4 address
     * belonging to this interface will be used for outgoing requests.
     */
    interface?: string;
    /**
     * The URL of a proxy server to use. Supports SOCKS4, SOCKS5, HTTP, and
     * HTTPS. If not set, requests will be made using the default agent.
     * - SOCKS4: `socks4://some-socks-proxy.com:9050`
     * - SOCKS5: `socks5://username:password@some-socks-proxy.com:9050`
     * - HTTP: `http://proxy-server-over-tcp.com:3128`
     * - HTTPS: `https://proxy-server-over-tls.com:3129`
     *
     * **Note:** If your proxy server issues a certificate, you may need to set
     * `NODE_EXTRA_CA_CERTS` to the path to your certificate, otherwise this
     * application will reject TLS connections.
     */
    proxyUrl?: string;
  };
};

// To change configs, create a file called .env in the root directory.
@@ -427,7 +383,6 @@ export const config: Config = {
  googleAIKey: getEnvWithDefault("GOOGLE_AI_KEY", ""),
  mistralAIKey: getEnvWithDefault("MISTRAL_AI_KEY", ""),
  awsCredentials: getEnvWithDefault("AWS_CREDENTIALS", ""),
  gcpCredentials: getEnvWithDefault("GCP_CREDENTIALS", ""),
  azureCredentials: getEnvWithDefault("AZURE_CREDENTIALS", ""),
  proxyKey: getEnvWithDefault("PROXY_KEY", ""),
  adminKey: getEnvWithDefault("ADMIN_KEY", ""),
@@ -452,23 +407,40 @@ export const config: Config = {
  firebaseKey: getEnvWithDefault("FIREBASE_KEY", undefined),
  textModelRateLimit: getEnvWithDefault("TEXT_MODEL_RATE_LIMIT", 4),
  imageModelRateLimit: getEnvWithDefault("IMAGE_MODEL_RATE_LIMIT", 4),
  maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 32768),
  maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 16384),
  maxContextTokensAnthropic: getEnvWithDefault(
    "MAX_CONTEXT_TOKENS_ANTHROPIC",
    32768
    0
  ),
  maxOutputTokensOpenAI: getEnvWithDefault(
    ["MAX_OUTPUT_TOKENS_OPENAI", "MAX_OUTPUT_TOKENS"],
    1024
    400
  ),
  maxOutputTokensAnthropic: getEnvWithDefault(
    ["MAX_OUTPUT_TOKENS_ANTHROPIC", "MAX_OUTPUT_TOKENS"],
    1024
  ),
  allowedModelFamilies: getEnvWithDefault(
    "ALLOWED_MODEL_FAMILIES",
    getDefaultModelFamilies()
    400
  ),
  allowedModelFamilies: getEnvWithDefault("ALLOWED_MODEL_FAMILIES", [
    "turbo",
    "gpt4",
    "gpt4-32k",
    "gpt4-turbo",
    "gpt4o",
    "claude",
    "claude-opus",
    "gemini-pro",
    "mistral-tiny",
    "mistral-small",
    "mistral-medium",
    "mistral-large",
    "aws-claude",
    "aws-claude-opus",
    "azure-turbo",
    "azure-gpt4",
    "azure-gpt4-32k",
    "azure-gpt4-turbo",
    "azure-gpt4o",
  ]),
  rejectPhrases: parseCsv(getEnvWithDefault("REJECT_PHRASES", "")),
  rejectMessage: getEnvWithDefault(
    "REJECT_MESSAGE",
@@ -520,11 +492,6 @@ export const config: Config = {
    getEnvWithDefault("ADMIN_WHITELIST", "0.0.0.0/0,::/0")
  ),
  ipBlacklist: parseCsv(getEnvWithDefault("IP_BLACKLIST", "")),
  tokensPunishmentFactor: getEnvWithDefault("TOKENS_PUNISHMENT_FACTOR", 0.0),
  httpAgent: {
    interface: getEnvWithDefault("HTTP_AGENT_INTERFACE", undefined),
    proxyUrl: getEnvWithDefault("HTTP_AGENT_PROXY_URL", undefined),
  },
} as const;

function generateSigningKey() {
@@ -542,7 +509,6 @@ function generateSigningKey() {
    config.googleAIKey,
    config.mistralAIKey,
    config.awsCredentials,
    config.gcpCredentials,
    config.azureCredentials,
  ];
  if (secrets.filter((s) => s).length === 0) {
@@ -561,7 +527,7 @@ function generateSigningKey() {
}

const signingKey = generateSigningKey();
export const SECRET_SIGNING_KEY = signingKey;
export const COOKIE_SECRET = signingKey;

export async function assertConfigIsValid() {
  if (process.env.MODEL_RATE_LIMIT !== undefined) {
@@ -644,16 +610,6 @@ export async function assertConfigIsValid() {
    );
  }

  if (Object.values(config.httpAgent || {}).filter(Boolean).length === 0) {
    delete config.httpAgent;
  } else if (config.httpAgent) {
    if (config.httpAgent.interface && config.httpAgent.proxyUrl) {
      throw new Error(
        "Cannot set both `HTTP_AGENT_INTERFACE` and `HTTP_AGENT_PROXY_URL`."
      );
    }
  }

  // Ensure forks which add new secret-like config keys don't unwittingly expose
  // them to users.
  for (const key of getKeys(config)) {
@@ -667,16 +623,15 @@ export async function assertConfigIsValid() {
      `Config key "${key}" may be sensitive but is exposed. Add it to SENSITIVE_KEYS or OMITTED_KEYS.`
    );
  }

  await maybeInitializeFirebase();
}

/**
 * Config keys that are masked on the info page, but not hidden as their
 * presence may be relevant to the user due to privacy implications.
 */
export const SENSITIVE_KEYS: (keyof Config)[] = [
  "googleSheetsSpreadsheetId",
  "httpAgent",
];
export const SENSITIVE_KEYS: (keyof Config)[] = ["googleSheetsSpreadsheetId"];

/**
 * Config keys that are not displayed on the info page at all, generally because
@@ -691,7 +646,6 @@ export const OMITTED_KEYS = [
  "googleAIKey",
  "mistralAIKey",
  "awsCredentials",
  "gcpCredentials",
  "azureCredentials",
  "proxyKey",
  "adminKey",
@@ -782,7 +736,6 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
      "ANTHROPIC_KEY",
      "GOOGLE_AI_KEY",
      "AWS_CREDENTIALS",
      "GCP_CREDENTIALS",
      "AZURE_CREDENTIALS",
    ].includes(String(env))
  ) {
@@ -800,6 +753,32 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
  }
}

let firebaseApp: firebase.app.App | undefined;

async function maybeInitializeFirebase() {
  if (!config.gatekeeperStore.startsWith("firebase")) {
    return;
  }

  const firebase = await import("firebase-admin");
  const firebaseKey = Buffer.from(config.firebaseKey!, "base64").toString();
  const app = firebase.initializeApp({
    credential: firebase.credential.cert(JSON.parse(firebaseKey)),
    databaseURL: config.firebaseRtdbUrl,
  });

  await app.database().ref("connection-test").set(Date.now());

  firebaseApp = app;
}

export function getFirebaseApp(): firebase.app.App {
  if (!firebaseApp) {
    throw new Error("Firebase app not initialized.");
  }
  return firebaseApp;
}

function parseCsv(val: string): string[] {
  if (!val) return [];

@@ -807,9 +786,3 @@ function parseCsv(val: string): string[] {
  const matches = val.match(regex) || [];
  return matches.map((item) => item.replace(/^"|"$/g, "").trim());
}

function getDefaultModelFamilies(): ModelFamily[] {
  return MODEL_FAMILIES.filter(
    (f) => !f.includes("dall-e") && !f.includes("o1")
  ) as ModelFamily[];
}

+4 -16
@@ -12,44 +12,32 @@ import { checkCsrfToken, injectCsrfToken } from "./shared/inject-csrf";

const INFO_PAGE_TTL = 2000;
const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
  turbo: "GPT-4o Mini / 3.5 Turbo",
  turbo: "GPT-3.5 Turbo",
  gpt4: "GPT-4",
  "gpt4-32k": "GPT-4 32k",
  "gpt4-turbo": "GPT-4 Turbo",
  gpt4o: "GPT-4o",
  o1: "OpenAI o1",
  "o1-mini": "OpenAI o1 mini",
  "dall-e": "DALL-E",
  claude: "Claude (Sonnet)",
  "claude-opus": "Claude (Opus)",
  "gemini-flash": "Gemini Flash",
  "gemini-pro": "Gemini Pro",
  "gemini-ultra": "Gemini Ultra",
  "mistral-tiny": "Mistral 7B",
  "mistral-small": "Mistral Nemo",
  "mistral-small": "Mixtral Small", // Originally 8x7B, but that now refers to the older open-weight version. Mixtral Small is a newer closed-weight update to the 8x7B model.
  "mistral-medium": "Mistral Medium",
  "mistral-large": "Mistral Large",
  "aws-claude": "AWS Claude (Sonnet)",
  "aws-claude-opus": "AWS Claude (Opus)",
  "aws-mistral-tiny": "AWS Mistral 7B",
  "aws-mistral-small": "AWS Mistral Nemo",
  "aws-mistral-medium": "AWS Mistral Medium",
  "aws-mistral-large": "AWS Mistral Large",
  "gcp-claude": "GCP Claude (Sonnet)",
  "gcp-claude-opus": "GCP Claude (Opus)",
  "azure-turbo": "Azure GPT-3.5 Turbo",
  "azure-gpt4": "Azure GPT-4",
  "azure-gpt4-32k": "Azure GPT-4 32k",
  "azure-gpt4-turbo": "Azure GPT-4 Turbo",
  "azure-gpt4o": "Azure GPT-4o",
  "azure-o1": "Azure o1",
  "azure-o1-mini": "Azure o1 mini",
  "azure-dall-e": "Azure DALL-E",
};

const converter = new showdown.Converter();
const customGreeting = fs.existsSync("greeting.md")
  ? `<div id="servergreeting">${fs.readFileSync("greeting.md", "utf8")}</div>`
  ? `\n## Server Greeting\n${fs.readFileSync("greeting.md", "utf8")}`
  : "";
let infoPageHtml: string | undefined;
let infoPageLastUpdated = 0;
@@ -171,7 +159,7 @@ function getSelfServiceLinks() {
  }

  return `<div class="self-service-links">${links
    .map(([text, link]) => `<a href="${link}">${text}</a>`)
    .map(([text, link]) => `<a target="_blank" href="${link}">${text}</a>`)
    .join(" | ")}</div>`;
}

@@ -1,9 +0,0 @@
|
||||
import { NextFunction, Request, Response } from "express";
|
||||
|
||||
export function addV1(req: Request, res: Response, next: NextFunction) {
|
||||
// Clients don't consistently use the /v1 prefix so we'll add it for them.
|
||||
if (!req.path.startsWith("/v1/") && !req.path.startsWith("/v1beta/")) {
|
||||
req.url = `/v1${req.url}`;
|
||||
}
|
||||
next();
|
||||
}
|
||||
+117
-62
@@ -1,14 +1,22 @@
import { Request, RequestHandler, Router } from "express";
import { Request, Response, RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
  addKey,
  addAnthropicPreamble,
  createPreprocessorMiddleware,
  finalizeBody,
  createOnProxyReqHandler,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import {
  ProxyResHandlerWithBody,
  createOnProxyResHandler,
} from "./middleware/response";
import { sendErrorToClient } from "./middleware/response/error-generator";

let modelsCache: any = null;
let modelsCacheTime = 0;
@@ -36,13 +44,8 @@ const getModelsResponse = () => {
    "claude-2.0",
    "claude-2.1",
    "claude-3-haiku-20240307",
    "claude-3-5-haiku-20241022",
    "claude-3-opus-20240229",
    "claude-3-opus-latest",
    "claude-3-sonnet-20240229",
    "claude-3-5-sonnet-20240620",
    "claude-3-5-sonnet-20241022",
    "claude-3-5-sonnet-latest",
  ];

  const models = claudeVariants.map((id) => ({
@@ -65,7 +68,8 @@ const handleModelRequest: RequestHandler = (_req, res) => {
  res.status(200).json(getModelsResponse());
};

const anthropicBlockingResponseHandler: ProxyResHandlerWithBody = async (
/** Only used for non-streaming requests. */
const anthropicResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
@@ -118,6 +122,12 @@ export function transformAnthropicChatResponseToAnthropicText(
  };
}

/**
 * Transforms a model response from the Anthropic API to match those from the
 * OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
 * is only used for non-streaming requests as streaming requests are handled
 * on-the-fly.
 */
function transformAnthropicTextResponseToOpenAI(
  anthropicBody: Record<string, any>,
  req: Request
@@ -168,59 +178,40 @@ export function transformAnthropicChatResponseToOpenAI(
  };
}

/**
 * If a client using the OpenAI compatibility endpoint requests an actual OpenAI
 * model, reassigns it to Sonnet.
 */
function maybeReassignModel(req: Request) {
  const model = req.body.model;
  if (model.includes("claude")) return; // use whatever model the user requested
  req.body.model = "claude-3-5-sonnet-latest";
}

/**
 * If client requests more than 4096 output tokens the request must have a
 * particular version header.
 * https://docs.anthropic.com/en/release-notes/api#july-15th-2024
 */
function setAnthropicBetaHeader(req: Request) {
  const { max_tokens_to_sample } = req.body;
  if (max_tokens_to_sample > 4096) {
    req.headers["anthropic-beta"] = "max-tokens-3-5-sonnet-2024-07-15";
  }
}

function selectUpstreamPath(manager: ProxyReqManager) {
  const req = manager.request;
  const pathname = req.url.split("?")[0];
  req.log.debug({ pathname }, "Anthropic path filter");
  const isText = req.outboundApi === "anthropic-text";
  const isChat = req.outboundApi === "anthropic-chat";
  if (isChat && pathname === "/v1/complete") {
    manager.setPath("/v1/messages");
  }
  if (isText && pathname === "/v1/chat/completions") {
    manager.setPath("/v1/complete");
  }
  if (isChat && pathname === "/v1/chat/completions") {
    manager.setPath("/v1/messages");
  }
  if (isChat && ["sonnet", "opus"].includes(req.params.type)) {
    manager.setPath("/v1/messages");
  }
}

const anthropicProxy = createQueuedProxyMiddleware({
  target: "https://api.anthropic.com",
  mutations: [selectUpstreamPath, addKey, finalizeBody],
  blockingResponseHandler: anthropicBlockingResponseHandler,
const anthropicProxy = createQueueMiddleware({
  proxyMiddleware: createProxyMiddleware({
    target: "https://api.anthropic.com",
    changeOrigin: true,
    selfHandleResponse: true,
    logger,
    on: {
      proxyReq: createOnProxyReqHandler({
        pipeline: [addKey, addAnthropicPreamble, finalizeBody],
      }),
      proxyRes: createOnProxyResHandler([anthropicResponseHandler]),
      error: handleProxyError,
    },
    // Abusing pathFilter to rewrite the paths dynamically.
    pathFilter: (pathname, req) => {
      const isText = req.outboundApi === "anthropic-text";
      const isChat = req.outboundApi === "anthropic-chat";
      if (isChat && pathname === "/v1/complete") {
        req.url = "/v1/messages";
      }
      if (isText && pathname === "/v1/chat/completions") {
        req.url = "/v1/complete";
      }
      if (isChat && pathname === "/v1/chat/completions") {
        req.url = "/v1/messages";
      }
      if (isChat && ["sonnet", "opus"].includes(req.params.type)) {
        req.url = "/v1/messages";
      }
      return true;
    },
  }),
});

const nativeAnthropicChatPreprocessor = createPreprocessorMiddleware(
  { inApi: "anthropic-chat", outApi: "anthropic-chat", service: "anthropic" },
  { afterTransform: [setAnthropicBetaHeader] }
);

const nativeTextPreprocessor = createPreprocessorMiddleware({
  inApi: "anthropic-text",
  outApi: "anthropic-text",
@@ -276,7 +267,11 @@ anthropicRouter.get("/v1/models", handleModelRequest);
anthropicRouter.post(
  "/v1/messages",
  ipLimiter,
  nativeAnthropicChatPreprocessor,
  createPreprocessorMiddleware({
    inApi: "anthropic-chat",
    outApi: "anthropic-chat",
    service: "anthropic",
  }),
  anthropicProxy
);
// Anthropic text completion endpoint. Translates to Anthropic chat completion
@@ -296,5 +291,65 @@ anthropicRouter.post(
  preprocessOpenAICompatRequest,
  anthropicProxy
);
// Temporarily force Anthropic Text to Anthropic Chat for frontends which do not
// yet support the new model. Forces claude-3. Will be removed once common
// frontends have been updated.
anthropicRouter.post(
  "/v1/:type(sonnet|opus)/:action(complete|messages)",
  ipLimiter,
  handleAnthropicTextCompatRequest,
  createPreprocessorMiddleware({
    inApi: "anthropic-text",
    outApi: "anthropic-chat",
    service: "anthropic",
  }),
  anthropicProxy
);

function handleAnthropicTextCompatRequest(
  req: Request,
  res: Response,
  next: any
) {
  const type = req.params.type;
  const action = req.params.action;
  const alreadyInChatFormat = Boolean(req.body.messages);
  const compatModel = `claude-3-${type}-20240229`;
  req.log.info(
    { type, inputModel: req.body.model, compatModel, alreadyInChatFormat },
    "Handling Anthropic compatibility request"
  );

  if (action === "messages" || alreadyInChatFormat) {
    return sendErrorToClient({
      req,
      res,
      options: {
        title: "Unnecessary usage of compatibility endpoint",
        message: `Your client seems to already support the new Claude API format. This endpoint is intended for clients that do not yet support the new format.\nUse the normal \`/anthropic\` proxy endpoint instead.`,
        format: "unknown",
        statusCode: 400,
        reqId: req.id,
        obj: {
          requested_endpoint: "/anthropic/" + type,
          correct_endpoint: "/anthropic",
        },
      },
    });
  }

  req.body.model = compatModel;
  next();
}
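
// Illustrative trace (not part of this diff): a legacy text-completion payload
// such as { model: "claude-v1", prompt: "\n\nHuman: Hi\n\nAssistant:",
// max_tokens_to_sample: 256 } posted to the sonnet variant passes the check and
// leaves req.body.model === "claude-3-sonnet-20240229" before preprocessing runs.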

/**
 * If a client using the OpenAI compatibility endpoint requests an actual OpenAI
 * model, reassigns it to Claude 3 Sonnet.
 */
function maybeReassignModel(req: Request) {
  const model = req.body.model;
  if (!model.startsWith("gpt-")) return;
  req.body.model = "claude-3-sonnet-20240229";
}

export const anthropic = anthropicRouter;

@@ -1,257 +0,0 @@
import { Request, RequestHandler, Router } from "express";
import { v4 } from "uuid";
import {
  transformAnthropicChatResponseToAnthropicText,
  transformAnthropicChatResponseToOpenAI,
} from "./anthropic";
import { ipLimiter } from "./rate-limit";
import {
  createPreprocessorMiddleware,
  finalizeSignedRequest,
  signAwsRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";

const awsBlockingResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  let newBody = body;
  switch (`${req.inboundApi}<-${req.outboundApi}`) {
    case "openai<-anthropic-text":
      req.log.info("Transforming Anthropic Text back to OpenAI format");
      newBody = transformAwsTextResponseToOpenAI(body, req);
      break;
    case "openai<-anthropic-chat":
      req.log.info("Transforming AWS Anthropic Chat back to OpenAI format");
      newBody = transformAnthropicChatResponseToOpenAI(body);
      break;
    case "anthropic-text<-anthropic-chat":
      req.log.info("Transforming AWS Anthropic Chat back to Text format");
      newBody = transformAnthropicChatResponseToAnthropicText(body);
      break;
  }

  // AWS does not always confirm the model in the response, so we have to add it
  if (!newBody.model && req.body.model) {
    newBody.model = req.body.model;
  }

  res.status(200).json({ ...newBody, proxy: body.proxy });
};

function transformAwsTextResponseToOpenAI(
  awsBody: Record<string, any>,
  req: Request
): Record<string, any> {
  const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
  return {
    id: "aws-" + v4(),
    object: "chat.completion",
    created: Date.now(),
    model: req.body.model,
    usage: {
      prompt_tokens: req.promptTokens,
      completion_tokens: req.outputTokens,
      total_tokens: totalTokens,
    },
    choices: [
      {
        message: {
          role: "assistant",
          content: awsBody.completion?.trim(),
        },
        finish_reason: awsBody.stop_reason,
        index: 0,
      },
    ],
  };
}

const awsClaudeProxy = createQueuedProxyMiddleware({
  target: ({ signedRequest }) => {
    if (!signedRequest) throw new Error("Must sign request before proxying");
    return `${signedRequest.protocol}//${signedRequest.hostname}`;
  },
  mutations: [signAwsRequest, finalizeSignedRequest],
  blockingResponseHandler: awsBlockingResponseHandler,
});

const nativeTextPreprocessor = createPreprocessorMiddleware(
  { inApi: "anthropic-text", outApi: "anthropic-text", service: "aws" },
  { afterTransform: [maybeReassignModel] }
);

const textToChatPreprocessor = createPreprocessorMiddleware(
  { inApi: "anthropic-text", outApi: "anthropic-chat", service: "aws" },
  { afterTransform: [maybeReassignModel] }
);

/**
 * Routes text completion prompts to aws anthropic-chat if they need translation
 * (claude-3 based models do not support the old text completion endpoint).
 */
const preprocessAwsTextRequest: RequestHandler = (req, res, next) => {
  if (req.body.model?.includes("claude-3")) {
    textToChatPreprocessor(req, res, next);
  } else {
    nativeTextPreprocessor(req, res, next);
  }
};

const oaiToAwsTextPreprocessor = createPreprocessorMiddleware(
  { inApi: "openai", outApi: "anthropic-text", service: "aws" },
  { afterTransform: [maybeReassignModel] }
);

const oaiToAwsChatPreprocessor = createPreprocessorMiddleware(
  { inApi: "openai", outApi: "anthropic-chat", service: "aws" },
  { afterTransform: [maybeReassignModel] }
);

/**
 * Routes an OpenAI prompt to either the legacy Claude text completion endpoint
 * or the new Claude chat completion endpoint, based on the requested model.
 */
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
  if (req.body.model?.includes("claude-3")) {
    oaiToAwsChatPreprocessor(req, res, next);
  } else {
    oaiToAwsTextPreprocessor(req, res, next);
  }
};

const awsClaudeRouter = Router();
// Native(ish) Anthropic text completion endpoint.
awsClaudeRouter.post(
  "/v1/complete",
  ipLimiter,
  preprocessAwsTextRequest,
  awsClaudeProxy
);
// Native Anthropic chat completion endpoint.
awsClaudeRouter.post(
  "/v1/messages",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "anthropic-chat", outApi: "anthropic-chat", service: "aws" },
    { afterTransform: [maybeReassignModel] }
  ),
  awsClaudeProxy
);

// OpenAI-to-AWS Anthropic compatibility endpoint.
awsClaudeRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  preprocessOpenAICompatRequest,
  awsClaudeProxy
);

/**
 * Tries to deal with:
 * - frontends sending AWS model names even when they want to use the OpenAI-
 *   compatible endpoint
 * - frontends sending Anthropic model names that AWS doesn't recognize
 * - frontends sending OpenAI model names because they expect the proxy to
 *   translate them
 *
 * If client sends AWS model ID it will be used verbatim. Otherwise, various
 * strategies are used to try to map a non-AWS model name to AWS model ID.
 */
function maybeReassignModel(req: Request) {
  const model = req.body.model;

  // If it looks like an AWS model, use it as-is
  if (model.includes("anthropic.claude")) {
    return;
  }

  // Anthropic model names can look like:
  // - claude-v1
  // - claude-2.1
  // - claude-3-5-sonnet-20240620
  // - claude-3-opus-latest
  const pattern =
    /^(claude-)?(instant-)?(v)?(\d+)([.-](\d))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(latest|\d*)/i;
  const match = model.match(pattern);

  if (!match) {
    throw new Error(
      `Provided model name (${model}) doesn't resemble a Claude model ID.`
    );
  }

  const [_, _cl, instant, _v, major, _sep, minor, _ctx, rawName, rev] = match;

  if (instant) {
    req.body.model = "anthropic.claude-instant-v1";
    return;
  }

  const ver = minor ? `${major}.${minor}` : major;
  const name = rawName?.match(/([a-z]+)/)?.[1] || "";

  switch (ver) {
    case "1":
    case "1.0":
      req.body.model = "anthropic.claude-v1";
      return;
    case "2":
    case "2.0":
      req.body.model = "anthropic.claude-v2";
      return;
    case "2.1":
      req.body.model = "anthropic.claude-v2:1";
      return;
    case "3":
    case "3.0":
      // there is only one snapshot for all Claude 3 models so there is no need
      // to check the revision
      switch (name) {
        case "sonnet":
          req.body.model = "anthropic.claude-3-sonnet-20240229-v1:0";
          return;
        case "haiku":
          req.body.model = "anthropic.claude-3-haiku-20240307-v1:0";
          return;
        case "opus":
          req.body.model = "anthropic.claude-3-opus-20240229-v1:0";
          return;
      }
      break;
    case "3.5":
      switch (name) {
        case "sonnet":
          switch (rev) {
            case "20241022":
            case "latest":
              req.body.model = "anthropic.claude-3-5-sonnet-20241022-v2:0";
              return;
            case "20240620":
              req.body.model = "anthropic.claude-3-5-sonnet-20240620-v1:0";
              return;
          }
          break;
        case "haiku":
          switch (rev) {
            case "20241022":
            case "latest":
              req.body.model = "anthropic.claude-3-5-haiku-20241022-v1:0";
              return;
          }
        case "opus":
          // Add after model id is announced never
          break;
      }
  }

  throw new Error(
    `Provided model name (${model}) could not be mapped to a known AWS Claude model ID.`
  );
}
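
// Illustrative traces of the mapping above, worked by hand (not part of this diff):
//   "claude-instant-v1"          -> anthropic.claude-instant-v1
//   "claude-2.1"                 -> anthropic.claude-v2:1
//   "claude-3-5-sonnet-20241022" -> anthropic.claude-3-5-sonnet-20241022-v2:0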

export const awsClaude = awsClaudeRouter;
@@ -1,95 +0,0 @@
import { Request, Router } from "express";
import {
  detectMistralInputApi,
  transformMistralTextToMistralChat,
} from "./mistral-ai";
import { ipLimiter } from "./rate-limit";
import { ProxyResHandlerWithBody } from "./middleware/response";
import {
  createPreprocessorMiddleware,
  finalizeSignedRequest,
  signAwsRequest,
} from "./middleware/request";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";

const awsMistralBlockingResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  let newBody = body;
  if (req.inboundApi === "mistral-ai" && req.outboundApi === "mistral-text") {
    newBody = transformMistralTextToMistralChat(body);
  }
  // AWS does not always confirm the model in the response, so we have to add it
  if (!newBody.model && req.body.model) {
    newBody.model = req.body.model;
  }

  res.status(200).json({ ...newBody, proxy: body.proxy });
};

const awsMistralProxy = createQueuedProxyMiddleware({
  target: ({ signedRequest }) => {
    if (!signedRequest) throw new Error("Must sign request before proxying");
    return `${signedRequest.protocol}//${signedRequest.hostname}`;
  },
  mutations: [signAwsRequest, finalizeSignedRequest],
  blockingResponseHandler: awsMistralBlockingResponseHandler,
});

function maybeReassignModel(req: Request) {
  const model = req.body.model;

  // If it looks like an AWS model, use it as-is
  if (model.startsWith("mistral.")) {
    return;
  }
  // Mistral 7B Instruct
  else if (model.includes("7b")) {
    req.body.model = "mistral.mistral-7b-instruct-v0:2";
  }
  // Mistral 8x7B Instruct
  else if (model.includes("8x7b")) {
    req.body.model = "mistral.mixtral-8x7b-instruct-v0:1";
  }
  // Mistral Large (Feb 2024)
  else if (model.includes("large-2402")) {
    req.body.model = "mistral.mistral-large-2402-v1:0";
  }
  // Mistral Large 2 (July 2024)
  else if (model.includes("large")) {
    req.body.model = "mistral.mistral-large-2407-v1:0";
  }
  // Mistral Small (Feb 2024)
  else if (model.includes("small")) {
    req.body.model = "mistral.mistral-small-2402-v1:0";
  } else {
    throw new Error(
      `Can't map '${model}' to a supported AWS model ID; make sure you are requesting a Mistral model supported by Amazon Bedrock`
    );
  }
}

const nativeMistralChatPreprocessor = createPreprocessorMiddleware(
  { inApi: "mistral-ai", outApi: "mistral-ai", service: "aws" },
  {
    beforeTransform: [detectMistralInputApi],
    afterTransform: [maybeReassignModel],
  }
);

const awsMistralRouter = Router();
awsMistralRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  nativeMistralChatPreprocessor,
  awsMistralProxy
);

export const awsMistral = awsMistralRouter;
+318
-60
@@ -1,77 +1,335 @@
/* Shared code between AWS Claude and AWS Mistral endpoints. */

import { Request, Response, Router } from "express";
import { Request, RequestHandler, Response, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { config } from "../config";
import { addV1 } from "./add-v1";
import { awsClaude } from "./aws-claude";
import { awsMistral } from "./aws-mistral";
import { AwsBedrockKey, keyPool } from "../shared/key-management";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
  createPreprocessorMiddleware,
  signAwsRequest,
  finalizeSignedRequest,
  createOnProxyReqHandler,
} from "./middleware/request";
import {
  ProxyResHandlerWithBody,
  createOnProxyResHandler,
} from "./middleware/response";
import {
  transformAnthropicChatResponseToAnthropicText,
  transformAnthropicChatResponseToOpenAI,
} from "./anthropic";
import { sendErrorToClient } from "./middleware/response/error-generator";

const awsRouter = Router();
awsRouter.get(["/:vendor?/v1/models", "/:vendor?/models"], handleModelsRequest);
awsRouter.use("/claude", addV1, awsClaude);
awsRouter.use("/mistral", addV1, awsMistral);
const LATEST_AWS_V2_MINOR_VERSION = "1";

let modelsCache: any = null;
let modelsCacheTime = 0;

const getModelsResponse = () => {
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return modelsCache;
  }

const MODELS_CACHE_TTL = 10000;
let modelsCache: Record<string, any> = {};
let modelsCacheTime: Record<string, number> = {};
function handleModelsRequest(req: Request, res: Response) {
  if (!config.awsCredentials) return { object: "list", data: [] };

  const vendor = req.params.vendor?.length
    ? req.params.vendor === "claude"
      ? "anthropic"
      : req.params.vendor
    : "all";

  const cacheTime = modelsCacheTime[vendor] || 0;
  if (new Date().getTime() - cacheTime < MODELS_CACHE_TTL) {
    return res.json(modelsCache[vendor]);
  }

  const availableModelIds = new Set<string>();
  for (const key of keyPool.list()) {
    if (key.isDisabled || key.service !== "aws") continue;
    (key as AwsBedrockKey).modelIds.forEach((id) => availableModelIds.add(id));
  }

  // https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
  const models = [
  const variants = [
    "anthropic.claude-v2",
    "anthropic.claude-v2:1",
    "anthropic.claude-3-haiku-20240307-v1:0",
    "anthropic.claude-3-5-haiku-20241022-v1:0",
    "anthropic.claude-3-sonnet-20240229-v1:0",
    "anthropic.claude-3-5-sonnet-20240620-v1:0",
    "anthropic.claude-3-5-sonnet-20241022-v2:0",
    "anthropic.claude-3-opus-20240229-v1:0",
    "mistral.mistral-7b-instruct-v0:2",
    "mistral.mixtral-8x7b-instruct-v0:1",
    "mistral.mistral-large-2402-v1:0",
    "mistral.mistral-large-2407-v1:0",
    "mistral.mistral-small-2402-v1:0",
  ]
    .filter((id) => availableModelIds.has(id))
    .map((id) => {
      const vendor = id.match(/^(.*)\./)?.[1];
      return {
        id,
        object: "model",
        created: new Date().getTime(),
        owned_by: vendor,
        permission: [],
        root: vendor,
        parent: null,
      };
    });
  ];

  modelsCache[vendor] = {
    object: "list",
    data: models.filter((m) => vendor === "all" || m.root === vendor),
  const models = variants.map((id) => ({
    id,
    object: "model",
    created: new Date().getTime(),
    owned_by: "anthropic",
    permission: [],
    root: "claude",
    parent: null,
  }));

  modelsCache = { object: "list", data: models };
  modelsCacheTime = new Date().getTime();

  return modelsCache;
};

const handleModelRequest: RequestHandler = (_req, res) => {
  res.status(200).json(getModelsResponse());
};

/** Only used for non-streaming requests. */
const awsResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  let newBody = body;
  switch (`${req.inboundApi}<-${req.outboundApi}`) {
    case "openai<-anthropic-text":
      req.log.info("Transforming Anthropic Text back to OpenAI format");
      newBody = transformAwsTextResponseToOpenAI(body, req);
      break;
    case "openai<-anthropic-chat":
      req.log.info("Transforming AWS Anthropic Chat back to OpenAI format");
      newBody = transformAnthropicChatResponseToOpenAI(body);
      break;
    case "anthropic-text<-anthropic-chat":
      req.log.info("Transforming AWS Anthropic Chat back to Text format");
      newBody = transformAnthropicChatResponseToAnthropicText(body);
      break;
  }

  // AWS does not always confirm the model in the response, so we have to add it
  if (!newBody.model && req.body.model) {
    newBody.model = req.body.model;
  }

  res.status(200).json({ ...newBody, proxy: body.proxy });
};

/**
 * Transforms a model response from the Anthropic API to match those from the
 * OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
 * is only used for non-streaming requests as streaming requests are handled
 * on-the-fly.
 */
function transformAwsTextResponseToOpenAI(
  awsBody: Record<string, any>,
  req: Request
): Record<string, any> {
  const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
  return {
    id: "aws-" + v4(),
    object: "chat.completion",
    created: Date.now(),
    model: req.body.model,
    usage: {
      prompt_tokens: req.promptTokens,
      completion_tokens: req.outputTokens,
      total_tokens: totalTokens,
    },
    choices: [
      {
        message: {
          role: "assistant",
          content: awsBody.completion?.trim(),
        },
        finish_reason: awsBody.stop_reason,
        index: 0,
      },
    ],
  };
  modelsCacheTime[vendor] = new Date().getTime();
}

  return res.json(modelsCache[vendor]);
const awsProxy = createQueueMiddleware({
  beforeProxy: signAwsRequest,
  proxyMiddleware: createProxyMiddleware({
    target: "bad-target-will-be-rewritten",
    router: ({ signedRequest }) => {
      if (!signedRequest) throw new Error("Must sign request before proxying");
      return `${signedRequest.protocol}//${signedRequest.hostname}`;
    },
    changeOrigin: true,
    selfHandleResponse: true,
    logger,
    on: {
      proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
      proxyRes: createOnProxyResHandler([awsResponseHandler]),
      error: handleProxyError,
    },
  }),
});

const nativeTextPreprocessor = createPreprocessorMiddleware(
  { inApi: "anthropic-text", outApi: "anthropic-text", service: "aws" },
  { afterTransform: [maybeReassignModel] }
);

const textToChatPreprocessor = createPreprocessorMiddleware(
  { inApi: "anthropic-text", outApi: "anthropic-chat", service: "aws" },
  { afterTransform: [maybeReassignModel] }
);

/**
 * Routes text completion prompts to aws anthropic-chat if they need translation
 * (claude-3 based models do not support the old text completion endpoint).
 */
const preprocessAwsTextRequest: RequestHandler = (req, res, next) => {
  if (req.body.model?.includes("claude-3")) {
    textToChatPreprocessor(req, res, next);
  } else {
    nativeTextPreprocessor(req, res, next);
  }
};

const oaiToAwsTextPreprocessor = createPreprocessorMiddleware(
  { inApi: "openai", outApi: "anthropic-text", service: "aws" },
  { afterTransform: [maybeReassignModel] }
);

const oaiToAwsChatPreprocessor = createPreprocessorMiddleware(
  { inApi: "openai", outApi: "anthropic-chat", service: "aws" },
  { afterTransform: [maybeReassignModel] }
);

/**
 * Routes an OpenAI prompt to either the legacy Claude text completion endpoint
 * or the new Claude chat completion endpoint, based on the requested model.
 */
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
  if (req.body.model?.includes("claude-3")) {
    oaiToAwsChatPreprocessor(req, res, next);
  } else {
    oaiToAwsTextPreprocessor(req, res, next);
  }
};

const awsRouter = Router();
awsRouter.get("/v1/models", handleModelRequest);
// Native(ish) Anthropic text completion endpoint.
awsRouter.post("/v1/complete", ipLimiter, preprocessAwsTextRequest, awsProxy);
// Native Anthropic chat completion endpoint.
awsRouter.post(
  "/v1/messages",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "anthropic-chat", outApi: "anthropic-chat", service: "aws" },
    { afterTransform: [maybeReassignModel] }
  ),
  awsProxy
);
// Temporary force-Claude3 endpoint
awsRouter.post(
  "/v1/sonnet/:action(complete|messages)",
  ipLimiter,
  handleCompatibilityRequest,
  createPreprocessorMiddleware({
    inApi: "anthropic-text",
    outApi: "anthropic-chat",
    service: "aws",
  }),
  awsProxy
);

// OpenAI-to-AWS Anthropic compatibility endpoint.
awsRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  preprocessOpenAICompatRequest,
  awsProxy
);

/**
 * Tries to deal with:
 * - frontends sending AWS model names even when they want to use the OpenAI-
 *   compatible endpoint
 * - frontends sending Anthropic model names that AWS doesn't recognize
 * - frontends sending OpenAI model names because they expect the proxy to
 *   translate them
 */
function maybeReassignModel(req: Request) {
  const model = req.body.model;

  // If client already specified an AWS Claude model ID, use it
  if (model.includes("anthropic.claude")) {
    return;
  }

  const pattern =
    /^(claude-)?(instant-)?(v)?(\d+)(\.(\d+))?(-\d+k)?(-sonnet-?|-opus-?|-haiku-?)(\d*)/i;
  const match = model.match(pattern);

  // If there's no match, return the latest v2 model
  if (!match) {
    req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
    return;
  }

  const instant = match[2];
  const major = match[4];
  const minor = match[6];

  if (instant) {
    req.body.model = "anthropic.claude-instant-v1";
    return;
  }

  // There's only one v1 model
  if (major === "1") {
    req.body.model = "anthropic.claude-v1";
    return;
  }

  // Try to map Anthropic API v2 models to AWS v2 models
  if (major === "2") {
    if (minor === "0") {
      req.body.model = "anthropic.claude-v2";
      return;
    }
    req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
    return;
  }

  // AWS currently only supports one v3 model.
  const variant = match[8]; // sonnet, opus, or haiku
  const variantVersion = match[9];
  if (major === "3") {
    if (variant.includes("opus")) {
      req.body.model = "anthropic.claude-3-opus-20240229-v1:0";
    } else if (variant.includes("haiku")) {
      req.body.model = "anthropic.claude-3-haiku-20240307-v1:0";
    } else {
      req.body.model = "anthropic.claude-3-sonnet-20240229-v1:0";
    }
    return;
  }

  // Fallback to latest v2 model
  req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
  return;
}

export function handleCompatibilityRequest(
  req: Request,
  res: Response,
  next: any
) {
  const action = req.params.action;
  const alreadyInChatFormat = Boolean(req.body.messages);
  const compatModel = "anthropic.claude-3-sonnet-20240229-v1:0";
  req.log.info(
    { inputModel: req.body.model, compatModel, alreadyInChatFormat },
    "Handling AWS compatibility request"
  );

  if (action === "messages" || alreadyInChatFormat) {
    return sendErrorToClient({
      req,
      res,
      options: {
        title: "Unnecessary usage of compatibility endpoint",
        message: `Your client seems to already support the new Claude API format. This endpoint is intended for clients that do not yet support the new format.\nUse the normal \`/aws/claude\` proxy endpoint instead.`,
        format: "unknown",
        statusCode: 400,
        reqId: req.id,
        obj: {
          requested_endpoint: "/aws/claude/sonnet",
          correct_endpoint: "/aws/claude",
        },
      },
    });
  }

  req.body.model = compatModel;
  next();
}

export const aws = awsRouter;

+70
-18
@@ -1,30 +1,73 @@
import { RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { generateModelList } from "./openai";
import { keyPool } from "../shared/key-management";
import {
  AzureOpenAIModelFamily,
  getAzureOpenAIModelFamily,
  ModelFamily,
} from "../shared/models";
import { logger } from "../logger";
import { KNOWN_OPENAI_MODELS } from "./openai";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
  addAzureKey,
  createOnProxyReqHandler,
  createPreprocessorMiddleware,
  finalizeSignedRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import {
  createOnProxyResHandler,
  ProxyResHandlerWithBody,
} from "./middleware/response";

let modelsCache: any = null;
let modelsCacheTime = 0;

const handleModelRequest: RequestHandler = (_req, res) => {
function getModelsResponse() {
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return res.status(200).json(modelsCache);
    return modelsCache;
  }

  if (!config.azureCredentials) return { object: "list", data: [] };
  let available = new Set<AzureOpenAIModelFamily>();
  for (const key of keyPool.list()) {
    if (key.isDisabled || key.service !== "azure") continue;
    key.modelFamilies.forEach((family) =>
      available.add(family as AzureOpenAIModelFamily)
    );
  }
  const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
  available = new Set([...available].filter((x) => allowed.has(x)));

  const result = generateModelList("azure");
  const models = KNOWN_OPENAI_MODELS.map((id) => ({
    id,
    object: "model",
    created: new Date().getTime(),
    owned_by: "azure",
    permission: [
      {
        id: "modelperm-" + id,
        object: "model_permission",
        created: new Date().getTime(),
        organization: "*",
        group: null,
        is_blocking: false,
      },
    ],
    root: id,
    parent: null,
  })).filter((model) => available.has(getAzureOpenAIModelFamily(model.id)));

  modelsCache = { object: "list", data: result };
  modelsCache = { object: "list", data: models };
  modelsCacheTime = new Date().getTime();
  res.status(200).json(modelsCache);

  return modelsCache;
}

const handleModelRequest: RequestHandler = (_req, res) => {
  res.status(200).json(getModelsResponse());
};

const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
@@ -40,17 +83,26 @@ const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
  res.status(200).json({ ...body, proxy: body.proxy });
};

const azureOpenAIProxy = createQueuedProxyMiddleware({
  target: ({ signedRequest }) => {
    if (!signedRequest) throw new Error("Must sign request before proxying");
    const { hostname, protocol } = signedRequest;
    return `${protocol}//${hostname}`;
  },
  mutations: [addAzureKey, finalizeSignedRequest],
  blockingResponseHandler: azureOpenaiResponseHandler,
const azureOpenAIProxy = createQueueMiddleware({
  beforeProxy: addAzureKey,
  proxyMiddleware: createProxyMiddleware({
    target: "will be set by router",
    router: (req) => {
      if (!req.signedRequest) throw new Error("signedRequest not set");
      const { hostname, path } = req.signedRequest;
      return `https://${hostname}${path}`;
    },
    changeOrigin: true,
    selfHandleResponse: true,
    logger,
    on: {
      proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
      proxyRes: createOnProxyResHandler([azureOpenaiResponseHandler]),
      error: handleProxyError,
    },
  }),
});


const azureOpenAIRouter = Router();
azureOpenAIRouter.get("/v1/models", handleModelRequest);
azureOpenAIRouter.post(

@@ -12,7 +12,6 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
  // pass the _proxy_ key in this header too, instead of providing it as a
  // Bearer token in the Authorization header. So we need to check both.
  // Prefer the Authorization header if both are present.
  // Google AI uses a key querystring parameter.

  if (req.headers.authorization) {
    const token = req.headers.authorization?.slice("Bearer ".length);
@@ -25,12 +24,6 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
    delete req.headers["x-api-key"];
    return token;
  }

  if (req.query.key) {
    const token = req.query.key?.toString();
    delete req.query.key;
    return token;
  }

  return undefined;
}

@@ -1,196 +0,0 @@
import { Request, RequestHandler, Router } from "express";
import { config } from "../config";
import { transformAnthropicChatResponseToOpenAI } from "./anthropic";
import { ipLimiter } from "./rate-limit";
import {
  createPreprocessorMiddleware,
  finalizeSignedRequest,
  signGcpRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";

const LATEST_GCP_SONNET_MINOR_VERSION = "20240229";

let modelsCache: any = null;
let modelsCacheTime = 0;

const getModelsResponse = () => {
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return modelsCache;
  }

  if (!config.gcpCredentials) return { object: "list", data: [] };

  // https://docs.anthropic.com/en/docs/about-claude/models
  const variants = [
    "claude-3-haiku@20240307",
    "claude-3-5-haiku@20241022",
    "claude-3-sonnet@20240229",
    "claude-3-5-sonnet@20240620",
    "claude-3-5-sonnet-v2@20241022",
    "claude-3-opus@20240229",
  ];

  const models = variants.map((id) => ({
    id,
    object: "model",
    created: new Date().getTime(),
    owned_by: "anthropic",
    permission: [],
    root: "claude",
    parent: null,
  }));

  modelsCache = { object: "list", data: models };
  modelsCacheTime = new Date().getTime();

  return modelsCache;
};

const handleModelRequest: RequestHandler = (_req, res) => {
  res.status(200).json(getModelsResponse());
};

const gcpBlockingResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  let newBody = body;
  switch (`${req.inboundApi}<-${req.outboundApi}`) {
    case "openai<-anthropic-chat":
      req.log.info("Transforming Anthropic Chat back to OpenAI format");
      newBody = transformAnthropicChatResponseToOpenAI(body);
      break;
  }

  res.status(200).json({ ...newBody, proxy: body.proxy });
};

const gcpProxy = createQueuedProxyMiddleware({
  target: ({ signedRequest }) => {
    if (!signedRequest) throw new Error("Must sign request before proxying");
    return `${signedRequest.protocol}//${signedRequest.hostname}`;
  },
  mutations: [signGcpRequest, finalizeSignedRequest],
  blockingResponseHandler: gcpBlockingResponseHandler,
});

const oaiToChatPreprocessor = createPreprocessorMiddleware(
  { inApi: "openai", outApi: "anthropic-chat", service: "gcp" },
  { afterTransform: [maybeReassignModel] }
);

/**
 * Routes an OpenAI prompt to either the legacy Claude text completion endpoint
 * or the new Claude chat completion endpoint, based on the requested model.
 */
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
  oaiToChatPreprocessor(req, res, next);
};

const gcpRouter = Router();
gcpRouter.get("/v1/models", handleModelRequest);
// Native Anthropic chat completion endpoint.
gcpRouter.post(
  "/v1/messages",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "anthropic-chat", outApi: "anthropic-chat", service: "gcp" },
    { afterTransform: [maybeReassignModel] }
  ),
  gcpProxy
);

// OpenAI-to-GCP Anthropic compatibility endpoint.
gcpRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  preprocessOpenAICompatRequest,
  gcpProxy
);

/**
 * Tries to deal with:
 * - frontends sending GCP model names even when they want to use the OpenAI-
 *   compatible endpoint
 * - frontends sending Anthropic model names that GCP doesn't recognize
 * - frontends sending OpenAI model names because they expect the proxy to
 *   translate them
 *
 * If client sends GCP model ID it will be used verbatim. Otherwise, various
 * strategies are used to try to map a non-GCP model name to GCP model ID.
 */
function maybeReassignModel(req: Request) {
  const model = req.body.model;

  // If it looks like a GCP model, use it as-is
  // if (model.includes("anthropic.claude")) {
  if (model.startsWith("claude-") && model.includes("@")) {
    return;
  }

  // Anthropic model names can look like:
  // - claude-v1
  // - claude-2.1
  // - claude-3-5-sonnet-20240620-v1:0
  const pattern =
    /^(claude-)?(instant-)?(v)?(\d+)([.-](\d{1}))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(\d*)/i;
  const match = model.match(pattern);

  // If there's no match, fall back to Claude3 Sonnet as it is most likely to be
  // available on GCP.
  if (!match) {
    req.body.model = `claude-3-sonnet@${LATEST_GCP_SONNET_MINOR_VERSION}`;
    return;
  }

  const [_, _cl, instant, _v, major, _sep, minor, _ctx, name, rev] = match;

  // TODO: rework this to function similarly to aws-claude.ts maybeReassignModel
  const ver = minor ? `${major}.${minor}` : major;
  switch (ver) {
    case "3":
    case "3.0":
      if (name.includes("opus")) {
        req.body.model = "claude-3-opus@20240229";
      } else if (name.includes("haiku")) {
        req.body.model = "claude-3-haiku@20240307";
      } else {
        req.body.model = "claude-3-sonnet@20240229";
      }
      return;
    case "3.5":
      switch (name) {
        case "sonnet":
          switch (rev) {
            case "20241022":
            case "latest":
              req.body.model = "claude-3-5-sonnet-v2@20241022";
              return;
            case "20240620":
              req.body.model = "claude-3-5-sonnet@20240620";
              return;
          }
          break;
        case "haiku":
          req.body.model = "claude-3-5-haiku@20241022";
          return;
        case "opus":
          // Add after model ids are announced late 2024
          break;
      }
  }

  // Fallback to Claude3 Sonnet
  req.body.model = `claude-3-sonnet@${LATEST_GCP_SONNET_MINOR_VERSION}`;
  return;
}
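
// Illustrative traces, worked by hand (not part of this diff):
//   "claude-3-opus-20240229" -> "claude-3-opus@20240229"
//   "gpt-4"                  -> no match -> "claude-3-sonnet@20240229" (fallback)
// Note: the 3.5 branch compares the raw "-sonnet-" capture against "sonnet", so
// 3.5 names appear to fall through to the fallback; the TODO above flags this.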

export const gcp = gcpRouter;
+36
-77
@@ -1,15 +1,21 @@
import { Request, RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { GoogleAIKey, keyPool } from "../shared/key-management";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
  createOnProxyReqHandler,
  createPreprocessorMiddleware,
  finalizeSignedRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { addGoogleAIKey } from "./middleware/request/mutators/add-google-ai-key";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import {
  createOnProxyResHandler,
  ProxyResHandlerWithBody,
} from "./middleware/response";
import { addGoogleAIKey } from "./middleware/request/preprocessors/add-google-ai-key";

let modelsCache: any = null;
let modelsCacheTime = 0;
@@ -24,19 +30,9 @@ const getModelsResponse = () => {

  if (!config.googleAIKey) return { object: "list", data: [] };

  const keys = keyPool
    .list()
    .filter((k) => k.service === "google-ai") as GoogleAIKey[];
  if (keys.length === 0) {
    modelsCache = { object: "list", data: [] };
    modelsCacheTime = new Date().getTime();
    return modelsCache;
  }
  const googleAIVariants = ["gemini-pro", "gemini-1.0-pro", "gemini-1.5-pro"];

  const modelIds = Array.from(
    new Set(keys.map((k) => k.modelIds).flat())
  ).filter((id) => id.startsWith("models/gemini"));
  const models = modelIds.map((id) => ({
  const models = googleAIVariants.map((id) => ({
    id,
    object: "model",
    created: new Date().getTime(),
@@ -56,7 +52,8 @@ const handleModelRequest: RequestHandler = (_req, res) => {
  res.status(200).json(getModelsResponse());
};

const googleAIBlockingResponseHandler: ProxyResHandlerWithBody = async (
/** Only used for non-streaming requests. */
const googleAIResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
@@ -102,75 +99,37 @@ function transformGoogleAIResponse(
  };
}

const googleAIProxy = createQueuedProxyMiddleware({
  target: ({ signedRequest }) => {
    if (!signedRequest) throw new Error("Must sign request before proxying");
    const { protocol, hostname } = signedRequest;
    return `${protocol}//${hostname}`;
  },
  mutations: [addGoogleAIKey, finalizeSignedRequest],
  blockingResponseHandler: googleAIBlockingResponseHandler,
const googleAIProxy = createQueueMiddleware({
  beforeProxy: addGoogleAIKey,
  proxyMiddleware: createProxyMiddleware({
    target: "bad-target-will-be-rewritten",
    router: ({ signedRequest }) => {
      const { protocol, hostname, path } = signedRequest;
      return `${protocol}//${hostname}${path}`;
    },
    changeOrigin: true,
    selfHandleResponse: true,
    logger,
    on: {
      proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
      proxyRes: createOnProxyResHandler([googleAIResponseHandler]),
      error: handleProxyError,
    },
  }),
});

const googleAIRouter = Router();
googleAIRouter.get("/v1/models", handleModelRequest);

// Native Google AI chat completion endpoint
googleAIRouter.post(
  "/v1beta/models/:modelId:(generateContent|streamGenerateContent)",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "google-ai", outApi: "google-ai", service: "google-ai" },
    { beforeTransform: [maybeReassignModel], afterTransform: [setStreamFlag] }
  ),
  googleAIProxy
);

// OpenAI-to-Google AI compatibility endpoint.
googleAIRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "google-ai", service: "google-ai" },
    { afterTransform: [maybeReassignModel] }
  ),
  createPreprocessorMiddleware({
    inApi: "openai",
    outApi: "google-ai",
    service: "google-ai",
  }),
  googleAIProxy
);

function setStreamFlag(req: Request) {
  const isStreaming = req.url.includes("streamGenerateContent");
  if (isStreaming) {
    req.body.stream = true;
    req.isStreaming = true;
  } else {
    req.body.stream = false;
    req.isStreaming = false;
  }
}

/**
 * Replaces requests for non-Google AI models with gemini-1.5-pro-latest.
 * Also strips models/ from the beginning of the model IDs.
 */
function maybeReassignModel(req: Request) {
  // Ensure model is on body as a lot of middleware will expect it.
  const model = req.body.model || req.url.split("/").pop()?.split(":").shift();
  if (!model) {
    throw new Error("You must specify a model with your request.");
  }
  req.body.model = model;

  const requested = model;
  if (requested.startsWith("models/")) {
    req.body.model = requested.slice("models/".length);
  }

  if (requested.includes("gemini")) {
    return;
  }

  req.log.info({ requested }, "Reassigning model to gemini-1.5-pro-latest");
  req.body.model = "gemini-1.5-pro-latest";
}
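
// Illustrative traces (not part of this diff):
//   "models/gemini-pro" -> "gemini-pro"            (prefix stripped, Gemini kept)
//   "gpt-3.5-turbo"     -> "gemini-1.5-pro-latest" (non-Gemini reassigned)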
|
||||
|
||||
export const googleAI = googleAIRouter;
|
||||
|
||||
@@ -1,6 +1,6 @@
import { Request, Response } from "express";
import http from "http";
import { Socket } from "net";
import httpProxy from "http-proxy";
import { ZodError } from "zod";
import { generateErrorMessage } from "zod-error";
import { HttpError } from "../../shared/errors";
@@ -16,7 +16,6 @@ const ANTHROPIC_COMPLETION_ENDPOINT = "/v1/complete";
const ANTHROPIC_MESSAGES_ENDPOINT = "/v1/messages";
const ANTHROPIC_SONNET_COMPAT_ENDPOINT = "/v1/sonnet";
const ANTHROPIC_OPUS_COMPAT_ENDPOINT = "/v1/opus";
const GOOGLE_AI_COMPLETION_ENDPOINT = "/v1beta/models";

export function isTextGenerationRequest(req: Request) {
  return (
@@ -28,7 +27,6 @@ export function isTextGenerationRequest(req: Request) {
      ANTHROPIC_MESSAGES_ENDPOINT,
      ANTHROPIC_SONNET_COMPAT_ENDPOINT,
      ANTHROPIC_OPUS_COMPAT_ENDPOINT,
      GOOGLE_AI_COMPLETION_ENDPOINT,
    ].some((endpoint) => req.path.startsWith(endpoint))
  );
}
@@ -72,23 +70,16 @@ export function sendProxyError(
  });
}

/**
 * Handles errors thrown during preparation of a proxy request (before it is
 * sent to the upstream API), typically due to validation, quota, or other
 * pre-flight checks. Depending on the error class, this function will send an
 * appropriate error response to the client, streaming it if necessary.
 */
export const handleProxyError: httpProxy.ErrorCallback = (err, req, res) => {
  req.log.error(err, `Error during http-proxy-middleware request`);
  classifyErrorAndSend(err, req as Request, res as Response);
};

export const classifyErrorAndSend = (
  err: Error,
  req: Request,
  res: Response | Socket
  res: Response
) => {
  if (res instanceof Socket) {
    // We should always have an Express response object here, but http-proxy's
    // ErrorCallback type says it could be just a Socket.
    req.log.error(err, "Caught error while proxying request to target but cannot send error response to client.");
    return res.destroy();
  }
  try {
    const { statusCode, statusMessage, userMessage, ...errorDetails } =
      classifyError(err);
@@ -230,12 +221,9 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
  switch (format) {
    case "openai":
    case "mistral-ai":
      // Few possible values:
      // - choices[0].message.content
      // - choices[0].message with no content if model is invoking a tool
      return body.choices?.[0]?.message?.content || "";
    case "mistral-text":
      return body.outputs?.[0]?.text || "";
      // Can be null if the model wants to invoke tools rather than return a
      // completion.
      return body.choices[0].message.content || "";
    case "openai-text":
      return body.choices[0].text;
    case "anthropic-chat":
@@ -264,15 +252,7 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
      if ("choices" in body) {
        return body.choices[0].message.content;
      }
      const text = body.candidates[0].content?.parts?.[0]?.text;
      if (!text) {
        req.log.warn(
          { body: JSON.stringify(body) },
          "Received empty Google AI text completion"
        );
        return "";
      }
      return text;
      return body.candidates[0].content.parts[0].text;
    case "openai-image":
      return body.data?.map((item: any) => item.url).join("\n");
    default:
@@ -280,22 +260,22 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
  }
}

export function getModelFromBody(req: Request, resBody: Record<string, any>) {
export function getModelFromBody(req: Request, body: Record<string, any>) {
  const format = req.outboundApi;
  switch (format) {
    case "openai":
    case "openai-text":
      return resBody.model;
    case "mistral-ai":
    case "mistral-text":
      return body.model;
    case "openai-image":
    case "google-ai":
      // These formats don't have a model in the response body.
      return req.body.model;
    case "anthropic-chat":
    case "anthropic-text":
      // Anthropic confirms the model in the response, but AWS Claude doesn't.
      return resBody.model || req.body.model;
      return body.model || req.body.model;
    case "google-ai":
      // Google doesn't confirm the model in the response.
      return req.body.model;
    default:
      assertNever(format);
  }
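The defensive google-ai branch above guards against responses whose `parts` array is missing or empty. A minimal sketch of the response shape it reads (the field names come from the code above; the sample values are hypothetical):

```ts
// Hypothetical generateContent response; only fields the proxy reads are shown.
const body = {
  candidates: [
    { content: { parts: [{ text: "Hello!" }] }, finishReason: "stop" },
  ],
};

// Optional chaining avoids a crash when `content` or `parts` is absent.
const text = body.candidates[0].content?.parts?.[0]?.text ?? "";
```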
@@ -1,38 +1,43 @@
import type { Request } from "express";
import type { ClientRequest } from "http";
import type { ProxyReqCallback } from "http-proxy";

import { ProxyReqManager } from "./proxy-req-manager";
export { createOnProxyReqHandler } from "./onproxyreq-factory";
export {
  createPreprocessorMiddleware,
  createEmbeddingsPreprocessorMiddleware,
} from "./preprocessor-factory";

// Preprocessors (runs before request is queued, usually body transformation/validation)
// Express middleware (runs before http-proxy-middleware, can be async)
export { addAzureKey } from "./preprocessors/add-azure-key";
export { applyQuotaLimits } from "./preprocessors/apply-quota-limits";
export { blockZoomerOrigins } from "./preprocessors/block-zoomer-origins";
export { countPromptTokens } from "./preprocessors/count-prompt-tokens";
export { languageFilter } from "./preprocessors/language-filter";
export { setApiFormat } from "./preprocessors/set-api-format";
export { signAwsRequest } from "./preprocessors/sign-aws-request";
export { transformOutboundPayload } from "./preprocessors/transform-outbound-payload";
export { validateContextSize } from "./preprocessors/validate-context-size";
export { validateModelFamily } from "./preprocessors/validate-model-family";
export { validateVision } from "./preprocessors/validate-vision";

// Proxy request mutators (runs every time request is dequeued, before proxying, usually for auth/signing)
export { addKey, addKeyForEmbeddingsRequest } from "./mutators/add-key";
export { addAzureKey } from "./mutators/add-azure-key";
export { finalizeBody } from "./mutators/finalize-body";
export { finalizeSignedRequest } from "./mutators/finalize-signed-request";
export { signAwsRequest } from "./mutators/sign-aws-request";
export { signGcpRequest } from "./mutators/sign-vertex-ai-request";
export { stripHeaders } from "./mutators/strip-headers";
// http-proxy-middleware callbacks (runs on onProxyReq, cannot be async)
export { addAnthropicPreamble } from "./onproxyreq/add-anthropic-preamble";
export { addKey, addKeyForEmbeddingsRequest } from "./onproxyreq/add-key";
export { blockZoomerOrigins } from "./onproxyreq/block-zoomer-origins";
export { checkModelFamily } from "./onproxyreq/check-model-family";
export { finalizeBody } from "./onproxyreq/finalize-body";
export { finalizeSignedRequest } from "./onproxyreq/finalize-signed-request";
export { stripHeaders } from "./onproxyreq/strip-headers";

/**
 * Middleware that runs prior to the request being queued or handled by
 * http-proxy-middleware. You will not have access to the proxied
 * request/response objects since they have not yet been sent to the API.
 * Middleware that runs prior to the request being handled by http-proxy-
 * middleware.
 *
 * User will have been authenticated by the proxy's gatekeeper, but the request
 * won't have been assigned an upstream API key yet.
 * Async functions can be used here, but you will not have access to the proxied
 * request/response objects, nor the data set by ProxyRequestMiddleware
 * functions as they have not yet been run.
 *
 * User will have been authenticated by the time this middleware runs, but your
 * request won't have been assigned an API key yet.
 *
 * Note that these functions only run once ever per request, even if the request
 * is automatically retried by the request queue middleware.
@@ -40,14 +45,17 @@ export { stripHeaders } from "./mutators/strip-headers";
export type RequestPreprocessor = (req: Request) => void | Promise<void>;

/**
 * Middleware that runs immediately before the request is proxied to the
 * upstream API, after dequeueing the request from the request queue.
 * Callbacks that run immediately before the request is sent to the API in
 * response to http-proxy-middleware's `proxyReq` event.
 *
 * Because these middleware may be run multiple times per request if a retryable
 * error occurs and the request is put back in the queue, they must be idempotent.
 * A change manager is provided to allow the middleware to make changes to the
 * request which can be automatically reverted.
 * Async functions cannot be used here as HPM's event emitter is not async and
 * will not wait for the promise to resolve before sending the request.
 *
 * Note that these functions may be run multiple times per request if the
 * first attempt is rate limited and the request is automatically retried by the
 * request queue middleware.
 */
export type ProxyReqMutator = (
  changeManager: ProxyReqManager
) => void | Promise<void>;
export type HPMRequestCallback = ProxyReqCallback<ClientRequest, Request>;

export const forceModel = (model: string) => (req: Request) =>
  void (req.body.model = model);
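To make the two contracts concrete, here is a minimal sketch of a preprocessor and a mutator. The handler bodies are hypothetical; the signatures, `ProxyReqManager.setHeader`, and `req.retryCount` are taken from this diff:

```ts
// Runs once per request, before it is queued; may be async.
const logInboundModel: RequestPreprocessor = async (req) => {
  req.log.info({ model: req.body.model }, "Request entered pipeline");
};

// Runs on every dequeue; changes made through the manager can be reverted
// automatically if the request is re-queued after a retryable error.
const tagProxiedRequest: ProxyReqMutator = (manager) => {
  // "x-proxy-attempt" is an illustrative header, not one the proxy really sets.
  manager.setHeader("x-proxy-attempt", String(manager.request.retryCount + 1));
};
```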
@@ -1,44 +0,0 @@
import { keyPool } from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";

export const addGoogleAIKey: ProxyReqMutator = (manager) => {
  const req = manager.request;
  const inboundValid =
    req.inboundApi === "openai" || req.inboundApi === "google-ai";
  const outboundValid = req.outboundApi === "google-ai";

  const serviceValid = req.service === "google-ai";
  if (!inboundValid || !outboundValid || !serviceValid) {
    throw new Error("addGoogleAIKey called on invalid request");
  }

  const model = req.body.model;
  const key = keyPool.get(model, "google-ai");
  manager.setKey(key);

  req.log.info(
    { key: key.hash, model, stream: req.isStreaming },
    "Assigned Google AI API key to request"
  );

  // https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:generateContent?key=$API_KEY
  // https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}
  const payload = { ...req.body, stream: undefined, model: undefined };

  // TODO: this isn't actually signed, so the manager API is a little unclear.
  // With the ProxyReqManager refactor, it's probably no longer necessary to
  // do this because we can modify the path using Manager.setPath.
  manager.setSignedRequest({
    method: "POST",
    protocol: "https:",
    hostname: "generativelanguage.googleapis.com",
    path: `/v1beta/models/${model}:${
      req.isStreaming ? "streamGenerateContent?alt=sse&" : "generateContent?"
    }key=${key.key}`,
    headers: {
      ["host"]: `generativelanguage.googleapis.com`,
      ["content-type"]: "application/json",
    },
    body: JSON.stringify(payload),
  });
};
@@ -1,22 +0,0 @@
import type { ProxyReqMutator } from "../index";

/** Finalize the rewritten request body. Must be the last mutator. */
export const finalizeBody: ProxyReqMutator = (manager) => {
  const req = manager.request;

  if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
    // For image generation requests, remove stream flag.
    if (req.outboundApi === "openai-image") {
      delete req.body.stream;
    }
    // For anthropic text to chat requests, remove undefined prompt.
    if (req.outboundApi === "anthropic-chat") {
      delete req.body.prompt;
    }

    const serialized =
      typeof req.body === "string" ? req.body : JSON.stringify(req.body);
    manager.setHeader("Content-Length", String(Buffer.byteLength(serialized)));
    manager.setBody(serialized);
  }
};
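The `Buffer.byteLength` call matters here because Content-Length must be measured in bytes, not string characters; a quick sketch of the difference for a multibyte body:

```ts
const serialized = JSON.stringify({ prompt: "héllo" });
serialized.length;             // 18 characters
Buffer.byteLength(serialized); // 19 bytes ("é" is two bytes in UTF-8)
```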
@@ -1,32 +0,0 @@
import { ProxyReqMutator } from "../index";

/**
 * For AWS/GCP/Azure/Google requests, the body is signed earlier in the request
 * pipeline, before the proxy middleware. This function just assigns the path
 * and headers to the proxy request.
 */
export const finalizeSignedRequest: ProxyReqMutator = (manager) => {
  const req = manager.request;
  if (!req.signedRequest) {
    throw new Error("Expected req.signedRequest to be set");
  }

  // The path depends on the selected model and the assigned key's region.
  manager.setPath(req.signedRequest.path);

  // Amazon doesn't want extra headers, so we need to remove all of them and
  // reassign only the ones specified in the signed request.
  const headers = req.signedRequest.headers;
  Object.keys(headers).forEach((key) => {
    manager.removeHeader(key);
  });
  Object.entries(req.signedRequest.headers).forEach(([key, value]) => {
    manager.setHeader(key, value);
  });
  const serialized =
    typeof req.signedRequest.body === "string"
      ? req.signedRequest.body
      : JSON.stringify(req.signedRequest.body);
  manager.setHeader("Content-Length", String(Buffer.byteLength(serialized)));
  manager.setBody(serialized);
};
@@ -1,148 +0,0 @@
import express, { Request } from "express";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
import {
  AnthropicV1TextSchema,
  AnthropicV1MessagesSchema,
} from "../../../../shared/api-schemas";
import { AwsBedrockKey, keyPool } from "../../../../shared/key-management";
import {
  AWSMistralV1ChatCompletionsSchema,
  AWSMistralV1TextCompletionsSchema,
} from "../../../../shared/api-schemas/mistral-ai";
import { ProxyReqMutator } from "../index";

const AMZ_HOST =
  process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";

/**
 * Signs an outgoing AWS request with the appropriate headers and modifies the
 * request object in place to fix the path.
 * This happens AFTER request transformation.
 */
export const signAwsRequest: ProxyReqMutator = async (manager) => {
  const req = manager.request;
  const { model, stream } = req.body;
  const key = keyPool.get(model, "aws") as AwsBedrockKey;
  manager.setKey(key);

  const credential = getCredentialParts(req);
  const host = AMZ_HOST.replace("%REGION%", credential.region);

  // AWS only uses 2023-06-01 and does not actually check this header, but we
  // set it so that the stream adapter always selects the correct transformer.
  manager.setHeader("anthropic-version", "2023-06-01");

  // If our key has an inference profile compatible with the requested model,
  // we want to use the inference profile instead of the model ID when calling
  // InvokeModel as that will give us higher rate limits.
  const profile =
    key.inferenceProfileIds.find((p) => p.includes(model)) || model;

  // Uses the AWS SDK to sign a request, then modifies our HPM proxy request
  // with the headers generated by the SDK.
  const newRequest = new HttpRequest({
    method: "POST",
    protocol: "https:",
    hostname: host,
    path: `/model/${profile}/invoke${stream ? "-with-response-stream" : ""}`,
    headers: {
      ["Host"]: host,
      ["content-type"]: "application/json",
    },
    body: JSON.stringify(getStrictlyValidatedBodyForAws(req)),
  });

  if (stream) {
    newRequest.headers["x-amzn-bedrock-accept"] = "application/json";
  } else {
    newRequest.headers["accept"] = "*/*";
  }

  const { body, inboundApi, outboundApi } = req;
  req.log.info(
    { key: key.hash, model: body.model, profile, inboundApi, outboundApi },
    "Assigned AWS credentials to request"
  );

  manager.setSignedRequest(await sign(newRequest, getCredentialParts(req)));
};

type Credential = {
  accessKeyId: string;
  secretAccessKey: string;
  region: string;
};

function getCredentialParts(req: express.Request): Credential {
  const [accessKeyId, secretAccessKey, region] = req.key!.key.split(":");

  if (!accessKeyId || !secretAccessKey || !region) {
    req.log.error(
      { key: req.key!.hash },
      "AWS_CREDENTIALS isn't correctly formatted; refer to the docs"
    );
    throw new Error("The key assigned to this request is invalid.");
  }

  return { accessKeyId, secretAccessKey, region };
}

async function sign(request: HttpRequest, credential: Credential) {
  const { accessKeyId, secretAccessKey, region } = credential;

  const signer = new SignatureV4({
    sha256: Sha256,
    credentials: { accessKeyId, secretAccessKey },
    region,
    service: "bedrock",
  });

  return signer.sign(request);
}

function getStrictlyValidatedBodyForAws(req: Readonly<Request>): unknown {
  // AWS uses vendor API formats but imposes additional (more strict) validation
  // rules, namely that extraneous parameters are not allowed. We will validate
  // using the vendor's zod schema but apply `.strip` to ensure that any
  // extraneous parameters are removed.
  let strippedParams: Record<string, unknown> = {};
  switch (req.outboundApi) {
    case "anthropic-text":
      strippedParams = AnthropicV1TextSchema.pick({
        prompt: true,
        max_tokens_to_sample: true,
        stop_sequences: true,
        temperature: true,
        top_k: true,
        top_p: true,
      })
        .strip()
        .parse(req.body);
      break;
    case "anthropic-chat":
      strippedParams = AnthropicV1MessagesSchema.pick({
        messages: true,
        system: true,
        max_tokens: true,
        stop_sequences: true,
        temperature: true,
        top_k: true,
        top_p: true,
      })
        .strip()
        .parse(req.body);
      strippedParams.anthropic_version = "bedrock-2023-05-31";
      break;
    case "mistral-ai":
      strippedParams = AWSMistralV1ChatCompletionsSchema.parse(req.body);
      break;
    case "mistral-text":
      strippedParams = AWSMistralV1TextCompletionsSchema.parse(req.body);
      break;
    default:
      throw new Error("Unexpected outbound API for AWS.");
  }
  return strippedParams;
}
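As an illustration of the `.pick().strip().parse()` pattern used in `getStrictlyValidatedBodyForAws`, here is a self-contained sketch; the schema and sample body are hypothetical stand-ins for the shared Anthropic schemas:

```ts
import { z } from "zod";

const ExampleSchema = z.object({
  prompt: z.string(),
  temperature: z.number().optional(),
  logit_bias: z.record(z.number()).optional(),
});

// pick() keeps only the whitelisted keys; strip() ensures any remaining
// unknown keys are silently dropped instead of being forwarded to Bedrock.
const cleaned = ExampleSchema.pick({ prompt: true, temperature: true })
  .strip()
  .parse({ prompt: "Hi", temperature: 0.7, logit_bias: { "50256": -100 } });

// cleaned => { prompt: "Hi", temperature: 0.7 }
```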
@@ -1,75 +0,0 @@
import { AnthropicV1MessagesSchema } from "../../../../shared/api-schemas";
import { GcpKey, keyPool } from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";
import {
  getCredentialsFromGcpKey,
  refreshGcpAccessToken,
} from "../../../../shared/key-management/gcp/oauth";

const GCP_HOST = process.env.GCP_HOST || "%REGION%-aiplatform.googleapis.com";

export const signGcpRequest: ProxyReqMutator = async (manager) => {
  const req = manager.request;
  const serviceValid = req.service === "gcp";
  if (!serviceValid) {
    throw new Error("addVertexAIKey called on invalid request");
  }

  if (!req.body?.model) {
    throw new Error("You must specify a model with your request.");
  }

  const { model } = req.body;
  const key: GcpKey = keyPool.get(model, "gcp") as GcpKey;

  if (!key.accessToken || Date.now() > key.accessTokenExpiresAt) {
    const [token, durationSec] = await refreshGcpAccessToken(key);
    keyPool.update(key, {
      accessToken: token,
      accessTokenExpiresAt: Date.now() + durationSec * 1000 * 0.95,
    } as GcpKey);
    // nb: key received by `get` is a clone and will not have the new access
    // token we just set, so it must be manually updated.
    key.accessToken = token;
  }

  manager.setKey(key);
  req.log.info({ key: key.hash, model }, "Assigned GCP key to request");

  // TODO: This should happen in transform-outbound-payload.ts
  // TODO: Support tools
  let strippedParams: Record<string, unknown>;
  strippedParams = AnthropicV1MessagesSchema.pick({
    messages: true,
    system: true,
    max_tokens: true,
    stop_sequences: true,
    temperature: true,
    top_k: true,
    top_p: true,
    stream: true,
  })
    .strip()
    .parse(req.body);
  strippedParams.anthropic_version = "vertex-2023-10-16";

  const credential = await getCredentialsFromGcpKey(key);

  const host = GCP_HOST.replace("%REGION%", credential.region);
  // GCP doesn't use the anthropic-version header, but we set it to ensure the
  // stream adapter selects the correct transformer.
  manager.setHeader("anthropic-version", "2023-06-01");

  manager.setSignedRequest({
    method: "POST",
    protocol: "https:",
    hostname: host,
    path: `/v1/projects/${credential.projectId}/locations/${credential.region}/publishers/anthropic/models/${model}:streamRawPredict`,
    headers: {
      ["host"]: host,
      ["content-type"]: "application/json",
      ["authorization"]: `Bearer ${key.accessToken}`,
    },
    body: JSON.stringify(strippedParams),
  });
};
@@ -1,33 +0,0 @@
import { ProxyReqMutator } from "../index";

/**
 * Removes origin and referer headers before sending the request to the API for
 * privacy reasons.
 */
export const stripHeaders: ProxyReqMutator = (manager) => {
  manager.removeHeader("origin");
  manager.removeHeader("referer");

  // Some APIs refuse requests coming from browsers to discourage embedding
  // API keys in client-side code, so we must remove all CORS/fetch headers.
  Object.keys(manager.request.headers).forEach((key) => {
    if (key.startsWith("sec-")) {
      manager.removeHeader(key);
    }
  });

  manager.removeHeader("tailscale-user-login");
  manager.removeHeader("tailscale-user-name");
  manager.removeHeader("tailscale-headers-info");
  manager.removeHeader("tailscale-user-profile-pic");
  manager.removeHeader("cf-connecting-ip");
  manager.removeHeader("cf-ray");
  manager.removeHeader("cf-visitor");
  manager.removeHeader("cf-warp-tag-id");
  manager.removeHeader("forwarded");
  manager.removeHeader("true-client-ip");
  manager.removeHeader("x-forwarded-for");
  manager.removeHeader("x-forwarded-host");
  manager.removeHeader("x-forwarded-proto");
  manager.removeHeader("x-real-ip");
};
@@ -0,0 +1,45 @@
import {
  applyQuotaLimits,
  blockZoomerOrigins,
  checkModelFamily,
  HPMRequestCallback,
  stripHeaders,
} from "./index";

type ProxyReqHandlerFactoryOptions = { pipeline: HPMRequestCallback[] };

/**
 * Returns an http-proxy-middleware request handler that runs the given set of
 * onProxyReq callback functions in sequence.
 *
 * These will run each time a request is proxied, including on automatic retries
 * by the queue after encountering a rate limit.
 */
export const createOnProxyReqHandler = ({
  pipeline,
}: ProxyReqHandlerFactoryOptions): HPMRequestCallback => {
  const callbackPipeline = [
    checkModelFamily,
    applyQuotaLimits,
    blockZoomerOrigins,
    stripHeaders,
    ...pipeline,
  ];
  return (proxyReq, req, res, options) => {
    // The streaming flag must be set before any other onProxyReq handler runs,
    // as it may influence the behavior of subsequent handlers.
    // Image generation requests can't be streamed.
    // TODO: this flag is set in too many places
    req.isStreaming =
      req.isStreaming || req.body.stream === true || req.body.stream === "true";
    req.body.stream = req.isStreaming;

    try {
      for (const fn of callbackPipeline) {
        fn(proxyReq, req, res, options);
      }
    } catch (error) {
      proxyReq.destroy(error);
    }
  };
};
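A sketch of how the factory's output might be attached to http-proxy-middleware; the upstream target and pipeline contents are illustrative assumptions, not taken from this diff:

```ts
import { createProxyMiddleware } from "http-proxy-middleware";
import { addKey, finalizeBody, createOnProxyReqHandler } from "./index";

const proxy = createProxyMiddleware({
  target: "https://api.example.com", // hypothetical upstream
  changeOrigin: true,
  on: {
    // v3-style event registration; v2 used the `onProxyReq` option instead.
    // checkModelFamily, applyQuotaLimits, blockZoomerOrigins, and stripHeaders
    // are prepended by the factory itself.
    proxyReq: createOnProxyReqHandler({ pipeline: [addKey, finalizeBody] }),
  },
});
```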
@@ -0,0 +1,33 @@
import { AnthropicKey, Key } from "../../../../shared/key-management";
import { isTextGenerationRequest } from "../../common";
import { HPMRequestCallback } from "../index";

/**
 * Some keys require the prompt to start with `\n\nHuman:`. There is no way to
 * know this without trying to send the request and seeing if it fails. If a
 * key is marked as requiring a preamble, it will be added here.
 */
export const addAnthropicPreamble: HPMRequestCallback = (_proxyReq, req) => {
  if (
    !isTextGenerationRequest(req) ||
    req.key?.service !== "anthropic" ||
    req.outboundApi !== "anthropic-text"
  ) {
    return;
  }

  let preamble = "";
  let prompt = req.body.prompt;
  assertAnthropicKey(req.key);
  if (req.key.requiresPreamble && prompt) {
    preamble = prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
    req.log.debug({ key: req.key.hash, preamble }, "Adding preamble to prompt");
  }
  req.body.prompt = preamble + prompt;
};

function assertAnthropicKey(key: Key): asserts key is AnthropicKey {
  if (key.service !== "anthropic") {
    throw new Error(`Expected an Anthropic key, got '${key.service}'`);
  }
}
+21
-26
@@ -2,12 +2,10 @@ import { AnthropicChatMessage } from "../../../../shared/api-schemas";
import { containsImageContent } from "../../../../shared/api-schemas/anthropic";
import { Key, OpenAIKey, keyPool } from "../../../../shared/key-management";
import { isEmbeddingsRequest } from "../../common";
import { HPMRequestCallback } from "../index";
import { assertNever } from "../../../../shared/utils";
import { ProxyReqMutator } from "../index";

export const addKey: ProxyReqMutator = (manager) => {
  const req = manager.request;

export const addKey: HPMRequestCallback = (proxyReq, req) => {
  let assignedKey: Key;
  const { service, inboundApi, outboundApi, body } = req;

@@ -40,10 +38,7 @@ export const addKey: ProxyReqMutator = (manager) => {
      // translation now reassigns the model earlier in the request pipeline.
      case "anthropic-text":
      case "anthropic-chat":
      case "mistral-ai":
      case "mistral-text":
      case "google-ai":
        assignedKey = keyPool.get(body.model, service);
        assignedKey = keyPool.get("claude-v1", service, needsMultimodal);
        break;
      case "openai-text":
        assignedKey = keyPool.get("gpt-3.5-turbo-instruct", service);
@@ -52,15 +47,17 @@ export const addKey: ProxyReqMutator = (manager) => {
        assignedKey = keyPool.get("dall-e-3", service);
        break;
      case "openai":
      case "google-ai":
      case "mistral-ai":
        throw new Error(
          `Outbound API ${outboundApi} is not supported for ${inboundApi}`
          `add-key should not be called for outbound API ${outboundApi}`
        );
      default:
        assertNever(outboundApi);
    }
  }

  manager.setKey(assignedKey);
  req.key = assignedKey;
  req.log.info(
    { key: assignedKey.hash, model: body.model, inboundApi, outboundApi },
    "Assigned key to request"
@@ -69,27 +66,23 @@ export const addKey: ProxyReqMutator = (manager) => {
  // TODO: KeyProvider should assemble all necessary headers
  switch (assignedKey.service) {
    case "anthropic":
      manager.setHeader("X-API-Key", assignedKey.key);
      if (!manager.request.headers["anthropic-version"]) {
        manager.setHeader("anthropic-version", "2023-06-01");
      }
      proxyReq.setHeader("X-API-Key", assignedKey.key);
      break;
    case "openai":
      const key: OpenAIKey = assignedKey as OpenAIKey;
      if (key.organizationId && !key.key.includes("svcacct")) {
        manager.setHeader("OpenAI-Organization", key.organizationId);
      if (key.organizationId) {
        proxyReq.setHeader("OpenAI-Organization", key.organizationId);
      }
      manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      break;
    case "mistral-ai":
      manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      break;
    case "azure":
      const azureKey = assignedKey.key;
      manager.setHeader("api-key", azureKey);
      proxyReq.setHeader("api-key", azureKey);
      break;
    case "aws":
    case "gcp":
    case "google-ai":
      throw new Error("add-key should not be used for this service.");
    default:
@@ -101,8 +94,10 @@ export const addKey: ProxyReqMutator = (manager) => {
 * Special case for embeddings requests which don't go through the normal
 * request pipeline.
 */
export const addKeyForEmbeddingsRequest: ProxyReqMutator = (manager) => {
  const req = manager.request;
export const addKeyForEmbeddingsRequest: HPMRequestCallback = (
  proxyReq,
  req
) => {
  if (!isEmbeddingsRequest(req)) {
    throw new Error(
      "addKeyForEmbeddingsRequest called on non-embeddings request"
@@ -113,18 +108,18 @@ export const addKeyForEmbeddingsRequest: ProxyReqMutator = (manager) => {
    throw new Error("Embeddings requests must be from OpenAI");
  }

  manager.setBody({ input: req.body.input, model: "text-embedding-ada-002" });
  req.body = { input: req.body.input, model: "text-embedding-ada-002" };

  const key = keyPool.get("text-embedding-ada-002", "openai") as OpenAIKey;

  manager.setKey(key);
  req.key = key;
  req.log.info(
    { key: key.hash, toApi: req.outboundApi },
    "Assigned Turbo key to embeddings request"
  );

  manager.setHeader("Authorization", `Bearer ${key.key}`);
  proxyReq.setHeader("Authorization", `Bearer ${key.key}`);
  if (key.organizationId) {
    manager.setHeader("OpenAI-Organization", key.organizationId);
    proxyReq.setHeader("OpenAI-Organization", key.organizationId);
  }
};
+2
-2
@@ -1,4 +1,4 @@
import { RequestPreprocessor } from "../index";
import { HPMRequestCallback } from "../index";

const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai".split(",");

@@ -13,7 +13,7 @@ class ZoomerForbiddenError extends Error {
 * Blocks requests from Janitor AI users with a fake, scary error message so I
 * stop getting emails asking for tech support.
 */
export const blockZoomerOrigins: RequestPreprocessor = (req) => {
export const blockZoomerOrigins: HPMRequestCallback = (_proxyReq, req) => {
  const origin = req.headers.origin || req.headers.referer;
  if (origin && DISALLOWED_ORIGIN_SUBSTRINGS.some((s) => origin.includes(s))) {
    // Venus-derivatives send a test prompt to check if the proxy is working.
+4
-6
@@ -1,16 +1,14 @@
import { HPMRequestCallback } from "../index";
import { config } from "../../../../config";
import { ForbiddenError } from "../../../../shared/errors";
import { getModelFamilyForRequest } from "../../../../shared/models";
import { RequestPreprocessor } from "../index";

/**
 * Ensures the selected model family is enabled by the proxy configuration.
 */
export const validateModelFamily: RequestPreprocessor = (req) => {
 **/
export const checkModelFamily: HPMRequestCallback = (_proxyReq, req, res) => {
  const family = getModelFamilyForRequest(req);
  if (!config.allowedModelFamilies.includes(family)) {
    throw new ForbiddenError(
      `Model family '${family}' is not enabled on this proxy`
    );
    throw new ForbiddenError(`Model family '${family}' is not enabled on this proxy`);
  }
};
@@ -0,0 +1,23 @@
import { fixRequestBody } from "http-proxy-middleware";
import type { HPMRequestCallback } from "../index";

/** Finalize the rewritten request body. Must be the last rewriter. */
export const finalizeBody: HPMRequestCallback = (proxyReq, req) => {
  if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
    // For image generation requests, remove stream flag.
    if (req.outboundApi === "openai-image") {
      delete req.body.stream;
    }
    // For anthropic text to chat requests, remove undefined prompt.
    if (req.outboundApi === "anthropic-chat") {
      delete req.body.prompt;
    }

    const updatedBody = JSON.stringify(req.body);
    proxyReq.setHeader("Content-Length", Buffer.byteLength(updatedBody));
    (req as any).rawBody = Buffer.from(updatedBody);

    // body-parser and http-proxy-middleware don't play nice together
    fixRequestBody(proxyReq, req);
  }
};
@@ -0,0 +1,26 @@
import type { HPMRequestCallback } from "../index";

/**
 * For AWS/Azure/Google requests, the body is signed earlier in the request
 * pipeline, before the proxy middleware. This function just assigns the path
 * and headers to the proxy request.
 */
export const finalizeSignedRequest: HPMRequestCallback = (proxyReq, req) => {
  if (!req.signedRequest) {
    throw new Error("Expected req.signedRequest to be set");
  }

  // The path depends on the selected model and the assigned key's region.
  proxyReq.path = req.signedRequest.path;

  // Amazon doesn't want extra headers, so we need to remove all of them and
  // reassign only the ones specified in the signed request.
  proxyReq.getRawHeaderNames().forEach(proxyReq.removeHeader.bind(proxyReq));
  Object.entries(req.signedRequest.headers).forEach(([key, value]) => {
    proxyReq.setHeader(key, value);
  });

  // Don't use fixRequestBody here because it adds a content-length header.
  // Amazon doesn't want that and it breaks the signature.
  proxyReq.write(req.signedRequest.body);
};
@@ -0,0 +1,21 @@
import { HPMRequestCallback } from "../index";

/**
 * Removes origin and referer headers before sending the request to the API for
 * privacy reasons.
 **/
export const stripHeaders: HPMRequestCallback = (proxyReq) => {
  proxyReq.setHeader("origin", "");
  proxyReq.setHeader("referer", "");
  proxyReq.removeHeader("tailscale-user-login");
  proxyReq.removeHeader("tailscale-user-name");
  proxyReq.removeHeader("tailscale-headers-info");
  proxyReq.removeHeader("tailscale-user-profile-pic");
  proxyReq.removeHeader("cf-connecting-ip");
  proxyReq.removeHeader("forwarded");
  proxyReq.removeHeader("true-client-ip");
  proxyReq.removeHeader("x-forwarded-for");
  proxyReq.removeHeader("x-forwarded-host");
  proxyReq.removeHeader("x-forwarded-proto");
  proxyReq.removeHeader("x-real-ip");
};
@@ -4,15 +4,12 @@ import { initializeSseStream } from "../../../shared/streaming";
import { classifyErrorAndSend } from "../common";
import {
  RequestPreprocessor,
  blockZoomerOrigins,
  countPromptTokens,
  languageFilter,
  setApiFormat,
  transformOutboundPayload,
  validateContextSize,
  validateModelFamily,
  validateVision,
  applyQuotaLimits,
} from ".";

type RequestPreprocessorOptions = {
@@ -33,15 +30,14 @@ type RequestPreprocessorOptions = {
/**
 * Returns a middleware function that processes the request body into the given
 * API format, and then sequentially runs the given additional preprocessors.
 * These should be used for validation and transformations that only need to
 * happen once per request.
 *
 * These run first in the request lifecycle, a single time per request before it
 * is added to the request queue. They aren't run again if the request is
 * re-attempted after a rate limit.
 *
 * To run functions against requests every time they are re-attempted, write a
 * ProxyReqMutator and pass it to createQueuedProxyMiddleware instead.
 * To run a preprocessor on every re-attempt, pass it to createQueueMiddleware.
 * It will run after these preprocessors, but before the request is sent to
 * http-proxy-middleware.
 */
export const createPreprocessorMiddleware = (
  apiFormat: Parameters<typeof setApiFormat>[0],
@@ -49,7 +45,6 @@ export const createPreprocessorMiddleware = (
): RequestHandler => {
  const preprocessors: RequestPreprocessor[] = [
    setApiFormat(apiFormat),
    blockZoomerOrigins,
    ...(beforeTransform ?? []),
    transformOutboundPayload,
    countPromptTokens,
@@ -57,8 +52,6 @@ export const createPreprocessorMiddleware = (
    ...(afterTransform ?? []),
    validateContextSize,
    validateVision,
    validateModelFamily,
    applyQuotaLimits,
  ];
  return async (...args) => executePreprocessors(preprocessors, args);
};
@@ -90,10 +83,10 @@ async function executePreprocessors(
    next();
  } catch (error) {
    if (error.constructor.name === "ZodError") {
      const issues = error?.issues
        ?.map((issue: ZodIssue) => `${issue.path.join(".")}: ${issue.message}`)
      const msg = error?.issues
        ?.map((issue: ZodIssue) => issue.message)
        .join("; ");
      req.log.warn({ issues }, "Prompt failed preprocessor validation.");
      req.log.info(msg, "Prompt validation failed.");
    } else {
      req.log.error(error, "Error while executing request preprocessor");
    }
@@ -143,21 +136,14 @@ const handleTestMessage: RequestHandler = (req, res) => {
      completion: "Hello!",
      // anthropic chat
      content: [{ type: "text", text: "Hello!" }],
      // gemini
      candidates: [
        {
          content: { parts: [{ text: "Hello!" }] },
          finishReason: "stop",
        },
      ],
      proxy_note:
        "SillyTavern connection test detected. Your prompt was not sent to the actual model and this response was generated by the proxy.",
        "This response was generated by the proxy's test message handler and did not go to the API.",
    });
  }
};

function isTestMessage(body: any) {
  const { messages, prompt, contents } = body;
  const { messages, prompt } = body;

  if (messages) {
    return (
@@ -165,8 +151,6 @@ function isTestMessage(body: any) {
      messages[0].role === "user" &&
      messages[0].content === "Hi"
    );
  } else if (contents) {
    return contents.length === 1 && contents[0].parts[0]?.text === "Hi";
  } else {
    return (
      prompt?.trim() === "Human: Hi\n\nAssistant:" ||
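For context, a sketch of how a route might consume the factory; the `inApi`/`outApi`/`service` fields mirror `setApiFormat`'s options elsewhere in this diff, while the router and the final proxy handler are hypothetical:

```ts
import express from "express";

const router = express.Router(); // hypothetical router

// Accept OpenAI-formatted requests and translate them to Anthropic's chat API.
const preprocess = createPreprocessorMiddleware({
  inApi: "openai",
  outApi: "anthropic-chat",
  service: "anthropic",
});

// anthropicProxy is an assumed downstream proxy handler.
router.post("/v1/chat/completions", preprocess, anthropicProxy);
```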
+7
-13
@@ -3,16 +3,14 @@ import {
  AzureOpenAIKey,
  keyPool,
} from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";
import { RequestPreprocessor } from "../index";

export const addAzureKey: ProxyReqMutator = async (manager) => {
  const req = manager.request;
export const addAzureKey: RequestPreprocessor = (req) => {
  const validAPIs: APIFormat[] = ["openai", "openai-image"];
  const apisValid = [req.outboundApi, req.inboundApi].every((api) =>
    validAPIs.includes(api)
  );
  const serviceValid = req.service === "azure";

  if (!apisValid || !serviceValid) {
    throw new Error("addAzureKey called on invalid request");
  }
@@ -24,15 +22,11 @@ export const addAzureKey: ProxyReqMutator = async (manager) => {
  const model = req.body.model.startsWith("azure-")
    ? req.body.model
    : `azure-${req.body.model}`;
  // TODO: untracked mutation to body, I think this should just be a
  // RequestPreprocessor because we don't need to do it every dequeue.

  req.key = keyPool.get(model, "azure");
  req.body.model = model;

  const key = keyPool.get(model, "azure");
  manager.setKey(key);

  // Handles the sole Azure API deviation from the OpenAI spec (that I know of)
  // TODO: this should also probably be a RequestPreprocessor
  const notNullOrUndefined = (x: any) => x !== null && x !== undefined;
  if ([req.body.logprobs, req.body.top_logprobs].some(notNullOrUndefined)) {
    // OpenAI wants logprobs: true/false and top_logprobs: number
@@ -49,7 +43,7 @@ export const addAzureKey: ProxyReqMutator = async (manager) => {
  }

  req.log.info(
    { key: key.hash, model },
    { key: req.key.hash, model },
    "Assigned Azure OpenAI key to request"
  );

@@ -61,7 +55,7 @@ export const addAzureKey: ProxyReqMutator = async (manager) => {
  const apiVersion =
    req.outboundApi === "openai" ? "2023-09-01-preview" : "2024-02-15-preview";

  manager.setSignedRequest({
  req.signedRequest = {
    method: "POST",
    protocol: "https:",
    hostname: `${resourceName}.openai.azure.com`,
@@ -72,7 +66,7 @@ export const addAzureKey: ProxyReqMutator = async (manager) => {
      ["api-key"]: apiKey,
    },
    body: JSON.stringify(req.body),
  });
  };
};

function getCredentialsFromKey(key: AzureOpenAIKey) {
@@ -0,0 +1,40 @@
import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";

export const addGoogleAIKey: RequestPreprocessor = (req) => {
  const apisValid = req.inboundApi === "openai" && req.outboundApi === "google-ai";
  const serviceValid = req.service === "google-ai";
  if (!apisValid || !serviceValid) {
    throw new Error("addGoogleAIKey called on invalid request");
  }

  if (!req.body?.model) {
    throw new Error("You must specify a model with your request.");
  }

  const model = req.body.model;
  req.key = keyPool.get(model, "google-ai");

  req.log.info(
    { key: req.key.hash, model },
    "Assigned Google AI API key to request"
  );

  // https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:generateContent?key=$API_KEY
  // https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}

  req.isStreaming = req.isStreaming || req.body.stream;
  delete req.body.stream;

  req.signedRequest = {
    method: "POST",
    protocol: "https:",
    hostname: "generativelanguage.googleapis.com",
    path: `/v1beta/models/${model}:${req.isStreaming ? "streamGenerateContent" : "generateContent"}?key=${req.key.key}`,
    headers: {
      ["host"]: `generativelanguage.googleapis.com`,
      ["content-type"]: "application/json",
    },
    body: JSON.stringify(req.body),
  };
};
@@ -1,6 +1,6 @@
import { hasAvailableQuota } from "../../../../shared/users/user-store";
import { isImageGenerationRequest, isTextGenerationRequest } from "../../common";
import { RequestPreprocessor } from "../index";
import { HPMRequestCallback } from "../index";

export class QuotaExceededError extends Error {
  public quotaInfo: any;
@@ -11,7 +11,7 @@ export class QuotaExceededError extends Error {
  }
}

export const applyQuotaLimits: RequestPreprocessor = (req) => {
export const applyQuotaLimits: HPMRequestCallback = (_proxyReq, req) => {
  const subjectToQuota =
    isTextGenerationRequest(req) || isImageGenerationRequest(req);
  if (!subjectToQuota || !req.user) return;

@@ -2,6 +2,7 @@ import { RequestPreprocessor } from "../index";
import { countTokens } from "../../../../shared/tokenization";
import { assertNever } from "../../../../shared/utils";
import {
  AnthropicChatMessage,
  GoogleAIChatMessage,
  MistralAIChatMessage,
  OpenAIChatMessage,
@@ -17,7 +18,7 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {

  switch (service) {
    case "openai": {
      req.outputTokens = req.body.max_completion_tokens || req.body.max_tokens;
      req.outputTokens = req.body.max_tokens;
      const prompt: OpenAIChatMessage[] = req.body.messages;
      result = await countTokens({ req, prompt, service });
      break;
@@ -30,13 +31,10 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
    }
    case "anthropic-chat": {
      req.outputTokens = req.body.max_tokens;
      let system = req.body.system ?? "";
      if (Array.isArray(system)) {
        system = system
          .map((m: { type: string; text: string }) => m.text)
          .join("\n");
      }
      const prompt = { system, messages: req.body.messages };
      const prompt = {
        system: req.body.system ?? "",
        messages: req.body.messages,
      };
      result = await countTokens({ req, prompt, service });
      break;
    }
@@ -52,11 +50,9 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
      result = await countTokens({ req, prompt, service });
      break;
    }
    case "mistral-ai":
    case "mistral-text": {
    case "mistral-ai": {
      req.outputTokens = req.body.max_tokens;
      const prompt: string | MistralAIChatMessage[] =
        req.body.messages ?? req.body.prompt;
      const prompt: MistralAIChatMessage[] = req.body.messages;
      result = await countTokens({ req, prompt, service });
      break;
    }

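The `Array.isArray(system)` branch above exists because the Anthropic chat API accepts `system` either as a plain string or as an array of text blocks; a minimal sketch of the flattening (sample values are hypothetical):

```ts
type SystemBlock = { type: string; text: string };

const system: string | SystemBlock[] = [
  { type: "text", text: "You are a helpful assistant." },
  { type: "text", text: "Answer concisely." },
];

// The tokenizer expects a single string, so block arrays are joined.
const flattened = Array.isArray(system)
  ? system.map((m) => m.text).join("\n")
  : system;
// flattened => "You are a helpful assistant.\nAnswer concisely."
```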
@@ -1,5 +1,4 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../../config";
import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
@@ -9,7 +8,6 @@ import {
  OpenAIChatMessage,
  flattenAnthropicMessages,
} from "../../../../shared/api-schemas";
import { GoogleAIV1GenerateContentSchema } from "../../../../shared/api-schemas/google-ai";

const rejectedClients = new Map<string, number>();

@@ -52,16 +50,14 @@ export const languageFilter: RequestPreprocessor = async (req) => {
  }
};

/*
  TODO: this is not type safe and does not raise errors if request body zod
  schema is changed.
*/
function getPromptFromRequest(req: Request) {
  const service = req.outboundApi;
  const body = req.body;
  switch (service) {
    case "anthropic-chat":
      return flattenAnthropicMessages(body.messages);
    case "anthropic-text":
      return body.prompt;
    case "openai":
    case "mistral-ai":
      return body.messages
@@ -76,18 +72,11 @@ function getPromptFromRequest(req: Request) {
        return `${msg.role}: ${text}`;
      })
      .join("\n\n");
    case "anthropic-text":
    case "openai-text":
    case "openai-image":
    case "mistral-text":
      return body.prompt;
    case "google-ai": {
      const b = body as z.infer<typeof GoogleAIV1GenerateContentSchema>;
      return [
        b.systemInstruction?.parts.map((p) => p.text),
        ...b.contents.flatMap((c) => c.parts.map((p) => p.text)),
      ].join("\n");
    }
    case "google-ai":
      return body.prompt.text;
    default:
      assertNever(service);
  }

@@ -4,22 +4,8 @@ import { LLMService } from "../../../../shared/models";
import { RequestPreprocessor } from "../index";

export const setApiFormat = (api: {
  /**
   * The API format the user made the request in and expects the response to be
   * in.
   */
  inApi: Request["inboundApi"];
  /**
   * The API format the proxy will make the request in and expects the response
   * to be in. If different from `inApi`, the proxy will transform the user's
   * request body to this format, and will transform the response body or stream
   * events from this format.
   */
  outApi: APIFormat;
  /**
   * The service the request will be sent to, which determines authentication
   * and possibly the streaming transport.
   */
  service: LLMService;
}): RequestPreprocessor => {
  return function configureRequestApiFormat(req) {

@@ -0,0 +1,130 @@
import express from "express";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
import {
  AnthropicV1TextSchema,
  AnthropicV1MessagesSchema,
} from "../../../../shared/api-schemas";
import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";

const AMZ_HOST =
  process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";

/**
 * Signs an outgoing AWS request with the appropriate headers and modifies the
 * request object in place to fix the path.
 * This happens AFTER request transformation.
 */
export const signAwsRequest: RequestPreprocessor = async (req) => {
  const { model, stream } = req.body;
  req.key = keyPool.get(model, "aws");

  req.isStreaming = stream === true || stream === "true";

  // same as addAnthropicPreamble for non-AWS requests, but has to happen here
  if (req.outboundApi === "anthropic-text") {
    let preamble = req.body.prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
    req.body.prompt = preamble + req.body.prompt;
  }

  // AWS uses mostly the same parameters as Anthropic, with a few removed params
  // and much stricter validation on unused parameters. Rather than treating it
  // as a separate schema we will use the anthropic ones and strip the unused
  // parameters.
  // TODO: This should happen in transform-outbound-payload.ts
  let strippedParams: Record<string, unknown>;
  if (req.outboundApi === "anthropic-chat") {
    strippedParams = AnthropicV1MessagesSchema.pick({
      messages: true,
      system: true,
      max_tokens: true,
      stop_sequences: true,
      temperature: true,
      top_k: true,
      top_p: true,
    })
      .strip()
      .parse(req.body);
    strippedParams.anthropic_version = "bedrock-2023-05-31";
  } else {
    strippedParams = AnthropicV1TextSchema.pick({
      prompt: true,
      max_tokens_to_sample: true,
      stop_sequences: true,
      temperature: true,
      top_k: true,
      top_p: true,
    })
      .strip()
      .parse(req.body);
  }

  const credential = getCredentialParts(req);
  const host = AMZ_HOST.replace("%REGION%", credential.region);
  // AWS only uses 2023-06-01 and does not actually check this header, but we
  // set it so that the stream adapter always selects the correct transformer.
  req.headers["anthropic-version"] = "2023-06-01";

  // Uses the AWS SDK to sign a request, then modifies our HPM proxy request
  // with the headers generated by the SDK.
  const newRequest = new HttpRequest({
    method: "POST",
    protocol: "https:",
    hostname: host,
    path: `/model/${model}/invoke${stream ? "-with-response-stream" : ""}`,
    headers: {
      ["Host"]: host,
      ["content-type"]: "application/json",
    },
    body: JSON.stringify(strippedParams),
  });

  if (stream) {
    newRequest.headers["x-amzn-bedrock-accept"] = "application/json";
  } else {
    newRequest.headers["accept"] = "*/*";
  }

  const { key, body, inboundApi, outboundApi } = req;
  req.log.info(
    { key: key.hash, model: body.model, inboundApi, outboundApi },
    "Assigned AWS credentials to request"
  );

  req.signedRequest = await sign(newRequest, getCredentialParts(req));
};

type Credential = {
  accessKeyId: string;
  secretAccessKey: string;
  region: string;
};

function getCredentialParts(req: express.Request): Credential {
  const [accessKeyId, secretAccessKey, region] = req.key!.key.split(":");

  if (!accessKeyId || !secretAccessKey || !region) {
    req.log.error(
      { key: req.key!.hash },
      "AWS_CREDENTIALS isn't correctly formatted; refer to the docs"
    );
    throw new Error("The key assigned to this request is invalid.");
  }

  return { accessKeyId, secretAccessKey, region };
}

async function sign(request: HttpRequest, credential: Credential) {
  const { accessKeyId, secretAccessKey, region } = credential;

  const signer = new SignatureV4({
    sha256: Sha256,
    credentials: { accessKeyId, secretAccessKey },
    region,
    service: "bedrock",
  });

  return signer.sign(request);
}
@@ -1,4 +1,3 @@
import { Request } from "express";
import {
  API_REQUEST_VALIDATORS,
  API_REQUEST_TRANSFORMERS,
@@ -13,39 +12,41 @@ import { RequestPreprocessor } from "../index";

/** Transforms an incoming request body to one that matches the target API. */
export const transformOutboundPayload: RequestPreprocessor = async (req) => {
  const sameService = req.inboundApi === req.outboundApi;
  const alreadyTransformed = req.retryCount > 0;
  const notTransformable =
    !isTextGenerationRequest(req) && !isImageGenerationRequest(req);

  if (alreadyTransformed) {
    return;
  } else if (notTransformable) {
    // This is probably an indication of a bug in the proxy.
    const { inboundApi, outboundApi, method, path } = req;
    req.log.warn(
      { inboundApi, outboundApi, method, path },
      "`transformOutboundPayload` called on a non-transformable request."
  if (alreadyTransformed || notTransformable) return;

  // TODO: this should be an APIFormatTransformer
  if (req.inboundApi === "mistral-ai") {
    const messages = req.body.messages;
    req.body.messages = fixMistralPrompt(messages);
    req.log.info(
      { old: messages.length, new: req.body.messages.length },
      "Fixed Mistral prompt"
    );
  }

  if (sameService) {
    const result = API_REQUEST_VALIDATORS[req.inboundApi].safeParse(req.body);
    if (!result.success) {
      req.log.warn(
        { issues: result.error.issues, body: req.body },
        "Request validation failed"
      );
      throw result.error;
    }
    req.body = result.data;
    return;
  }

  applyMistralPromptFixes(req);

  // Native prompts are those which were already provided by the client in the
  // target API format. We don't need to transform them.
  const isNativePrompt = req.inboundApi === req.outboundApi;
  if (isNativePrompt) {
    const result = API_REQUEST_VALIDATORS[req.inboundApi].parse(req.body);
    req.body = result;
    return;
  }

  // Prompt requires translation from one API format to another.
  const transformation = `${req.inboundApi}->${req.outboundApi}` as const;
  const transFn = API_REQUEST_TRANSFORMERS[transformation];

  if (transFn) {
    req.log.info({ transformation }, "Transforming request...");
    req.log.info({ transformation }, "Transforming request");
    req.body = await transFn(req);
    return;
  }
@@ -54,36 +55,3 @@ export const transformOutboundPayload: RequestPreprocessor = async (req) => {
    `${transformation} proxying is not supported. Make sure your client is configured to send requests in the correct format and to the correct endpoint.`
  );
};

// handles weird cases that don't fit into our abstractions
function applyMistralPromptFixes(req: Request): void {
  if (req.inboundApi === "mistral-ai") {
    // Mistral Chat is very similar to OpenAI but not identical and many clients
    // don't properly handle the differences. We will try to validate the
    // mistral prompt and try to fix it if it fails. It will be re-validated
    // after this function returns.
    const result = API_REQUEST_VALIDATORS["mistral-ai"].parse(req.body);
    req.body.messages = fixMistralPrompt(result.messages);
    req.log.info(
      { n: req.body.messages.length, prev: result.messages.length },
      "Applied Mistral chat prompt fixes."
    );

    // If the prompt relies on `prefix: true` for the last message, we need to
    // convert it to a text completions request because AWS Mistral support for
    // this feature is broken.
    // On Mistral La Plateforme, we can't do this because they don't expose
    // a text completions endpoint.
    const { messages } = req.body;
    const lastMessage = messages && messages[messages.length - 1];
    if (lastMessage?.role === "assistant" && req.service === "aws") {
      // enable prefix if client forgot, otherwise the template will insert an
      // eos token which is very unlikely to be what the client wants.
      lastMessage.prefix = true;
      req.outboundApi = "mistral-text";
      req.log.info(
        "Native Mistral chat prompt relies on assistant message prefix. Converting to text completions request."
      );
    }
  }
}

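For reference, a sketch of the lookup `transformOutboundPayload` performs; the key template comes from the code above, while the map's concrete shape is an assumption (the real map is imported at the top of this file):

```ts
import type { Request } from "express";

// Assumed shape: a partial record keyed by "<inboundApi>-><outboundApi>"
// whose values rewrite the request body asynchronously.
type Transformer = (req: Request) => Promise<Record<string, unknown>>;
const transformers: Partial<Record<string, Transformer>> = {
  // "openai->anthropic-chat": async (req) => ({ /* translated body */ }),
};

const transformation = "openai->anthropic-chat"; // OpenAI client, Claude upstream
const transFn = transformers[transformation];
if (!transFn) {
  // No registered transformer: falls through to the "proxying is not
  // supported" error raised above.
}
```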
@@ -6,9 +6,8 @@ import { RequestPreprocessor } from "../index";
|
||||
|
||||
const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
|
||||
const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
|
||||
// todo: make configurable
|
||||
const GOOGLE_AI_MAX_CONTEXT = 2048000;
|
||||
const MISTRAL_AI_MAX_CONTENT = 131072;
|
||||
const GOOGLE_AI_MAX_CONTEXT = 32000;
|
||||
const MISTRAL_AI_MAX_CONTENT = 32768;
|
||||
|
||||
/**
|
||||
* Assigns `req.promptTokens` and `req.outputTokens` based on the request body
|
||||
@@ -38,7 +37,6 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
|
||||
proxyMax = GOOGLE_AI_MAX_CONTEXT;
|
||||
break;
|
||||
case "mistral-ai":
|
||||
case "mistral-text":
|
||||
proxyMax = MISTRAL_AI_MAX_CONTENT;
|
||||
break;
|
||||
case "openai-image":
|
||||
@@ -58,8 +56,6 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
|
||||
modelMax = 16384;
|
||||
} else if (model.match(/^gpt-4o/)) {
|
||||
modelMax = 128000;
|
||||
} else if (model.match(/^chatgpt-4o/)) {
|
||||
modelMax = 128000;
|
||||
} else if (model.match(/gpt-4-turbo(-\d{4}-\d{2}-\d{2})?$/)) {
|
||||
modelMax = 131072;
|
||||
} else if (model.match(/gpt-4-turbo(-preview)?$/)) {
|
||||
@@ -68,10 +64,6 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
|
||||
modelMax = 131072;
|
||||
} else if (model.match(/^gpt-4(-\d{4})?-vision(-preview)?$/)) {
|
||||
modelMax = 131072;
|
||||
} else if (model.match(/^o1-mini(-\d{4}-\d{2}-\d{2})?$/)) {
|
||||
modelMax = 128000;
|
||||
} else if (model.match(/^o1(-preview)?(-\d{4}-\d{2}-\d{2})?$/)) {
|
||||
modelMax = 128000;
|
||||
} else if (model.match(/gpt-3.5-turbo/)) {
|
||||
modelMax = 16384;
|
||||
} else if (model.match(/gpt-4-32k/)) {
|
||||
@@ -88,19 +80,17 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
|
||||
modelMax = 200000;
|
||||
} else if (model.match(/^claude-3/)) {
|
||||
modelMax = 200000;
|
||||
} else if (model.match(/^gemini-/)) {
|
||||
modelMax = 1024000;
|
||||
} else if (model.match(/^gemini-\d{3}$/)) {
|
||||
modelMax = GOOGLE_AI_MAX_CONTEXT;
|
||||
} else if (model.match(/^mistral-(tiny|small|medium)$/)) {
|
||||
modelMax = MISTRAL_AI_MAX_CONTENT;
|
||||
} else if (model.match(/^anthropic\.claude-3/)) {
|
||||
modelMax = 200000;
|
||||
} else if (model.match(/^anthropic\.claude-v2:\d/)) {
|
||||
modelMax = 200000;
|
||||
} else if (model.match(/^anthropic\.claude/)) {
|
||||
// Not sure if AWS Claude has the same context limit as Anthropic Claude.
|
||||
modelMax = 100000;
|
||||
} else if (model.match(/tral/)) {
|
||||
// catches mistral, mixtral, codestral, mathstral, etc. mistral models have
|
||||
// no name convention and wildly different context windows so this is a
|
||||
// catch-all
|
||||
modelMax = MISTRAL_AI_MAX_CONTENT;
|
||||
} else {
|
||||
req.log.warn({ model }, "Unknown model, using 200k token limit.");
|
||||
modelMax = 200000;
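The long if/else chain above could equivalently be expressed as a first-match lookup table. A sketch under that assumption, with a few limits copied from the chain (this is not how the project actually structures it):

// Illustrative sketch, not part of the diff.
const MODEL_CONTEXT_LIMITS: Array<[RegExp, number]> = [
  [/^gpt-4o/, 128000],
  [/gpt-3.5-turbo/, 16384],
  [/^claude-3/, 200000],
  [/tral/, 32768], // mistral/mixtral/codestral catch-all
];

function lookupModelMax(model: string, fallback = 200000): number {
  const hit = MODEL_CONTEXT_LIMITS.find(([re]) => re.test(model));
  return hit ? hit[1] : fallback;
}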

@@ -28,7 +28,6 @@ export const validateVision: RequestPreprocessor = async (req) => {
    case "anthropic-text":
    case "google-ai":
    case "mistral-ai":
    case "mistral-text":
    case "openai-image":
    case "openai-text":
      return;

@@ -1,135 +0,0 @@
import { Request, Response } from "express";
import http from "http";
import ProxyServer from "http-proxy";
import { Readable } from "stream";
import {
  createProxyMiddleware,
  Options,
  debugProxyErrorsPlugin,
  proxyEventsPlugin,
} from "http-proxy-middleware";
import { ProxyReqMutator, stripHeaders } from "./index";
import { createOnProxyResHandler, ProxyResHandlerWithBody } from "../response";
import { createQueueMiddleware } from "../../queue";
import { getHttpAgents } from "../../../shared/network";
import { classifyErrorAndSend } from "../common";

/**
 * Options for the `createQueuedProxyMiddleware` factory function.
 */
type ProxyMiddlewareFactoryOptions = {
  /**
   * Functions which receive a ProxyReqManager and can modify the request before
   * it is proxied. The modifications will be automatically reverted if the
   * request needs to be returned to the queue.
   */
  mutations?: ProxyReqMutator[];
  /**
   * The target URL to proxy requests to. This can be a string or a function
   * which accepts the request and returns a string.
   */
  target: string | Options<Request>["router"];
  /**
   * A function which receives the proxy response and the JSON-decoded request
   * body. Only fired for non-streaming responses; streaming responses are
   * handled in `handle-streaming-response.ts`.
   */
  blockingResponseHandler?: ProxyResHandlerWithBody;
};

/**
 * Returns a middleware function that accepts incoming requests and places them
 * into the request queue. When the request is dequeued, it is proxied to the
 * target URL using the given options and middleware. Non-streaming responses
 * are handled by the given `blockingResponseHandler`.
 */
export function createQueuedProxyMiddleware({
  target,
  mutations,
  blockingResponseHandler,
}: ProxyMiddlewareFactoryOptions) {
  const hpmTarget = typeof target === "string" ? target : "https://setbyrouter";
  const hpmRouter = typeof target === "function" ? target : undefined;

  const [httpAgent, httpsAgent] = getHttpAgents();
  const agent = hpmTarget.startsWith("http:") ? httpAgent : httpsAgent;

  const proxyMiddleware = createProxyMiddleware<Request, Response>({
    target: hpmTarget,
    router: hpmRouter,
    agent,
    changeOrigin: true,
    toProxy: true,
    selfHandleResponse: typeof blockingResponseHandler === "function",
    // Disable HPM logger plugin (requires re-adding the other default plugins).
    // Contrary to its name, debugProxyErrorsPlugin is not just for debugging;
    // it fixes several error handling/connection close issues in http-proxy core.
    ejectPlugins: true,
    // Inferred (via Options<express.Request>) as Plugin<express.Request>, but
    // the default plugins only allow http.IncomingMessage for TReq. They are
    // compatible with express.Request, so we can use them. `Plugin` type is not
    // exported for some reason.
    plugins: [
      debugProxyErrorsPlugin,
      pinoLoggerPlugin,
      proxyEventsPlugin,
    ] as any,
    on: {
      proxyRes: createOnProxyResHandler(
        blockingResponseHandler ? [blockingResponseHandler] : []
      ),
      error: classifyErrorAndSend,
    },
    buffer: ((req: Request) => {
      // This is a hack/monkey patch and is not part of the official
      // http-proxy-middleware package. See patches/http-proxy+1.18.1.patch.
      let payload = req.body;
      if (typeof payload === "string") {
        payload = Buffer.from(payload);
      }
      const stream = new Readable();
      stream.push(payload);
      stream.push(null);
      return stream;
    }) as any,
  });

  return createQueueMiddleware({
    mutations: [stripHeaders, ...(mutations ?? [])],
    proxyMiddleware,
  });
}
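A hypothetical call site for the factory. The target URL and handler body are illustrative only, and `addKeyToRequest` is a made-up ProxyReqMutator; the exact handler signature is assumed from the ProxyResHandlerWithBody type later in this commit:

// Illustrative usage sketch, not part of the diff.
const openaiProxy = createQueuedProxyMiddleware({
  target: "https://api.openai.com",
  mutations: [addKeyToRequest], // hypothetical mutator
  blockingResponseHandler: async (_proxyRes, req, res, body) => {
    res.status(200).json(body); // forward the decoded body as-is
  },
});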

type ProxiedResponse = http.IncomingMessage & Response & any;
function pinoLoggerPlugin(proxyServer: ProxyServer<Request>) {
  proxyServer.on("error", (err, req, res, target) => {
    req.log.error(
      { originalUrl: req.originalUrl, targetUrl: String(target), err },
      "Error occurred while proxying request to target"
    );
  });
  proxyServer.on("proxyReq", (proxyReq, req) => {
    const { protocol, host, path } = proxyReq;
    req.log.info(
      {
        from: req.originalUrl,
        to: `${protocol}//${host}${path}`,
      },
      "Sending request to upstream API..."
    );
  });
  proxyServer.on("proxyRes", (proxyRes: ProxiedResponse, req, _res) => {
    const { protocol, host, path } = proxyRes.req;
    req.log.info(
      {
        target: `${protocol}//${host}${path}`,
        status: proxyRes.statusCode,
        contentType: proxyRes.headers["content-type"],
        contentEncoding: proxyRes.headers["content-encoding"],
        contentLength: proxyRes.headers["content-length"],
        transferEncoding: proxyRes.headers["transfer-encoding"],
      },
      "Got response from upstream API."
    );
  });
}
@@ -1,112 +0,0 @@
import { Request } from "express";
import { Key } from "../../../shared/key-management";
import { assertNever } from "../../../shared/utils";

/**
 * Represents a change to the request that will be reverted if the request
 * fails.
 */
interface ProxyReqMutation {
  target: "header" | "path" | "body" | "api-key" | "signed-request";
  key?: string;
  originalValue: any | undefined;
}

/**
 * Manages a request's headers, body, and path, allowing them to be modified
 * before the request is proxied and automatically reverted if the request
 * needs to be retried.
 */
export class ProxyReqManager {
  private req: Request;
  private mutations: ProxyReqMutation[] = [];

  /**
   * A read-only proxy of the request object. Avoid changing any properties
   * here as they will persist across retries.
   */
  public readonly request: Readonly<Request>;

  constructor(req: Request) {
    this.req = req;

    this.request = new Proxy(req, {
      get: (target, prop) => {
        if (typeof prop === "string") return target[prop as keyof Request];
        return undefined;
      },
    });
  }

  setHeader(name: string, newValue: string): void {
    const originalValue = this.req.get(name);
    this.mutations.push({ target: "header", key: name, originalValue });
    this.req.headers[name.toLowerCase()] = newValue;
  }

  removeHeader(name: string): void {
    const originalValue = this.req.get(name);
    this.mutations.push({ target: "header", key: name, originalValue });
    delete this.req.headers[name.toLowerCase()];
  }

  setBody(newBody: any): void {
    const originalValue = this.req.body;
    this.mutations.push({ target: "body", key: "body", originalValue });
    this.req.body = newBody;
  }

  setKey(newKey: Key): void {
    const originalValue = this.req.key;
    this.mutations.push({ target: "api-key", key: "key", originalValue });
    this.req.key = newKey;
  }

  setPath(newPath: string): void {
    const originalValue = this.req.path;
    this.mutations.push({ target: "path", key: "path", originalValue });
    this.req.url = newPath;
  }

  setSignedRequest(newSignedRequest: typeof this.req.signedRequest): void {
    const originalValue = this.req.signedRequest;
    this.mutations.push({ target: "signed-request", key: "signedRequest", originalValue });
    this.req.signedRequest = newSignedRequest;
  }

  hasChanged(): boolean {
    return this.mutations.length > 0;
  }

  revert(): void {
    for (const mutation of this.mutations.reverse()) {
      switch (mutation.target) {
        case "header":
          if (mutation.originalValue === undefined) {
            delete this.req.headers[mutation.key!.toLowerCase()];
            continue;
          } else {
            this.req.headers[mutation.key!.toLowerCase()] =
              mutation.originalValue;
          }
          break;
        case "path":
          this.req.url = mutation.originalValue;
          break;
        case "body":
          this.req.body = mutation.originalValue;
          break;
        case "api-key":
          // We don't reset the key here because it's not a property of the
          // inbound request, so we'd only ever be reverting it to null.
          break;
        case "signed-request":
          this.req.signedRequest = mutation.originalValue;
          break;
        default:
          assertNever(mutation.target);
      }
    }
    this.mutations = [];
  }
}
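How the manager is meant to be used around a retryable proxied request — a sketch; the dispatch logic is elided and the header value is a placeholder:

// Illustrative usage sketch, not part of the diff.
const manager = new ProxyReqManager(req);
manager.setHeader("Authorization", "Bearer <upstream-key>"); // placeholder
manager.setPath("/v1/chat/completions");
try {
  // ...dispatch the proxied request here...
} finally {
  // Undo the tracked mutations so the request can be re-enqueued cleanly.
  if (manager.hasChanged()) manager.revert();
}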
@@ -1,36 +0,0 @@
import util from "util";
import zlib from "zlib";
import { PassThrough } from "stream";

const BUFFER_DECODER_MAP = {
  gzip: util.promisify(zlib.gunzip),
  deflate: util.promisify(zlib.inflate),
  br: util.promisify(zlib.brotliDecompress),
  text: (data: Buffer) => data,
};

const STREAM_DECODER_MAP = {
  gzip: zlib.createGunzip,
  deflate: zlib.createInflate,
  br: zlib.createBrotliDecompress,
  text: () => new PassThrough(),
};

type SupportedContentEncoding = keyof typeof BUFFER_DECODER_MAP;
const isSupportedContentEncoding = (
  encoding: string
): encoding is SupportedContentEncoding => encoding in BUFFER_DECODER_MAP;

export async function decompressBuffer(buf: Buffer, encoding: string = "text") {
  if (isSupportedContentEncoding(encoding)) {
    return (await BUFFER_DECODER_MAP[encoding](buf)).toString();
  }
  throw new Error(`Unsupported content-encoding: ${encoding}`);
}

export function getStreamDecompressor(encoding: string = "text") {
  if (isSupportedContentEncoding(encoding)) {
    return STREAM_DECODER_MAP[encoding]();
  }
  throw new Error(`Unsupported content-encoding: ${encoding}`);
}
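A quick usage sketch of the buffer decoder (gzip round-trip):

// Illustrative sketch, not part of the diff.
import zlib from "zlib";

const compressed = zlib.gzipSync(Buffer.from('{"ok":true}'));
decompressBuffer(compressed, "gzip").then((text) => {
  console.log(text); // -> {"ok":true}
});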
@@ -2,33 +2,36 @@ import express from "express";
import { APIFormat } from "../../../shared/key-management";
import { assertNever } from "../../../shared/utils";
import { initializeSseStream } from "../../../shared/streaming";
import http from "http";

/**
 * Returns a Markdown-formatted message that renders semi-nicely in most chat
 * frontends. For example:
 *
 * **Proxy error (HTTP 404 Not Found)**
 * The proxy encountered an error while trying to send your prompt to the upstream service. Further technical details are provided below.
 * ***
 * *The requested Claude model might not exist, or the key might not be provisioned for it.*
 * ```
 * {
 *   "type": "error",
 *   "error": {
 *     "type": "not_found_error",
 *     "message": "model: some-invalid-model-id",
 *   },
 *   "proxy_note": "The requested Claude model might not exist, or the key might not be provisioned for it."
 * }
 * ```
 */
function getMessageContent(params: {
function getMessageContent({
  title,
  message,
  obj,
}: {
  title: string;
  message: string;
  obj?: Record<string, any>;
}) {
  const { title, message, obj } = params;
  /*
  Constructs a Markdown-formatted message that renders semi-nicely in most chat
  frontends. For example:

  **Proxy error (HTTP 404 Not Found)**
  The proxy encountered an error while trying to send your prompt to the upstream service. Further technical details are provided below.
  ***
  *The requested Claude model might not exist, or the key might not be provisioned for it.*
  ```
  {
    "type": "error",
    "error": {
      "type": "not_found_error",
      "message": "model: some-invalid-model-id",
    },
    "proxy_note": "The requested Claude model might not exist, or the key might not be provisioned for it."
  }
  ```
  */

  const note = obj?.proxy_note || obj?.error?.message || "";
  const header = `### **${title}**`;
  const friendlyMessage = note ? `${message}\n\n----\n\n*${note}*` : message;
@@ -62,17 +65,13 @@ type ErrorGeneratorOptions = {
  format: APIFormat | "unknown";
  title: string;
  message: string;
  obj?: Record<string, any>;
  obj?: object;
  reqId: string | number | object;
  model?: string;
  statusCode?: number;
};

/**
 * Very crude inference of the request format based on the request body. Don't
 * rely on this to be very accurate.
 */
function tryInferFormat(body: any): APIFormat | "unknown" {
export function tryInferFormat(body: any): APIFormat | "unknown" {
  if (typeof body !== "object" || !body.model) {
    return "unknown";
  }
@@ -96,82 +95,47 @@ function tryInferFormat(body: any): APIFormat | "unknown" {
  return "unknown";
}

/**
 * Redacts the hostname from the error message if it contains a DNS resolution
 * error. This is to avoid leaking upstream hostnames on DNS resolution errors,
 * as those may contain sensitive information about the proxy's configuration.
 */
function redactHostname(options: ErrorGeneratorOptions): ErrorGeneratorOptions {
  if (!options.message.includes("getaddrinfo")) return options;

  const redacted = { ...options };
  redacted.message = "Could not resolve hostname";

  if (typeof redacted.obj?.error === "object") {
    redacted.obj = {
      ...redacted.obj,
      error: { message: "Could not resolve hostname" },
    };
  }

  return redacted;
}
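What the redaction does to a DNS failure — a sketch with a fabricated hostname:

// Illustrative sketch, not part of the diff.
const redacted = redactHostname({
  format: "openai",
  title: "Proxy error",
  message: "getaddrinfo ENOTFOUND upstream.internal.example",
  reqId: "req-1",
});
// redacted.message === "Could not resolve hostname"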

/**
 * Generates an appropriately-formatted error response and sends it to the
 * client over their requested transport (blocking or SSE stream).
 */
export function sendErrorToClient(params: {
export function sendErrorToClient({
  options,
  req,
  res,
}: {
  options: ErrorGeneratorOptions;
  req: express.Request;
  res: express.Response;
}) {
  const { req, res } = params;
  const options = redactHostname(params.options);
  const { statusCode, message, title, obj: details } = options;
  const { format: inputFormat } = options;

  // Since we want to send the error in a format the client understands, we
  // need to know the request format. `setApiFormat` might not have been called
  // yet, so we'll try to infer it from the request body.
  // This is an error thrown before we know the format of the request, so we
  // can't send a response in the format the client expects.
  const format =
    options.format === "unknown" ? tryInferFormat(req.body) : options.format;
    inputFormat === "unknown" ? tryInferFormat(req.body) : inputFormat;
  if (format === "unknown") {
    // Early middleware error (auth, rate limit) so we can only send something
    // generic.
    const code = statusCode || 400;
    const hasDetails = details && Object.keys(details).length > 0;
    return res.status(code).json({
      error: {
        message,
        type: http.STATUS_CODES[code]!.replace(/\s+/g, "_").toLowerCase(),
      },
      ...(hasDetails ? { details } : {}),
    return res.status(options.statusCode || 400).json({
      error: options.message,
      details: options.obj,
    });
  }

  // Cannot modify headers if client opted into streaming and made it into the
  // proxy request queue, because that immediately starts an SSE stream.
  const completion = buildSpoofedCompletion({ ...options, format });
  const event = buildSpoofedSSE({ ...options, format });
  const isStreaming =
    req.isStreaming || req.body.stream === true || req.body.stream === "true";

  if (!res.headersSent) {
    res.setHeader("x-oai-proxy-error", title);
    res.setHeader("x-oai-proxy-error-status", statusCode || 500);
    res.setHeader("x-oai-proxy-error", options.title);
    res.setHeader("x-oai-proxy-error-status", options.statusCode || 500);
  }

  // By this point, we know the request format. To get the error to display in
  // chat clients' UIs, we'll send it as a 200 response as a spoofed completion
  // from the language model. Depending on whether the client is streaming, we
  // will either send an SSE event or a JSON response.
  const isStreaming = req.isStreaming || String(req.body.stream) === "true";
  if (isStreaming) {
    // User can have opted into streaming but not made it into the queue yet,
    // in which case the stream must be started first.
    if (!res.headersSent) {
      initializeSseStream(res);
    }
    res.write(buildSpoofedSSE({ ...options, format }));
    res.write(event);
    res.write(`data: [DONE]\n\n`);
    res.end();
  } else {
    res.status(200).json(buildSpoofedCompletion({ ...options, format }));
    res.status(200).json(completion);
  }
}
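Roughly the shape of the spoofed completion a non-streaming OpenAI-format client would receive — an approximation inferred from the builders below, not an exact capture of the project's output:

// Approximate shape only; field values are illustrative.
const spoofedExample = {
  id: "error-abc123",
  object: "chat.completion",
  created: Date.now(),
  model: "gpt-4o",
  choices: [
    {
      index: 0,
      message: { role: "assistant", content: "### **Proxy error**\n..." },
      finish_reason: "Proxy error",
    },
  ],
};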

@@ -209,11 +173,6 @@ export function buildSpoofedCompletion({
        },
      ],
    };
    case "mistral-text":
      return {
        outputs: [{ text: content, stop_reason: title }],
        model,
      };
    case "openai-text":
      return {
        id: "error-" + id,
@@ -245,7 +204,13 @@ export function buildSpoofedCompletion({
        stop_sequence: null,
      };
    case "google-ai":
      // TODO: Native Google AI non-streaming responses are not supported, this
      // is an untested guess at what the response should look like.
      return {
        id: "error-" + id,
        object: "chat.completion",
        created: Date.now(),
        model,
        candidates: [
          {
            content: { parts: [{ text: content }], role: "model" },
@@ -292,11 +257,6 @@ export function buildSpoofedSSE({
        choices: [{ delta: { content }, index: 0, finish_reason: title }],
      };
      break;
    case "mistral-text":
      event = {
        outputs: [{ text: content, stop_reason: title }],
      };
      break;
    case "openai-text":
      event = {
        id: "cmpl-" + id,
@@ -326,10 +286,7 @@ export function buildSpoofedSSE({
      };
      break;
    case "google-ai":
      // TODO: google ai supports two streaming transports, SSE and JSON.
      // we currently only support SSE.
      // return JSON.stringify({
      event = {
      return JSON.stringify({
        candidates: [
          {
            content: { parts: [{ text: content }], role: "model" },
@@ -339,8 +296,7 @@ export function buildSpoofedSSE({
            safetyRatings: [],
          },
        ],
      };
      break;
      });
    case "openai-image":
      return JSON.stringify(obj);
    default:

@@ -1,6 +1,19 @@
import util from "util";
import zlib from "zlib";
import { sendProxyError } from "../common";
import type { RawResponseBodyHandler } from "./index";
import { decompressBuffer } from "./compression";

const DECODER_MAP = {
  gzip: util.promisify(zlib.gunzip),
  deflate: util.promisify(zlib.inflate),
  br: util.promisify(zlib.brotliDecompress),
};

const isSupportedContentEncoding = (
  contentEncoding: string
): contentEncoding is keyof typeof DECODER_MAP => {
  return contentEncoding in DECODER_MAP;
};

/**
 * Handles the response from the upstream service and decodes the body if
@@ -22,49 +35,42 @@ export const handleBlockingResponse: RawResponseBodyHandler = async (
    throw err;
  }

  return new Promise((resolve, reject) => {
  return new Promise<string>((resolve, reject) => {
    let chunks: Buffer[] = [];
    proxyRes.on("data", (chunk) => chunks.push(chunk));
    proxyRes.on("end", async () => {
      const contentEncoding = proxyRes.headers["content-encoding"];
      const contentType = proxyRes.headers["content-type"];
      let body: string | Buffer = Buffer.concat(chunks);
      const rejectWithMessage = function (msg: string, err: Error) {
        const error = `${msg} (${err.message})`;
        req.log.warn(
          { msg: error, stack: err.stack },
          "Error in blocking response handler"
        );
        sendProxyError(req, res, 500, "Internal Server Error", { error });
        return reject(error);
      };
      let body = Buffer.concat(chunks);

      try {
        body = await decompressBuffer(body, contentEncoding);
      } catch (e) {
        return rejectWithMessage(`Could not decode response body`, e);
      const contentEncoding = proxyRes.headers["content-encoding"];
      if (contentEncoding) {
        if (isSupportedContentEncoding(contentEncoding)) {
          const decoder = DECODER_MAP[contentEncoding];
          // @ts-ignore - started failing after upgrading TypeScript, don't care
          // as it was never a problem.
          body = await decoder(body);
        } else {
          const error = `Proxy received response with unsupported content-encoding: ${contentEncoding}`;
          req.log.warn({ contentEncoding, key: req.key?.hash }, error);
          sendProxyError(req, res, 500, "Internal Server Error", {
            error,
            contentEncoding,
          });
          return reject(error);
        }
      }

      try {
        return resolve(tryParseAsJson(body, contentType));
        if (proxyRes.headers["content-type"]?.includes("application/json")) {
          const json = JSON.parse(body.toString());
          return resolve(json);
        }
        return resolve(body.toString());
      } catch (e) {
        return rejectWithMessage("API responded with invalid JSON", e);
        const msg = `Proxy received response with invalid JSON: ${e.message}`;
        req.log.warn({ error: e.stack, key: req.key?.hash }, msg);
        sendProxyError(req, res, 500, "Internal Server Error", { error: msg });
        return reject(msg);
      }
    });
  });
};

function tryParseAsJson(body: string, contentType?: string) {
  // If the response is declared as JSON, it must parse or we will throw.
  if (contentType?.includes("application/json")) {
    return JSON.parse(body);
  }
  // If it's not declared as JSON, we'll still try to parse it as JSON, since
  // some APIs return the wrong content-type header in some cases. If it fails
  // to parse, we'll just return the raw body without throwing.
  try {
    return JSON.parse(body);
  } catch (e) {
    return body;
  }
}
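The lenient parse in action:

// Illustrative sketch, not part of the diff.
tryParseAsJson('{"a":1}', "application/json");  // -> { a: 1 }
tryParseAsJson('{"a":1}', "text/plain");        // -> { a: 1 } (lenient fallback)
tryParseAsJson("plain text", "text/plain");     // -> "plain text" (no throw)
tryParseAsJson("not json", "application/json"); // throws SyntaxError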

@@ -1,5 +1,6 @@
import express from "express";
import { pipeline, Readable, Transform } from "stream";
import StreamArray from "stream-json/streamers/StreamArray";
import { StringDecoder } from "string_decoder";
import { promisify } from "util";
import type { logger } from "../../../logger";
@@ -17,45 +18,43 @@ import { getAwsEventStreamDecoder } from "./streaming/aws-event-stream-decoder";
import { EventAggregator } from "./streaming/event-aggregator";
import { SSEMessageTransformer } from "./streaming/sse-message-transformer";
import { SSEStreamAdapter } from "./streaming/sse-stream-adapter";
import { getStreamDecompressor } from "./compression";

const pipelineAsync = promisify(pipeline);

/**
 * `handleStreamedResponse` consumes a streamed response from the upstream API,
 * decodes chunk-by-chunk into a stream of events, transforms those events into
 * the client's requested format, and forwards the result to the client.
 *
 * `handleStreamedResponse` consumes and transforms a streamed response from the
 * upstream service, forwarding events to the client in their requested format.
 * After the entire stream has been consumed, it resolves with the full response
 * body so that subsequent middleware in the chain can process it as if it were
 * a non-streaming response (to count output tokens, track usage, etc).
 * a non-streaming response.
 *
 * In the event of an error, the request's streaming flag is unset and the
 * request is bounced back to the non-streaming response handler. If the error
 * is retryable, that handler will re-enqueue the request and also reset the
 * streaming flag. Unfortunately the streaming flag is set and unset in multiple
 * places, so it's hard to keep track of.
 * In the event of an error, the request's streaming flag is unset and the non-
 * streaming response handler is called instead.
 *
 * If the error is retryable, that handler will re-enqueue the request and also
 * reset the streaming flag. Unfortunately the streaming flag is set and unset
 * in multiple places, so it's hard to keep track of.
 */
export const handleStreamedResponse: RawResponseBodyHandler = async (
  proxyRes,
  req,
  res
) => {
  const { headers, statusCode } = proxyRes;
  const { hash } = req.key!;
  if (!req.isStreaming) {
    throw new Error("handleStreamedResponse called for non-streaming request.");
  }

  if (statusCode! > 201) {
  if (proxyRes.statusCode! > 201) {
    req.isStreaming = false;
    req.log.warn(
      { statusCode },
      { statusCode: proxyRes.statusCode, key: hash },
      `Streaming request returned error status code. Falling back to non-streaming response handler.`
    );
    return handleBlockingResponse(proxyRes, req, res);
  }

  req.log.debug({ headers }, `Starting to proxy SSE stream.`);
  req.log.debug({ headers: proxyRes.headers }, `Starting to proxy SSE stream.`);

  // Typically, streaming will have already been initialized by the request
  // queue to send heartbeat pings.
@@ -66,25 +65,18 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (

  const prefersNativeEvents = req.inboundApi === req.outboundApi;
  const streamOptions = {
    contentType: headers["content-type"],
    contentType: proxyRes.headers["content-type"],
    api: req.outboundApi,
    logger: req.log,
  };

  // While the request is streaming, aggregator collects all events so that we
  // can compile them into a single response object and publish that to the
  // remaining middleware. Because we have an OpenAI transformer for every
  // supported format, EventAggregator always consumes OpenAI events so that we
  // only have to write one aggregator (OpenAI input) for each output format.
  const aggregator = new EventAggregator(req);

  const decompressor = getStreamDecompressor(headers["content-encoding"]);
  // Decoder reads from the response bytes to produce a stream of plaintext.
  // Decoder turns the raw response stream into a stream of events in some
  // format (text/event-stream, vnd.amazon.event-stream, streaming JSON, etc).
  const decoder = getDecoder({ ...streamOptions, input: proxyRes });
  // Adapter consumes the decoded text and produces server-sent events so we
  // have a standard event format for the client and to translate between API
  // message formats.
  // Adapter transforms the decoded events into server-sent events.
  const adapter = new SSEStreamAdapter(streamOptions);
  // Aggregator compiles all events into a single response object.
  const aggregator = new EventAggregator({ format: req.outboundApi });
  // Transformer converts server-sent events from one vendor's API message
  // format to another.
  const transformer = new SSEMessageTransformer({
@@ -106,7 +98,7 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
  try {
    await Promise.race([
      handleAbortedStream(req, res),
      pipelineAsync(proxyRes, decompressor, decoder, adapter, transformer),
      pipelineAsync(proxyRes, decoder, adapter, transformer),
    ]);
    req.log.debug(`Finished proxying SSE stream.`);
    res.end();
@@ -173,13 +165,14 @@ function getDecoder(options: {
  logger: typeof logger;
  contentType?: string;
}) {
  const { contentType, input, logger } = options;
  const { api, contentType, input, logger } = options;
  if (contentType?.includes("application/vnd.amazon.eventstream")) {
    return getAwsEventStreamDecoder({ input, logger });
  } else if (contentType?.includes("application/json")) {
    throw new Error("JSON streaming not supported, request SSE instead");
  } else if (api === "google-ai") {
    return StreamArray.withParser();
  } else {
    // Ensures split chunks across multi-byte characters are handled correctly.
    // Passthrough stream, but ensures split chunks across multi-byte characters
    // are handled correctly.
    const stringDecoder = new StringDecoder("utf8");
    return new Transform({
      readableObjectMode: true,
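Why the StringDecoder matters here: a UTF-8 code point split across two chunks must not be decoded naively. A standalone sketch:

// Illustrative sketch, not part of the diff.
import { StringDecoder } from "string_decoder";

const decoder = new StringDecoder("utf8");
const bytes = Buffer.from("é"); // two bytes: 0xc3 0xa9
decoder.write(bytes.subarray(0, 1)); // -> "" (waits for the full sequence)
decoder.write(bytes.subarray(1));    // -> "é"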

@@ -1,5 +1,4 @@
/* This file is fucking horrendous, sorry */
// TODO: extract all per-service error response handling into its own modules
import { Request, Response } from "express";
import * as http from "http";
import { config } from "../../../config";
@@ -47,7 +46,7 @@ export type ProxyResHandlerWithBody = (
   */
  body: string | Record<string, any>
) => Promise<void>;
export type ProxyResMiddleware = ProxyResHandlerWithBody[] | undefined;
export type ProxyResMiddleware = ProxyResHandlerWithBody[];

/**
 * Returns an on.proxyRes handler that executes the given middleware stack after
@@ -71,22 +70,11 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
    req: Request,
    res: Response
  ) => {
    // Proxied request has by now been sent to the upstream API, so we revert
    // tracked mutations that were only needed to send the request.
    // This generally means path adjustment, headers, and body serialization.
    if (req.changeManager) {
      req.changeManager.revert();
    }

    const initialHandler = req.isStreaming
    const initialHandler: RawResponseBodyHandler = req.isStreaming
      ? handleStreamedResponse
      : handleBlockingResponse;
    let lastMiddleware = initialHandler.name;

    if (Buffer.isBuffer(req.body)) {
      req.body = JSON.parse(req.body.toString());
    }

    try {
      const body = await initialHandler(proxyRes, req, res);
      const middlewareStack: ProxyResMiddleware = [];
@@ -111,7 +99,7 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
        saveImage,
        logPrompt,
        logEvent,
        ...(apiMiddleware ?? [])
        ...apiMiddleware
      );
    }

@@ -135,15 +123,15 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
    }

    const { stack, message } = error;
    const details = { stack, message, lastMiddleware, key: req.key?.hash };
    const info = { stack, lastMiddleware, key: req.key?.hash };
    const description = `Error while executing proxy response middleware: ${lastMiddleware} (${message})`;

    if (res.headersSent) {
      req.log.error(details, description);
      req.log.error(info, description);
      if (!res.writableEnded) res.end();
      return;
    } else {
      req.log.error(details, description);
      req.log.error(info, description);
      res
        .status(500)
        .json({ error: "Internal server error", proxy_note: description });
@@ -174,64 +162,53 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
) => {
  const statusCode = proxyRes.statusCode || 500;
  const statusMessage = proxyRes.statusMessage || "Internal Server Error";
  const service = req.key!.service;
  // Not an error, continue to next response handler
  let errorPayload: ProxiedErrorPayload;

  if (statusCode < 400) return;

  // Parse the error response body
  let errorPayload: ProxiedErrorPayload;
  try {
    assertJsonResponse(body);
    errorPayload = body;
  } catch (parseError) {
    const strBody = String(body).slice(0, 128);
    req.log.error({ statusCode, strBody }, "Error body is not JSON");
    // Likely Bad Gateway or Gateway Timeout from upstream's reverse proxy
    const hash = req.key?.hash;
    req.log.warn({ statusCode, statusMessage, key: hash }, parseError.message);

    const details = {
    const errorObject = {
      error: parseError.message,
      status: statusCode,
      statusMessage,
      proxy_note: `Proxy got back an error, but it was not in JSON format. This is likely a temporary problem with the upstream service. Response body: ${strBody}`,
      proxy_note: `Proxy got back an error, but it was not in JSON format. This is likely a temporary problem with the upstream service.`,
    };

    sendProxyError(req, res, statusCode, statusMessage, details);
    sendProxyError(req, res, statusCode, statusMessage, errorObject);
    throw new HttpError(statusCode, parseError.message);
  }

  // Extract the error type from the response body depending on the service
  if (service === "gcp") {
    if (Array.isArray(errorPayload)) {
      errorPayload = errorPayload[0];
    }
  }
  const errorType =
    errorPayload.error?.code ||
    errorPayload.error?.type ||
    getAwsErrorType(proxyRes.headers["x-amzn-errortype"]);

  req.log.warn(
    { statusCode, statusMessage, errorType, errorPayload, key: req.key?.hash },
    `API returned an error.`
    { statusCode, type: errorType, errorPayload, key: req.key?.hash },
    `Received error response from upstream. (${proxyRes.statusMessage})`
  );

  // TODO: split upstream error handling into separate modules for each service,
  // this is out of control.

  // Try to convert response body to a ProxiedErrorPayload with message/type
  const service = req.key!.service;
  if (service === "aws") {
    // Try to standardize the error format for AWS
    errorPayload.error = { message: errorPayload.message, type: errorType };
    delete errorPayload.message;
  } else if (service === "gcp") {
    if (errorPayload.error?.code) {
      errorPayload.error = {
        message: errorPayload.error.message,
        type: errorPayload.error.status || errorPayload.error.code,
      };
    }
  }

  // Figure out what to do with the error
  // TODO: separate error handling for each service
  if (statusCode === 400) {
    switch (service) {
      case "openai":
      case "google-ai":
      case "mistral-ai":
      case "azure":
        const filteredCodes = ["content_policy_violation", "content_filter"];
@@ -243,16 +220,12 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
          // same 429 billing error that other models return.
          await handleOpenAIRateLimitError(req, errorPayload);
        } else {
          errorPayload.proxy_note = `The upstream API rejected the request. Check the error message for details.`;
          errorPayload.proxy_note = `The upstream API rejected the request. Your prompt may be too long for ${req.body?.model}.`;
        }
        break;
      case "anthropic":
      case "aws":
      case "gcp":
        await handleAnthropicAwsBadRequestError(req, errorPayload);
        break;
      case "google-ai":
        await handleGoogleAIBadRequestError(req, errorPayload);
        await handleAnthropicBadRequestError(req, errorPayload);
        break;
      default:
        assertNever(service);
@@ -268,11 +241,13 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
      errorType === "permission_error" &&
      errorPayload.error?.message?.toLowerCase().includes("multimodal")
    ) {
      req.log.warn(
        { key: req.key?.hash },
        "This Anthropic key does not support multimodal prompts."
      );
      keyPool.update(req.key!, { allowsMultimodality: false });
      await reenqueueRequest(req);
      throw new RetryableError(
        "Claude request re-enqueued because key does not support multimodality."
      );
      throw new RetryableError("Claude request re-enqueued because key does not support multimodality.");
    } else {
      keyPool.disable(req.key!, "revoked");
      errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
@@ -300,12 +275,6 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
        default:
          errorPayload.proxy_note = `Received 403 error. Key may be invalid.`;
      }
      return;
    case "mistral-ai":
    case "gcp":
      keyPool.disable(req.key!, "revoked");
      errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
      return;
    }
  } else if (statusCode === 429) {
    switch (service) {
@@ -318,9 +287,6 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
      case "aws":
        await handleAwsRateLimitError(req, errorPayload);
        break;
      case "gcp":
        await handleGcpRateLimitError(req, errorPayload);
        break;
      case "azure":
      case "mistral-ai":
        await handleAzureRateLimitError(req, errorPayload);
@@ -335,7 +301,7 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
    // Most likely model not found
    switch (service) {
      case "openai":
        if (errorType === "model_not_found") {
        if (errorPayload.error?.code === "model_not_found") {
          const requestedModel = req.body.model;
          const modelFamily = getOpenAIModelFamily(requestedModel);
          errorPayload.proxy_note = `The key assigned to your prompt does not support the requested model (${requestedModel}, family: ${modelFamily}).`;
@@ -346,35 +312,28 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
        }
        break;
      case "anthropic":
        errorPayload.proxy_note = `The requested Claude model might not exist, or the key might not be provisioned for it.`;
        break;
      case "google-ai":
        errorPayload.proxy_note = `The requested Google AI model might not exist, or the key might not be provisioned for it.`;
        break;
      case "mistral-ai":
        errorPayload.proxy_note = `The requested Mistral AI model might not exist, or the key might not be provisioned for it.`;
        break;
      case "aws":
      case "gcp":
        errorPayload.proxy_note = `The requested AWS resource might not exist, or the key might not have access to it.`;
        break;
      case "azure":
        errorPayload.proxy_note = `The key assigned to your prompt does not support the requested model.`;
        errorPayload.proxy_note = `The assigned Azure deployment does not support the requested model.`;
        break;
      default:
        assertNever(service);
    }
  } else if (statusCode === 503) {
    switch (service) {
      case "aws":
        if (
          errorType === "ServiceUnavailableException" &&
          errorPayload.error?.message?.match(/too many connections/i)
        ) {
          errorPayload.proxy_note = `The requested AWS Bedrock model is overloaded. Try again in a few minutes, or try another model.`;
        }
        break;
      default:
        errorPayload.proxy_note = `Upstream service unavailable. Try again later.`;
        break;
    }
  } else {
    errorPayload.proxy_note = `Unrecognized error from upstream service.`;
  }

  // Redact the OpenAI org id from the error message
  // Some OAI errors contain the organization ID, which we don't want to reveal.
  if (errorPayload.error?.message) {
    errorPayload.error.message = errorPayload.error.message.replace(
      /org-.{24}/gm,
@@ -382,14 +341,13 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
    );
  }

  // Send the error to the client
  sendProxyError(req, res, statusCode, statusMessage, errorPayload);

  // Re-throw the error to bubble up to onProxyRes's handler for logging
  // This is bubbled up to onProxyRes's handler for logging but will not trigger
  // a write to the response as `sendProxyError` has just done that.
  throw new HttpError(statusCode, errorPayload.error?.message);
};

async function handleAnthropicAwsBadRequestError(
async function handleAnthropicBadRequestError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
@@ -424,13 +382,11 @@ async function handleAnthropicAwsBadRequestError(
    return;
  }

  const isDisabled =
    error?.message?.match(/organization has been disabled/i) ||
    error?.message?.match(/^operation not allowed/i);
  const isDisabled = error?.message?.match(/organization has been disabled/i);
  if (isDisabled) {
    req.log.warn(
      { key: req.key?.hash, message: error?.message },
      "Anthropic/AWS key has been disabled."
      "Anthropic key has been disabled."
    );
    keyPool.disable(req.key!, "revoked");
    errorPayload.proxy_note = `Assigned key has been disabled. (${error?.message})`;
@@ -471,19 +427,6 @@ async function handleAwsRateLimitError(
  }
}

async function handleGcpRateLimitError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  if (errorPayload.error?.type === "RESOURCE_EXHAUSTED") {
    keyPool.markRateLimited(req.key!);
    await reenqueueRequest(req);
    throw new RetryableError("GCP rate-limited request re-enqueued.");
  } else {
    errorPayload.proxy_note = `Unrecognized 429 Too Many Requests error from GCP.`;
  }
}

async function handleOpenAIRateLimitError(
  req: Request,
  errorPayload: ProxiedErrorPayload
@@ -518,8 +461,58 @@ async function handleOpenAIRateLimitError(
      // Per-minute request or token rate limit is exceeded, which we can retry
      await reenqueueRequest(req);
      throw new RetryableError("Rate-limited request re-enqueued.");
    // WIP/nonfunctional
    // case "tokens_usage_based":
    //   // Weird new rate limit type that seems limited to preview models.
    //   // Distinct from `tokens` type. Can be per-minute or per-day.
    //
    //   // I've seen reports of this error for 500k tokens/day and 10k tokens/min.
    //   // 10k tokens per minute is problematic, because this is much less than
    //   // GPT4-Turbo's max context size for a single prompt and is effectively a
    //   // cap on the max context size for just that key+model, which the app is
    //   // not able to deal with.
    //
    //   // Similarly if there is a 500k tokens per day limit and 450k tokens have
    //   // been used today, the max context for that key becomes 50k tokens until
    //   // the next day and becomes progressively smaller as more tokens are used.
    //
    //   // To work around these keys we will first retry the request a few times.
    //   // After that we will reject the request, and if it's a per-day limit we
    //   // will also disable the key.
    //
    //   // "Rate limit reached for gpt-4-1106-preview in organization org-xxxxxxxxxxxxxxxxxxx on tokens_usage_based per day: Limit 500000, Used 460000, Requested 50000"
    //   // "Rate limit reached for gpt-4-1106-preview in organization org-xxxxxxxxxxxxxxxxxxx on tokens_usage_based per min: Limit 10000, Requested 40000"
    //
    //   const regex =
    //     /Rate limit reached for .+ in organization .+ on \w+ per (day|min): Limit (\d+)(?:, Used (\d+))?, Requested (\d+)/;
    //   const [, period, limit, used, requested] =
    //     errorPayload.error?.message?.match(regex) || [];
    //
    //   req.log.warn(
    //     { key: req.key?.hash, period, limit, used, requested },
    //     "Received `tokens_usage_based` rate limit error from OpenAI."
    //   );
    //
    //   if (!period || !limit || !requested) {
    //     errorPayload.proxy_note = `Unrecognized rate limit error from OpenAI. (${errorPayload.error?.message})`;
    //     break;
    //   }
    //
    //   if (req.retryCount < 2) {
    //     await reenqueueRequest(req);
    //     throw new RetryableError("Rate-limited request re-enqueued.");
    //   }
    //
    //   if (period === "min") {
    //     errorPayload.proxy_note = `Assigned key can't be used for prompts longer than ${limit} tokens, and no other keys are available right now. Reduce the length of your prompt or try again in a few minutes.`;
    //   } else {
    //     errorPayload.proxy_note = `Assigned key has reached its per-day request limit for this model. Try another model.`;
    //   }
    //
    //   keyPool.markRateLimited(req.key!);
    //   break;
    default:
      errorPayload.proxy_note = `This is likely a temporary error with the API. Try again in a few seconds.`;
      errorPayload.proxy_note = `This is likely a temporary error with OpenAI. Try again in a few seconds.`;
      break;
  }
  return errorPayload;
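For reference, what the commented-out regex would extract from the first of the quoted rate-limit messages:

// Illustrative sketch, not part of the diff.
const msg =
  "Rate limit reached for gpt-4-1106-preview in organization org-xxxxxxxxxxxxxxxxxxx on tokens_usage_based per day: Limit 500000, Used 460000, Requested 50000";
const re =
  /Rate limit reached for .+ in organization .+ on \w+ per (day|min): Limit (\d+)(?:, Used (\d+))?, Requested (\d+)/;
const [, period, limit, used, requested] = msg.match(re) || [];
// period === "day", limit === "500000", used === "460000", requested === "50000"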
@@ -541,98 +534,17 @@ async function handleAzureRateLimitError(
  }
}

//{"error":{"code":400,"message":"API Key not found. Please pass a valid API key.","status":"INVALID_ARGUMENT","details":[{"@type":"type.googleapis.com/google.rpc.ErrorInfo","reason":"API_KEY_INVALID","domain":"googleapis.com","metadata":{"service":"generativelanguage.googleapis.com"}}]}}
//{"error":{"code":400,"message":"Gemini API free tier is not available in your country. Please enable billing on your project in Google AI Studio.","status":"FAILED_PRECONDITION"}}
async function handleGoogleAIBadRequestError(
  req: Request,
  errorPayload: ProxiedErrorPayload
) {
  const error = errorPayload.error || {};
  // google changes this shit every few months
  // i don't want to deal with it
  const keyDeadMsgs = [
    /please enable billing/i,
    /API key not valid/i,
    /API key expired/i,
    /pass a valid API/i,
  ];
  const text = JSON.stringify(error);
  if (keyDeadMsgs.some((msg) => text.match(msg))) {
    req.log.warn(
      { key: req.key?.hash, error: text },
      "Google API key appears to be inoperative."
    );
    keyPool.disable(req.key!, "revoked");
    errorPayload.proxy_note = `Assigned API key cannot be used.`;
  } else {
    req.log.warn(
      { key: req.key?.hash, error: text },
      "Unknown Google API error."
    );
    errorPayload.proxy_note = `Unrecognized error from Google AI.`;
  }

  // const { message, status, details } = error;
  //
  // if (status === "INVALID_ARGUMENT") {
  //   const reason = details?.[0]?.reason;
  //   if (reason === "API_KEY_INVALID") {
  //     req.log.warn(
  //       { key: req.key?.hash, status, reason, msg: error.message },
  //       "Received `API_KEY_INVALID` error from Google AI. Check the configured API key."
  //     );
  //     keyPool.disable(req.key!, "revoked");
  //     errorPayload.proxy_note = `Assigned API key is invalid.`;
  //   }
  // } else if (status === "FAILED_PRECONDITION") {
  //   if (message.match(/please enable billing/i)) {
  //     req.log.warn(
  //       { key: req.key?.hash, status, msg: error.message },
  //       "Cannot use key due to billing restrictions."
  //     );
  //     keyPool.disable(req.key!, "revoked");
  //     errorPayload.proxy_note = `Assigned API key cannot be used.`;
  //   }
  // } else {
  //   req.log.warn(
  //     { key: req.key?.hash, status, msg: error.message },
  //     "Received unexpected 400 error from Google AI."
  //   );
  // }
}
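How the dead-key patterns apply to a serialized Google error, using a hypothetical expired-key message (the wording is made up; the patterns are the ones from keyDeadMsgs above):

// Illustrative sketch, not part of the diff.
const text = JSON.stringify({
  code: 400,
  message: "API key expired. Please renew the API key.",
  status: "INVALID_ARGUMENT",
});
const isDead = [/please enable billing/i, /API key not valid/i, /API key expired/i]
  .some((re) => re.test(text)); // -> true; the key would be disabled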
|
||||
|
||||
//{"error":{"code":429,"message":"Resource has been exhausted (e.g. check quota).","status":"RESOURCE_EXHAUSTED"}
|
||||
//
|
||||
async function handleGoogleAIRateLimitError(
|
||||
req: Request,
|
||||
errorPayload: ProxiedErrorPayload
|
||||
) {
|
||||
const status = errorPayload.error?.status;
|
||||
const text = JSON.stringify(errorPayload.error);
|
||||
|
||||
// sometimes they block keys by rate limiting them to 0 requests per minute
|
||||
// for some indefinite period of time
|
||||
const keyDeadMsgs = [
|
||||
/GenerateContentRequestsPerMinutePerProjectPerRegion/i,
|
||||
/"quota_limit_value":"0"/i,
|
||||
];
|
||||
|
||||
switch (status) {
|
||||
case "RESOURCE_EXHAUSTED": {
|
||||
if (keyDeadMsgs.every((msg) => text.match(msg))) {
|
||||
req.log.warn(
|
||||
{ key: req.key?.hash, error: text },
|
||||
"Google API key appears to be temporarily inoperative and will be disabled."
|
||||
);
|
||||
keyPool.disable(req.key!, "revoked");
|
||||
errorPayload.proxy_note = `Assigned API key cannot be used.`;
|
||||
return;
|
||||
}
|
||||
|
||||
case "RESOURCE_EXHAUSTED":
|
||||
keyPool.markRateLimited(req.key!);
|
||||
await reenqueueRequest(req);
|
||||
throw new RetryableError("Rate-limited request re-enqueued.");
|
||||
}
|
||||
default:
|
||||
errorPayload.proxy_note = `Unrecognized rate limit error from Google AI (${status}). Please report this.`;
|
||||
break;
|
||||
@@ -682,23 +594,15 @@ const countResponseTokens: ProxyResHandlerWithBody = async (
|
||||
const completion = getCompletionFromBody(req, body);
|
||||
const tokens = await countTokens({ req, completion, service });
|
||||
|
||||
if (req.service === "openai" || req.service === "azure") {
|
||||
// O1 consumes (a significant amount of) invisible tokens for the chain-
|
||||
// of-thought reasoning. We have no way to count these other than to check
|
||||
// the response body.
|
||||
tokens.reasoning_tokens =
|
||||
body.usage?.completion_tokens_details?.reasoning_tokens;
|
||||
}
|
||||
|
||||
req.log.debug(
|
||||
{ service, prevOutputTokens: req.outputTokens, tokens },
|
||||
{ service, tokens, prevOutputTokens: req.outputTokens },
|
||||
`Counted tokens for completion`
|
||||
);
|
||||
if (req.tokenizerInfo) {
|
||||
req.tokenizerInfo.completion_tokens = tokens;
|
||||
}
|
||||
|
||||
req.outputTokens = tokens.token_count + (tokens.reasoning_tokens ?? 0);
|
||||
req.outputTokens = tokens.token_count;
|
||||
} catch (error) {
|
||||
req.log.warn(
|
||||
error,
@@ -713,25 +617,22 @@ const trackKeyRateLimit: ProxyResHandlerWithBody = async (proxyRes, req) => {
keyPool.updateRateLimits(req.key!, proxyRes.headers);
};

const omittedHeaders = new Set<string>([
// Omit content-encoding because we will always decode the response body
"content-encoding",
// Omit transfer-encoding because we are using response.json which will
// set a content-length header, which is not valid for chunked responses.
"transfer-encoding",
// Don't set cookies from upstream APIs because proxied requests are stateless
"set-cookie",
"openai-organization",
"x-request-id",
"cf-ray",
]);
const copyHttpHeaders: ProxyResHandlerWithBody = async (
proxyRes,
_req,
res
) => {
Object.keys(proxyRes.headers).forEach((key) => {
if (omittedHeaders.has(key)) return;
// Omit content-encoding because we will always decode the response body
if (key === "content-encoding") {
return;
}
// We're usually using res.json() to send the response, which causes express
// to set content-length. That's not valid for chunked responses and some
// clients will reject it so we need to omit it.
if (key === "transfer-encoding") {
return;
}
res.setHeader(key, proxyRes.headers[key] as string);
});
};
@@ -775,6 +676,6 @@ function getAwsErrorType(header: string | string[] | undefined) {

function assertJsonResponse(body: any): asserts body is Record<string, any> {
if (typeof body !== "object") {
throw new Error(`Expected response to be an object, got ${typeof body}`);
throw new Error("Expected response to be an object");
}
}

@@ -11,8 +11,7 @@ import { ProxyResHandlerWithBody } from ".";
import { assertNever } from "../../../shared/utils";
import {
AnthropicChatMessage,
flattenAnthropicMessages,
GoogleAIChatMessage,
flattenAnthropicMessages, GoogleAIChatMessage,
MistralAIChatMessage,
OpenAIChatMessage,
} from "../../../shared/api-schemas";
@@ -75,16 +74,8 @@ const getPromptForRequest = (
case "mistral-ai":
return req.body.messages;
case "anthropic-chat":
let system = req.body.system;
if (Array.isArray(system)) {
system = system
.map((m: { type: string; text: string }) => m.text)
.join("\n");
}
return { system, messages: req.body.messages };
return { system: req.body.system, messages: req.body.messages };
case "openai-text":
case "anthropic-text":
case "mistral-text":
return req.body.prompt;
case "openai-image":
return {
@@ -94,6 +85,8 @@ const getPromptForRequest = (
quality: req.body.quality,
revisedPrompt: responseBody.data[0].revised_prompt,
};
case "anthropic-text":
return req.body.prompt;
case "google-ai":
return { contents: req.body.contents };
default:
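// A sketch of the system-prompt normalization removed in the anthropic-chat
// case above, assuming Anthropic's `system` field may be either a plain
// string or an array of content blocks; the helper name is illustrative:
type SystemBlock = { type: string; text: string };
function normalizeSystem(system: string | SystemBlock[] | undefined): string {
  // Array form: join the text of each block; string form: pass through.
  if (Array.isArray(system)) return system.map((b) => b.text).join("\n");
  return system ?? "";
}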
@@ -120,7 +113,9 @@ const flattenMessages = (
if (isGoogleAIChatPrompt(val)) {
return val.contents
.map(({ parts, role }) => {
const text = parts.map((p) => p.text).join("\n");
const text = parts
.map((p) => p.text)
.join("\n");
return `${role}: ${text}`;
})
.join("\n");
@@ -148,7 +143,11 @@ const flattenMessages = (
function isGoogleAIChatPrompt(
val: unknown
): val is { contents: GoogleAIChatMessage[] } {
return typeof val === "object" && val !== null && "contents" in val;
return (
typeof val === "object" &&
val !== null &&
"contents" in val
);
}

function isAnthropicChatPrompt(

@@ -1,39 +0,0 @@
import { OpenAIChatCompletionStreamEvent } from "../index";

export type MistralChatCompletionResponse = {
choices: {
index: number;
message: { role: string; content: string };
finish_reason: string | null;
}[];
};

/**
* Given a list of OpenAI chat completion events, compiles them into a single
* finalized Mistral chat completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForMistralChat(
events: OpenAIChatCompletionStreamEvent[]
): MistralChatCompletionResponse {
let merged: MistralChatCompletionResponse = {
choices: [
{ index: 0, message: { role: "", content: "" }, finish_reason: "" },
],
};
merged = events.reduce((acc, event, i) => {
// The first event will only contain role assignment and response metadata
if (i === 0) {
acc.choices[0].message.role = event.choices[0].delta.role ?? "assistant";
return acc;
}

acc.choices[0].finish_reason = event.choices[0].finish_reason ?? "";
if (event.choices[0].delta.content) {
acc.choices[0].message.content += event.choices[0].delta.content;
}

return acc;
}, merged);
return merged;
}
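// Usage sketch for the deleted aggregator above; event bodies are abbreviated
// and cast for brevity, so the shapes here are illustrative:
const sampleEvents = [
  { choices: [{ index: 0, delta: { role: "assistant" }, finish_reason: null }] },
  { choices: [{ index: 0, delta: { content: "Hello" }, finish_reason: null }] },
  { choices: [{ index: 0, delta: { content: " world" }, finish_reason: "stop" }] },
] as unknown as OpenAIChatCompletionStreamEvent[];
// mergeEventsForMistralChat(sampleEvents) would produce:
// { choices: [{ index: 0, message: { role: "assistant", content: "Hello world" },
//   finish_reason: "stop" }] }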
@@ -1,33 +0,0 @@
import { OpenAIChatCompletionStreamEvent } from "../index";

export type MistralTextCompletionResponse = {
outputs: {
text: string;
stop_reason: string | null;
}[];
};

/**
* Given a list of OpenAI chat completion events, compiles them into a single
* finalized Mistral text completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForMistralText(
events: OpenAIChatCompletionStreamEvent[]
): MistralTextCompletionResponse {
let merged: MistralTextCompletionResponse = {
outputs: [{ text: "", stop_reason: "" }],
};
merged = events.reduce((acc, event, i) => {
// The first event will only contain role assignment and response metadata
if (i === 0) {
return acc;
}

acc.outputs[0].text += event.choices[0].delta.content ?? "";
acc.outputs[0].stop_reason = event.choices[0].finish_reason ?? "";

return acc;
}, merged);
return merged;
}
@@ -24,7 +24,7 @@ export function getAwsEventStreamDecoder(params: {
if (eventType === "chunk") {
result = input[eventType];
} else {
// AWS unmarshaller treats non-chunk events (errors and exceptions) oddly.
// AWS unmarshaller treats non-chunk (errors and exceptions) oddly.
result = { [eventType]: input[eventType] } as any;
}
return result;

@@ -1,4 +1,3 @@
import express from "express";
import { APIFormat } from "../../../../shared/key-management";
import { assertNever } from "../../../../shared/utils";
import {
@@ -7,13 +6,8 @@ import {
mergeEventsForAnthropicText,
mergeEventsForOpenAIChat,
mergeEventsForOpenAIText,
mergeEventsForMistralChat,
mergeEventsForMistralText,
AnthropicV2StreamEvent,
OpenAIChatCompletionStreamEvent,
mistralAIToOpenAI,
MistralAIStreamEvent,
MistralChatCompletionEvent,
} from "./index";

/**
@@ -21,70 +15,45 @@ import {
* compiles them into a single finalized response for downstream middleware.
*/
export class EventAggregator {
private readonly model: string;
private readonly requestFormat: APIFormat;
private readonly responseFormat: APIFormat;
private readonly format: APIFormat;
private readonly events: OpenAIChatCompletionStreamEvent[];

constructor({ body, inboundApi, outboundApi }: express.Request) {
constructor({ format }: { format: APIFormat }) {
this.events = [];
this.requestFormat = inboundApi;
this.responseFormat = outboundApi;
this.model = body.model;
this.format = format;
}

addEvent(
event:
| OpenAIChatCompletionStreamEvent
| AnthropicV2StreamEvent
| MistralAIStreamEvent
) {
addEvent(event: OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent) {
if (eventIsOpenAIEvent(event)) {
this.events.push(event);
} else {
// horrible special case. previously all transformers' target format was
// openai, so the event aggregator could conveniently assume all incoming
// events were in openai format.
// now we have added some transformers that convert between non-openai
// formats, so aggregator needs to know how to collapse for more than
// just openai.
// because writing aggregation logic for every possible output format is
// annoying, we will just transform any non-openai output events to openai
// format (even if the client did not request openai at all) so that we
// still only need to write aggregators for openai SSEs.
let openAIEvent: OpenAIChatCompletionStreamEvent | undefined;
switch (this.requestFormat) {
case "anthropic-text":
assertIsAnthropicV2Event(event);
openAIEvent = anthropicV2ToOpenAI({
data: `event: completion\ndata: ${JSON.stringify(event)}\n\n`,
lastPosition: -1,
index: 0,
fallbackId: event.log_id || "fallback-" + Date.now(),
fallbackModel: event.model || this.model || "fallback-claude-3",
})?.event;
break;
case "mistral-ai":
assertIsMistralChatEvent(event);
openAIEvent = mistralAIToOpenAI({
data: `data: ${JSON.stringify(event)}\n\n`,
lastPosition: -1,
index: 0,
fallbackId: "fallback-" + Date.now(),
fallbackModel: this.model || "fallback-mistral",
})?.event;
break;
}
if (openAIEvent) {
this.events.push(openAIEvent);
// now we have added anthropic-chat-to-text, so aggregator needs to know
// how to collapse events from two formats.
// because that is annoying, we will simply transform anthropic events to
// openai (even if the client didn't ask for openai) so we don't have to
// write aggregation logic for anthropic chat (which is also a troublesome
// stateful format).
const openAIEvent = anthropicV2ToOpenAI({
data: `event: completion\ndata: ${JSON.stringify(event)}\n\n`,
lastPosition: -1,
index: 0,
fallbackId: event.log_id || "event-aggregator-fallback",
fallbackModel: event.model || "claude-3-fallback",
});
if (openAIEvent.event) {
this.events.push(openAIEvent.event);
}
}
}
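// A self-contained sketch of the "normalize at the edge" pattern the comments
// above describe: convert every inbound event shape to one canonical form so
// aggregation logic is written exactly once. The shapes below are simplified
// for illustration and are not the project's actual types.
type CanonicalDelta = { content: string; done: boolean };
function toCanonical(event: any): CanonicalDelta | undefined {
  if (event == null || typeof event !== "object") return undefined;
  // Anthropic V2 events carry a `completion` string.
  if ("completion" in event) return { content: event.completion, done: false };
  // OpenAI-style chunks carry a `choices` array of deltas.
  if (Array.isArray(event.choices)) {
    const choice = event.choices[0] ?? {};
    return {
      content: choice.delta?.content ?? "",
      done: choice.finish_reason != null,
    };
  }
  return undefined;
}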

getFinalResponse() {
switch (this.responseFormat) {
switch (this.format) {
case "openai":
case "google-ai": // TODO: this is probably wrong now that we support native Google Makersuite prompts
case "google-ai":
case "mistral-ai":
return mergeEventsForOpenAIChat(this.events);
case "openai-text":
return mergeEventsForOpenAIText(this.events);
@@ -92,16 +61,10 @@ export class EventAggregator {
return mergeEventsForAnthropicText(this.events);
case "anthropic-chat":
return mergeEventsForAnthropicChat(this.events);
case "mistral-ai":
return mergeEventsForMistralChat(this.events);
case "mistral-text":
return mergeEventsForMistralText(this.events);
case "openai-image":
throw new Error(
`SSE aggregation not supported for ${this.responseFormat}`
);
throw new Error(`SSE aggregation not supported for ${this.format}`);
default:
assertNever(this.responseFormat);
assertNever(this.format);
}
}

@@ -115,17 +78,3 @@ function eventIsOpenAIEvent(
): event is OpenAIChatCompletionStreamEvent {
return event?.object === "chat.completion.chunk";
}

function assertIsAnthropicV2Event(event: any): asserts event is AnthropicV2StreamEvent {
if (!event?.completion) {
throw new Error(`Bad event for Anthropic V2 SSE aggregation`);
}
}

function assertIsMistralChatEvent(
event: any
): asserts event is MistralChatCompletionEvent {
if (!event?.choices) {
throw new Error(`Bad event for Mistral SSE aggregation`);
}
}

@@ -7,25 +7,6 @@ export type SSEResponseTransformArgs<S = Record<string, any>> = {
state?: S;
};

export type MistralChatCompletionEvent = {
choices: {
index: number;
message: { role: string; content: string };
stop_reason: string | null;
}[];
};
export type MistralTextCompletionEvent = {
outputs: { text: string; stop_reason: string | null }[];
};
export type MistralAIStreamEvent = {
"amazon-bedrock-invocationMetrics"?: {
inputTokenCount: number;
outputTokenCount: number;
invocationLatency: number;
firstByteLatency: number;
};
} & (MistralChatCompletionEvent | MistralTextCompletionEvent);
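// The union above has no explicit discriminant field; a sketch of how a
// consumer narrows it structurally (the helper name is illustrative):
function isChatVariant(e: MistralAIStreamEvent): boolean {
  return "choices" in e; // the text variant carries `outputs` instead
}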

export type AnthropicV2StreamEvent = {
log_id?: string;
model?: string;
@@ -60,12 +41,8 @@ export { anthropicV2ToOpenAI } from "./transformers/anthropic-v2-to-openai";
export { anthropicChatToAnthropicV2 } from "./transformers/anthropic-chat-to-anthropic-v2";
export { anthropicChatToOpenAI } from "./transformers/anthropic-chat-to-openai";
export { googleAIToOpenAI } from "./transformers/google-ai-to-openai";
export { mistralAIToOpenAI } from "./transformers/mistral-ai-to-openai";
export { mistralTextToMistralChat } from "./transformers/mistral-text-to-mistral-chat";
export { passthroughToOpenAI } from "./transformers/passthrough-to-openai";
export { mergeEventsForOpenAIChat } from "./aggregators/openai-chat";
export { mergeEventsForOpenAIText } from "./aggregators/openai-text";
export { mergeEventsForAnthropicText } from "./aggregators/anthropic-text";
export { mergeEventsForAnthropicChat } from "./aggregators/anthropic-chat";
export { mergeEventsForMistralChat } from "./aggregators/mistral-chat";
export { mergeEventsForMistralText } from "./aggregators/mistral-text";

@@ -11,11 +11,8 @@ import {
googleAIToOpenAI,
OpenAIChatCompletionStreamEvent,
openAITextToOpenAIChat,
mistralAIToOpenAI,
mistralTextToMistralChat,
passthroughToOpenAI,
StreamingCompletionTransformer,
MistralChatCompletionEvent,
} from "./index";

type SSEMessageTransformerOptions = TransformOptions & {
@@ -38,9 +35,7 @@ export class SSEMessageTransformer extends Transform {
private readonly inputFormat: APIFormat;
private readonly transformFn: StreamingCompletionTransformer<
// TODO: Refactor transformers to not assume only OpenAI events as output
| OpenAIChatCompletionStreamEvent
| AnthropicV2StreamEvent
| MistralChatCompletionEvent
OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent
>;
private readonly log;
private readonly fallbackId: string;
@@ -126,17 +121,16 @@ function eventIsOpenAIEvent(
function getTransformer(
responseApi: APIFormat,
version?: string,
// In most cases, we are transforming back to OpenAI. Some responses can be
// translated between two non-OpenAI formats, eg Anthropic Chat -> Anthropic
// Text, or Mistral Text -> Mistral Chat.
// There's only one case where we're not transforming back to OpenAI, which is
// Anthropic Chat response -> Anthropic Text request. This parameter is only
// used for that case.
requestApi: APIFormat = "openai"
): StreamingCompletionTransformer<
| OpenAIChatCompletionStreamEvent
| AnthropicV2StreamEvent
| MistralChatCompletionEvent
OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent
> {
switch (responseApi) {
case "openai":
case "mistral-ai":
return passthroughToOpenAI;
case "openai-text":
return openAITextToOpenAIChat;
@@ -146,16 +140,10 @@ function getTransformer(
: anthropicV2ToOpenAI;
case "anthropic-chat":
return requestApi === "anthropic-text"
? anthropicChatToAnthropicV2 // User's legacy text prompt was converted to chat, and response must be converted back to text
? anthropicChatToAnthropicV2
: anthropicChatToOpenAI;
case "google-ai":
return googleAIToOpenAI;
case "mistral-ai":
return mistralAIToOpenAI;
case "mistral-text":
return requestApi === "mistral-ai"
? mistralTextToMistralChat // User's chat request was converted to text, and response must be converted back to chat
: mistralAIToOpenAI;
case "openai-image":
throw new Error(`SSE transformation not supported for ${responseApi}`);
default:
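// Usage sketch for the selection logic above; these calls are hypothetical:
// getTransformer("anthropic-chat", undefined, "anthropic-text")
//   -> anthropicChatToAnthropicV2 (translate back to the client's text format)
// getTransformer("anthropic-chat")
//   -> anthropicChatToOpenAI (default: normalize to OpenAI chunks)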

@@ -2,6 +2,7 @@ import pino from "pino";
import { Transform, TransformOptions } from "stream";
import { Message } from "@smithy/eventstream-codec";
import { APIFormat } from "../../../../shared/key-management";
import { buildSpoofedSSE } from "../error-generator";
import { BadRequestError, RetryableError } from "../../../../shared/errors";

type SSEStreamAdapterOptions = TransformOptions & {
@@ -19,6 +20,7 @@ type SSEStreamAdapterOptions = TransformOptions & {
*/
export class SSEStreamAdapter extends Transform {
private readonly isAwsStream;
private readonly isGoogleStream;
private api: APIFormat;
private partialMessage = "";
private textDecoder = new TextDecoder("utf8");
@@ -28,6 +30,7 @@ export class SSEStreamAdapter extends Transform {
super({ ...options, objectMode: true });
this.isAwsStream =
options?.contentType === "application/vnd.amazon.eventstream";
this.isGoogleStream = options?.api === "google-ai";
this.api = options.api;
this.log = options.logger.child({ module: "sse-stream-adapter" });
}
@@ -52,10 +55,8 @@ export class SSEStreamAdapter extends Transform {

if ("completion" in eventObj) {
return ["event: completion", `data: ${event}`].join(`\n`);
} else if (eventObj.type) {
return [`event: ${eventObj.type}`, `data: ${event}`].join(`\n`);
} else {
return `data: ${event}`;
return [`event: ${eventObj.type}`, `data: ${event}`].join(`\n`);
}
}
// noinspection FallThroughInSwitchStatementJS -- non-JSON data is unexpected
@@ -107,12 +108,44 @@ export class SSEStreamAdapter extends Transform {
}
}

/** Processes an incoming array element from the Google AI JSON stream. */
protected processGoogleObject(data: any): string | null {
// Sometimes data has fields key and value, sometimes it's just the
// candidates array.
const candidates = data.value?.candidates ?? data.candidates ?? [{}];
try {
const hasParts = candidates[0].content?.parts?.length > 0;
if (hasParts) {
return `data: ${JSON.stringify(data.value ?? data)}\n`;
} else {
this.log.error({ event: data }, "Received bad Google AI event");
return `data: ${buildSpoofedSSE({
format: "google-ai",
title: "Proxy stream error",
message:
"The proxy received malformed or unexpected data from Google AI while streaming.",
obj: data,
reqId: "proxy-sse-adapter-message",
model: "",
})}`;
}
} catch (error) {
error.lastEvent = data;
this.emit("error", error);
}
return null;
}

_transform(data: any, _enc: string, callback: (err?: Error | null) => void) {
try {
if (this.isAwsStream) {
// `data` is a Message object
const message = this.processAwsMessage(data);
if (message) this.push(message + "\n\n");
} else if (this.isGoogleStream) {
// `data` is an element from the Google AI JSON stream
const message = this.processGoogleObject(data);
if (message) this.push(message + "\n\n");
} else {
// `data` is a string, but possibly only a partial message
const fullMessages = (this.partialMessage + data).split(
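// A sketch of the buffering step above: SSE messages end at a blank line, so
// anything after the final delimiter is held back until the next chunk
// arrives. The standalone helper below mirrors that logic; names are
// illustrative, not the class's own.
let pendingPartial = "";
function splitSseChunk(chunk: string): string[] {
  const segments = (pendingPartial + chunk).split(/\r?\n\r?\n/);
  pendingPartial = segments.pop() ?? ""; // possibly-incomplete tail
  return segments; // complete messages, ready to parse
}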

@@ -34,7 +34,7 @@ export const anthropicChatToOpenAI: StreamingCompletionTransformer = (
model: params.fallbackModel,
choices: [
{
index: 0,
index: params.index,
delta: { content: deltaEvent.delta.text },
finish_reason: null,
},

@@ -9,7 +9,7 @@ const log = logger.child({

type GoogleAIStreamEvent = {
candidates: {
content?: { parts?: { text: string }[]; role: string };
content: { parts: { text: string }[]; role: string };
finishReason?: "STOP" | "MAX_TOKENS" | "SAFETY" | "RECITATION" | "OTHER";
index: number;
tokenCount?: number;
@@ -34,15 +34,9 @@ export const googleAIToOpenAI: StreamingCompletionTransformer = (params) => {
return { position: -1 };
}

const parts = completionEvent.candidates[0].content?.parts || [];
const parts = completionEvent.candidates[0].content.parts;
let content = parts[0]?.text ?? "";

if (isSafetyStop(completionEvent)) {
content = `[Proxy Warning] Gemini safety filter triggered: ${JSON.stringify(
completionEvent.candidates[0].safetyRatings
)}`;
}

// If this is the first chunk, try stripping speaker names from the response
// e.g. "John: Hello" -> "Hello"
if (index === 0) {
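// Sketch of the first-chunk cleanup described above; the regex below is an
// assumption for illustration, not the project's actual pattern:
function stripSpeakerName(text: string): string {
  // "John: Hello" -> "Hello"; text without a leading label is left untouched.
  return text.replace(/^[\w ]{1,32}: /, "");
}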
@@ -66,14 +60,6 @@ export const googleAIToOpenAI: StreamingCompletionTransformer = (params) => {
return { position: -1, event: newEvent };
};

function isSafetyStop(completion: GoogleAIStreamEvent) {
const isSafetyStop = ["SAFETY", "OTHER"].includes(
completion.candidates[0].finishReason ?? ""
);
const hasNoContent = completion.candidates[0].content?.parts?.length === 0;
return isSafetyStop && hasNoContent;
}

function asCompletion(event: ServerSentEvent): GoogleAIStreamEvent | null {
try {
const parsed = JSON.parse(event.data) as GoogleAIStreamEvent;

@@ -1,76 +0,0 @@
import { logger } from "../../../../../logger";
import { MistralAIStreamEvent, SSEResponseTransformArgs } from "../index";
import { parseEvent, ServerSentEvent } from "../parse-sse";

const log = logger.child({
module: "sse-transformer",
transformer: "mistral-ai-to-openai",
});

export const mistralAIToOpenAI = (params: SSEResponseTransformArgs) => {
const { data } = params;

const rawEvent = parseEvent(data);
if (!rawEvent.data || rawEvent.data === "[DONE]") {
return { position: -1 };
}

const completionEvent = asCompletion(rawEvent);
if (!completionEvent) {
return { position: -1 };
}

if ("choices" in completionEvent) {
const newChatEvent = {
id: params.fallbackId,
object: "chat.completion.chunk" as const,
created: Date.now(),
model: params.fallbackModel,
choices: [
{
index: completionEvent.choices[0].index,
delta: { content: completionEvent.choices[0].message.content },
finish_reason: completionEvent.choices[0].stop_reason,
},
],
};
return { position: -1, event: newChatEvent };
} else if ("outputs" in completionEvent) {
const newTextEvent = {
id: params.fallbackId,
object: "chat.completion.chunk" as const,
created: Date.now(),
model: params.fallbackModel,
choices: [
{
index: 0,
delta: { content: completionEvent.outputs[0].text },
finish_reason: completionEvent.outputs[0].stop_reason,
},
],
};
return { position: -1, event: newTextEvent };
}

// should never happen
return { position: -1 };
};

function asCompletion(event: ServerSentEvent): MistralAIStreamEvent | null {
try {
const parsed = JSON.parse(event.data);
if (
(Array.isArray(parsed.choices) &&
parsed.choices[0].message !== undefined) ||
(Array.isArray(parsed.outputs) && parsed.outputs[0].text !== undefined)
) {
return parsed;
} else {
// noinspection ExceptionCaughtLocallyJS
throw new Error("Missing required fields");
}
} catch (error) {
log.warn({ error: error.stack, event }, "Received invalid data event");
}
return null;
}
@@ -1,63 +0,0 @@
import {
MistralChatCompletionEvent,
MistralTextCompletionEvent,
StreamingCompletionTransformer,
} from "../index";
import { parseEvent, ServerSentEvent } from "../parse-sse";
import { logger } from "../../../../../logger";

const log = logger.child({
module: "sse-transformer",
transformer: "mistral-text-to-mistral-chat",
});

/**
* Transforms an incoming Mistral Text SSE to an equivalent Mistral Chat SSE.
* This is generally used when a client sends a Mistral Chat prompt, but we
* convert it to Mistral Text before sending it to the API to work around
* some bugs in Mistral/AWS prompt templating. In these cases we need to convert
* the response back to Mistral Chat.
*/
export const mistralTextToMistralChat: StreamingCompletionTransformer<
MistralChatCompletionEvent
> = (params) => {
const { data } = params;

const rawEvent = parseEvent(data);
if (!rawEvent.data) {
return { position: -1 };
}

const textCompletion = asTextCompletion(rawEvent);
if (!textCompletion) {
return { position: -1 };
}

const chatEvent: MistralChatCompletionEvent = {
choices: [
{
index: 0,
message: { role: "assistant", content: textCompletion.outputs[0].text },
stop_reason: textCompletion.outputs[0].stop_reason,
},
],
};
return { position: -1, event: chatEvent };
};

function asTextCompletion(
event: ServerSentEvent
): MistralTextCompletionEvent | null {
try {
const parsed = JSON.parse(event.data);
if (Array.isArray(parsed.outputs) && parsed.outputs[0].text !== undefined) {
return parsed;
} else {
// noinspection ExceptionCaughtLocallyJS
throw new Error("Missing required fields");
}
} catch (error: any) {
log.warn({ error: error.stack, event }, "Received invalid data event");
}
return null;
}

+43
-86
@@ -1,61 +1,48 @@
import { Request, RequestHandler, Router } from "express";
import { BadRequestError } from "../shared/errors";
import { RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { keyPool } from "../shared/key-management";
import {
getMistralAIModelFamily,
MistralAIModelFamily,
ModelFamily,
} from "../shared/models";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addKey,
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeBody,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";

// Mistral can't settle on a single naming scheme and deprecates models within
// months of releasing them so this list is hard to keep up to date. 2024-07-28
// https://docs.mistral.ai/platform/endpoints
export const KNOWN_MISTRAL_AI_MODELS = [
/*
Mistral Nemo
"A 12B model built with the partnership with Nvidia. It is easy to use and a
drop-in replacement in any system using Mistral 7B that it supersedes."
*/
"open-mistral-nemo",
"open-mistral-nemo-2407",
/*
Mistral Large
"Our flagship model with state-of-the-art reasoning, knowledge, and coding
capabilities."
*/
"mistral-large-latest",
"mistral-large-2407",
"mistral-large-2402", // deprecated
/*
Codestral
"A cutting-edge generative model that has been specifically designed and
optimized for code generation tasks, including fill-in-the-middle and code
completion."
note: this uses a separate bidi completion endpoint that is not implemented
*/
"codestral-latest",
"codestral-2405",
/* So-called "Research Models" */
// Mistral 7b (open weight, legacy)
"open-mistral-7b",
"mistral-tiny-2312",
// Mixtral 8x7b (open weight, legacy)
"open-mixtral-8x7b",
"open-mistral-8x22b",
"open-codestral-mamba",
/* Deprecated production models */
"mistral-small-2312",
// Mixtral Small (newer 8x7b, closed weight)
"mistral-small-latest",
"mistral-small-2402",
// Mistral Medium
"mistral-medium-latest",
"mistral-medium-2312",
// Mistral Large
"mistral-large-latest",
"mistral-large-2402",
// Deprecated identifiers (2024-05-01)
"mistral-tiny",
"mistral-tiny-2312",
"mistral-small",
"mistral-medium",
];

let modelsCache: any = null;
@@ -102,28 +89,23 @@ const mistralAIResponseHandler: ProxyResHandlerWithBody = async (
throw new Error("Expected body to be an object");
}

let newBody = body;
if (req.inboundApi === "mistral-text" && req.outboundApi === "mistral-ai") {
newBody = transformMistralTextToMistralChat(body);
}

res.status(200).json({ ...newBody, proxy: body.proxy });
res.status(200).json({ ...body, proxy: body.proxy });
};

export function transformMistralTextToMistralChat(textBody: any) {
return {
...textBody,
choices: [
{ message: { content: textBody.outputs[0].text, role: "assistant" } },
],
outputs: undefined,
};
}
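// Illustrative before/after for the reshaping above:
// transformMistralTextToMistralChat({ outputs: [{ text: "Hi!", stop_reason: "stop" }] })
// -> { outputs: undefined, choices: [{ message: { content: "Hi!", role: "assistant" } }] }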

const mistralAIProxy = createQueuedProxyMiddleware({
target: "https://api.mistral.ai",
mutations: [addKey, finalizeBody],
blockingResponseHandler: mistralAIResponseHandler,
const mistralAIProxy = createQueueMiddleware({
proxyMiddleware: createProxyMiddleware({
target: "https://api.mistral.ai",
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({
pipeline: [addKey, finalizeBody],
}),
proxyRes: createOnProxyResHandler([mistralAIResponseHandler]),
error: handleProxyError,
},
}),
});

const mistralAIRouter = Router();
@@ -132,37 +114,12 @@ mistralAIRouter.get("/v1/models", handleModelRequest);
mistralAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{
inApi: "mistral-ai",
outApi: "mistral-ai",
service: "mistral-ai",
},
{ beforeTransform: [detectMistralInputApi] }
),
createPreprocessorMiddleware({
inApi: "mistral-ai",
outApi: "mistral-ai",
service: "mistral-ai",
}),
mistralAIProxy
);

/**
* We can't determine if a request is Mistral text or chat just from the path
* because they both use the same endpoint. We need to check the request body
* for either `messages` or `prompt`.
* @param req
*/
export function detectMistralInputApi(req: Request) {
const { messages, prompt } = req.body;
if (messages) {
req.inboundApi = "mistral-ai";
req.outboundApi = "mistral-ai";
} else if (prompt && req.service === "mistral-ai") {
// Mistral La Plateforme doesn't expose a text completions endpoint.
throw new BadRequestError(
"Mistral (via La Plateforme API) does not support text completions. This format is only supported on Mistral via the AWS API."
);
} else if (prompt && req.service === "aws") {
req.inboundApi = "mistral-text";
req.outboundApi = "mistral-text";
}
}
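// A pure-function sketch of the detection rule above; the name is illustrative:
function classifyMistralBody(body: { messages?: unknown; prompt?: unknown }) {
  if (body.messages) return "mistral-ai"; // chat-shaped request
  if (body.prompt) return "mistral-text"; // text request; AWS backend only
  return "unknown";
}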

export const mistralAI = mistralAIRouter;

+29
-22
@@ -1,15 +1,22 @@
import { Request, RequestHandler, Router } from "express";
import { OpenAIImageGenerationResult } from "../shared/file-storage/mirror-generated-image";
import { generateModelList } from "./openai";
import { RequestHandler, Router, Request } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addKey,
createPreprocessorMiddleware,
finalizeBody,
createOnProxyReqHandler,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
import { generateModelList } from "./openai";
import { OpenAIImageGenerationResult } from "../shared/file-storage/mirror-generated-image";

const KNOWN_MODELS = ["dall-e-2", "dall-e-3"];

@@ -19,9 +26,7 @@ const handleModelRequest: RequestHandler = (_req, res) => {
if (new Date().getTime() - modelListValid < 1000 * 60) {
return res.status(200).json(modelListCache);
}
const result = generateModelList("openai").filter((m: { id: string }) =>
KNOWN_MODELS.includes(m.id)
);
const result = generateModelList(KNOWN_MODELS);
modelListCache = { object: "list", data: result };
modelListValid = new Date().getTime();
res.status(200).json(modelListCache);
@@ -89,19 +94,21 @@ function transformResponseForChat(
};
}

function replacePath(manager: ProxyReqManager) {
const req = manager.request;
const pathname = req.url.split("?")[0];
req.log.debug({ pathname }, "OpenAI image path filter");
if (req.path.startsWith("/v1/chat/completions")) {
manager.setPath("/v1/images/generations");
}
}

const openaiImagesProxy = createQueuedProxyMiddleware({
target: "https://api.openai.com",
mutations: [replacePath, addKey, finalizeBody],
blockingResponseHandler: openaiImagesResponseHandler,
const openaiImagesProxy = createQueueMiddleware({
proxyMiddleware: createProxyMiddleware({
target: "https://api.openai.com",
changeOrigin: true,
selfHandleResponse: true,
logger,
pathRewrite: {
"^/v1/chat/completions": "/v1/images/generations",
},
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [addKey, finalizeBody] }),
proxyRes: createOnProxyResHandler([openaiImagesResponseHandler]),
error: handleProxyError,
},
}),
});

const openaiImagesRouter = Router();

+113
-73
@@ -1,80 +1,113 @@
import { Request, RequestHandler, Router } from "express";
import { RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { AzureOpenAIKey, keyPool, OpenAIKey } from "../shared/key-management";
import { getOpenAIModelFamily } from "../shared/models";
import { keyPool, OpenAIKey } from "../shared/key-management";
import {
getOpenAIModelFamily,
ModelFamily,
OpenAIModelFamily,
} from "../shared/models";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addKey,
addKeyForEmbeddingsRequest,
createEmbeddingsPreprocessorMiddleware,
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeBody,
forceModel,
RequestPreprocessor,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";

// https://platform.openai.com/docs/models/overview
export const KNOWN_OPENAI_MODELS = [
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4-turbo", // alias for latest gpt4-turbo stable
"gpt-4-turbo-2024-04-09", // gpt4-turbo stable, with vision
"gpt-4-turbo-preview", // alias for latest turbo preview
"gpt-4-0125-preview", // gpt4-turbo preview 2
"gpt-4-1106-preview", // gpt4-turbo preview 1
"gpt-4-vision-preview", // gpt4-turbo preview 1 with vision
"gpt-4",
"gpt-4-0613",
"gpt-4-0314", // EOL 2024-06-13
"gpt-4-32k",
"gpt-4-32k-0314", // EOL 2024-06-13
"gpt-4-32k-0613",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301", // EOL 2024-06-13
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-instruct",
"gpt-3.5-turbo-instruct-0914",
"text-embedding-ada-002",
];

let modelsCache: any = null;
let modelsCacheTime = 0;

export function generateModelList(service: "openai" | "azure") {
const keys = keyPool
.list()
.filter((k) => k.service === service && !k.isDisabled) as
| OpenAIKey[]
| AzureOpenAIKey[];
if (keys.length === 0) return [];
export function generateModelList(models = KNOWN_OPENAI_MODELS) {
// Get available families and snapshots
let availableFamilies = new Set<OpenAIModelFamily>();
const availableSnapshots = new Set<string>();
for (const key of keyPool.list()) {
if (key.isDisabled || key.service !== "openai") continue;
const asOpenAIKey = key as OpenAIKey;
asOpenAIKey.modelFamilies.forEach((f) => availableFamilies.add(f));
asOpenAIKey.modelSnapshots.forEach((s) => availableSnapshots.add(s));
}

const allowedModelFamilies = new Set(config.allowedModelFamilies);
const modelFamilies = new Set(
keys
.flatMap((k) => k.modelFamilies)
.filter((f) => allowedModelFamilies.has(f))
// Remove disabled families
const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
availableFamilies = new Set(
[...availableFamilies].filter((x) => allowed.has(x))
);

const modelIds = new Set(
keys
.flatMap((k) => k.modelIds)
.filter((id) => {
const allowed = modelFamilies.has(getOpenAIModelFamily(id));
const known = ["gpt", "o1", "dall-e", "chatgpt", "text-embedding"].some(
(prefix) => id.startsWith(prefix)
);
const isFinetune = id.includes("ft");
return allowed && known && !isFinetune;
})
);
return models
.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "openai",
permission: [
{
id: "modelperm-" + id,
object: "model_permission",
created: new Date().getTime(),
organization: "*",
group: null,
is_blocking: false,
},
],
root: id,
parent: null,
}))
.filter((model) => {
// First check if the family is available
const hasFamily = availableFamilies.has(getOpenAIModelFamily(model.id));
if (!hasFamily) return false;

return Array.from(modelIds).map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: service,
permission: [
{
id: "modelperm-" + id,
object: "model_permission",
created: new Date().getTime(),
organization: "*",
group: null,
is_blocking: false,
},
],
root: id,
parent: null,
}));
// Then for snapshots, ensure the specific snapshot is available
const isSnapshot = model.id.match(/-\d{4}(-preview)?$/);
if (!isSnapshot) return true;
return availableSnapshots.has(model.id);
});
}
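// Worked examples for the snapshot check above (the regex is from the code):
// "gpt-4-0613"         -> matches /-\d{4}(-preview)?$/, so the exact snapshot
//                         must be present in availableSnapshots
// "gpt-4-0125-preview" -> also a snapshot (date plus "-preview" suffix)
// "gpt-4-turbo"        -> no date suffix; allowed whenever its family is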

const handleModelRequest: RequestHandler = (_req, res) => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return res.status(200).json(modelsCache);
}

if (!config.openaiKey) return { object: "list", data: [] };

const result = generateModelList("openai");

const result = generateModelList();
modelsCache = { object: "list", data: result };
modelsCacheTime = new Date().getTime();
res.status(200).json(modelsCache);
@@ -118,6 +151,7 @@ const openaiResponseHandler: ProxyResHandlerWithBody = async (
res.status(200).json({ ...newBody, proxy: body.proxy });
};

/** Only used for non-streaming responses. */
function transformTurboInstructResponse(
turboInstructBody: Record<string, any>
): Record<string, any> {
@@ -135,15 +169,31 @@ function transformTurboInstructResponse(
return transformed;
}

const openaiProxy = createQueuedProxyMiddleware({
mutations: [addKey, finalizeBody],
target: "https://api.openai.com",
blockingResponseHandler: openaiResponseHandler,
const openaiProxy = createQueueMiddleware({
proxyMiddleware: createProxyMiddleware({
target: "https://api.openai.com",
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [addKey, finalizeBody] }),
proxyRes: createOnProxyResHandler([openaiResponseHandler]),
error: handleProxyError,
},
}),
});

const openaiEmbeddingsProxy = createQueuedProxyMiddleware({
mutations: [addKeyForEmbeddingsRequest, finalizeBody],
const openaiEmbeddingsProxy = createProxyMiddleware({
target: "https://api.openai.com",
changeOrigin: true,
selfHandleResponse: false,
logger,
on: {
proxyReq: createOnProxyReqHandler({
pipeline: [addKeyForEmbeddingsRequest, finalizeBody],
}),
error: handleProxyError,
},
});

const openaiRouter = Router();
@@ -176,10 +226,11 @@ openaiRouter.post(
openaiRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "openai" },
{ afterTransform: [fixupMaxTokens] }
),
createPreprocessorMiddleware({
inApi: "openai",
outApi: "openai",
service: "openai",
}),
openaiProxy
);
// Embeddings endpoint.
@@ -190,15 +241,4 @@ openaiRouter.post(
openaiEmbeddingsProxy
);

function forceModel(model: string): RequestPreprocessor {
return (req: Request) => void (req.body.model = model);
}

function fixupMaxTokens(req: Request) {
if (!req.body.max_completion_tokens) {
req.body.max_completion_tokens = req.body.max_tokens;
}
delete req.body.max_tokens;
}
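// Illustrative before/after for fixupMaxTokens above, reflecting OpenAI's
// rename of max_tokens to max_completion_tokens on newer models:
// { max_tokens: 300 }                              -> { max_completion_tokens: 300 }
// { max_tokens: 300, max_completion_tokens: 100 }  -> { max_completion_tokens: 100 }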

export const openai = openaiRouter;

+66
-54
@@ -13,7 +13,6 @@

import crypto from "crypto";
import { Handler, Request } from "express";
import { config } from "../config";
import { BadRequestError, TooManyRequestsError } from "../shared/errors";
import { keyPool } from "../shared/key-management";
import {
@@ -23,25 +22,24 @@ import {
} from "../shared/models";
import { initializeSseStream } from "../shared/streaming";
import { logger } from "../logger";
import { getUniqueIps } from "./rate-limit";
import { ProxyReqMutator, RequestPreprocessor } from "./middleware/request";
import { getUniqueIps, SHARED_IP_ADDRESSES } from "./rate-limit";
import { RequestPreprocessor } from "./middleware/request";
import { handleProxyError } from "./middleware/common";
import { sendErrorToClient } from "./middleware/response/error-generator";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { classifyErrorAndSend } from "./middleware/common";

const queue: Request[] = [];
const log = logger.child({ module: "request-queue" });

/** Maximum number of queue slots for Agnai.chat requests. */
const AGNAI_CONCURRENCY_LIMIT = 5;
/** Maximum number of queue slots for individual users. */
const USER_CONCURRENCY_LIMIT = parseInt(
process.env.USER_CONCURRENCY_LIMIT ?? "1"
);
const USER_CONCURRENCY_LIMIT = 1;
const MIN_HEARTBEAT_SIZE = parseInt(process.env.MIN_HEARTBEAT_SIZE_B ?? "512");
const MAX_HEARTBEAT_SIZE =
1024 * parseInt(process.env.MAX_HEARTBEAT_SIZE_KB ?? "1024");
const HEARTBEAT_INTERVAL =
1000 * parseInt(process.env.HEARTBEAT_INTERVAL_SEC ?? "5");
const LOAD_THRESHOLD = parseFloat(process.env.LOAD_THRESHOLD ?? "150");
const LOAD_THRESHOLD = parseFloat(process.env.LOAD_THRESHOLD ?? "50");
const PAYLOAD_SCALE_FACTOR = parseFloat(
process.env.PAYLOAD_SCALE_FACTOR ?? "6"
);
@@ -60,28 +58,39 @@ const QUEUE_JOIN_TIMEOUT = 5000;
function getIdentifier(req: Request) {
if (req.user) return req.user.token;
if (req.risuToken) return req.risuToken;
// if (isFromSharedIp(req)) return "shared-ip";
if (isFromSharedIp(req)) return "shared-ip";
return req.ip;
}

const sharesIdentifierWith = (incoming: Request) => (queued: Request) =>
getIdentifier(queued) === getIdentifier(incoming);

const isFromSharedIp = (req: Request) => SHARED_IP_ADDRESSES.has(req.ip);

async function enqueue(req: Request) {
if (req.socket.destroyed || req.res?.writableEnded) {
// In rare cases, a request can be disconnected after it is dequeued for a
// retry, but before it is re-enqueued. In this case we may miss the abort
// and the request will loop in the queue forever.
req.log.warn("Attempt to enqueue aborted request.");
throw new Error("Attempt to enqueue aborted request.");
}

const enqueuedRequestCount = queue.filter(sharesIdentifierWith(req)).length;
let isGuest = req.user?.token === undefined;

if (enqueuedRequestCount >= USER_CONCURRENCY_LIMIT) {
throw new TooManyRequestsError(
"Your IP or user token already has another request in the queue."
);
// Requests from shared IP addresses such as Agnai.chat are exempt from IP-
// based rate limiting but can only occupy a certain number of slots in the
// queue. Authenticated users always get a single spot in the queue.
const isSharedIp = isFromSharedIp(req);
const maxConcurrentQueuedRequests =
isGuest && isSharedIp ? AGNAI_CONCURRENCY_LIMIT : USER_CONCURRENCY_LIMIT;
if (enqueuedRequestCount >= maxConcurrentQueuedRequests) {
if (isSharedIp) {
// Re-enqueued requests are not counted towards the limit since they
// already made it through the queue once.
if (req.retryCount === 0) {
throw new TooManyRequestsError(
"Too many agnai.chat requests are already queued"
);
}
} else {
throw new TooManyRequestsError(
"Your IP or user token already has another request in the queue."
);
}
}

// shitty hack to remove hpm's event listeners on retried requests
@@ -137,7 +146,19 @@ export async function reenqueueRequest(req: Request) {
}

function getQueueForPartition(partition: ModelFamily): Request[] {
return queue.filter((req) => getModelFamilyForRequest(req) === partition);
return queue
.filter((req) => getModelFamilyForRequest(req) === partition)
.sort((a, b) => {
// Certain requests are exempted from IP-based rate limiting because they
// come from a shared IP address. To prevent these requests from starving
// out other requests during periods of high traffic, we sort them to the
// end of the queue.
const aIsExempted = isFromSharedIp(a);
const bIsExempted = isFromSharedIp(b);
if (aIsExempted && !bIsExempted) return 1;
if (!aIsExempted && bIsExempted) return -1;
return 0;
});
}

export function dequeue(partition: ModelFamily): Request | undefined {
@@ -148,14 +169,7 @@ export function dequeue(partition: ModelFamily): Request | undefined {
}

const req = modelQueue.reduce((prev, curr) =>
prev.startTime +
config.tokensPunishmentFactor *
((prev.promptTokens ?? 0) + (prev.outputTokens ?? 0)) <
curr.startTime +
config.tokensPunishmentFactor *
((curr.promptTokens ?? 0) + (curr.outputTokens ?? 0))
? prev
: curr
prev.startTime < curr.startTime ? prev : curr
);
queue.splice(queue.indexOf(req), 1);
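// Sketch of the priority key removed above: a request's effective start time
// is penalized by the tokens it has already consumed, so token-heavy requests
// yield to lighter ones. Standalone form for illustration:
function priorityKey(
  r: { startTime: number; promptTokens?: number; outputTokens?: number },
  punishmentFactor: number
): number {
  return r.startTime + punishmentFactor * ((r.promptTokens ?? 0) + (r.outputTokens ?? 0));
}
// dequeue picks the request with the smallest key; the revert above reduces
// this to plain FIFO on startTime.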

@@ -247,6 +261,7 @@ let waitTimes: {
partition: ModelFamily;
start: number;
end: number;
isDeprioritized: boolean;
}[] = [];

/** Adds a successful request to the list of wait times. */
@@ -255,6 +270,7 @@ export function trackWaitTime(req: Request) {
partition: getModelFamilyForRequest(req),
start: req.startTime!,
end: req.queueOutTime ?? Date.now(),
isDeprioritized: isFromSharedIp(req),
});
}

@@ -280,7 +296,8 @@ function calculateWaitTime(partition: ModelFamily) {
.filter((wait) => {
const isSamePartition = wait.partition === partition;
const isRecent = now - wait.end < 300 * 1000;
return isSamePartition && isRecent;
const isNormalPriority = !wait.isDeprioritized;
return isSamePartition && isRecent && isNormalPriority;
})
.map((wait) => wait.end - wait.start);
const recentAverage = recentWaits.length
@@ -294,7 +311,11 @@ function calculateWaitTime(partition: ModelFamily) {
);

const currentWaits = queue
.filter((req) => getModelFamilyForRequest(req) === partition)
.filter((req) => {
const isSamePartition = getModelFamilyForRequest(req) === partition;
const isNormalPriority = !isFromSharedIp(req);
return isSamePartition && isNormalPriority;
})
.map((req) => now - req.startTime!);
const longestCurrentWait = Math.max(...currentWaits, 0);

@@ -322,35 +343,26 @@ export function getQueueLength(partition: ModelFamily | "all" = "all") {
}

export function createQueueMiddleware({
mutations = [],
beforeProxy,
proxyMiddleware,
}: {
mutations?: ProxyReqMutator[];
beforeProxy?: RequestPreprocessor;
proxyMiddleware: Handler;
}): Handler {
return async (req, res, next) => {
req.proceed = async () => {
// canonicalize the stream field which is set in a few places not always
// consistently
req.isStreaming = req.isStreaming || String(req.body.stream) === "true";
req.body.stream = req.isStreaming;

try {
// Just before executing the proxyMiddleware, we will create a
// ProxyReqManager to track modifications to the request. This allows
// us to revert those changes if the proxied request fails with a
// retryable error. That happens in proxyMiddleware's onProxyRes
// handler.
const changeManager = new ProxyReqManager(req);
req.changeManager = changeManager;
for (const mutator of mutations) {
await mutator(changeManager);
if (beforeProxy) {
try {
// Hack to let us run asynchronous middleware before the
// http-proxy-middleware handler. This is used to sign AWS requests
// before they are proxied, as the signing is asynchronous.
// Unlike RequestPreprocessors, this runs every time the request is
// dequeued, not just the first time.
await beforeProxy(req);
} catch (err) {
return handleProxyError(err, req, res);
}
} catch (err) {
// Failure during request preparation is a fatal error.
return classifyErrorAndSend(err, req, res);
}

proxyMiddleware(req, res, next);
};

+32
-15
@@ -1,6 +1,14 @@
import { Request, Response, NextFunction } from "express";
import { config } from "../config";

export const SHARED_IP_ADDRESSES = new Set([
// Agnai.chat
"157.230.249.32", // old
"157.245.148.56",
"174.138.29.50",
"209.97.162.44",
]);

const ONE_MINUTE_MS = 60 * 1000;

type Timestamp = number;
@@ -12,10 +20,7 @@ const exemptedRequests: Timestamp[] = [];
const isRecentAttempt = (now: Timestamp) => (attempt: Timestamp) =>
attempt > now - ONE_MINUTE_MS;

/**
* Returns duration in seconds to wait before retrying for Retry-After header.
*/
const getRetryAfter = (ip: string, type: "text" | "image") => {
const getTryAgainInMs = (ip: string, type: "text" | "image") => {
const now = Date.now();
const attempts = lastAttempts.get(ip) || [];
const validAttempts = attempts.filter(isRecentAttempt(now));
@@ -24,7 +29,7 @@ const getRetryAfter = (ip: string, type: "text" | "image") => {
type === "text" ? config.textModelRateLimit : config.imageModelRateLimit;

if (validAttempts.length >= limit) {
return (validAttempts[0] - now + ONE_MINUTE_MS) / 1000;
return validAttempts[0] - now + ONE_MINUTE_MS;
} else {
lastAttempts.set(ip, [...validAttempts, now]);
return 0;
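// Worked example of the sliding-window check above: with a limit of 3/min and
// recent attempts at [t-50s, t-40s, t-10s], a 4th attempt at time t must wait
// (t-50s) - t + 60s = 10s, i.e. until the oldest attempt ages out of the window.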
|
||||
@@ -91,11 +96,22 @@ export const ipLimiter = async (
|
||||
if (!textLimit && !imageLimit) return next();
|
||||
if (req.user?.type === "special") return next();
|
||||
|
||||
const path = req.baseUrl + req.path;
|
||||
const type =
|
||||
path.includes("openai-image") || path.includes("images/generations")
|
||||
? "image"
|
||||
: "text";
|
||||
// Exempts Agnai.chat from IP-based rate limiting because its IPs are shared
|
||||
// by many users. Instead, the request queue will limit the number of such
|
||||
// requests that may wait in the queue at a time, and sorts them to the end to
|
||||
// let individual users go first.
|
||||
if (SHARED_IP_ADDRESSES.has(req.ip)) {
|
||||
exemptedRequests.push(Date.now());
|
||||
req.log.info(
|
||||
{ ip: req.ip, recentExemptions: exemptedRequests.length },
|
||||
"Exempting Agnai request from rate limiting."
|
||||
);
|
||||
return next();
|
||||
}
|
||||
|
||||
const type = (req.baseUrl + req.path).includes("openai-image")
|
||||
? "image"
|
||||
: "text";
|
||||
const limit = type === "image" ? imageLimit : textLimit;
|
||||
|
||||
// If user is authenticated, key rate limiting by their token. Otherwise, key
|
||||
@@ -107,14 +123,15 @@ export const ipLimiter = async (
|
||||
res.set("X-RateLimit-Remaining", remaining.toString());
|
||||
res.set("X-RateLimit-Reset", reset.toString());
|
||||
|
||||
const retryAfterTime = getRetryAfter(rateLimitKey, type);
|
||||
if (retryAfterTime > 0) {
|
||||
const waitSec = Math.ceil(retryAfterTime).toString();
|
||||
res.set("Retry-After", waitSec);
|
||||
const tryAgainInMs = getTryAgainInMs(rateLimitKey, type);
|
||||
if (tryAgainInMs > 0) {
|
||||
res.set("Retry-After", tryAgainInMs.toString());
|
||||
res.status(429).json({
|
||||
error: {
|
||||
type: "proxy_rate_limited",
|
||||
message: `This model type is rate limited to ${limit} prompts per minute. Please try again in ${waitSec} seconds.`,
|
||||
message: `This model type is rate limited to ${limit} prompts per minute. Please try again in ${Math.ceil(
|
||||
tryAgainInMs / 1000
|
||||
)} seconds.`,
|
||||
},
|
||||
});
|
||||
} else {
|
||||
|
||||
+19
-25
@@ -1,55 +1,42 @@
|
||||
import express from "express";
|
||||
import { addV1 } from "./add-v1";
|
||||
import { anthropic } from "./anthropic";
|
||||
import { aws } from "./aws";
|
||||
import { azure } from "./azure";
|
||||
import { checkRisuToken } from "./check-risu-token";
|
||||
import express, { Request, Response, NextFunction } from "express";
|
||||
import { gatekeeper } from "./gatekeeper";
|
||||
import { gcp } from "./gcp";
|
||||
import { googleAI } from "./google-ai";
|
||||
import { mistralAI } from "./mistral-ai";
|
||||
import { checkRisuToken } from "./check-risu-token";
|
||||
import { openai } from "./openai";
|
||||
import { openaiImage } from "./openai-image";
|
||||
import { anthropic } from "./anthropic";
|
||||
import { googleAI } from "./google-ai";
|
||||
import { mistralAI } from "./mistral-ai";
|
||||
import { aws } from "./aws";
|
||||
import { azure } from "./azure";
|
||||
import { sendErrorToClient } from "./middleware/response/error-generator";
|
||||
|
||||
const proxyRouter = express.Router();
|
||||
|
||||
// Remove `expect: 100-continue` header from requests due to incompatibility
|
||||
// with node-http-proxy.
|
||||
proxyRouter.use((req, _res, next) => {
|
||||
if (req.headers.expect) {
|
||||
// node-http-proxy does not like it when clients send `expect: 100-continue`
|
||||
// and will stall. none of the upstream APIs use this header anyway.
|
||||
delete req.headers.expect;
|
||||
}
|
||||
next();
|
||||
});
|
||||
|
||||
// Apply body parsers.
|
||||
proxyRouter.use(
|
||||
express.json({ limit: "100mb" }),
|
||||
express.urlencoded({ extended: true, limit: "100mb" })
|
||||
);
|
||||
|
||||
// Apply auth/rate limits.
|
||||
proxyRouter.use(gatekeeper);
|
||||
proxyRouter.use(checkRisuToken);
|
||||
|
||||
// Initialize request queue metadata.
|
||||
proxyRouter.use((req, _res, next) => {
|
||||
req.startTime = Date.now();
|
||||
req.retryCount = 0;
|
||||
next();
|
||||
});
|
||||
|
||||
// Proxy endpoints.
|
||||
proxyRouter.use("/openai", addV1, openai);
|
||||
proxyRouter.use("/openai-image", addV1, openaiImage);
|
||||
proxyRouter.use("/anthropic", addV1, anthropic);
|
||||
proxyRouter.use("/google-ai", addV1, googleAI);
|
||||
proxyRouter.use("/mistral-ai", addV1, mistralAI);
|
||||
proxyRouter.use("/aws", aws);
|
||||
proxyRouter.use("/gcp/claude", addV1, gcp);
|
||||
proxyRouter.use("/aws/claude", addV1, aws);
|
||||
proxyRouter.use("/azure/openai", addV1, azure);
|
||||
|
||||
// Redirect browser requests to the homepage.
|
||||
proxyRouter.get("*", (req, res, next) => {
|
||||
const isBrowser = req.headers["user-agent"]?.includes("Mozilla");
|
||||
@@ -59,8 +46,7 @@ proxyRouter.get("*", (req, res, next) => {
|
||||
next();
|
||||
}
|
||||
});
|
||||
|
||||
// Send a fake client error if user specifies an invalid proxy endpoint.
|
||||
// Handle 404s.
|
||||
proxyRouter.use((req, res) => {
|
||||
sendErrorToClient({
|
||||
req,
|
||||
@@ -81,3 +67,11 @@ proxyRouter.use((req, res) => {
|
||||
});
|
||||
|
||||
export { proxyRouter };
|
||||
|
||||
function addV1(req: Request, res: Response, next: NextFunction) {
|
||||
// Clients don't consistently use the /v1 prefix so we'll add it for them.
|
||||
if (!req.path.startsWith("/v1/")) {
|
||||
req.url = `/v1${req.url}`;
|
||||
}
|
||||
next();
|
||||
}
|
||||
|
||||
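For reference, a minimal sketch of how the addV1 rewrite above composes with the router mounts: because the middleware runs under a path mount, req.url is relative to the mount point, so both the bare and the /v1-prefixed path reach the same handler. Route names here are illustrative.

import express, { RequestHandler } from "express";

const addV1: RequestHandler = (req, _res, next) => {
  // Prepend /v1 when clients omit it, as in the diff above.
  if (!req.path.startsWith("/v1/")) req.url = `/v1${req.url}`;
  next();
};

const openai = express.Router();
openai.post("/v1/chat/completions", (_req, res) => {
  res.json({ ok: true });
});

const proxyRouter = express.Router();
proxyRouter.use("/openai", addV1, openai);
// POST /openai/chat/completions and POST /openai/v1/chat/completions now
// hit the same route.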
+1
-18
@@ -23,7 +23,6 @@ import { init as initTokenizers } from "./shared/tokenization";
|
||||
import { checkOrigin } from "./proxy/check-origin";
|
||||
import { sendErrorToClient } from "./proxy/middleware/response/error-generator";
|
||||
import { initializeDatabase, getDatabase } from "./shared/database";
|
||||
import { initializeFirebase } from "./shared/firebase";
|
||||
|
||||
const PORT = config.port;
|
||||
const BIND_ADDRESS = config.bindAddress;
|
||||
@@ -50,7 +49,6 @@ app.use(
|
||||
// Don't log the prompt text on transform errors
|
||||
"body.messages",
|
||||
"body.prompt",
|
||||
"body.contents",
|
||||
],
|
||||
censor: "********",
|
||||
},
|
||||
@@ -89,15 +87,6 @@ app.use(blacklist);
|
||||
app.use(checkOrigin);
|
||||
|
||||
app.use("/admin", adminRouter);
|
||||
app.use((req, _, next) => {
|
||||
// For whatever reason SillyTavern just ignores the path a user provides
|
||||
// when using Google AI with a reverse proxy. We'll fix it here.
|
||||
if (req.path.startsWith("/v1beta/models/")) {
|
||||
req.url = `${config.proxyEndpointRoute}/google-ai${req.url}`;
|
||||
return next();
|
||||
}
|
||||
next();
|
||||
});
|
||||
app.use(config.proxyEndpointRoute, proxyRouter);
|
||||
app.use("/user", userRouter);
|
||||
if (config.staticServiceInfo) {
|
||||
@@ -138,12 +127,6 @@ async function start() {
|
||||
logger.info("Checking configs and external dependencies...");
|
||||
await assertConfigIsValid();
|
||||
|
||||
if (config.gatekeeperStore.startsWith("firebase")) {
|
||||
logger.info("Testing Firebase connection...");
|
||||
await initializeFirebase();
|
||||
logger.info("Firebase connection successful.");
|
||||
}
|
||||
|
||||
keyPool.init();
|
||||
|
||||
await initTokenizers();
|
||||
@@ -173,7 +156,7 @@ async function start() {
|
||||
app.listen(PORT, BIND_ADDRESS, () => {
|
||||
logger.info(
|
||||
{ port: PORT, interface: BIND_ADDRESS },
|
||||
"Server ready to accept connections."
|
||||
"Now listening for connections."
|
||||
);
|
||||
registerUncaughtExceptionHandler();
|
||||
});
|
||||
|
||||
+127
-118
@@ -2,7 +2,8 @@ import { config, listConfig } from "./config";
|
||||
import {
|
||||
AnthropicKey,
|
||||
AwsBedrockKey,
|
||||
GcpKey,
|
||||
AzureOpenAIKey,
|
||||
GoogleAIKey,
|
||||
keyPool,
|
||||
OpenAIKey,
|
||||
} from "./shared/key-management";
|
||||
@@ -10,7 +11,6 @@ import {
|
||||
AnthropicModelFamily,
|
||||
assertIsKnownModelFamily,
|
||||
AwsBedrockModelFamily,
|
||||
GcpModelFamily,
|
||||
AzureOpenAIModelFamily,
|
||||
GoogleAIModelFamily,
|
||||
LLM_SERVICES,
|
||||
@@ -24,16 +24,22 @@ import { getCostSuffix, getTokenCostUsd, prettyTokens } from "./shared/stats";
|
||||
import { getUniqueIps } from "./proxy/rate-limit";
|
||||
import { assertNever } from "./shared/utils";
|
||||
import { getEstimatedWaitTime, getQueueLength } from "./proxy/queue";
|
||||
import { MistralAIKey } from "./shared/key-management/mistral-ai/provider";
|
||||
|
||||
const CACHE_TTL = 2000;
|
||||
|
||||
type KeyPoolKey = ReturnType<typeof keyPool.list>[0];
|
||||
const keyIsOpenAIKey = (k: KeyPoolKey): k is OpenAIKey =>
|
||||
k.service === "openai";
|
||||
const keyIsAzureKey = (k: KeyPoolKey): k is AzureOpenAIKey =>
|
||||
k.service === "azure";
|
||||
const keyIsAnthropicKey = (k: KeyPoolKey): k is AnthropicKey =>
|
||||
k.service === "anthropic";
|
||||
const keyIsGoogleAIKey = (k: KeyPoolKey): k is GoogleAIKey =>
|
||||
k.service === "google-ai";
|
||||
const keyIsMistralAIKey = (k: KeyPoolKey): k is MistralAIKey =>
|
||||
k.service === "mistral-ai";
|
||||
const keyIsAwsKey = (k: KeyPoolKey): k is AwsBedrockKey => k.service === "aws";
|
||||
const keyIsGcpKey = (k: KeyPoolKey): k is GcpKey => k.service === "gcp";
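The helpers above are TypeScript type predicates: boolean checks whose `k is ...` return type lets the compiler narrow the service union at the call site. A standalone sketch with stand-in key types (not the real interfaces):

type OpenAIKey = { service: "openai"; isTrial: boolean };
type AnthropicKey = { service: "anthropic"; isPozzed: boolean };
type KeyPoolKey = OpenAIKey | AnthropicKey;

const keyIsOpenAIKey = (k: KeyPoolKey): k is OpenAIKey =>
  k.service === "openai";

function describe(k: KeyPoolKey) {
  if (keyIsOpenAIKey(k)) {
    return k.isTrial; // narrowed to OpenAIKey here
  }
  return k.isPozzed; // and to AnthropicKey here
}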
|
||||
|
||||
/** Stats aggregated across all keys for a given service. */
|
||||
type ServiceAggregate = "keys" | "uncheckedKeys" | "orgs";
|
||||
@@ -45,15 +51,10 @@ type ModelAggregates = {
|
||||
overQuota?: number;
|
||||
pozzed?: number;
|
||||
awsLogged?: number;
|
||||
// needed to disambiguate aws-claude family's variants
|
||||
awsClaude2?: number;
|
||||
awsSonnet3?: number;
|
||||
awsSonnet3_5?: number;
|
||||
awsHaiku: number;
|
||||
gcpSonnet?: number;
|
||||
gcpSonnet35?: number;
|
||||
gcpHaiku?: number;
|
||||
awsSonnet?: number;
|
||||
awsHaiku?: number;
|
||||
queued: number;
|
||||
queueTime: string;
|
||||
tokens: number;
|
||||
};
|
||||
/** All possible combinations of model family and aggregate type. */
|
||||
@@ -85,10 +86,8 @@ type AnthropicInfo = BaseFamilyInfo & {
|
||||
};
|
||||
type AwsInfo = BaseFamilyInfo & {
|
||||
privacy?: string;
|
||||
enabledVariants?: string;
|
||||
};
|
||||
type GcpInfo = BaseFamilyInfo & {
|
||||
enabledVariants?: string;
|
||||
sonnetKeys?: number;
|
||||
haikuKeys?: number;
|
||||
};
|
||||
|
||||
// prettier-ignore
|
||||
@@ -96,11 +95,12 @@ export type ServiceInfo = {
|
||||
uptime: number;
|
||||
endpoints: {
|
||||
openai?: string;
|
||||
openai2?: string;
|
||||
anthropic?: string;
|
||||
"anthropic-claude-3"?: string;
|
||||
"google-ai"?: string;
|
||||
"mistral-ai"?: string;
|
||||
"aws"?: string;
|
||||
gcp?: string;
|
||||
aws?: string;
|
||||
azure?: string;
|
||||
"openai-image"?: string;
|
||||
"azure-image"?: string;
|
||||
@@ -114,7 +114,6 @@ export type ServiceInfo = {
|
||||
} & { [f in OpenAIModelFamily]?: OpenAIInfo }
|
||||
& { [f in AnthropicModelFamily]?: AnthropicInfo; }
|
||||
& { [f in AwsBedrockModelFamily]?: AwsInfo }
|
||||
& { [f in GcpModelFamily]?: GcpInfo }
|
||||
& { [f in AzureOpenAIModelFamily]?: BaseFamilyInfo; }
|
||||
& { [f in GoogleAIModelFamily]?: BaseFamilyInfo }
|
||||
& { [f in MistralAIModelFamily]?: BaseFamilyInfo };
|
||||
@@ -137,6 +136,7 @@ export type ServiceInfo = {
|
||||
const SERVICE_ENDPOINTS: { [s in LLMService]: Record<string, string> } = {
|
||||
openai: {
|
||||
openai: `%BASE%/openai`,
|
||||
openai2: `%BASE%/openai/turbo-instruct`,
|
||||
"openai-image": `%BASE%/openai-image`,
|
||||
},
|
||||
anthropic: {
|
||||
@@ -149,11 +149,7 @@ const SERVICE_ENDPOINTS: { [s in LLMService]: Record<string, string> } = {
|
||||
"mistral-ai": `%BASE%/mistral-ai`,
|
||||
},
|
||||
aws: {
|
||||
"aws-claude": `%BASE%/aws/claude`,
|
||||
"aws-mistral": `%BASE%/aws/mistral`,
|
||||
},
|
||||
gcp: {
|
||||
gcp: `%BASE%/gcp/claude`,
|
||||
aws: `%BASE%/aws/claude`,
|
||||
},
|
||||
azure: {
|
||||
azure: `%BASE%/azure/openai`,
|
||||
@@ -161,7 +157,7 @@ const SERVICE_ENDPOINTS: { [s in LLMService]: Record<string, string> } = {
|
||||
},
|
||||
};
|
||||
|
||||
const familyStats = new Map<ModelAggregateKey, number>();
|
||||
const modelStats = new Map<ModelAggregateKey, number>();
|
||||
const serviceStats = new Map<keyof AllStats, number>();
|
||||
|
||||
let cachedInfo: ServiceInfo | undefined;
|
||||
@@ -178,7 +174,7 @@ export function buildInfo(baseUrl: string, forAdmin = false): ServiceInfo {
|
||||
.concat("turbo")
|
||||
);
|
||||
|
||||
familyStats.clear();
|
||||
modelStats.clear();
|
||||
serviceStats.clear();
|
||||
keys.forEach(addKeyToAggregates);
|
||||
|
||||
@@ -297,102 +293,131 @@ function increment<T extends keyof AllStats | ModelAggregateKey>(
|
||||
) {
|
||||
map.set(key, (map.get(key) || 0) + delta);
|
||||
}
|
||||
const addToService = increment.bind(null, serviceStats);
|
||||
const addToFamily = increment.bind(null, familyStats);
|
||||
|
||||
function addKeyToAggregates(k: KeyPoolKey) {
|
||||
addToService("proompts", k.promptCount);
|
||||
addToService("openai__keys", k.service === "openai" ? 1 : 0);
|
||||
addToService("anthropic__keys", k.service === "anthropic" ? 1 : 0);
|
||||
addToService("google-ai__keys", k.service === "google-ai" ? 1 : 0);
|
||||
addToService("mistral-ai__keys", k.service === "mistral-ai" ? 1 : 0);
|
||||
addToService("aws__keys", k.service === "aws" ? 1 : 0);
|
||||
addToService("gcp__keys", k.service === "gcp" ? 1 : 0);
|
||||
addToService("azure__keys", k.service === "azure" ? 1 : 0);
|
||||
increment(serviceStats, "proompts", k.promptCount);
|
||||
increment(serviceStats, "openai__keys", k.service === "openai" ? 1 : 0);
|
||||
increment(serviceStats, "anthropic__keys", k.service === "anthropic" ? 1 : 0);
|
||||
increment(serviceStats, "google-ai__keys", k.service === "google-ai" ? 1 : 0);
|
||||
increment(
|
||||
serviceStats,
|
||||
"mistral-ai__keys",
|
||||
k.service === "mistral-ai" ? 1 : 0
|
||||
);
|
||||
increment(serviceStats, "aws__keys", k.service === "aws" ? 1 : 0);
|
||||
increment(serviceStats, "azure__keys", k.service === "azure" ? 1 : 0);
|
||||
|
||||
let sumTokens = 0;
|
||||
let sumCost = 0;
|
||||
|
||||
const incrementGenericFamilyStats = (f: ModelFamily) => {
|
||||
const tokens = (k as any)[`${f}Tokens`];
|
||||
sumTokens += tokens;
|
||||
sumCost += getTokenCostUsd(f, tokens);
|
||||
addToFamily(`${f}__tokens`, tokens);
|
||||
addToFamily(`${f}__revoked`, k.isRevoked ? 1 : 0);
|
||||
addToFamily(`${f}__active`, k.isDisabled ? 0 : 1);
|
||||
};
|
||||
|
||||
switch (k.service) {
|
||||
case "openai":
|
||||
if (!keyIsOpenAIKey(k)) throw new Error("Invalid key type");
|
||||
addToService("openai__uncheckedKeys", Boolean(k.lastChecked) ? 0 : 1);
|
||||
k.modelFamilies.forEach((f) => {
|
||||
incrementGenericFamilyStats(f);
|
||||
addToFamily(`${f}__trial`, k.isTrial ? 1 : 0);
|
||||
addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
|
||||
});
|
||||
break;
|
||||
case "anthropic":
|
||||
if (!keyIsAnthropicKey(k)) throw new Error("Invalid key type");
|
||||
addToService("anthropic__uncheckedKeys", Boolean(k.lastChecked) ? 0 : 1);
|
||||
k.modelFamilies.forEach((f) => {
|
||||
incrementGenericFamilyStats(f);
|
||||
addToFamily(`${f}__trial`, k.tier === "free" ? 1 : 0);
|
||||
addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
|
||||
addToFamily(`${f}__pozzed`, k.isPozzed ? 1 : 0);
|
||||
});
|
||||
break;
|
||||
increment(
|
||||
serviceStats,
|
||||
"openai__uncheckedKeys",
|
||||
Boolean(k.lastChecked) ? 0 : 1
|
||||
);
|
||||
|
||||
k.modelFamilies.forEach((f) => {
|
||||
const tokens = k[`${f}Tokens`];
|
||||
sumTokens += tokens;
|
||||
sumCost += getTokenCostUsd(f, tokens);
|
||||
increment(modelStats, `${f}__tokens`, tokens);
|
||||
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
|
||||
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
|
||||
increment(modelStats, `${f}__trial`, k.isTrial ? 1 : 0);
|
||||
increment(modelStats, `${f}__overQuota`, k.isOverQuota ? 1 : 0);
|
||||
});
|
||||
break;
|
||||
case "azure":
|
||||
if (!keyIsAzureKey(k)) throw new Error("Invalid key type");
|
||||
k.modelFamilies.forEach((f) => {
|
||||
const tokens = k[`${f}Tokens`];
|
||||
sumTokens += tokens;
|
||||
sumCost += getTokenCostUsd(f, tokens);
|
||||
increment(modelStats, `${f}__tokens`, tokens);
|
||||
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
|
||||
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
|
||||
});
|
||||
break;
|
||||
case "anthropic": {
|
||||
if (!keyIsAnthropicKey(k)) throw new Error("Invalid key type");
|
||||
k.modelFamilies.forEach((f) => {
|
||||
const tokens = k[`${f}Tokens`];
|
||||
sumTokens += tokens;
|
||||
sumCost += getTokenCostUsd(f, tokens);
|
||||
increment(modelStats, `${f}__tokens`, tokens);
|
||||
increment(modelStats, `${f}__trial`, k.tier === "free" ? 1 : 0);
|
||||
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
|
||||
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
|
||||
increment(modelStats, `${f}__overQuota`, k.isOverQuota ? 1 : 0);
|
||||
increment(modelStats, `${f}__pozzed`, k.isPozzed ? 1 : 0);
|
||||
});
|
||||
increment(
|
||||
serviceStats,
|
||||
"anthropic__uncheckedKeys",
|
||||
Boolean(k.lastChecked) ? 0 : 1
|
||||
);
|
||||
break;
|
||||
}
|
||||
case "google-ai": {
|
||||
if (!keyIsGoogleAIKey(k)) throw new Error("Invalid key type");
|
||||
const family = "gemini-pro";
|
||||
sumTokens += k["gemini-proTokens"];
|
||||
sumCost += getTokenCostUsd(family, k["gemini-proTokens"]);
|
||||
increment(modelStats, `${family}__active`, k.isDisabled ? 0 : 1);
|
||||
increment(modelStats, `${family}__revoked`, k.isRevoked ? 1 : 0);
|
||||
increment(modelStats, `${family}__tokens`, k["gemini-proTokens"]);
|
||||
break;
|
||||
}
|
||||
case "mistral-ai": {
|
||||
if (!keyIsMistralAIKey(k)) throw new Error("Invalid key type");
|
||||
k.modelFamilies.forEach((f) => {
|
||||
const tokens = k[`${f}Tokens`];
|
||||
sumTokens += tokens;
|
||||
sumCost += getTokenCostUsd(f, tokens);
|
||||
increment(modelStats, `${f}__tokens`, tokens);
|
||||
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
|
||||
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
|
||||
});
|
||||
break;
|
||||
}
|
||||
case "aws": {
|
||||
if (!keyIsAwsKey(k)) throw new Error("Invalid key type");
|
||||
k.modelFamilies.forEach(incrementGenericFamilyStats);
|
||||
if (!k.isDisabled) {
|
||||
// Don't add revoked keys to available AWS variants
|
||||
k.modelIds.forEach((id) => {
|
||||
if (id.includes("claude-3-sonnet")) {
|
||||
addToFamily(`aws-claude__awsSonnet3`, 1);
|
||||
} else if (id.includes("claude-3-5-sonnet")) {
|
||||
addToFamily(`aws-claude__awsSonnet3_5`, 1);
|
||||
} else if (id.includes("claude-3-haiku")) {
|
||||
addToFamily(`aws-claude__awsHaiku`, 1);
|
||||
} else if (id.includes("claude-v2")) {
|
||||
addToFamily(`aws-claude__awsClaude2`, 1);
|
||||
}
|
||||
});
|
||||
}
|
||||
k.modelFamilies.forEach((f) => {
|
||||
const tokens = k[`${f}Tokens`];
|
||||
sumTokens += tokens;
|
||||
sumCost += getTokenCostUsd(f, tokens);
|
||||
increment(modelStats, `${f}__tokens`, tokens);
|
||||
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
|
||||
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
|
||||
});
|
||||
increment(modelStats, `aws-claude__awsSonnet`, k.sonnetEnabled ? 1 : 0);
|
||||
increment(modelStats, `aws-claude__awsHaiku`, k.haikuEnabled ? 1 : 0);
|
||||
|
||||
// Ignore revoked keys for aws logging stats, but include keys where the
|
||||
// logging status is unknown.
|
||||
const countAsLogged =
|
||||
k.lastChecked && !k.isDisabled && k.awsLoggingStatus === "enabled";
|
||||
addToFamily(`aws-claude__awsLogged`, countAsLogged ? 1 : 0);
|
||||
increment(modelStats, `aws-claude__awsLogged`, countAsLogged ? 1 : 0);
|
||||
break;
|
||||
}
|
||||
case "gcp":
|
||||
if (!keyIsGcpKey(k)) throw new Error("Invalid key type");
|
||||
k.modelFamilies.forEach(incrementGenericFamilyStats);
|
||||
// TODO: add modelIds to GcpKey
|
||||
break;
|
||||
// These services don't have any additional stats to track.
|
||||
case "azure":
|
||||
case "google-ai":
|
||||
case "mistral-ai":
|
||||
k.modelFamilies.forEach(incrementGenericFamilyStats);
|
||||
break;
|
||||
default:
|
||||
assertNever(k.service);
|
||||
}
|
||||
|
||||
addToService("tokens", sumTokens);
|
||||
addToService("tokenCost", sumCost);
|
||||
increment(serviceStats, "tokens", sumTokens);
|
||||
increment(serviceStats, "tokenCost", sumCost);
|
||||
}
|
||||
|
||||
function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
|
||||
const tokens = familyStats.get(`${family}__tokens`) || 0;
|
||||
const tokens = modelStats.get(`${family}__tokens`) || 0;
|
||||
const cost = getTokenCostUsd(family, tokens);
|
||||
let info: BaseFamilyInfo & OpenAIInfo & AnthropicInfo & AwsInfo & GcpInfo = {
|
||||
let info: BaseFamilyInfo & OpenAIInfo & AnthropicInfo & AwsInfo = {
|
||||
usage: `${prettyTokens(tokens)} tokens${getCostSuffix(cost)}`,
|
||||
activeKeys: familyStats.get(`${family}__active`) || 0,
|
||||
revokedKeys: familyStats.get(`${family}__revoked`) || 0,
|
||||
activeKeys: modelStats.get(`${family}__active`) || 0,
|
||||
revokedKeys: modelStats.get(`${family}__revoked`) || 0,
|
||||
};
|
||||
|
||||
// Add service-specific stats to the info object.
|
||||
@@ -400,8 +425,8 @@ function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
|
||||
const service = MODEL_FAMILY_SERVICE[family];
|
||||
switch (service) {
|
||||
case "openai":
|
||||
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
|
||||
info.trialKeys = familyStats.get(`${family}__trial`) || 0;
|
||||
info.overQuotaKeys = modelStats.get(`${family}__overQuota`) || 0;
|
||||
info.trialKeys = modelStats.get(`${family}__trial`) || 0;
|
||||
|
||||
// Delete trial/revoked keys for non-turbo families.
|
||||
// Trials are turbo 99% of the time, and if a key is invalid we don't
|
||||
@@ -412,25 +437,15 @@ function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
|
||||
}
|
||||
break;
|
||||
case "anthropic":
|
||||
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
|
||||
info.trialKeys = familyStats.get(`${family}__trial`) || 0;
|
||||
info.prefilledKeys = familyStats.get(`${family}__pozzed`) || 0;
|
||||
info.overQuotaKeys = modelStats.get(`${family}__overQuota`) || 0;
|
||||
info.trialKeys = modelStats.get(`${family}__trial`) || 0;
|
||||
info.prefilledKeys = modelStats.get(`${family}__pozzed`) || 0;
|
||||
break;
|
||||
case "aws":
|
||||
if (family === "aws-claude") {
|
||||
const logged = familyStats.get(`${family}__awsLogged`) || 0;
|
||||
const variants = new Set<string>();
|
||||
if (familyStats.get(`${family}__awsClaude2`) || 0)
|
||||
variants.add("claude2");
|
||||
if (familyStats.get(`${family}__awsSonnet3`) || 0)
|
||||
variants.add("sonnet3");
|
||||
if (familyStats.get(`${family}__awsSonnet3_5`) || 0)
|
||||
variants.add("sonnet3.5");
|
||||
if (familyStats.get(`${family}__awsHaiku`) || 0)
|
||||
variants.add("haiku");
|
||||
info.enabledVariants = variants.size
|
||||
? `${Array.from(variants).join(",")}`
|
||||
: undefined;
|
||||
info.sonnetKeys = modelStats.get(`${family}__awsSonnet`) || 0;
|
||||
info.haikuKeys = modelStats.get(`${family}__awsHaiku`) || 0;
|
||||
const logged = modelStats.get(`${family}__awsLogged`) || 0;
|
||||
if (logged > 0) {
|
||||
info.privacy = config.allowAwsLogging
|
||||
? `AWS logging verification inactive. Prompts could be logged.`
|
||||
@@ -438,12 +453,6 @@ function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
|
||||
}
|
||||
}
|
||||
break;
|
||||
case "gcp":
|
||||
if (family === "gcp-claude") {
|
||||
// TODO: implement
|
||||
info.enabledVariants = "not implemented";
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -63,12 +63,7 @@ export const AnthropicV1MessagesSchema = AnthropicV1BaseSchema.merge(
|
||||
.number()
|
||||
.int()
|
||||
.transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
|
||||
system: z
|
||||
.union([
|
||||
z.string(),
|
||||
z.array(z.object({ type: z.literal("text"), text: z.string() })),
|
||||
])
|
||||
.optional(),
|
||||
system: z.string().optional(),
|
||||
})
|
||||
);
|
||||
export type AnthropicChatMessage = z.infer<
|
||||
@@ -82,7 +77,7 @@ function openAIMessagesToClaudeTextPrompt(messages: OpenAIChatMessage[]) {
|
||||
let role: string = m.role;
|
||||
if (role === "assistant") {
|
||||
role = "Assistant";
|
||||
} else if (["system", "developer"].includes(role)) {
|
||||
} else if (role === "system") {
|
||||
role = "System";
|
||||
} else if (role === "user") {
|
||||
role = "Human";
|
||||
@@ -110,6 +105,8 @@ export const transformOpenAIToAnthropicChat: APIFormatTransformer<
|
||||
throw result.error;
|
||||
}
|
||||
|
||||
req.headers["anthropic-version"] = "2023-06-01";
|
||||
|
||||
const { messages, ...rest } = result.data;
|
||||
const { messages: newMessages, system } =
|
||||
openAIMessagesToClaudeChatPrompt(messages);
|
||||
@@ -144,6 +141,8 @@ export const transformOpenAIToAnthropicText: APIFormatTransformer<
|
||||
throw result.error;
|
||||
}
|
||||
|
||||
req.headers["anthropic-version"] = "2023-06-01";
|
||||
|
||||
const { messages, ...rest } = result.data;
|
||||
const prompt = openAIMessagesToClaudeTextPrompt(messages);
|
||||
|
||||
@@ -188,6 +187,8 @@ export const transformAnthropicTextToAnthropicChat: APIFormatTransformer<
|
||||
throw result.error;
|
||||
}
|
||||
|
||||
req.headers["anthropic-version"] = "2023-06-01";
|
||||
|
||||
const { model, max_tokens_to_sample, prompt, ...rest } = result.data;
|
||||
validateAnthropicTextPrompt(prompt);
|
||||
|
||||
@@ -365,7 +366,7 @@ function openAIMessagesToClaudeChatPrompt(messages: OpenAIChatMessage[]): {
|
||||
// Here we will lose the original name if it was a system message, but that
|
||||
// is generally okay because the system message is usually a prompt and not
|
||||
// a character in the chat.
|
||||
const name = ["system", "developer"].includes(msg.role) ? "System" : msg.name?.trim();
|
||||
const name = msg.role === "system" ? "System" : msg.name?.trim();
|
||||
const content = convertOpenAIContent(msg.content);
|
||||
|
||||
// Prepend the display name to the first text content in the current message
|
||||
@@ -395,8 +396,8 @@ function openAIMessagesToClaudeChatPrompt(messages: OpenAIChatMessage[]): {
|
||||
|
||||
function isSystemOpenAIRole(
|
||||
role: OpenAIChatMessage["role"]
|
||||
): role is "developer" | "system" | "function" | "tool" {
|
||||
return ["developer","system", "function", "tool"].includes(role);
|
||||
): role is "system" | "function" | "tool" {
|
||||
return ["system", "function", "tool"].includes(role);
|
||||
}
|
||||
|
||||
function getFirstTextContent(content: OpenAIChatMessage["content"]) {
|
||||
|
||||
@@ -0,0 +1,181 @@
|
||||
import { z } from "zod";
|
||||
import {
|
||||
OPENAI_OUTPUT_MAX,
|
||||
OpenAIV1ChatCompletionSchema,
|
||||
flattenOpenAIMessageContent,
|
||||
} from "./openai";
|
||||
import { APIFormatTransformer } from ".";
|
||||
|
||||
// https://docs.cohere.com/reference/chat
|
||||
export const CohereV1ChatSchema = z
|
||||
.object({
|
||||
message: z.string(),
|
||||
model: z.string().default("command-r-plus"),
|
||||
stream: z.boolean().default(false).optional(),
|
||||
preamble: z.string().optional(),
|
||||
chat_history: z
|
||||
.array(
|
||||
// Either a message from a chat participant, or a past tool call
|
||||
z.union([
|
||||
z.object({
|
||||
role: z.enum(["CHATBOT", "SYSTEM", "USER"]),
|
||||
message: z.string(),
|
||||
tool_calls: z
|
||||
.array(z.object({ name: z.string(), parameters: z.any() }))
|
||||
.optional(),
|
||||
}),
|
||||
z.object({
|
||||
role: z.enum(["TOOL"]),
|
||||
tool_results: z.array(
|
||||
z.object({
|
||||
call: z.object({ name: z.string(), parameters: z.any() }),
|
||||
outputs: z.array(z.any()),
|
||||
})
|
||||
),
|
||||
}),
|
||||
])
|
||||
)
|
||||
.optional(),
|
||||
// Don't allow conversation_id as it causes calls to be stateful and we don't
|
||||
// offer guarantees about which key a user's request will be routed to.
|
||||
conversation_id: z.literal(undefined).optional(),
|
||||
prompt_truncation: z
|
||||
.enum(["AUTO", "AUTO_PRESERVE_ORDER", "OFF"])
|
||||
.optional(),
|
||||
/*
|
||||
Supporting RAG is complex because documents can be arbitrarily large and
|
||||
must have embeddings generated, which incurs a cost that is not trivial to
|
||||
estimate. We don't support it for now.
|
||||
connectors: z
|
||||
.array(
|
||||
z.object({
|
||||
id: z.string(),
|
||||
user_access_token: z.string().optional(),
|
||||
continue_on_failure: z.boolean().default(false).optional(),
|
||||
options: z.any().optional(),
|
||||
})
|
||||
)
|
||||
.optional(),
|
||||
search_queries_only: z.boolean().default(false).optional(),
|
||||
documents: z
|
||||
.array(
|
||||
z.object({
|
||||
id: z.string().optional(),
|
||||
title: z.string().optional(),
|
||||
text: z.string(),
|
||||
_excludes: z.array(z.string()).optional(),
|
||||
})
|
||||
)
|
||||
.optional(),
|
||||
citation_quality: z.enum(["accurate", "fast"]).optional(),
|
||||
*/
|
||||
temperature: z.number().default(0.3).optional(),
|
||||
max_tokens: z
|
||||
.number()
|
||||
.int()
|
||||
.nullish()
|
||||
.default(Math.min(OPENAI_OUTPUT_MAX, 4096))
|
||||
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
|
||||
max_input_tokens: z.number().int().optional(),
|
||||
k: z.number().int().min(0).max(500).default(0).optional(),
|
||||
p: z.number().min(0.01).max(0.99).default(0.75).optional(),
|
||||
seed: z.number().int().optional(),
|
||||
stop_sequences: z.array(z.string()).max(5).optional(),
|
||||
frequency_penalty: z.number().min(0).max(1).default(0).optional(),
|
||||
presence_penalty: z.number().min(0).max(1).default(0).optional(),
|
||||
tools: z
|
||||
.array(
|
||||
z.object({
|
||||
name: z.string(),
|
||||
description: z.string(),
|
||||
parameter_definitions: z.record(
|
||||
z.object({
|
||||
description: z.string().optional(),
|
||||
type: z.string(),
|
||||
required: z.boolean().optional().default(false),
|
||||
})
|
||||
),
|
||||
})
|
||||
)
|
||||
.optional(),
|
||||
tool_results: z
|
||||
.array(
|
||||
z.object({
|
||||
call: z.object({
|
||||
name: z.string(),
|
||||
parameters: z.record(z.any()),
|
||||
}),
|
||||
outputs: z.array(z.record(z.any())),
|
||||
})
|
||||
)
|
||||
.optional(),
|
||||
// We always force single step to avoid stateful calls or expensive multi-step
|
||||
// generations when tools are involved.
|
||||
force_single_step: z.literal(true).default(true).optional(),
|
||||
})
|
||||
.strip();
|
||||
export type CohereChatMessage = NonNullable<
|
||||
z.infer<typeof CohereV1ChatSchema>["chat_history"]
|
||||
>[number];
|
||||
|
||||
export function flattenCohereMessageContent(
|
||||
message: CohereChatMessage
|
||||
): string {
|
||||
return message.role === "TOOL"
|
||||
? message.tool_results.map((r) => r.outputs[0].text).join("\n")
|
||||
: message.message;
|
||||
}
|
||||
|
||||
export const transformOpenAIToCohere: APIFormatTransformer<
|
||||
typeof CohereV1ChatSchema
|
||||
> = async (req) => {
|
||||
const { body } = req;
|
||||
const result = OpenAIV1ChatCompletionSchema.safeParse({
|
||||
...body,
|
||||
model: "gpt-3.5-turbo",
|
||||
});
|
||||
if (!result.success) {
|
||||
req.log.warn(
|
||||
{ issues: result.error.issues, body },
|
||||
"Invalid OpenAI-to-Cohere request"
|
||||
);
|
||||
throw result.error;
|
||||
}
|
||||
|
||||
const { messages, ...rest } = result.data;
|
||||
// Final OAI message becomes the `message` field in Cohere
|
||||
const message = messages[messages.length - 1];
|
||||
// If the first message has system role, use it as preamble.
|
||||
const hasSystemPreamble = messages[0]?.role === "system";
|
||||
const preamble = hasSystemPreamble
|
||||
? flattenOpenAIMessageContent(messages[0].content)
|
||||
: undefined;
|
||||
|
||||
const chatHistory = messages.slice(0, -1).map((m) => {
|
||||
const role: Exclude<CohereChatMessage["role"], "TOOL"> =
|
||||
m.role === "assistant"
|
||||
? "CHATBOT"
|
||||
: m.role === "system"
|
||||
? "SYSTEM"
|
||||
: "USER";
|
||||
const content = flattenOpenAIMessageContent(m.content);
|
||||
const message = m.name ? `${m.name}: ${content}` : content;
|
||||
return { role, message };
|
||||
});
|
||||
|
||||
return {
|
||||
model: rest.model,
|
||||
preamble,
|
||||
chat_history: chatHistory,
|
||||
message: flattenOpenAIMessageContent(message.content),
|
||||
stop_sequences:
|
||||
typeof rest.stop === "string" ? [rest.stop] : rest.stop ?? undefined,
|
||||
max_tokens: rest.max_tokens,
|
||||
temperature: rest.temperature,
|
||||
p: rest.top_p,
|
||||
frequency_penalty: rest.frequency_penalty,
|
||||
presence_penalty: rest.presence_penalty,
|
||||
seed: rest.seed,
|
||||
stream: rest.stream,
|
||||
};
|
||||
};
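A hypothetical before/after for the transform above. Note that, per the code, a leading system message becomes the preamble and also remains in chat_history as a SYSTEM turn, while the final message is split off into the message field:

const openAIMessages = [
  { role: "system", content: "You are terse." },
  { role: "user", content: "Hi" },
  { role: "assistant", content: "Hello." },
  { role: "user", content: "Weather?" },
];
// transforms to roughly:
const cohereBody = {
  preamble: "You are terse.",
  chat_history: [
    { role: "SYSTEM", message: "You are terse." },
    { role: "USER", message: "Hi" },
    { role: "CHATBOT", message: "Hello." },
  ],
  message: "Weather?",
};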
|
||||
@@ -5,69 +5,32 @@ import {
|
||||
} from "./openai";
|
||||
import { APIFormatTransformer } from "./index";
|
||||
|
||||
const GoogleAIV1ContentSchema = z.object({
|
||||
parts: z
|
||||
.union([
|
||||
z.array(z.object({ text: z.string() })),
|
||||
z.object({ text: z.string() }),
|
||||
])
|
||||
// Google allows parts to be an array or a single object, which is really
|
||||
// annoying for downstream code. We will coerce it to an array here.
|
||||
.transform((val) => (Array.isArray(val) ? val : [val])),
|
||||
// TODO: add other media types
|
||||
role: z.enum(["user", "model"]).optional(),
|
||||
});
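A standalone sketch of the coercion above: accept either a single part or a list of parts, and always hand downstream code an array:

import { z } from "zod";

const Part = z.object({ text: z.string() });
const Parts = z
  .union([z.array(Part), Part])
  .transform((v) => (Array.isArray(v) ? v : [v]));

Parts.parse({ text: "hi" });   // => [{ text: "hi" }]
Parts.parse([{ text: "hi" }]); // => [{ text: "hi" }]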
|
||||
|
||||
const SafetySettingsSchema = z
|
||||
.array(
|
||||
z.object({
|
||||
category: z.enum([
|
||||
"HARM_CATEGORY_HARASSMENT",
|
||||
"HARM_CATEGORY_HATE_SPEECH",
|
||||
"HARM_CATEGORY_SEXUALLY_EXPLICIT",
|
||||
"HARM_CATEGORY_DANGEROUS_CONTENT",
|
||||
"HARM_CATEGORY_CIVIC_INTEGRITY",
|
||||
]),
|
||||
threshold: z.enum([
|
||||
"OFF",
|
||||
"BLOCK_NONE",
|
||||
"BLOCK_ONLY_HIGH",
|
||||
"BLOCK_MEDIUM_AND_ABOVE",
|
||||
"BLOCK_LOW_AND_ABOVE",
|
||||
"HARM_BLOCK_THRESHOLD_UNSPECIFIED",
|
||||
]),
|
||||
})
|
||||
)
|
||||
.optional();
|
||||
|
||||
// https://developers.generativeai.google/api/rest/generativelanguage/models/generateContent
|
||||
export const GoogleAIV1GenerateContentSchema = z
|
||||
.object({
|
||||
model: z.string().max(100), // actually specified in the path, but we need it for the router
|
||||
stream: z.boolean().optional().default(false), // also used for router
|
||||
contents: z.array(GoogleAIV1ContentSchema),
|
||||
tools: z.array(z.object({})).max(0).optional(),
|
||||
safetySettings: SafetySettingsSchema,
|
||||
systemInstruction: GoogleAIV1ContentSchema.optional(),
|
||||
// quick fix for SillyTavern, which uses camel case field names for everything
|
||||
// except for system_instruction where it randomly uses snake case.
|
||||
// The Google API evidently accepts either case.
|
||||
system_instruction: GoogleAIV1ContentSchema.optional(),
|
||||
generationConfig: z
|
||||
.object({
|
||||
temperature: z.number().min(0).max(2).optional(),
|
||||
maxOutputTokens: z.coerce
|
||||
.number()
|
||||
.int()
|
||||
.optional()
|
||||
.default(16)
|
||||
.transform((v) => Math.min(v, 4096)), // TODO: Add config
|
||||
candidateCount: z.literal(1).optional(),
|
||||
topP: z.number().min(0).max(1).optional(),
|
||||
topK: z.number().min(1).max(40).optional(),
|
||||
stopSequences: z.array(z.string().max(500)).max(5).optional(),
|
||||
contents: z.array(
|
||||
z.object({
|
||||
parts: z.array(z.object({ text: z.string() })),
|
||||
role: z.enum(["user", "model"]),
|
||||
})
|
||||
.default({}),
|
||||
),
|
||||
tools: z.array(z.object({})).max(0).optional(),
|
||||
safetySettings: z.array(z.object({})).max(0).optional(),
|
||||
generationConfig: z.object({
|
||||
temperature: z.number().optional(),
|
||||
maxOutputTokens: z.coerce
|
||||
.number()
|
||||
.int()
|
||||
.optional()
|
||||
.default(16)
|
||||
.transform((v) => Math.min(v, 1024)), // TODO: Add config
|
||||
candidateCount: z.literal(1).optional(),
|
||||
topP: z.number().optional(),
|
||||
topK: z.number().optional(),
|
||||
stopSequences: z.array(z.string().max(500)).max(5).optional(),
|
||||
}),
|
||||
})
|
||||
.strip();
|
||||
export type GoogleAIChatMessage = z.infer<
|
||||
@@ -140,7 +103,7 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
|
||||
stops = [...new Set(stops)].slice(0, 5);
|
||||
|
||||
return {
|
||||
model: req.body.model,
|
||||
model: "gemini-pro",
|
||||
stream: rest.stream,
|
||||
contents,
|
||||
tools: [],
|
||||
@@ -156,7 +119,6 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
|
||||
{ category: "HARM_CATEGORY_HATE_SPEECH", threshold: "BLOCK_NONE" },
|
||||
{ category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold: "BLOCK_NONE" },
|
||||
{ category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "BLOCK_NONE" },
|
||||
{ category: "HARM_CATEGORY_CIVIC_INTEGRITY", threshold: "BLOCK_NONE" },
|
||||
],
|
||||
};
|
||||
};
|
||||
|
||||
@@ -21,11 +21,8 @@ import {
|
||||
GoogleAIV1GenerateContentSchema,
|
||||
transformOpenAIToGoogleAI,
|
||||
} from "./google-ai";
|
||||
import {
|
||||
MistralAIV1ChatCompletionsSchema,
|
||||
MistralAIV1TextCompletionsSchema,
|
||||
transformMistralChatToText,
|
||||
} from "./mistral-ai";
|
||||
import { MistralAIV1ChatCompletionsSchema } from "./mistral-ai";
|
||||
import { CohereV1ChatSchema, transformOpenAIToCohere } from "./cohere";
|
||||
|
||||
export { OpenAIChatMessage } from "./openai";
|
||||
export {
|
||||
@@ -37,15 +34,29 @@ export {
|
||||
export { GoogleAIChatMessage } from "./google-ai";
|
||||
export { MistralAIChatMessage } from "./mistral-ai";
|
||||
|
||||
/** Represents a pair of API formats that can be transformed between. */
|
||||
type APIPair = `${APIFormat}->${APIFormat}`;
|
||||
/** Represents a map of API format pairs to transformer functions. */
|
||||
type TransformerMap = {
|
||||
[key in APIPair]?: APIFormatTransformer<any>;
|
||||
};
|
||||
|
||||
/**
|
||||
* Represents a transformer function that takes a Request and returns a Promise
|
||||
* resolving to a value of the specified Zod schema type.
|
||||
*
|
||||
* @template Z The Zod schema type to transform the request into (from api-schemas).
|
||||
* @param req The incoming Request to transform.
|
||||
* @returns A Promise resolving to the transformed request body.
|
||||
*/
|
||||
export type APIFormatTransformer<Z extends z.ZodType<any, any>> = (
|
||||
req: Request
|
||||
) => Promise<z.infer<Z>>;
|
||||
|
||||
/**
|
||||
* Specifies possible translations between API formats and the corresponding
|
||||
* transformer functions to apply them.
|
||||
*/
|
||||
export const API_REQUEST_TRANSFORMERS: TransformerMap = {
|
||||
"anthropic-text->anthropic-chat": transformAnthropicTextToAnthropicChat,
|
||||
"openai->anthropic-chat": transformOpenAIToAnthropicChat,
|
||||
@@ -53,9 +64,12 @@ export const API_REQUEST_TRANSFORMERS: TransformerMap = {
|
||||
"openai->openai-text": transformOpenAIToOpenAIText,
|
||||
"openai->openai-image": transformOpenAIToOpenAIImage,
|
||||
"openai->google-ai": transformOpenAIToGoogleAI,
|
||||
"mistral-ai->mistral-text": transformMistralChatToText,
|
||||
"openai->cohere-chat": transformOpenAIToCohere,
|
||||
};
|
||||
|
||||
/**
|
||||
* Specifies the schema for each API format to validate incoming requests.
|
||||
*/
|
||||
export const API_REQUEST_VALIDATORS: Record<APIFormat, z.ZodSchema<any>> = {
|
||||
"anthropic-chat": AnthropicV1MessagesSchema,
|
||||
"anthropic-text": AnthropicV1TextSchema,
|
||||
@@ -64,5 +78,5 @@ export const API_REQUEST_VALIDATORS: Record<APIFormat, z.ZodSchema<any>> = {
|
||||
"openai-image": OpenAIV1ImagesGenerationSchema,
|
||||
"google-ai": GoogleAIV1GenerateContentSchema,
|
||||
"mistral-ai": MistralAIV1ChatCompletionsSchema,
|
||||
"mistral-text": MistralAIV1TextCompletionsSchema,
|
||||
"cohere-chat": CohereV1ChatSchema,
|
||||
};
|
||||
|
||||
@@ -1,34 +1,15 @@
|
||||
import { z } from "zod";
|
||||
import { OPENAI_OUTPUT_MAX } from "./openai";
|
||||
import { Template } from "@huggingface/jinja";
|
||||
import { APIFormatTransformer } from "./index";
|
||||
import { logger } from "../../logger";
|
||||
|
||||
const MistralChatMessageSchema = z.object({
|
||||
role: z.enum(["system", "user", "assistant", "tool"]), // TODO: implement tools
|
||||
content: z.string(),
|
||||
prefix: z.boolean().optional(),
|
||||
});
|
||||
|
||||
const MistralMessagesSchema = z.array(MistralChatMessageSchema).refine(
|
||||
(input) => {
|
||||
const prefixIdx = input.findIndex((msg) => Boolean(msg.prefix));
|
||||
if (prefixIdx === -1) return true; // no prefix messages
|
||||
const lastIdx = input.length - 1;
|
||||
const lastMsg = input[lastIdx];
|
||||
return prefixIdx === lastIdx && lastMsg.role === "assistant";
|
||||
},
|
||||
{
|
||||
message:
|
||||
"`prefix` can only be set to `true` on the last message, and only for an assistant message.",
|
||||
}
|
||||
);
|
||||
|
||||
// https://docs.mistral.ai/api#operation/createChatCompletion
|
||||
const BaseMistralAIV1CompletionsSchema = z.object({
|
||||
export const MistralAIV1ChatCompletionsSchema = z.object({
|
||||
model: z.string(),
|
||||
messages: MistralMessagesSchema.optional(),
|
||||
prompt: z.string().optional(),
|
||||
messages: z.array(
|
||||
z.object({
|
||||
role: z.enum(["system", "user", "assistant"]),
|
||||
content: z.string(),
|
||||
})
|
||||
),
|
||||
temperature: z.number().optional().default(0.7),
|
||||
top_p: z.number().optional().default(1),
|
||||
max_tokens: z.coerce
|
||||
@@ -37,50 +18,12 @@ const BaseMistralAIV1CompletionsSchema = z.object({
|
||||
.nullish()
|
||||
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
|
||||
stream: z.boolean().optional().default(false),
|
||||
// Mistral docs say that `stop` can be a string or array but AWS Mistral
|
||||
// blows up if a string is passed. We must convert it to an array.
|
||||
stop: z
|
||||
.union([z.string(), z.array(z.string())])
|
||||
.optional()
|
||||
.default([])
|
||||
.transform((v) => (Array.isArray(v) ? v : [v])),
|
||||
random_seed: z.number().int().min(0).optional(),
|
||||
response_format: z
|
||||
.object({ type: z.enum(["text", "json_object"]) })
|
||||
.optional(),
|
||||
safe_prompt: z.boolean().optional().default(false),
|
||||
random_seed: z.number().int().optional(),
|
||||
});
|
||||
|
||||
export const MistralAIV1ChatCompletionsSchema =
|
||||
BaseMistralAIV1CompletionsSchema.and(
|
||||
z.object({ messages: MistralMessagesSchema })
|
||||
);
|
||||
export const MistralAIV1TextCompletionsSchema =
|
||||
BaseMistralAIV1CompletionsSchema.and(z.object({ prompt: z.string() }));
|
||||
|
||||
/*
|
||||
Slightly more strict version that only allows a subset of the parameters. AWS
|
||||
Mistral helpfully returns no details if unsupported parameters are passed so
|
||||
this list comes from trial and error as of 2024-08-12.
|
||||
*/
|
||||
const BaseAWSMistralAIV1CompletionsSchema =
|
||||
BaseMistralAIV1CompletionsSchema.pick({
|
||||
temperature: true,
|
||||
top_p: true,
|
||||
max_tokens: true,
|
||||
stop: true,
|
||||
random_seed: true,
|
||||
// response_format: true,
|
||||
// safe_prompt: true,
|
||||
}).strip();
|
||||
export const AWSMistralV1ChatCompletionsSchema =
|
||||
BaseAWSMistralAIV1CompletionsSchema.and(
|
||||
z.object({ messages: MistralMessagesSchema })
|
||||
);
|
||||
export const AWSMistralV1TextCompletionsSchema =
|
||||
BaseAWSMistralAIV1CompletionsSchema.and(z.object({ prompt: z.string() }));
|
||||
|
||||
export type MistralAIChatMessage = z.infer<typeof MistralChatMessageSchema>;
|
||||
export type MistralAIChatMessage = z.infer<
|
||||
typeof MistralAIV1ChatCompletionsSchema
|
||||
>["messages"][0];
|
||||
|
||||
export function fixMistralPrompt(
|
||||
messages: MistralAIChatMessage[]
|
||||
@@ -88,11 +31,12 @@ export function fixMistralPrompt(
|
||||
// Mistral uses OpenAI format but has some additional requirements:
|
||||
// - Only one system message per request, and it must be the first message if
|
||||
// present.
|
||||
// - Final message must be a user message, unless it has `prefix: true`.
|
||||
// - Final message must be a user message.
|
||||
// - Cannot have multiple messages from the same role in a row.
|
||||
// While frontends should be able to handle this, we can fix it here in the
|
||||
// meantime.
|
||||
const fixed = messages.reduce<MistralAIChatMessage[]>((acc, msg) => {
|
||||
|
||||
return messages.reduce<MistralAIChatMessage[]>((acc, msg) => {
|
||||
if (acc.length === 0) {
|
||||
acc.push(msg);
|
||||
return acc;
|
||||
@@ -113,54 +57,4 @@ export function fixMistralPrompt(
|
||||
}
|
||||
return acc;
|
||||
}, []);
|
||||
|
||||
// If the last message is an assistant message, mark it as a prefix. An
|
||||
// assistant message at the end of the conversation without `prefix: true`
|
||||
// results in an error.
|
||||
if (fixed[fixed.length - 1].role === "assistant") {
|
||||
fixed[fixed.length - 1].prefix = true;
|
||||
}
|
||||
return fixed;
|
||||
}
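An illustration of the trailing-assistant fix above, with hypothetical message content. Without `prefix: true`, Mistral rejects a conversation that ends on an assistant turn; after fixMistralPrompt runs, the final message is marked as a prefix:

const messages = [
  { role: "user" as const, content: "Continue this story:" },
  { role: "assistant" as const, content: "Once upon a time" },
];
// => [
//   { role: "user", content: "Continue this story:" },
//   { role: "assistant", content: "Once upon a time", prefix: true },
// ]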
|
||||
|
||||
let jinjaTemplate: Template;
|
||||
let renderTemplate: (messages: MistralAIChatMessage[]) => string;
|
||||
function renderMistralPrompt(messages: MistralAIChatMessage[]) {
|
||||
if (!jinjaTemplate) {
|
||||
logger.warn("Lazy loading mistral chat template...");
|
||||
const { chatTemplate, bosToken, eosToken } =
|
||||
require("./templates/mistral-template").MISTRAL_TEMPLATE;
|
||||
jinjaTemplate = new Template(chatTemplate);
|
||||
renderTemplate = (messages) =>
|
||||
jinjaTemplate.render({
|
||||
messages,
|
||||
bos_token: bosToken,
|
||||
eos_token: eosToken,
|
||||
});
|
||||
}
|
||||
|
||||
return renderTemplate(messages);
|
||||
}
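A minimal sketch of rendering a chat prompt with @huggingface/jinja, as the lazy loader above does. The one-line template here is illustrative, not the vendored Mistral template:

import { Template } from "@huggingface/jinja";

const template = new Template(
  `{{ bos_token }}{% for m in messages %}[{{ m.role }}] {{ m.content }}{% endfor %}`
);
const prompt = template.render({
  messages: [{ role: "user", content: "Hello" }],
  bos_token: "<s>",
});
// => "<s>[user] Hello"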
|
||||
|
||||
/**
|
||||
* Attempts to convert a Mistral chat completions request to a text completions,
|
||||
* using the official prompt template published by Mistral.
|
||||
*/
|
||||
export const transformMistralChatToText: APIFormatTransformer<
|
||||
typeof MistralAIV1TextCompletionsSchema
|
||||
> = async (req) => {
|
||||
const { body } = req;
|
||||
const result = MistralAIV1ChatCompletionsSchema.safeParse(body);
|
||||
if (!result.success) {
|
||||
req.log.warn(
|
||||
{ issues: result.error.issues, body },
|
||||
"Invalid Mistral chat completions request"
|
||||
);
|
||||
throw result.error;
|
||||
}
|
||||
|
||||
const { messages, ...rest } = result.data;
|
||||
const prompt = renderMistralPrompt(messages);
|
||||
|
||||
return { ...rest, prompt, messages: undefined };
|
||||
};
|
||||
|
||||
@@ -21,7 +21,7 @@ export const OpenAIV1ChatCompletionSchema = z
|
||||
model: z.string().max(100),
|
||||
messages: z.array(
|
||||
z.object({
|
||||
role: z.enum(["developer", "system", "user", "assistant", "tool", "function"]),
|
||||
role: z.enum(["system", "user", "assistant", "tool", "function"]),
|
||||
content: z.union([z.string(), OpenAIV1ChatContentArraySchema]),
|
||||
name: z.string().optional(),
|
||||
tool_calls: z.array(z.any()).optional(),
|
||||
@@ -52,15 +52,8 @@ export const OpenAIV1ChatCompletionSchema = z
|
||||
.number()
|
||||
.int()
|
||||
.nullish()
|
||||
.default(Math.min(OPENAI_OUTPUT_MAX, 16384))
|
||||
.default(Math.min(OPENAI_OUTPUT_MAX, 4096))
|
||||
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
|
||||
// max_completion_tokens replaces max_tokens in the OpenAI API.
|
||||
// for backwards compatibility, we accept both and move the value in
|
||||
// max_tokens to max_completion_tokens in proxy middleware.
|
||||
max_completion_tokens: z.coerce
|
||||
.number()
|
||||
.int()
|
||||
.optional(),
|
||||
frequency_penalty: z.number().optional().default(0),
|
||||
presence_penalty: z.number().optional().default(0),
|
||||
logit_bias: z.any().optional(),
|
||||
@@ -78,7 +71,6 @@ export const OpenAIV1ChatCompletionSchema = z
|
||||
tool_choice: z.any().optional(),
|
||||
function_choice: z.any().optional(),
|
||||
response_format: z.any(),
|
||||
reasoning_effort: z.enum(["low", "medium", "high"]).optional(),
|
||||
})
|
||||
// Tool usage must be enabled via config because we currently have no way to
|
||||
// track quota usage for them or enforce limits.
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
export const MISTRAL_TEMPLATE = {
|
||||
bosToken: "<s>",
|
||||
eosToken: "</s>",
|
||||
chatTemplate: `"{%- if messages[0]["role"] == "system" %}
|
||||
{%- set system_message = messages[0]["content"] %}
|
||||
{%- set loop_messages = messages[1:] %}
|
||||
{%- else %}
|
||||
{%- set loop_messages = messages %}
|
||||
{%- endif %}
|
||||
{%- set user_messages = loop_messages | selectattr("role", "equalto", "user") | list %}
|
||||
|
||||
{%- for message in loop_messages %}
|
||||
{%- if (message["role"] == "user") != (loop.index0 % 2 == 0) %}
|
||||
{{- raise_exception("After the optional system message, conversation roles must alternate user/assistant/user/assistant/...") }}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
|
||||
{{- bos_token }}
|
||||
{%- for message in loop_messages %}
|
||||
{%- if message["role"] == "user" %}
|
||||
{%- if loop.last and system_message is defined %}
|
||||
{{- "[INST] " + system_message + "\\n\\n" + message["content"] + "[/INST]" }}
|
||||
{%- else %}
|
||||
{{- "[INST] " + message["content"] + "[/INST]" }}
|
||||
{%- endif %}
|
||||
{%- elif message["role"] == "assistant" %}
|
||||
{%- if loop.last and message.prefix is defined and message.prefix %}
|
||||
{{- " " + message["content"] }}
|
||||
{%- else %}
|
||||
{{- " " + message["content"] + eos_token}}
|
||||
{%- endif %}
|
||||
{%- else %}
|
||||
{{- raise_exception("Only user and assistant roles are supported, with the exception of an initial optional system message!") }}
|
||||
{%- endif %}
|
||||
{%- endfor %}`,
|
||||
};
|
||||
Vendored
-2
@@ -5,7 +5,6 @@ import { Express } from "express-serve-static-core";
|
||||
import { APIFormat, Key } from "./key-management";
|
||||
import { User } from "./users/schema";
|
||||
import { LLMService, ModelFamily } from "./models";
|
||||
import { ProxyReqManager } from "../proxy/middleware/request/proxy-req-manager";
|
||||
|
||||
declare global {
|
||||
namespace Express {
|
||||
@@ -25,7 +24,6 @@ declare global {
|
||||
queueOutTime?: number;
|
||||
onAborted?: () => void;
|
||||
proceed: () => void;
|
||||
changeManager?: ProxyReqManager;
|
||||
heartbeatInterval?: NodeJS.Timeout;
|
||||
monitorInterval?: NodeJS.Timeout;
|
||||
promptTokens?: number;
|
||||
|
||||
@@ -1,14 +1,12 @@
|
||||
import axios from "axios";
|
||||
import express from "express";
|
||||
import { promises as fs } from "fs";
|
||||
import path from "path";
|
||||
import { v4 } from "uuid";
|
||||
import { USER_ASSETS_DIR } from "../../config";
|
||||
import { getAxiosInstance } from "../network";
|
||||
import { addToImageHistory } from "./image-history";
|
||||
import { libSharp } from "./index";
|
||||
|
||||
const axios = getAxiosInstance();
|
||||
|
||||
export type OpenAIImageGenerationResult = {
|
||||
created: number;
|
||||
data: {
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
import type firebase from "firebase-admin";
|
||||
import { config } from "../config";
|
||||
import { getHttpAgents } from "./network";
|
||||
|
||||
let firebaseApp: firebase.app.App | undefined;
|
||||
|
||||
export async function initializeFirebase() {
|
||||
const firebase = await import("firebase-admin");
|
||||
const firebaseKey = Buffer.from(config.firebaseKey!, "base64").toString();
|
||||
const app = firebase.initializeApp({
|
||||
// RTDB doesn't actually seem to use this but respects `WS_PROXY` if set,
|
||||
// so we do that in the network module.
|
||||
httpAgent: getHttpAgents()[0],
|
||||
credential: firebase.credential.cert(JSON.parse(firebaseKey)),
|
||||
databaseURL: config.firebaseRtdbUrl,
|
||||
});
|
||||
|
||||
await app.database().ref("connection-test").set(Date.now());
|
||||
|
||||
firebaseApp = app;
|
||||
}
|
||||
|
||||
export function getFirebaseApp(): firebase.app.App {
|
||||
if (!firebaseApp) {
|
||||
throw new Error("Firebase app not initialized.");
|
||||
}
|
||||
return firebaseApp;
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
/** Module for generating and verifying HMAC signatures. */
|
||||
|
||||
import crypto from "crypto";
|
||||
import { SECRET_SIGNING_KEY } from "../config";
|
||||
|
||||
/**
|
||||
* Generates a HMAC signature for the given message. Optionally salts the
|
||||
* key with a provided string.
|
||||
*/
|
||||
export function signMessage(msg: any, salt: string = ""): string {
|
||||
const hmac = crypto.createHmac("sha256", SECRET_SIGNING_KEY + salt);
|
||||
if (typeof msg === "object") {
|
||||
hmac.update(JSON.stringify(msg));
|
||||
} else {
|
||||
hmac.update(msg);
|
||||
}
|
||||
return hmac.digest("hex");
|
||||
}
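An example use of the signer above, with a hypothetical payload and a stand-in secret. Verifying a signature is just recomputing it; crypto.timingSafeEqual avoids timing leaks when comparing:

import crypto from "crypto";

const SECRET_SIGNING_KEY = "example-secret"; // stand-in for the config value

function signMessage(msg: any, salt: string = ""): string {
  const hmac = crypto.createHmac("sha256", SECRET_SIGNING_KEY + salt);
  hmac.update(typeof msg === "object" ? JSON.stringify(msg) : msg);
  return hmac.digest("hex");
}

const payload = { userId: 42, exp: 1700000000 };
const sig = signMessage(payload, "user-token");
const ok = crypto.timingSafeEqual(
  Buffer.from(sig),
  Buffer.from(signMessage(payload, "user-token"))
); // => true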
|
||||
@@ -1,9 +1,9 @@
|
||||
import { doubleCsrf } from "csrf-csrf";
|
||||
import express from "express";
|
||||
import { config, SECRET_SIGNING_KEY } from "../config";
|
||||
import { config, COOKIE_SECRET } from "../config";
|
||||
|
||||
const { generateToken, doubleCsrfProtection } = doubleCsrf({
|
||||
getSecret: () => SECRET_SIGNING_KEY,
|
||||
getSecret: () => COOKIE_SECRET,
|
||||
cookieName: "csrf",
|
||||
cookieOptions: {
|
||||
sameSite: "strict",
|
||||
|
||||
@@ -7,9 +7,8 @@ import * as userStore from "./users/user-store";
|
||||
export const injectLocals: RequestHandler = (req, res, next) => {
|
||||
// config-related locals
|
||||
const quota = config.tokenQuota;
|
||||
const sumOfQuotas = Object.values(quota).reduce((a, b) => a + b, 0);
|
||||
|
||||
res.locals.quotasEnabled = sumOfQuotas > 0;
|
||||
res.locals.quotasEnabled =
|
||||
quota.turbo > 0 || quota.gpt4 > 0 || quota.claude > 0;
|
||||
res.locals.quota = quota;
|
||||
res.locals.nextQuotaRefresh = userStore.getNextQuotaRefresh();
|
||||
res.locals.persistenceEnabled = config.gatekeeperStore !== "memory";
|
||||
|
||||
@@ -1,10 +1,7 @@
|
||||
import { AxiosError, AxiosResponse } from "axios";
|
||||
import { getAxiosInstance } from "../../network";
|
||||
import axios, { AxiosError, AxiosResponse } from "axios";
|
||||
import { KeyCheckerBase } from "../key-checker-base";
|
||||
import type { AnthropicKey, AnthropicKeyProvider } from "./provider";
|
||||
|
||||
const axios = getAxiosInstance();
|
||||
|
||||
const MIN_CHECK_INTERVAL = 3 * 1000; // 3 seconds
|
||||
const KEY_CHECK_PERIOD = 1000 * 60 * 60 * 6; // 6 hours
|
||||
const POST_MESSAGES_URL = "https://api.anthropic.com/v1/messages";
|
||||
@@ -125,7 +122,7 @@ export class AnthropicKeyChecker extends KeyCheckerBase<AnthropicKey> {
|
||||
{ key: key.hash, error: error.message },
|
||||
"Network error while checking key; trying this key again in a minute."
|
||||
);
|
||||
const oneMinute = 60 * 1000;
|
||||
const oneMinute = 10 * 1000;
|
||||
const next = Date.now() - (KEY_CHECK_PERIOD - oneMinute);
|
||||
this.updateKey(key.hash, { lastChecked: next });
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import crypto from "crypto";
|
||||
import { createGenericGetLockoutPeriod, Key, KeyProvider } from "..";
|
||||
import { Key, KeyProvider } from "..";
|
||||
import { config } from "../../../config";
|
||||
import { logger } from "../../../logger";
|
||||
import { AnthropicModelFamily, getClaudeModelFamily } from "../../models";
|
||||
@@ -23,6 +23,10 @@ type AnthropicKeyUsage = {
|
||||
export interface AnthropicKey extends Key, AnthropicKeyUsage {
|
||||
readonly service: "anthropic";
|
||||
readonly modelFamilies: AnthropicModelFamily[];
|
||||
/** The time at which this key was last rate limited. */
|
||||
rateLimitedAt: number;
|
||||
/** The time until which this key is rate limited. */
|
||||
rateLimitedUntil: number;
|
||||
/**
|
||||
* Whether this key requires a special preamble. For unclear reasons, some
|
||||
* Anthropic keys will throw an error if the prompt does not begin with a
|
||||
@@ -213,7 +217,22 @@ export class AnthropicKeyProvider implements KeyProvider<AnthropicKey> {
|
||||
key[`${getClaudeModelFamily(model)}Tokens`] += tokens;
|
||||
}
|
||||
|
||||
getLockoutPeriod = createGenericGetLockoutPeriod(() => this.keys);
|
||||
public getLockoutPeriod() {
|
||||
const activeKeys = this.keys.filter((k) => !k.isDisabled);
|
||||
// Don't lock out if there are no keys available or the queue will stall.
|
||||
// Just let it through so the add-key middleware can throw an error.
|
||||
if (activeKeys.length === 0) return 0;
|
||||
|
||||
const now = Date.now();
|
||||
const rateLimitedKeys = activeKeys.filter((k) => now < k.rateLimitedUntil);
|
||||
const anyNotRateLimited = rateLimitedKeys.length < activeKeys.length;
|
||||
|
||||
if (anyNotRateLimited) return 0;
|
||||
|
||||
// If all keys are rate-limited, return the time until the first key is
|
||||
// ready.
|
||||
return Math.min(...activeKeys.map((k) => k.rateLimitedUntil - now));
|
||||
}
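A worked example of the lockout computation above, with assumed timestamps: three active keys, all currently rate-limited, so the provider reports the wait until the soonest key frees up. If any key were not limited, it would return 0 and the request would proceed immediately:

const now = Date.now();
const activeKeys = [
  { rateLimitedUntil: now + 5_000 },
  { rateLimitedUntil: now + 12_000 },
  { rateLimitedUntil: now + 30_000 },
];
const allLimited = activeKeys.every((k) => now < k.rateLimitedUntil);
const lockout = allLimited
  ? Math.min(...activeKeys.map((k) => k.rateLimitedUntil - now))
  : 0;
console.log(lockout); // 5000 ms — wait for the first key to free up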
|
||||
|
||||
/**
|
||||
* This is called when we receive a 429, which means there are already five
|
||||
|
||||
@@ -1,37 +1,13 @@
|
||||
import { Sha256 } from "@aws-crypto/sha256-js";
|
||||
import { SignatureV4 } from "@smithy/signature-v4";
|
||||
import { HttpRequest } from "@smithy/protocol-http";
|
||||
import { AxiosError, AxiosHeaders, AxiosRequestConfig } from "axios";
|
||||
import axios, { AxiosError, AxiosRequestConfig, AxiosHeaders } from "axios";
|
||||
import { URL } from "url";
|
||||
import { config } from "../../../config";
|
||||
import { getAwsBedrockModelFamily } from "../../models";
|
||||
import { getAxiosInstance } from "../../network";
|
||||
import { KeyCheckerBase } from "../key-checker-base";
|
||||
import type { AwsBedrockKey, AwsBedrockKeyProvider } from "./provider";
|
||||
import { AwsBedrockModelFamily } from "../../models";
|
||||
import { config } from "../../../config";
|
||||
|
||||
const axios = getAxiosInstance();
|
||||
|
||||
type ParentModelId = string;
|
||||
type AliasModelId = string;
|
||||
type ModuleAliasTuple = [ParentModelId, ...AliasModelId[]];
|
||||
|
||||
const KNOWN_MODEL_IDS: ModuleAliasTuple[] = [
|
||||
["anthropic.claude-instant-v1"],
|
||||
["anthropic.claude-v2", "anthropic.claude-v2:1"],
|
||||
["anthropic.claude-3-sonnet-20240229-v1:0"],
|
||||
["anthropic.claude-3-haiku-20240307-v1:0"],
|
||||
["anthropic.claude-3-5-haiku-20241022-v1:0"],
|
||||
["anthropic.claude-3-opus-20240229-v1:0"],
|
||||
["anthropic.claude-3-5-sonnet-20240620-v1:0"],
|
||||
["anthropic.claude-3-5-sonnet-20241022-v2:0"],
|
||||
["mistral.mistral-7b-instruct-v0:2"],
|
||||
["mistral.mixtral-8x7b-instruct-v0:1"],
|
||||
["mistral.mistral-large-2402-v1:0"],
|
||||
["mistral.mistral-large-2407-v1:0"],
|
||||
["mistral.mistral-small-2402-v1:0"], // Seems to return 400
|
||||
];
|
||||
|
||||
const KEY_CHECK_BATCH_SIZE = 2; // The AWS checker makes many concurrent requests, so use a smaller batch size
|
||||
const MIN_CHECK_INTERVAL = 3 * 1000; // 3 seconds
|
||||
const KEY_CHECK_PERIOD = 90 * 60 * 1000; // 90 minutes
|
||||
const AMZ_HOST =
|
||||
@@ -39,8 +15,6 @@ const AMZ_HOST =
|
||||
const GET_CALLER_IDENTITY_URL = `https://sts.amazonaws.com/?Action=GetCallerIdentity&Version=2011-06-15`;
|
||||
const GET_INVOCATION_LOGGING_CONFIG_URL = (region: string) =>
|
||||
`https://bedrock.${region}.amazonaws.com/logging/modelinvocations`;
|
||||
const GET_LIST_INFERENCE_PROFILES_URL = (region: string) =>
|
||||
`https://bedrock.${region}.amazonaws.com/inference-profiles?maxResults=1000`;
|
||||
const POST_INVOKE_MODEL_URL = (region: string, model: string) =>
|
||||
`https://${AMZ_HOST.replace("%REGION%", region)}/model/${model}/invoke`;
|
||||
const TEST_MESSAGES = [
|
||||
@@ -50,22 +24,6 @@ const TEST_MESSAGES = [
|
||||
|
||||
type AwsError = { error: {} };
|
||||
|
||||
type GetInferenceProfilesResponse = {
|
||||
inferenceProfileSummaries: {
|
||||
inferenceProfileId: string;
|
||||
inferenceProfileName: string;
|
||||
inferenceProfileArn: string;
|
||||
description?: string;
|
||||
createdAt?: string;
|
||||
updatedAt?: string;
|
||||
status: "ACTIVE" | unknown;
|
||||
type: "SYSTEM_DEFINED" | unknown;
|
||||
models: {
|
||||
modelArn?: string;
|
||||
}[];
|
||||
}[];
|
||||
};
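For reference, a hypothetical response matching this shape (all values invented), using the `{continent}.{modelId}` profile ID convention the checker relies on later:

// Invented example of a ListInferenceProfiles response body.
const exampleResponse: GetInferenceProfilesResponse = {
  inferenceProfileSummaries: [
    {
      inferenceProfileId: "us.anthropic.claude-3-5-sonnet-20240620-v1:0",
      inferenceProfileName: "US Anthropic Claude 3.5 Sonnet",
      inferenceProfileArn:
        "arn:aws:bedrock:us-east-1:123456789012:inference-profile/us.anthropic.claude-3-5-sonnet-20240620-v1:0",
      status: "ACTIVE",
      type: "SYSTEM_DEFINED",
      models: [
        {
          modelArn:
            "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-5-sonnet-20240620-v1:0",
        },
      ],
    },
  ],
};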
type GetLoggingConfigResponse = {
  loggingConfig: null | {
    cloudWatchConfig: null | unknown;
@@ -84,68 +42,54 @@ export class AwsKeyChecker extends KeyCheckerBase<AwsBedrockKey> {
      service: "aws",
      keyCheckPeriod: KEY_CHECK_PERIOD,
      minCheckInterval: MIN_CHECK_INTERVAL,
      keyCheckBatchSize: KEY_CHECK_BATCH_SIZE,
      updateKey,
    });
  }

  protected async testKeyOrFail(key: AwsBedrockKey) {
    // Only check models on startup. For now all models must be available to
    // the proxy because we don't route requests to different keys.
    let checks: Promise<boolean>[] = [];
    const isInitialCheck = !key.lastChecked;
    if (isInitialCheck) {
      checks = [
        this.invokeModel("anthropic.claude-v2", key),
        this.invokeModel("anthropic.claude-3-sonnet-20240229-v1:0", key),
        this.invokeModel("anthropic.claude-3-haiku-20240307-v1:0", key),
        this.invokeModel("anthropic.claude-3-opus-20240229-v1:0", key),
      ];
    }
    checks.unshift(this.checkLoggingConfiguration(key));

    const [_logging, claudeV2, sonnet, haiku, opus] = await Promise.all(checks);

    if (isInitialCheck) {
      try {
        await this.checkInferenceProfiles(key);
      } catch (e) {
        const asError = e as AxiosError<AwsError>;
        const data = asError.response?.data;
      const families: AwsBedrockModelFamily[] = [];
      if (claudeV2 || sonnet || haiku) families.push("aws-claude");
      if (opus) families.push("aws-claude-opus");

      if (families.length === 0) {
        this.log.warn(
          { key: key.hash, error: e.message, data },
          "Cannot list inference profiles.\n\
Principal may be missing `AmazonBedrockFullAccess`, or has no policy allowing action `bedrock:ListInferenceProfiles` against resource `arn:aws:bedrock:*:*:inference-profile/*`.\n\
Requests will be made without inference profiles using on-demand quotas, which may be subject to more restrictive rate limits.\n\
See https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-prereq.html."
          { key: key.hash },
          "Key does not have access to any models; disabling."
        );
        return this.updateKey(key.hash, { isDisabled: true });
      }

      this.updateKey(key.hash, {
        sonnetEnabled: sonnet,
        haikuEnabled: haiku,
        modelFamilies: families,
      });
    }

    // Perform checks for all parent model IDs
    // TODO: use allsettled
    const results = await Promise.all(
      KNOWN_MODEL_IDS.filter(([model]) =>
        // Skip checks for models that are disabled anyway
        config.allowedModelFamilies.includes(getAwsBedrockModelFamily(model))
      ).map(async ([model, ...aliases]) => ({
        models: [model, ...aliases],
        success: await this.invokeModel(model, key),
      }))
    );

    // Keep only the models whose check succeeded
    const modelIds = results
      .filter(({ success }) => success)
      .flatMap(({ models }) => models);

    if (modelIds.length === 0) {
      this.log.warn(
        { key: key.hash },
        "Key does not have access to any models; disabling."
      );
      return this.updateKey(key.hash, { isDisabled: true });
    }

    this.updateKey(key.hash, {
      modelIds,
      modelFamilies: Array.from(
        new Set(modelIds.map(getAwsBedrockModelFamily))
      ),
    });

    this.log.info(
      {
        key: key.hash,
        logged: key.awsLoggingStatus,
        sonnet,
        haiku,
        families: key.modelFamilies,
        models: key.modelIds,
        logged: key.awsLoggingStatus,
      },
      "Checked key."
    );
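The "TODO: use allsettled" above notes that a single rejected probe makes Promise.all reject and abort the whole key check. A hedged sketch of what the Promise.allSettled variant could look like; this is not part of the commit:

// Hypothetical allSettled variant: a rejected probe counts as a failed
// model instead of aborting every other check.
const settled = await Promise.allSettled(
  KNOWN_MODEL_IDS.map(async ([model, ...aliases]) => ({
    models: [model, ...aliases],
    success: await this.invokeModel(model, key),
  }))
);
const passingIds = settled.flatMap((r) =>
  r.status === "fulfilled" && r.value.success ? r.value.models : []
);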
@@ -183,9 +127,9 @@ See https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-
        // not necessarily disabled. Retry in 10 seconds.
        this.log.warn(
          { key: key.hash, errorType, error: error.response.data },
          "Key is rate limited. Rechecking in 30 seconds."
          "Key is rate limited. Rechecking in 10 seconds."
        );
        const next = Date.now() - (KEY_CHECK_PERIOD - 30 * 1000);
        const next = Date.now() - (KEY_CHECK_PERIOD - 10 * 1000);
        return this.updateKey(key.hash, { lastChecked: next });
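        // Backdating lastChecked to now minus (KEY_CHECK_PERIOD - 10s) makes
        // the checker's periodic sweep pick this key up again in ~10 seconds.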
      case "ValidationException":
      default:
@@ -216,53 +160,7 @@ See https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-
   * key has access to the model, false if it does not. Throws an error if the
   * key is disabled.
   */
  private async invokeModel(
    model: string,
    key: AwsBedrockKey
  ): Promise<boolean> {
    if (model.includes("claude")) {
      // If inference profiles are available, try testing the model with them.
      // If they are not available or the invocation fails with the inference
      // profile, fall back to the regular model ID.
      const { region } = AwsKeyChecker.getCredentialsFromKey(key);
      const continent = region.split("-")[0];
      const profile = key.inferenceProfileIds.find(
        (id) => `${continent}.${model}` === id
      );

      if (profile) {
        this.log.debug(
          { key: key.hash, model, profile },
          "Testing model via inference profile."
        );
        let result: boolean;
        try {
          result = await this.testClaudeModel(key, profile);
        } catch (e) {
          this.log.error(
            { key: key.hash, model, profile, error: e.message },
            "InvokeModel via inference profile returned an error; trying model ID directly."
          );
          result = false;
        }

        // If the profile worked, we'll return success. The caller will add
        // the model (not the profile) to the list of enabled models, but the
        // profile will be used when the key is used for inference.
        if (result) return true;
      }
      this.log.debug({ key: key.hash, model }, "Testing model via model ID.");
      return this.testClaudeModel(key, model);
    } else if (model.includes("mistral")) {
      return this.testMistralModel(key, model);
    }
    throw new Error("AwsKeyChecker#invokeModel: no implementation for model");
  }
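The profile lookup above keys off the first segment of the AWS region, so a key in us-east-1 matches system-defined profiles prefixed with "us.". A worked example with illustrative values:

// Illustrative values only: how a region maps to an inference profile ID.
const region = "us-east-1";
const continent = region.split("-")[0]; // "us"
const model = "anthropic.claude-3-opus-20240229-v1:0";
const expectedProfileId = `${continent}.${model}`;
// => "us.anthropic.claude-3-opus-20240229-v1:0"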
  private async testClaudeModel(
    key: AwsBedrockKey,
    model: string
  ): Promise<boolean> {
  private async invokeModel(model: string, key: AwsBedrockKey) {
    const creds = AwsKeyChecker.getCredentialsFromKey(key);
    // This is not a valid invocation payload, but a 400 response indicates
    // that the principal at least has permission to invoke the model.
@@ -277,7 +175,7 @@ See https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-
      method: "POST",
      url: POST_INVOKE_MODEL_URL(creds.region, model),
      data: payload,
      validateStatus: (status) => [400, 403, 404, 429, 503].includes(status),
      validateStatus: (status) => status === 400 || status === 403,
    };
    config.headers = new AxiosHeaders({
      "content-type": "application/json",
@@ -289,59 +187,11 @@ See https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-
    const errorType = (headers["x-amzn-errortype"] as string).split(":")[0];
    const errorMessage = data?.message;

    // 503 ServiceUnavailableException errors are usually due to temporary
    // outages in the AWS infrastructure. However, because a 503 response also
    // indicates that the key can invoke the model, we can treat this as a
    // successful response.
    if (status === 503 && errorType.match(/ServiceUnavailableException/i)) {
      this.log.warn(
        { key: key.hash, model, errorType, data, status, headers },
        "Model is accessible, but may be temporarily unavailable."
      );
      return true;
    }

    // A 429 ThrottlingException suggests the model is available but the key
    // is being rate limited. I think if a key does not have access to the
    // model, it cannot receive a 429 response, so this should be a success.
    if (status === 429) {
      if (errorType.match(/ThrottlingException/i)) {
        this.log.debug(
          { key: key.hash, model, errorType, data, status, headers },
          "Model is available but key is rate limited."
        );
        return true;
      } else {
        throw new AxiosError(
          `InvokeModel returned 429 of type ${errorType}`,
          `AWS_INVOKE_MODEL_RATE_LIMITED`,
          response.config,
          response.request,
          response
        );
      }
    }

    // This message indicates the key is valid but this particular model is
    // not accessible. Other 403s may indicate the key is not usable.
    // We only allow one type of 403 error, and we only allow it for one model.
    if (
      status === 403 &&
      errorMessage?.match(/access to the model with the specified model ID/)
    ) {
      this.log.debug(
        { key: key.hash, model, errorType, data, status, headers },
        "Model is not available (principal does not have access)."
      );
      return false;
    }

    // ResourceNotFound typically indicates that the tested model cannot be
    // used in the configured region for this set of credentials.
    if (status === 404) {
      this.log.debug(
        { region: creds.region, model, key: key.hash },
        "Model is not available (not supported in this AWS region)."
      );
      return false;
    }

@@ -350,91 +200,23 @@ See https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-
    const correctErrorType = errorType === "ValidationException";
    const correctErrorMessage = errorMessage?.match(/max_tokens/);
    if (!correctErrorType || !correctErrorMessage) {
      this.log.debug(
        { key: key.hash, model, errorType, data, status },
        "Model is not available (request rejected)."
      );
      return false;
      // throw new AxiosError(
      //   `Unexpected error when invoking model ${model}: ${errorMessage}`,
      //   "AWS_ERROR",
      //   response.config,
      //   response.request,
      //   response
      // );
    }

    this.log.debug(
      { key: key.hash, model, errorType, data, status },
      "Model is available."
      "AWS InvokeModel test successful."
    );
    return true;
  }
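The Claude test payload itself is elided by the hunks above. Judging by the max_tokens ValidationException the code expects, and by analogy with the Mistral probe below, it is presumably something like the following hypothetical reconstruction:

// Hypothetical probe body: an out-of-range max_tokens should trigger a
// ValidationException mentioning "max_tokens" without generating any text.
const payload = {
  anthropic_version: "bedrock-2023-05-31",
  max_tokens: -1,
  messages: TEST_MESSAGES,
};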
  private async testMistralModel(
    key: AwsBedrockKey,
    model: string
  ): Promise<boolean> {
    const creds = AwsKeyChecker.getCredentialsFromKey(key);

    const payload = {
      max_tokens: -1,
      prompt: "<s>[INST] What is your favourite condiment? [/INST]</s>",
    };
    const config: AxiosRequestConfig = {
      method: "POST",
      url: POST_INVOKE_MODEL_URL(creds.region, model),
      data: payload,
      validateStatus: (status) => [400, 403, 404].includes(status),
      headers: {
        "content-type": "application/json",
        accept: "*/*",
      },
    };
    await AwsKeyChecker.signRequestForAws(config, key);
    const response = await axios.request(config);
    const { data, status, headers } = response;
    const errorType = (headers["x-amzn-errortype"] as string).split(":")[0];
    const errorMessage = data?.message;

    if (status === 403 || status === 404) {
      this.log.debug(
        { key: key.hash, model, errorType, data, status },
        "Model is not available (no access or unsupported region)."
      );
      return false;
    }

    const isBadRequest = status === 400;
    const isValidationError = errorMessage?.match(/validation error/i);
    if (isBadRequest && !isValidationError) {
      this.log.debug(
        { key: key.hash, model, errorType, data, status, headers },
        "Model is not available (request rejected)."
      );
      return false;
    }

    this.log.debug(
      { key: key.hash, model, errorType, data, status },
      "Model is available."
    );
    return true;
  }

  private async checkInferenceProfiles(key: AwsBedrockKey) {
    const creds = AwsKeyChecker.getCredentialsFromKey(key);
    const req: AxiosRequestConfig = {
      method: "GET",
      url: GET_LIST_INFERENCE_PROFILES_URL(creds.region),
      headers: { accept: "application/json" },
    };
    await AwsKeyChecker.signRequestForAws(req, key);
    const { data } = await axios.request<GetInferenceProfilesResponse>(req);
    const { inferenceProfileSummaries } = data;
    const profileIds = inferenceProfileSummaries.map(
      (p) => p.inferenceProfileId
    );
    this.log.debug(
      { key: key.hash, profileIds, region: creds.region },
      "Inference profiles found."
    );
    this.updateKey(key.hash, { inferenceProfileIds: profileIds });
  }

  private async checkLoggingConfiguration(key: AwsBedrockKey) {
    if (config.allowAwsLogging) {
      // Don't check logging status if we're allowing it, to reduce API calls.
@@ -503,8 +285,7 @@ See https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-
      method,
      protocol: "https:",
      hostname: url.hostname,
      path: url.pathname,
      query: Object.fromEntries(url.searchParams),
      path: url.pathname + url.search,
      headers: { Host: url.hostname, ...plainHeaders },
    });
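This hunk changes how the query string is carried on the signed request: one side builds a separate query map from url.searchParams, the other folds it into path. For context, a minimal self-contained sketch of SigV4 signing with the @smithy imports shown at the top of this file; the credential plumbing here is assumed, not taken from this diff:

import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";

// Signs a GET to the Bedrock inference-profiles endpoint and returns a
// request carrying the Authorization and x-amz-date headers.
async function signListProfiles(
  accessKeyId: string,
  secretAccessKey: string,
  region: string
) {
  const hostname = `bedrock.${region}.amazonaws.com`;
  const signer = new SignatureV4({
    service: "bedrock",
    region,
    credentials: { accessKeyId, secretAccessKey },
    sha256: Sha256,
  });
  return signer.sign(
    new HttpRequest({
      method: "GET",
      protocol: "https:",
      hostname,
      path: "/inference-profiles",
      query: { maxResults: "1000" },
      headers: { Host: hostname },
    })
  );
}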
Some files were not shown because too many files have changed in this diff.