1 Commit

Author SHA1 Message Date
nai-degen 84acc429d7 wip 2024-03-16 00:04:27 -05:00
149 changed files with 2878 additions and 9258 deletions
+8 -40
@@ -40,28 +40,12 @@ NODE_ENV=production
# Which model types users are allowed to access.
# The following model families are recognized:
# turbo | gpt4 | gpt4-32k | gpt4-turbo | gpt4o | dall-e | claude | claude-opus
# | gemini-flash | gemini-pro | gemini-ultra | mistral-tiny | mistral-small
# | mistral-medium | mistral-large | aws-claude | aws-claude-opus | gcp-claude
# | gcp-claude-opus | azure-turbo | azure-gpt4 | azure-gpt4-32k
# | azure-gpt4-turbo | azure-gpt4o | azure-dall-e
# turbo | gpt4 | gpt4-32k | gpt4-turbo | dall-e | claude | claude-opus | gemini-pro | mistral-tiny | mistral-small | mistral-medium | mistral-large | aws-claude | azure-turbo | azure-gpt4 | azure-gpt4-32k | azure-gpt4-turbo | azure-dall-e
# By default, all models are allowed except for 'dall-e' / 'azure-dall-e'.
# To allow DALL-E image generation, uncomment the line below and add 'dall-e' or
# 'azure-dall-e' to the list of allowed model families.
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,gpt4o,claude,claude-opus,gemini-flash,gemini-pro,gemini-ultra,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,gcp-claude,gcp-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,claude,claude-opus,gemini-pro,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo
# Which services can be used to process prompts containing images via multimodal
# models. The following services are recognized:
# openai | anthropic | aws | gcp | azure | google-ai | mistral-ai
# Do not enable this feature unless all users are trusted, as you will be liable
# for any user-submitted images containing illegal content.
# By default, no image services are allowed and image prompts are rejected.
# ALLOWED_VISION_SERVICES=
# IP addresses or CIDR blocks from which requests will be blocked.
# IP_BLACKLIST=10.0.0.1/24
# URLs from which requests will be blocked.
# BLOCKED_ORIGINS=reddit.com,9gag.com
# Message to show when requests are blocked.
@@ -74,7 +58,7 @@ NODE_ENV=production
# Avoid short or common phrases as this tests the entire prompt.
# REJECT_PHRASES="phrase one,phrase two,"phrase three, which has a comma",phrase four"
# Message to show when requests are rejected.
# REJECT_MESSAGE="You can't say that here."
# REJECT_MESSAGE="This content violates /aicg/'s acceptable use policy."
# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
@@ -90,13 +74,6 @@ NODE_ENV=production
# Detail level of logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info
# Captcha verification settings. Refer to docs/pow-captcha.md for guidance.
# CAPTCHA_MODE=none
# POW_TOKEN_HOURS=24
# POW_TOKEN_MAX_IPS=2
# POW_DIFFICULTY_LEVEL=low
# POW_CHALLENGE_TIMEOUT=30
# ------------------------------------------------------------------------------
# Optional settings for user management, access control, and quota enforcement:
# See `docs/user-management.md` for more information and setup instructions.
@@ -116,20 +93,18 @@ NODE_ENV=production
# ALLOW_NICKNAME_CHANGES=true
# Default token quotas for each model family. (0 for unlimited)
# Specify as TOKEN_QUOTA_MODEL_FAMILY=value, replacing dashes with underscores.
# DALL-E "tokens" are counted at a rate of 100000 tokens per US$1.00 generated,
# which is similar to the cost of GPT-4 Turbo.
# DALL-E 3 costs around US$0.10 per image (10000 tokens).
# See `docs/dall-e-configuration.md` for more information.
# TOKEN_QUOTA_TURBO=0
# TOKEN_QUOTA_GPT4=0
# TOKEN_QUOTA_GPT4_32K=0
# TOKEN_QUOTA_GPT4_TURBO=0
# TOKEN_QUOTA_DALL_E=0
# TOKEN_QUOTA_CLAUDE=0
# TOKEN_QUOTA_GEMINI_PRO=0
# TOKEN_QUOTA_AWS_CLAUDE=0
# TOKEN_QUOTA_GCP_CLAUDE=0
# "Tokens" for image-generation models are counted at a rate of 100000 tokens
# per US$1.00 generated, which is similar to the cost of GPT-4 Turbo.
# DALL-E 3 costs around US$0.10 per image (10000 tokens).
# See `docs/dall-e-configuration.md` for more information.
# TOKEN_QUOTA_DALL_E=0
# How often to refresh token quotas. (hourly | daily)
# Leave unset to never automatically refresh quotas.
@@ -149,25 +124,18 @@ NODE_ENV=production
# You can add multiple API keys by separating them with a comma.
# For AWS credentials, separate the access key ID, secret key, and region with a colon.
# For GCP credentials, separate the project ID, client email, region, and private key with a colon.
OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
ANTHROPIC_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
GOOGLE_AI_KEY=AIzaxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# See `docs/aws-configuration.md` for more information; additional steps may be required to set up AWS.
AWS_CREDENTIALS=myaccesskeyid:mysecretkey:us-east-1,anotheraccesskeyid:anothersecretkey:us-west-2
# See `docs/azure-configuration.md` for more information; additional steps may be required to set up Azure.
AZURE_CREDENTIALS=azure-resource-name:deployment-id:api-key,another-azure-resource-name:another-deployment-id:another-api-key
GCP_CREDENTIALS=project-id:client-email:region:private-key
# With proxy_key gatekeeper, the password users must provide to access the API.
# PROXY_KEY=your-secret-key
# With user_token gatekeeper, the admin password used to manage users.
# ADMIN_KEY=your-very-secret-key
# To restrict access to the admin interface to specific IP addresses, set the
# ADMIN_WHITELIST environment variable to a comma-separated list of CIDR blocks.
# ADMIN_WHITELIST=0.0.0.0/0
# With firebase_rtdb gatekeeper storage, the Firebase project credentials.
# FIREBASE_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
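For reference, the image-token accounting described in the quota comments above works out as follows. This is a hypothetical sketch (`usdToImageTokens` is illustrative, not part of the proxy):

```
// Image-generation "tokens" are charged at 100000 tokens per US$1.00 of
// output, so a ~US$0.10 DALL-E 3 image costs about 10000 tokens.
const TOKENS_PER_USD = 100_000;

function usdToImageTokens(usd: number): number {
  return Math.round(usd * TOKENS_PER_USD);
}

usdToImageTokens(0.1); // 10000 tokens, roughly one DALL-E 3 image
usdToImageTokens(5.0); // 500000, e.g. TOKEN_QUOTA_DALL_E=500000 for a ~$5 budget
```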
+4 -3
@@ -1,10 +1,11 @@
{
"plugins": ["prettier-plugin-ejs"],
"overrides": [
{
"files": "*.ejs",
"files": [
"*.ejs"
],
"options": {
"printWidth": 120,
"printWidth": 160,
"bracketSameLine": true
}
}
+6 -3
@@ -7,8 +7,9 @@ Reverse proxy server for various LLM APIs.
- [Features](#features)
- [Usage Instructions](#usage-instructions)
- [Self-hosting](#self-hosting)
- [Huggingface (outdated, not advised)](#huggingface-outdated-not-advised)
- [Render (outdated, not advised)](#render-outdated-not-advised)
- [Alternatives](#alternatives)
- [Huggingface (outdated, not advised)](#huggingface-outdated-not-advised)
- [Render (outdated, not advised)](#render-outdated-not-advised)
- [Local Development](#local-development)
## What is this?
@@ -19,7 +20,6 @@ This project allows you to run a reverse proxy server for various LLM APIs.
- [x] [OpenAI](https://openai.com/)
- [x] [Anthropic](https://www.anthropic.com/)
- [x] [AWS Bedrock](https://aws.amazon.com/bedrock/)
- [x] [Vertex AI (GCP)](https://cloud.google.com/vertex-ai/)
- [x] [Google MakerSuite/Gemini API](https://ai.google.dev/)
- [x] [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
- [x] Translation from OpenAI-formatted prompts to any other API, including streaming responses
@@ -41,6 +41,9 @@ If you'd like to run your own instance of this server, you'll need to deploy it
**Ensure you set the `TRUSTED_PROXIES` environment variable according to your deployment.** Refer to [.env.example](./.env.example) and [config.ts](./src/config.ts) for more information.
### Alternatives
Fiz and Sekrit are working on alternative ways to deploy this more conveniently. While I'm not involved in that effort beyond providing technical advice on my code, I'll link to their work here: [Sekrit's rentry](https://rentry.org/sekrit)
### Huggingface (outdated, not advised)
[See here for instructions on how to deploy to a Huggingface Space.](./docs/deploy-huggingface.md)
-35
@@ -1,35 +0,0 @@
# Configuring the proxy for Vertex AI (GCP)
The proxy supports GCP models via the `/proxy/gcp/claude` endpoint. There are a few extra steps necessary to use GCP compared to the other supported APIs.
- [Setting keys](#setting-keys)
- [Setup Vertex AI](#setup-vertex-ai)
- [Supported model IDs](#supported-model-ids)
## Setting keys
Use the `GCP_CREDENTIALS` environment variable to set the GCP API keys.
Like other APIs, you can provide multiple keys separated by commas. Each GCP key, however, is a set of credentials comprising the project ID, client email, region, and private key, separated by colons (`:`).
For example:
```
GCP_CREDENTIALS=my-first-project:xxx@yyy.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----,my-first-project2:xxx2@yyy.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----
```
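A minimal sketch of how such a string could be parsed (hypothetical; not the proxy's actual credential loader). Because the PEM private key contains no colons, everything after the third colon can be rejoined safely:

```
function parseGcpCredentials(raw: string) {
  return raw.split(",").map((entry) => {
    const [projectId, clientEmail, region, ...rest] = entry.split(":");
    // Rejoin in case the private key ever contains a colon.
    return { projectId, clientEmail, region, privateKey: rest.join(":") };
  });
}
```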
## Setup Vertex AI
1. Go to [https://cloud.google.com/vertex-ai](https://cloud.google.com/vertex-ai) and sign up for a GCP account ($150 in free credits without a credit card, or $300 with one; credits expire in 90 days).
2. Go to [https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com](https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com) to enable the Vertex AI API.
3. Go to [https://console.cloud.google.com/vertex-ai](https://console.cloud.google.com/vertex-ai) and navigate to Model Garden to apply for access to the Claude models.
4. Create a [Service Account](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts/create?walkthrough_id=iam--create-service-account#step_index=1) and grant it the "Vertex AI User" or "Vertex AI Administrator" role.
5. On the page of the service account you just created, create a new key and select "JSON". The key file will be downloaded automatically.
6. The required credentials are in the JSON file you just downloaded.
## Supported model IDs
Users can send these model IDs to the proxy to invoke the corresponding models; see the request sketch after the list.
- **Claude**
- `claude-3-haiku@20240307`
- `claude-3-sonnet@20240229`
- `claude-3-opus@20240229`
- `claude-3-5-sonnet@20240620`
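For illustration, a native completion request through this endpoint might look like the sketch below. The host and proxy key are placeholders; the request shape mirrors the repository's own test requests:

```
const res = await fetch("https://my-proxy.example/proxy/gcp/claude/v1/complete", {
  method: "POST",
  headers: {
    "Authorization": "Bearer my-proxy-key",
    "anthropic-version": "2023-01-01",
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    model: "claude-3-sonnet@20240229",
    max_tokens_to_sample: 256,
    prompt: "\n\nHuman: Hello\n\nAssistant:",
  }),
});
```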
-135
@@ -1,135 +0,0 @@
# Proof-of-work Verification
You can require users to complete a proof-of-work challenge before they can
access the proxy. This raises the cost of denial-of-service attacks and slows
down automated abuse.
When configured, users access the challenge UI and request a token. The server
sends a challenge to the client, which asks the user's browser to find a
solution to the challenge that meets a certain constraint (the difficulty
level). Once the user has found a solution, they can submit it to the server
and get a user token valid for a period you specify.
The proof-of-work challenge uses the argon2id hash function.
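Concretely, a solution is a nonce whose argon2id digest, read as an integer, falls at or below a target value derived from the difficulty level. This is the same acceptance check the bundled solver worker performs:

```
// A nonce solves the challenge when its hex digest, interpreted as an
// integer, is less than or equal to the challenge's target value.
function isSolution(hashHex: string, targetValue: bigint): boolean {
  return BigInt(`0x${hashHex}`) <= targetValue;
}
```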
## Configuration
To enable proof-of-work verification, set the following environment variables:
```
GATEKEEPER=user_token
CAPTCHA_MODE=proof_of_work
# Validity of the token in hours
POW_TOKEN_HOURS=24
# Max number of IPs that can use a user_token issued via proof-of-work
POW_TOKEN_MAX_IPS=2
# The difficulty level of the proof-of-work challenge. You can use one of the
# predefined levels specified below, or you can specify a custom number of
# expected hash iterations.
POW_DIFFICULTY_LEVEL=low
# The time limit for solving the challenge, in minutes
POW_CHALLENGE_TIMEOUT=30
```
## Difficulty Levels
The difficulty level controls how long, on average, it will take for a user to
solve the proof-of-work challenge. Due to randomness, the actual time can vary
significantly; lucky users may solve the challenge in a fraction of the average
time, while unlucky users may take much longer.
The difficulty level doesn't affect the speed of the hash function itself, only
the number of hashes that will need to be computed. Therefore, the time required
to complete the challenge scales linearly with the difficulty level's iteration
count.
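As a rough sanity check, expected solve time is simply the iteration count divided by the device's hash rate. Using figures from the "Tested hash rates" section below:

```
// Average solve time in seconds, assuming a steady hash rate.
const expectedSeconds = (iterations: number, hashesPerSecond: number) =>
  iterations / hashesPerSecond;

expectedSeconds(200, 30); // "low" on a fast desktop: ~7 s on average
expectedSeconds(4000, 4); // "extreme" on a slow phone: ~1000 s (~17 min)
```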
You can adjust the difficulty level while the proxy is running from the admin
interface.
Be aware that there is a time limit for solving the challenge, set to 30
minutes by default. Above 'high' difficulty, you will probably need to increase
this limit, or users with slow devices will struggle to find a solution in
time.
### Low
- Average of 200 iterations required
- Default setting.
### Medium
- Average of 900 iterations required
### High
- Average of 1900 iterations required
### Extreme
- Average of 4000 iterations required
- Not recommended unless you are expecting very high levels of abuse
- May require increasing `POW_CHALLENGE_TIMEOUT`
### Custom
Setting `POW_DIFFICULTY_LEVEL` to an integer will use that number of iterations
as the difficulty level.
## Other challenge settings
- `POW_CHALLENGE_TIMEOUT`: The time limit for solving the challenge, in minutes.
Default is 30.
- `POW_TOKEN_HOURS`: The period of time for which a user token issued via proof-of-work can be used. Default is 24 hours. Starts when the challenge is solved.
- `POW_TOKEN_MAX_IPS`: The maximum number of unique IPs that can use a single
user token issued via proof-of-work. Default is 2.
- `POW_TOKEN_PURGE_HOURS`: The period of time after which an expired user token
issued via proof-of-work will be removed from the database. Until it is
purged, users can refresh expired tokens by completing a half-difficulty
challenge. Default is 48 hours.
- `POW_MAX_TOKENS_PER_IP`: The maximum number of active user tokens that can
be associated with a single IP address. After this limit is reached, the
oldest token will be forcibly expired when a new token is issued. Set to 0
to disable this feature. Default is 0.
## Custom argon2id parameters
You can set custom argon2id parameters for the proof-of-work challenge.
Generally, you should not need to change these unless you have a specific
reason to do so.
The listed values are the defaults.
```
ARGON2_TIME_COST=8
ARGON2_MEMORY_KB=65536
ARGON2_PARALLELISM=1
ARGON2_HASH_LENGTH=32
```
Increasing parallelism will not do much except increase memory consumption for
both the client and server, because browser proof-of-work implementations are
single-threaded. It's better to increase the time cost if you want to increase
the difficulty.
Increasing memory too much may cause memory exhaustion on some mobile devices,
particularly on iOS due to the way Safari handles WebAssembly memory allocation.
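For illustration, these parameters map directly onto the `argon2id` call the bundled worker makes via hash-wasm. A sketch of hashing one candidate nonce with the defaults (the server's own verification code may differ):

```
import { argon2id } from "hash-wasm";

// Hash one candidate nonce with the default parameters listed above.
// The resulting hex digest is compared against the challenge target.
async function hashNonce(nonce: number, salt: Uint8Array): Promise<string> {
  return argon2id({
    password: String(nonce),
    salt,
    iterations: 8,     // ARGON2_TIME_COST
    memorySize: 65536, // ARGON2_MEMORY_KB
    parallelism: 1,    // ARGON2_PARALLELISM
    hashLength: 32,    // ARGON2_HASH_LENGTH
  });
}
```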
## Tested hash rates
These were measured with the default argon2id parameters listed above. These
tests were not at all scientific, so take them with a grain of salt.
Safari does not like large WASM memory usage, so concurrency is limited to 4 to
avoid overallocating memory on mobile WebKit browsers. Thermal throttling can
also significantly reduce hash rates on mobile devices.
- Intel Core i9-13900K (Chrome): 33-35 H/s
- Intel Core i9-13900K (Firefox): 29-32 H/s
- Intel Core i9-13900K (Chrome, in VM limited to 4 cores): 12.2 - 13.0 H/s
- iPad Pro (M2) (Safari, 6 workers): 8.0 - 10 H/s
- Thermally throttles early. The normal concurrency would be 8 workers, but it's unstable.
- iPhone 15 Pro Max (Safari): 4.0 - 4.6 H/s
- Samsung Galaxy S10e (Chrome): 3.6 - 3.8 H/s
- This is a 2019 phone almost matching an iPhone five years newer because of
bad Safari performance.
-10
@@ -12,7 +12,6 @@ Several of these features require you to set secrets in your environment. If usi
- [Memory](#memory)
- [Firebase Realtime Database](#firebase-realtime-database)
- [Firebase setup instructions](#firebase-setup-instructions)
- [Whitelisting admin IP addresses](#whitelisting-admin-ip-addresses)
## No user management (`GATEKEEPER=none`)
@@ -62,12 +61,3 @@ To use Firebase Realtime Database to persist user data, set the following enviro
8. Set `GATEKEEPER_STORE` to `firebase_rtdb` in your environment if you haven't already.
The proxy server will attempt to connect to your Firebase Realtime Database at startup and will throw an error if it cannot connect. If you see this error, check that your `FIREBASE_RTDB_URL` and `FIREBASE_KEY` secrets are set correctly.
## Whitelisting admin IP addresses
You can add your own IP ranges to the `ADMIN_WHITELIST` environment variable for additional security.
You can provide a comma-separated list containing individual IPv4 or IPv6 addresses, or CIDR ranges.
To whitelist an entire IP range, use CIDR notation. For example, `192.168.0.1/24` would whitelist all addresses from `192.168.0.0` to `192.168.0.255`.
To disable the whitelist, set `ADMIN_WHITELIST=0.0.0.0/0,::0`, which will allow access from any IPv4 or IPv6 address. This is the default behavior.
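The proxy already depends on `ipaddr.js`, which handles this kind of matching. A minimal sketch of a CIDR check (not the proxy's actual middleware):

```
import ipaddr from "ipaddr.js";

// True if the client address falls inside any of the listed CIDR ranges.
function isWhitelisted(clientIp: string, whitelist: string[]): boolean {
  const addr = ipaddr.parse(clientIp);
  return whitelist.some((range) => {
    const cidr = ipaddr.parseCIDR(range);
    // match() throws on mixed address kinds, so guard first.
    return addr.kind() === cidr[0].kind() && addr.match(cidr);
  });
}

isWhitelisted("192.168.0.42", ["192.168.0.1/24"]); // true
```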
+768 -1220
File diff suppressed because it is too large
+6 -14
@@ -4,7 +4,6 @@
"description": "Reverse proxy for the OpenAI API",
"scripts": {
"build": "tsc && copyfiles -u 1 src/**/*.ejs build",
"database:migrate": "ts-node scripts/migrate.ts",
"prepare": "husky install",
"start": "node build/server.js",
"start:dev": "nodemon --watch src --exec ts-node --transpile-only src/server.ts",
@@ -20,36 +19,31 @@
"dependencies": {
"@anthropic-ai/tokenizer": "^0.0.4",
"@aws-crypto/sha256-js": "^5.2.0",
"@huggingface/jinja": "^0.3.0",
"@node-rs/argon2": "^1.8.3",
"@smithy/eventstream-codec": "^2.1.3",
"@smithy/eventstream-serde-node": "^2.1.3",
"@smithy/protocol-http": "^3.2.1",
"@smithy/signature-v4": "^2.1.3",
"@smithy/types": "^2.10.1",
"@smithy/util-utf8": "^2.1.1",
"axios": "^1.7.4",
"better-sqlite3": "^10.0.0",
"axios": "^1.3.5",
"check-disk-space": "^3.4.0",
"cookie-parser": "^1.4.6",
"copyfiles": "^2.4.1",
"cors": "^2.8.5",
"csrf-csrf": "^2.3.0",
"dotenv": "^16.3.1",
"ejs": "^3.1.10",
"ejs": "^3.1.9",
"express": "^4.18.2",
"express-session": "^1.17.3",
"firebase-admin": "^12.3.1",
"glob": "^10.3.12",
"firebase-admin": "^11.10.1",
"googleapis": "^122.0.0",
"http-proxy-middleware": "^3.0.0-beta.1",
"ipaddr.js": "^2.1.0",
"memorystore": "^1.6.7",
"multer": "^1.4.5-lts.1",
"node-schedule": "^2.1.1",
"pino": "^8.11.0",
"pino-http": "^8.3.3",
"sanitize-html": "^2.13.0",
"sanitize-html": "2.12.1",
"sharp": "^0.32.6",
"showdown": "^2.1.0",
"source-map-support": "^0.5.21",
@@ -61,7 +55,6 @@
"zod-error": "^1.5.0"
},
"devDependencies": {
"@types/better-sqlite3": "^7.6.10",
"@types/cookie-parser": "^1.4.3",
"@types/cors": "^2.8.13",
"@types/express": "^4.17.17",
@@ -79,13 +72,12 @@
"nodemon": "^3.0.1",
"pino-pretty": "^10.2.3",
"prettier": "^3.0.3",
"prettier-plugin-ejs": "^1.0.3",
"ts-node": "^10.9.1",
"typescript": "^5.4.2"
},
"overrides": {
"braces": "^3.0.3",
"fast-xml-parser": "^4.4.1",
"google-gax": "^3.6.1",
"postcss": "^8.4.31",
"follow-redirects": "^1.15.4"
}
}
-349
@@ -1,349 +0,0 @@
/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */
/* Document
========================================================================== */
/**
* 1. Correct the line height in all browsers.
* 2. Prevent adjustments of font size after orientation changes in iOS.
*/
html {
line-height: 1.15; /* 1 */
-webkit-text-size-adjust: 100%; /* 2 */
}
/* Sections
========================================================================== */
/**
* Remove the margin in all browsers.
*/
body {
margin: 0;
}
/**
* Render the `main` element consistently in IE.
*/
main {
display: block;
}
/**
* Correct the font size and margin on `h1` elements within `section` and
* `article` contexts in Chrome, Firefox, and Safari.
*/
h1 {
font-size: 2em;
margin: 0.67em 0;
}
/* Grouping content
========================================================================== */
/**
* 1. Add the correct box sizing in Firefox.
* 2. Show the overflow in Edge and IE.
*/
hr {
box-sizing: content-box; /* 1 */
height: 0; /* 1 */
overflow: visible; /* 2 */
}
/**
* 1. Correct the inheritance and scaling of font size in all browsers.
* 2. Correct the odd `em` font sizing in all browsers.
*/
pre {
font-family: monospace, monospace; /* 1 */
font-size: 1em; /* 2 */
}
/* Text-level semantics
========================================================================== */
/**
* Remove the gray background on active links in IE 10.
*/
a {
background-color: transparent;
}
/**
* 1. Remove the bottom border in Chrome 57-
* 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.
*/
abbr[title] {
border-bottom: none; /* 1 */
text-decoration: underline; /* 2 */
text-decoration: underline dotted; /* 2 */
}
/**
* Add the correct font weight in Chrome, Edge, and Safari.
*/
b,
strong {
font-weight: bolder;
}
/**
* 1. Correct the inheritance and scaling of font size in all browsers.
* 2. Correct the odd `em` font sizing in all browsers.
*/
code,
kbd,
samp {
font-family: monospace, monospace; /* 1 */
font-size: 1em; /* 2 */
}
/**
* Add the correct font size in all browsers.
*/
small {
font-size: 80%;
}
/**
* Prevent `sub` and `sup` elements from affecting the line height in
* all browsers.
*/
sub,
sup {
font-size: 75%;
line-height: 0;
position: relative;
vertical-align: baseline;
}
sub {
bottom: -0.25em;
}
sup {
top: -0.5em;
}
/* Embedded content
========================================================================== */
/**
* Remove the border on images inside links in IE 10.
*/
img {
border-style: none;
}
/* Forms
========================================================================== */
/**
* 1. Change the font styles in all browsers.
* 2. Remove the margin in Firefox and Safari.
*/
button,
input,
optgroup,
select,
textarea {
font-family: inherit; /* 1 */
font-size: 100%; /* 1 */
line-height: 1.15; /* 1 */
margin: 0; /* 2 */
}
/**
* Show the overflow in IE.
* 1. Show the overflow in Edge.
*/
button,
input { /* 1 */
overflow: visible;
}
/**
* Remove the inheritance of text transform in Edge, Firefox, and IE.
* 1. Remove the inheritance of text transform in Firefox.
*/
button,
select { /* 1 */
text-transform: none;
}
/**
* Correct the inability to style clickable types in iOS and Safari.
*/
button,
[type="button"],
[type="reset"],
[type="submit"] {
-webkit-appearance: button;
}
/**
* Remove the inner border and padding in Firefox.
*/
button::-moz-focus-inner,
[type="button"]::-moz-focus-inner,
[type="reset"]::-moz-focus-inner,
[type="submit"]::-moz-focus-inner {
border-style: none;
padding: 0;
}
/**
* Restore the focus styles unset by the previous rule.
*/
button:-moz-focusring,
[type="button"]:-moz-focusring,
[type="reset"]:-moz-focusring,
[type="submit"]:-moz-focusring {
outline: 1px dotted ButtonText;
}
/**
* Correct the padding in Firefox.
*/
fieldset {
padding: 0.35em 0.75em 0.625em;
}
/**
* 1. Correct the text wrapping in Edge and IE.
* 2. Correct the color inheritance from `fieldset` elements in IE.
* 3. Remove the padding so developers are not caught out when they zero out
* `fieldset` elements in all browsers.
*/
legend {
box-sizing: border-box; /* 1 */
color: inherit; /* 2 */
display: table; /* 1 */
max-width: 100%; /* 1 */
padding: 0; /* 3 */
white-space: normal; /* 1 */
}
/**
* Add the correct vertical alignment in Chrome, Firefox, and Opera.
*/
progress {
vertical-align: baseline;
}
/**
* Remove the default vertical scrollbar in IE 10+.
*/
textarea {
overflow: auto;
}
/**
* 1. Add the correct box sizing in IE 10.
* 2. Remove the padding in IE 10.
*/
[type="checkbox"],
[type="radio"] {
box-sizing: border-box; /* 1 */
padding: 0; /* 2 */
}
/**
* Correct the cursor style of increment and decrement buttons in Chrome.
*/
[type="number"]::-webkit-inner-spin-button,
[type="number"]::-webkit-outer-spin-button {
height: auto;
}
/**
* 1. Correct the odd appearance in Chrome and Safari.
* 2. Correct the outline style in Safari.
*/
[type="search"] {
-webkit-appearance: textfield; /* 1 */
outline-offset: -2px; /* 2 */
}
/**
* Remove the inner padding in Chrome and Safari on macOS.
*/
[type="search"]::-webkit-search-decoration {
-webkit-appearance: none;
}
/**
* 1. Correct the inability to style clickable types in iOS and Safari.
* 2. Change font properties to `inherit` in Safari.
*/
::-webkit-file-upload-button {
-webkit-appearance: button; /* 1 */
font: inherit; /* 2 */
}
/* Interactive
========================================================================== */
/*
* Add the correct display in Edge, IE 10+, and Firefox.
*/
details {
display: block;
}
/*
* Add the correct display in all browsers.
*/
summary {
display: list-item;
}
/* Misc
========================================================================== */
/**
* Add the correct display in IE 10+.
*/
template {
display: none;
}
/**
* Add the correct display in IE 10.
*/
[hidden] {
display: none;
}
-231
@@ -1,231 +0,0 @@
/* modified https://github.com/oxalorg/sakura */
html {
font-size: 62.5%;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
"Helvetica Neue", Arial, "Noto Sans", sans-serif;
}
body {
font-size: 1.8rem;
line-height: 1.618;
max-width: 38em;
margin: auto;
color: #c9c9c9;
background-color: #222222;
padding: 13px;
}
@media (max-width: 684px) {
body {
font-size: 1.53rem;
}
}
@media (max-width: 382px) {
body {
font-size: 1.35rem;
}
}
h1,
h2,
h3,
h4,
h5,
h6 {
line-height: 1.1;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
"Helvetica Neue", Arial, "Noto Sans", sans-serif;
font-weight: 700;
margin-top: 3rem;
margin-bottom: 1.5rem;
overflow-wrap: break-word;
word-wrap: break-word;
-ms-word-break: break-all;
word-break: break-word;
}
h1 {
font-size: 2.35em;
}
h2 {
font-size: 2em;
}
h3 {
font-size: 1.75em;
}
h4 {
font-size: 1.5em;
}
h5 {
font-size: 1.25em;
}
h6 {
font-size: 1em;
}
p {
margin-top: 0px;
margin-bottom: 2.5rem;
}
small,
sub,
sup {
font-size: 75%;
}
hr {
border-color: #ffffff;
}
a {
text-decoration: none;
color: #ffffff;
}
a:visited {
color: #e6e6e6;
}
a:hover {
color: #c9c9c9;
text-decoration: underline;
}
ul {
padding-left: 1.4em;
margin-top: 0px;
margin-bottom: 2.5rem;
}
li {
margin-bottom: 0.4em;
}
blockquote {
margin-left: 0px;
margin-right: 0px;
padding-left: 1em;
padding-top: 0.8em;
padding-bottom: 0.8em;
padding-right: 0.8em;
border-left: 5px solid #ffffff;
margin-bottom: 2.5rem;
background-color: #4a4a4a;
}
blockquote p {
margin-bottom: 0;
}
img,
video {
height: auto;
max-width: 100%;
margin-top: 0px;
margin-bottom: 2.5rem;
}
pre {
background-color: #4a4a4a;
display: block;
padding: 1em;
overflow-x: auto;
margin-top: 0px;
margin-bottom: 2.5rem;
font-size: 0.9em;
}
code,
kbd,
samp {
font-size: 0.9em;
padding: 0 0.5em;
background-color: #4a4a4a;
white-space: pre-wrap;
}
pre > code {
padding: 0;
background-color: transparent;
white-space: pre;
font-size: 1em;
}
table {
text-align: justify;
width: 100%;
border-collapse: collapse;
margin-bottom: 2rem;
}
td,
th {
padding: 0.5em;
border-bottom: 1px solid #4a4a4a;
}
input,
textarea {
border: 1px solid #c9c9c9;
}
input:focus,
textarea:focus {
border: 1px solid #ffffff;
}
textarea {
width: 100%;
}
.button,
button,
input[type="submit"],
input[type="reset"],
input[type="button"],
input[type="file"]::file-selector-button {
display: inline-block;
padding: 5px 10px;
text-align: center;
text-decoration: none;
white-space: nowrap;
background-color: #ffffff;
color: #222222;
border-radius: 1px;
border: 1px solid #ffffff;
cursor: pointer;
box-sizing: border-box;
}
.button[disabled],
button[disabled],
input[type="submit"][disabled],
input[type="reset"][disabled],
input[type="button"][disabled],
input[type="file"][disabled] {
cursor: default;
opacity: 0.5;
}
.button:hover,
button:hover,
input[type="submit"]:hover,
input[type="reset"]:hover,
input[type="button"]:hover,
input[type="file"]::file-selector-button:hover {
background-color: #c9c9c9;
color: #222222;
outline: 0;
}
.button:focus-visible,
button:focus-visible,
input[type="submit"]:focus-visible,
input[type="reset"]:focus-visible,
input[type="button"]:focus-visible,
input[type="file"]::file-selector-button:focus-visible {
outline-style: solid;
outline-width: 2px;
}
textarea,
select,
input {
color: #c9c9c9;
padding: 6px 10px;
margin-bottom: 10px;
background-color: #4a4a4a;
border: 1px solid #4a4a4a;
border-radius: 4px;
box-shadow: none;
box-sizing: border-box;
}
textarea:focus,
select:focus,
input:focus {
border: 1px solid #ffffff;
outline: 0;
}
input[type="checkbox"]:focus {
outline: 1px dotted #ffffff;
}
label,
legend,
fieldset {
display: block;
margin-bottom: 0.5rem;
font-weight: 600;
}
-237
@@ -1,237 +0,0 @@
/* modified https://github.com/oxalorg/sakura */
:root {
--accent-color: #4a4a4a;
--accent-color-hover: #5a5a5a;
--link-color: #58739c;
--link-visited-color: #6f5e6f;
}
html {
font-size: 62.5%;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
"Helvetica Neue", Arial, "Noto Sans", sans-serif;
}
body {
font-size: 1.8rem;
line-height: 1.618;
max-width: 38em;
margin: auto;
color: #4a4a4a;
background-color: #f9f9f9;
padding: 13px;
}
@media (max-width: 684px) {
body {
font-size: 1.53rem;
}
}
@media (max-width: 382px) {
body {
font-size: 1.35rem;
}
}
h1,
h2,
h3,
h4,
h5,
h6 {
line-height: 1.1;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
"Helvetica Neue", Arial, "Noto Sans", sans-serif;
font-weight: 700;
margin-top: 3rem;
margin-bottom: 1.5rem;
overflow-wrap: break-word;
word-wrap: break-word;
-ms-word-break: break-all;
word-break: break-word;
}
h1 {
font-size: 2.35em;
}
h2 {
font-size: 2em;
}
h3 {
font-size: 1.75em;
}
h4 {
font-size: 1.5em;
}
h5 {
font-size: 1.25em;
}
h6 {
font-size: 1em;
}
p {
margin-top: 0;
margin-bottom: 2.5rem;
}
small,
sub,
sup {
font-size: 75%;
}
hr {
border-color: var(--accent-color);
}
a {
text-decoration: none;
color: var(--link-color);
}
a:visited {
color: var(--link-visited-color);
}
a:hover {
color: var(--accent-color-hover);
text-decoration: underline;
}
ul {
padding-left: 1.4em;
margin-top: 0;
margin-bottom: 2.5rem;
}
li {
margin-bottom: 0.4em;
}
blockquote {
margin-left: 0;
margin-right: 0;
padding-left: 1em;
padding-top: 0.8em;
padding-bottom: 0.8em;
padding-right: 0.8em;
border-left: 5px solid var(--accent-color);
margin-bottom: 2.5rem;
background-color: #f1f1f1;
}
blockquote p {
margin-bottom: 0;
}
img,
video {
height: auto;
max-width: 100%;
margin-top: 0;
margin-bottom: 2.5rem;
}
pre {
background-color: #f1f1f1;
display: block;
padding: 1em;
overflow-x: auto;
margin-top: 0;
margin-bottom: 2.5rem;
font-size: 0.9em;
}
code,
kbd,
samp {
font-size: 0.9em;
padding: 0 0.5em;
background-color: #f1f1f1;
white-space: pre-wrap;
}
pre > code {
padding: 0;
background-color: transparent;
white-space: pre;
font-size: 1em;
}
table {
text-align: justify;
width: 100%;
border-collapse: collapse;
margin-bottom: 2rem;
}
td,
th {
padding: 0.5em;
border-bottom: 1px solid #f1f1f1;
}
input,
textarea {
border: 1px solid #4a4a4a;
}
input:focus,
textarea:focus {
border: 1px solid var(--accent-color);
}
textarea {
width: 100%;
}
.button,
button,
input[type="submit"],
input[type="reset"],
input[type="button"],
input[type="file"]::file-selector-button {
display: inline-block;
padding: 5px 10px;
text-align: center;
text-decoration: none;
white-space: nowrap;
background-color: var(--accent-color);
color: #f9f9f9;
border-radius: 2px;
border: 1px solid var(--accent-color);
cursor: pointer;
box-sizing: border-box;
}
.button[disabled],
button[disabled],
input[type="submit"][disabled],
input[type="reset"][disabled],
input[type="button"][disabled],
input[type="file"][disabled] {
cursor: default;
opacity: 0.5;
}
.button:hover,
button:hover,
input[type="submit"]:hover,
input[type="reset"]:hover,
input[type="button"]:hover,
input[type="file"]::file-selector-button:hover {
background-color: var(--accent-color-hover);
color: #f9f9f9;
outline: 0;
}
.button:focus-visible,
button:focus-visible,
input[type="submit"]:focus-visible,
input[type="reset"]:focus-visible,
input[type="button"]:focus-visible,
input[type="file"]::file-selector-button:focus-visible {
outline-style: solid;
outline-width: 2px;
}
textarea,
select,
input {
color: #4a4a4a;
padding: 6px 10px;
margin-bottom: 10px;
background-color: #f1f1f1;
border: 1px solid #f1f1f1;
border-radius: 4px;
box-shadow: none;
box-sizing: border-box;
}
textarea:focus,
select:focus,
input:focus {
border: 1px solid var(--accent-color);
outline: 0;
}
input[type="checkbox"]:focus {
outline: 1px dotted var(--accent-color);
}
label,
legend,
fieldset {
display: block;
margin-bottom: 0.5rem;
font-weight: 600;
}
-121
@@ -1,121 +0,0 @@
importScripts(
"https://cdn.jsdelivr.net/npm/hash-wasm@4.11.0/dist/argon2.umd.min.js"
);
let active = false;
let nonce = 0;
let signature = "";
let lastNotify = 0;
let hashesSinceLastNotify = 0;
let params = {
salt: null,
hashLength: 0,
iterations: 0,
memorySize: 0,
parallelism: 0,
targetValue: BigInt(0),
safariFix: false,
};
self.onmessage = async (event) => {
const { data } = event;
switch (data.type) {
case "stop":
active = false;
self.postMessage({ type: "paused", hashes: hashesSinceLastNotify });
return;
case "start":
active = true;
signature = data.signature;
nonce = data.nonce;
const c = data.challenge;
// decode salt to Uint8Array
const salt = new Uint8Array(c.s.length / 2);
for (let i = 0; i < c.s.length; i += 2) {
salt[i / 2] = parseInt(c.s.slice(i, i + 2), 16);
}
params = {
salt: salt,
hashLength: c.hl,
iterations: c.t,
memorySize: c.m,
parallelism: c.p,
targetValue: BigInt(c.d.slice(0, -1)),
safariFix: data.isMobileWebkit,
};
console.log("Started", params);
self.postMessage({ type: "started" });
setTimeout(solve, 0);
break;
}
};
const doHash = async (password) => {
const { salt, hashLength, iterations, memorySize, parallelism } = params;
return await self.hashwasm.argon2id({
password,
salt,
hashLength,
iterations,
memorySize,
parallelism,
});
};
const checkHash = (hash) => {
const { targetValue } = params;
const hashValue = BigInt(`0x${hash}`);
return hashValue <= targetValue;
};
const solve = async () => {
if (!active) {
console.log("Stopped solver", nonce);
return;
}
// Safari WASM doesn't like multiple calls in one worker
const batchSize = 1;
const batch = [];
for (let i = 0; i < batchSize; i++) {
batch.push(nonce++);
}
try {
const results = await Promise.all(
batch.map(async (nonce) => {
const hash = await doHash(String(nonce));
return { hash, nonce };
})
);
hashesSinceLastNotify += batchSize;
const solution = results.find(({ hash }) => checkHash(hash));
if (solution) {
console.log("Solution found", solution, params.salt);
self.postMessage({ type: "solved", nonce: solution.nonce });
active = false;
} else {
if (Date.now() - lastNotify > 1000) {
console.log("Last nonce", nonce, "Hashes", hashesSinceLastNotify);
self.postMessage({ type: "progress", hashes: hashesSinceLastNotify });
lastNotify = Date.now();
hashesSinceLastNotify = 0;
}
setTimeout(solve, 10);
}
} catch (error) {
console.error("Error", error);
const stack = error.stack;
const debug = {
stack,
lastNonce: nonce,
targetValue: params.targetValue,
};
self.postMessage({ type: "error", error: error.message, debug });
active = false;
}
};
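For context, a hypothetical main-thread driver for this worker, using the message shapes its `onmessage` handler expects ("start"/"stop" in; "started", "progress", "solved", "error" out). The worker path and challenge values are illustrative:

```
const worker = new Worker("/path/to/this/worker.js"); // illustrative path

worker.onmessage = ({ data }) => {
  if (data.type === "solved") console.log("solution nonce:", data.nonce);
  if (data.type === "progress") console.log("hashes so far:", data.hashes);
};

worker.postMessage({
  type: "start",
  signature: "challenge-signature",
  nonce: 0,
  // s: hex salt, hl: hash length, t: iterations, m: memory, p: parallelism,
  // d: decimal target value whose trailing character gets sliced off.
  challenge: { s: "00ff00ff", hl: 32, t: 8, m: 65536, p: 1, d: "12345n" },
  isMobileWebkit: false,
});
```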
-39
@@ -1,39 +0,0 @@
import Database from "better-sqlite3";
import { DATABASE_VERSION, migrateDatabase } from "../src/shared/database";
import { logger } from "../src/logger";
import { config } from "../src/config";
const log = logger.child({ module: "scripts/migrate" });
async function runMigration() {
let targetVersion = Number(process.argv[2]) || undefined;
if (!targetVersion) {
log.info("Enter target version or leave empty to use the latest version.");
process.stdin.resume();
process.stdin.setEncoding("utf8");
const input = await new Promise<string>((resolve) => {
process.stdin.on("data", (text) => {
resolve((String(text) || "").trim());
});
});
process.stdin.pause();
targetVersion = Number(input);
if (!targetVersion) {
targetVersion = DATABASE_VERSION;
}
}
const db = new Database(config.sqliteDataPath, {
verbose: (msg, ...args) => log.debug({ args }, String(msg)),
});
const currentVersion = db.pragma("user_version", { simple: true });
log.info({ currentVersion, targetVersion }, "Running migrations.");
migrateDatabase(targetVersion, db);
}
runMigration().catch((error) => {
log.error(error, "Migration failed.");
process.exit(1);
});
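Usage, per the `database:migrate` entry removed from package.json earlier in this commit: `npm run database:migrate -- 3` migrates to schema version 3, while running it with no argument prompts for a target and falls back to `DATABASE_VERSION`.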
-33
@@ -230,39 +230,6 @@ Content-Type: application/json
]
}
###
# @name Proxy / GCP Claude -- Native Completion
POST {{proxy-host}}/proxy/gcp/claude/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-01-01
Content-Type: application/json
{
"model": "claude-v2",
"max_tokens_to_sample": 10,
"temperature": 0,
"stream": true,
"prompt": "What is genshin impact\n\n:Assistant:"
}
###
# @name Proxy / GCP Claude -- OpenAI-to-Anthropic API Translation
POST {{proxy-host}}/proxy/gcp/claude/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo",
"max_tokens": 50,
"stream": true,
"messages": [
{
"role": "user",
"content": "What is genshin impact?"
}
]
}
###
# @name Proxy / Azure OpenAI -- Native Chat Completions
POST {{proxy-host}}/proxy/azure/openai/chat/completions
-102
@@ -1,102 +0,0 @@
import Database from "better-sqlite3";
import { v4 as uuidv4 } from "uuid";
import { config } from "../src/config";
function generateRandomIP() {
return (
Math.floor(Math.random() * 255) +
"." +
Math.floor(Math.random() * 255) +
"." +
Math.floor(Math.random() * 255) +
"." +
Math.floor(Math.random() * 255)
);
}
function generateRandomDate() {
const end = new Date();
const start = new Date(end);
start.setDate(end.getDate() - 90);
const randomDate = new Date(
start.getTime() + Math.random() * (end.getTime() - start.getTime())
);
return randomDate.toISOString();
}
function generateMockSHA256() {
const characters = 'abcdef0123456789';
let hash = '';
for (let i = 0; i < 64; i++) {
const randomIndex = Math.floor(Math.random() * characters.length);
hash += characters[randomIndex];
}
return hash;
}
function getRandomModelFamily() {
const modelFamilies = [
"turbo",
"gpt4",
"gpt4-32k",
"gpt4-turbo",
"claude",
"claude-opus",
"gemini-pro",
"mistral-tiny",
"mistral-small",
"mistral-medium",
"mistral-large",
"aws-claude",
"aws-claude-opus",
"gcp-claude",
"gcp-claude-opus",
"azure-turbo",
"azure-gpt4",
"azure-gpt4-32k",
"azure-gpt4-turbo",
"dall-e",
"azure-dall-e",
];
return modelFamilies[Math.floor(Math.random() * modelFamilies.length)];
}
(async () => {
const db = new Database(config.sqliteDataPath);
const numRows = 100;
const insertStatement = db.prepare(`
INSERT INTO events (type, ip, date, model, family, hashes, userToken, inputTokens, outputTokens)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
`);
const users = Array.from({ length: 10 }, () => uuidv4());
function getRandomUser() {
return users[Math.floor(Math.random() * users.length)];
}
const transaction = db.transaction(() => {
for (let i = 0; i < numRows; i++) {
insertStatement.run(
"chat_completion",
generateRandomIP(),
generateRandomDate(),
getRandomModelFamily() + "-" + Math.floor(Math.random() * 100),
getRandomModelFamily(),
Array.from(
{ length: Math.floor(Math.random() * 10) },
generateMockSHA256
).join(","),
getRandomUser(),
Math.floor(Math.random() * 500),
Math.floor(Math.random() * 6000)
);
}
});
transaction();
console.log(`Inserted ${numRows} rows into the events table.`);
db.close();
})();
-118
@@ -1,118 +0,0 @@
// Uses the AWS SDK to sign a request, then uses axios to send it to the Bedrock REST API manually
import axios from "axios";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID!;
const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY!;
// Copied from amazon bedrock docs
// List models
// ListFoundationModels
// Service: Amazon Bedrock
// List of Bedrock foundation models that you can use. For more information, see Foundation models in the
// Bedrock User Guide.
// Request Syntax
// GET /foundation-models?
// byCustomizationType=byCustomizationType&byInferenceType=byInferenceType&byOutputModality=byOutputModality&byProvider=byProvider
// HTTP/1.1
// URI Request Parameters
// The request uses the following URI parameters.
// byCustomizationType (p. 38)
// List by customization type.
// Valid Values: FINE_TUNING
// byInferenceType (p. 38)
// List by inference type.
// Valid Values: ON_DEMAND | PROVISIONED
// byOutputModality (p. 38)
// List by output modality type.
// Valid Values: TEXT | IMAGE | EMBEDDING
// byProvider (p. 38)
// A Bedrock model provider.
// Pattern: ^[a-z0-9-]{1,63}$
// Request Body
// The request does not have a request body
// Run inference on a text model
// Send an invoke request to run inference on a Titan Text G1 - Express model. We set the accept
// parameter to accept any content type in the response.
// POST https://bedrock.us-east-1.amazonaws.com/model/amazon.titan-text-express-v1/invoke
// -H accept: */*
// -H content-type: application/json
// Payload
// {"inputText": "Hello world"}
// Example response
// Response for the above request.
// -H content-type: application/json
// Payload
// <the model response>
const AMZ_REGION = "us-east-1";
const AMZ_HOST = "invoke-bedrock.us-east-1.amazonaws.com";
async function listModels() {
const httpRequest = new HttpRequest({
method: "GET",
protocol: "https:",
hostname: AMZ_HOST,
path: "/foundation-models",
headers: { ["Host"]: AMZ_HOST },
});
const signedRequest = await signRequest(httpRequest);
const response = await axios.get(
`https://${signedRequest.hostname}${signedRequest.path}`,
{ headers: signedRequest.headers }
);
console.log(response.data);
}
async function invokeModel() {
const model = "anthropic.claude-v1";
const httpRequest = new HttpRequest({
method: "POST",
protocol: "https:",
hostname: AMZ_HOST,
path: `/model/${model}/invoke`,
headers: {
["Host"]: AMZ_HOST,
["accept"]: "*/*",
["content-type"]: "application/json",
},
body: JSON.stringify({
temperature: 0.5,
prompt: "\n\nHuman:Hello world\n\nAssistant:",
max_tokens_to_sample: 10,
}),
});
console.log("httpRequest", httpRequest);
const signedRequest = await signRequest(httpRequest);
const response = await axios.post(
`https://${signedRequest.hostname}${signedRequest.path}`,
signedRequest.body,
{ headers: signedRequest.headers }
);
console.log(response.status);
console.log(response.headers);
console.log(response.data);
console.log("full url", response.request.res.responseUrl);
}
async function signRequest(request: HttpRequest) {
const signer = new SignatureV4({
sha256: Sha256,
credentials: {
accessKeyId: AWS_ACCESS_KEY_ID,
secretAccessKey: AWS_SECRET_ACCESS_KEY,
},
region: AMZ_REGION,
service: "bedrock",
});
return await signer.sign(request, { signingDate: new Date() });
}
// listModels();
// invokeModel();
-49
@@ -1,49 +0,0 @@
import { Router } from "express";
import { z } from "zod";
import { encodeCursor, decodeCursor } from "../../shared/utils";
import { eventsRepo } from "../../shared/database/repos/event";
const router = Router();
/**
* Returns events for the given user token.
* GET /admin/events/:token
* @query first - The number of events to return.
* @query after - The cursor to start returning events from (exclusive).
*/
router.get("/:token", (req, res) => {
const schema = z.object({
token: z.string(),
first: z.coerce.number().int().positive().max(200).default(25),
after: z
.string()
.optional()
.transform((v) => {
try {
return decodeCursor(v);
} catch {
return null;
}
})
.nullable(),
sort: z.string().optional(),
});
const args = schema.safeParse({ ...req.params, ...req.query });
if (!args.success) {
return res.status(400).json({ error: args.error });
}
const data = eventsRepo
.getUserEvents(args.data.token, {
limit: args.data.first,
cursor: args.data.after,
})
.map((e) => ({ node: e, cursor: encodeCursor(e.date) }));
res.json({
data,
endCursor: data[data.length - 1]?.cursor,
});
});
export { router as eventsApiRouter };
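A sketch of paging through this endpoint as a client (hypothetical host, admin key, and user token):

```
const base = "https://my-proxy.example/admin/events";
const headers = { "Authorization": "Bearer my-admin-key" };

// First page: { data: [{ node, cursor }, ...], endCursor: "..." }
const page1 = await (await fetch(`${base}/some-user-token?first=25`, { headers })).json();

// Next page, resuming after the last cursor.
const page2 = await (
  await fetch(`${base}/some-user-token?first=25&after=${page1.endCursor}`, { headers })
).json();
```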
+4 -19
@@ -1,31 +1,17 @@
import express, { Router } from "express";
import { createWhitelistMiddleware } from "../shared/cidr";
import { authorize } from "./auth";
import { HttpError } from "../shared/errors";
import { injectCsrfToken, checkCsrfToken } from "../shared/inject-csrf";
import { injectLocals } from "../shared/inject-locals";
import { withSession } from "../shared/with-session";
import { config } from "../config";
import { injectCsrfToken, checkCsrfToken } from "../shared/inject-csrf";
import { renderPage } from "../info-page";
import { buildInfo } from "../service-info";
import { authorize } from "./auth";
import { loginRouter } from "./login";
import { eventsApiRouter } from "./api/events";
import { usersApiRouter } from "./api/users";
import { usersApiRouter as apiRouter } from "./api/users";
import { usersWebRouter as webRouter } from "./web/manage";
import { logger } from "../logger";
const adminRouter = Router();
const whitelist = createWhitelistMiddleware(
"ADMIN_WHITELIST",
config.adminWhitelist
);
if (!whitelist.ranges.length && config.adminKey?.length) {
logger.error("ADMIN_WHITELIST is empty. No admin requests will be allowed. Set 0.0.0.0/0 to allow all.");
}
adminRouter.use(whitelist);
adminRouter.use(
express.json({ limit: "20mb" }),
express.urlencoded({ extended: true, limit: "20mb" })
@@ -33,8 +19,7 @@ adminRouter.use(
adminRouter.use(withSession);
adminRouter.use(injectCsrfToken);
adminRouter.use("/users", authorize({ via: "header" }), usersApiRouter);
adminRouter.use("/events", authorize({ via: "header" }), eventsApiRouter);
adminRouter.use("/users", authorize({ via: "header" }), apiRouter);
adminRouter.use(checkCsrfToken);
adminRouter.use(injectLocals);
+6 -194
@@ -1,5 +1,4 @@
import { Router } from "express";
import ipaddr from "ipaddr.js";
import multer from "multer";
import { z } from "zod";
import { config } from "../../config";
@@ -16,8 +15,6 @@ import {
UserTokenCounts,
} from "../../shared/users/schema";
import { getLastNImages } from "../../shared/file-storage/image-history";
import { blacklists, parseCidrs, whitelists } from "../../shared/cidr";
import { invalidatePowHmacKey } from "../../user/web/pow-captcha";
const router = Router();
@@ -43,74 +40,6 @@ router.get("/create-user", (req, res) => {
});
});
router.get("/anti-abuse", (_req, res) => {
const wl = [...whitelists.entries()];
const bl = [...blacklists.entries()];
res.render("admin_anti-abuse", {
captchaMode: config.captchaMode,
difficulty: config.powDifficultyLevel,
whitelists: wl.map((w) => ({
name: w[0],
mode: "whitelist",
ranges: w[1].ranges,
})),
blacklists: bl.map((b) => ({
name: b[0],
mode: "blacklist",
ranges: b[1].ranges,
})),
});
});
router.post("/cidr", (req, res) => {
const body = req.body;
const valid = z
.object({
action: z.enum(["add", "remove"]),
mode: z.enum(["whitelist", "blacklist"]),
name: z.string().min(1),
mask: z.string().min(1),
})
.safeParse(body);
if (!valid.success) {
throw new HttpError(
400,
valid.error.issues.flatMap((issue) => issue.message).join(", ")
);
}
const { mode, name, mask } = valid.data;
const list = (mode === "whitelist" ? whitelists : blacklists).get(name);
if (!list) {
throw new HttpError(404, "List not found");
}
if (valid.data.action === "remove") {
const newRanges = new Set(list.ranges);
newRanges.delete(mask);
list.updateRanges([...newRanges]);
req.session.flash = {
type: "success",
message: `${mode} ${name} updated`,
};
return res.redirect("/admin/manage/anti-abuse");
} else if (valid.data.action === "add") {
const result = parseCidrs(mask);
if (result.length === 0) {
throw new HttpError(400, "Invalid CIDR mask");
}
const newRanges = new Set([...list.ranges, mask]);
list.updateRanges([...newRanges]);
req.session.flash = {
type: "success",
message: `${mode} ${name} updated`,
};
return res.redirect("/admin/manage/anti-abuse");
}
});
router.post("/create-user", (req, res) => {
const body = req.body;
@@ -268,13 +197,7 @@ router.post("/maintenance", (req, res) => {
let flash = { type: "", message: "" };
switch (action) {
case "recheck": {
const checkable: LLMService[] = [
"openai",
"anthropic",
"aws",
"gcp",
"azure",
];
const checkable: LLMService[] = ["openai", "anthropic", "aws", "azure"];
checkable.forEach((s) => keyPool.recheck(s));
const keyCount = keyPool
.list()
@@ -300,14 +223,10 @@ router.post("/maintenance", (req, res) => {
break;
}
case "downloadImageMetadata": {
const data = JSON.stringify(
{
exportedAt: new Date().toISOString(),
generations: getLastNImages(),
},
null,
2
);
const data = JSON.stringify({
exportedAt: new Date().toISOString(),
generations: getLastNImages()
}, null, 2);
res.setHeader(
"Content-Disposition",
`attachment; filename=image-metadata-${new Date().toISOString()}.json`
@@ -315,121 +234,14 @@ router.post("/maintenance", (req, res) => {
res.setHeader("Content-Type", "application/json");
return res.send(data);
}
case "expireTempTokens": {
const users = userStore.getUsers();
const temps = users.filter((u) => u.type === "temporary");
temps.forEach((user) => {
user.expiresAt = Date.now();
user.disabledReason = "Admin forced expiration.";
userStore.upsertUser(user);
});
invalidatePowHmacKey();
flash.type = "success";
flash.message = `${temps.length} temporary users marked for expiration.`;
break;
}
case "cleanTempTokens": {
const users = userStore.getUsers();
const disabledTempUsers = users.filter(
(u) => u.type === "temporary" && u.expiresAt && u.expiresAt < Date.now()
);
disabledTempUsers.forEach((user) => {
user.disabledAt = 1; //will be cleaned up by the next cron job
userStore.upsertUser(user);
});
flash.type = "success";
flash.message = `${disabledTempUsers.length} disabled temporary users marked for cleanup.`;
break;
}
case "setDifficulty": {
const selected = req.body["pow-difficulty"];
const valid = ["low", "medium", "high", "extreme"];
if (!selected || !valid.includes(selected)) {
throw new HttpError(400, "Invalid difficulty" + selected);
}
config.powDifficultyLevel = selected;
break;
}
case "generateTempIpReport": {
const tempUsers = userStore
.getUsers()
.filter((u) => u.type === "temporary");
const ipv4RangeMap = new Map<string, Set<string>>();
const ipv6RangeMap = new Map<string, Set<string>>();
tempUsers.forEach((u) => {
u.ip.forEach((ip) => {
try {
const parsed = ipaddr.parse(ip);
if (parsed.kind() === "ipv4") {
const subnet =
parsed.toNormalizedString().split(".").slice(0, 3).join(".") +
".0/24";
const userSet = ipv4RangeMap.get(subnet) || new Set();
userSet.add(u.token);
ipv4RangeMap.set(subnet, userSet);
} else if (parsed.kind() === "ipv6") {
const subnet =
parsed.toNormalizedString().split(":").slice(0, 4).join(":") +
"::/48";
const userSet = ipv6RangeMap.get(subnet) || new Set();
userSet.add(u.token);
ipv6RangeMap.set(subnet, userSet);
}
} catch (e) {
req.log.warn(
{ ip, error: e.message },
"Invalid IP address; skipping"
);
}
});
});
const ipv4Ranges = Array.from(ipv4RangeMap.entries())
.map(([subnet, userSet]) => ({
subnet,
distinctTokens: userSet.size,
}))
.sort((a, b) => b.distinctTokens - a.distinctTokens);
const ipv6Ranges = Array.from(ipv6RangeMap.entries())
.map(([subnet, userSet]) => ({
subnet,
distinctTokens: userSet.size,
}))
.sort((a, b) => {
if (a.distinctTokens === b.distinctTokens) {
return a.subnet.localeCompare(b.subnet);
}
return b.distinctTokens - a.distinctTokens;
});
const data = JSON.stringify(
{
exportedAt: new Date().toISOString(),
ipv4Ranges,
ipv6Ranges,
},
null,
2
);
res.setHeader(
"Content-Disposition",
`attachment; filename=temp-ip-report-${new Date().toISOString()}.json`
);
res.setHeader("Content-Type", "application/json");
return res.send(data);
}
default: {
throw new HttpError(400, "Invalid action");
}
}
req.session.flash = flash;
const referer = req.get("referer");
return res.redirect(referer || "/admin/manage");
return res.redirect(`/admin/manage`);
});
router.get("/download-stats", (_req, res) => {
-140
@@ -1,140 +0,0 @@
<%- include("partials/shared_header", { title: "Proof of Work Verification Settings - OAI Reverse Proxy Admin" }) %>
<style>
details {
margin-top: 1em;
}
details summary {
font-weight: bold;
cursor: pointer;
}
details p {
margin-left: 1em;
}
#token-manage {
display: flex;
width: 100%;
}
#token-manage button {
flex-grow: 1;
margin: 0 0.5em;
}
</style>
<h1>Abuse Mitigation Settings</h1>
<div>
<h2>Proof-of-Work Verification</h2>
<p>
The Proof-of-Work difficulty level determines how much work a client must perform to earn a temporary user token.
Higher difficulty levels make token generation more expensive for attackers, which helps mitigate abuse, but they also
make it harder for legitimate users to obtain tokens. Refer to the documentation for guidance.
</p>
<%if (captchaMode === "none") { %>
<p>
<strong>PoW verification is not enabled. Set <code>CAPTCHA_MODE=proof_of_work</code> to enable.</strong>
</p>
<% } else { %>
<h3>Difficulty Level</h3>
<div>
<label for="difficulty">Difficulty Level:</label>
<span id="currentDifficulty">Current: <%= difficulty %></span>
<select name="difficulty" id="difficulty">
<option value="low">Low</option>
<option value="medium">Medium</option>
<option value="high">High</option>
<option value="extreme">Extreme</option>
</select>
<button onclick='doAction("setDifficulty")'>Update Difficulty</button>
</div>
<% } %>
<form id="maintenanceForm" action="/admin/manage/maintenance" method="post">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<input id="hiddenAction" type="hidden" name="action" value="" />
<input id="hiddenDifficulty" type="hidden" name="pow-difficulty" value="" />
</form>
<h3>Manage Temporary User Tokens</h3>
<div id="token-manage">
<p><button onclick='doAction("expireTempTokens")'>🕒 Expire All Temp Tokens</button></p>
<p><button onclick='doAction("cleanTempTokens")'>🧹 Delete Expired Temp Tokens</button></p>
<p><button onclick='doAction("generateTempIpReport")'>📊 Generate Temp Token IP Report</button></p>
</div>
</div>
<div>
<h2>IP Whitelists and Blacklists</h2>
<p>
You can specify IP ranges to whitelist or blacklist from accessing the proxy. Note that changes here are not
persisted across server restarts. If you want to make changes permanent, you can copy the values to your deployment
configuration.
</p>
<p>
Entries can be specified as single addresses or
<a href="https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation">CIDR notation</a>. IPv6 is
supported but not recommended for use with the current version of the proxy.
</p>
<% for (let i = 0; i < whitelists.length; i++) { %>
<%- include("partials/admin-cidr-widget", { list: whitelists[i] }) %>
<% } %>
<% for (let i = 0; i < blacklists.length; i++) { %>
<%- include("partials/admin-cidr-widget", { list: blacklists[i] }) %>
<% } %>
<form action="/admin/manage/cidr" method="post" id="cidrForm">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<input type="hidden" name="action" value="add" />
<input type="hidden" name="name" value="" />
<input type="hidden" name="mode" value="" />
<input type="hidden" name="mask" value="" />
</form>
<details>
<summary>Copy environment variables</summary>
<p>
If you have made changes with the UI, you can copy the values below to your deployment configuration to persist
them across server restarts.
</p>
<pre>
<% for (let i = 0; i < whitelists.length; i++) { %><%= whitelists[i].name %>=<%= whitelists[i].ranges.join(",") %><% } %>
<% for (let i = 0; i < blacklists.length; i++) { %><%= blacklists[i].name %>=<%= blacklists[i].ranges.join(",") %><% } %>
</pre>
</details>
</div>
<script>
function doAction(action) {
document.getElementById("hiddenAction").value = action;
if (action === "setDifficulty") {
document.getElementById("hiddenDifficulty").value = document.getElementById("difficulty").value;
}
document.getElementById("maintenanceForm").submit();
}
function onAddCidr(event) {
const list = event.target.dataset;
const newMask = prompt("Enter the IP or CIDR range to add to the list:");
if (!newMask) {
return;
}
const form = document.getElementById("cidrForm");
form["action"].value = "add";
form["name"].value = list.name;
form["mode"].value = list.mode;
form["mask"].value = newMask;
form.submit();
}
function onRemoveCidr(event) {
const list = event.target.dataset;
const removeMask = event.target.dataset.mask;
if (!removeMask) {
return;
}
const form = document.getElementById("cidrForm");
form["action"].value = "remove";
form["name"].value = list.name;
form["mode"].value = list.mode;
form["mask"].value = removeMask;
form.submit();
}
</script>
<%- include("partials/admin-footer") %>
+3 -2
@@ -51,8 +51,9 @@
<legend>Temporary User Options</legend>
<div class="temporary-user-fieldset">
<p class="full-width">
Temporary users will be disabled after the specified duration, and their records will be permanently deleted after some time.
These options apply only to new temporary users; existing ones use whatever options were in effect when they were created.
Temporary users will be disabled after the specified duration, and their records will be deleted 72 hours after that.
These options apply only to new
temporary users; existing ones use whatever options were in effect when they were created.
</p>
<label for="temporaryUserDuration" class="full-width">Access duration (in minutes)</label>
<input type="number" name="temporaryUserDuration" id="temporaryUserDuration" value="60" class="full-width" />
+36 -27
View File
@@ -5,6 +5,18 @@
flex-direction: column;
}
#statsForm div {
display: flex;
flex-direction: row;
margin-bottom: 0.5em;
}
#statsForm div label {
width: 6em;
text-align: right;
margin-right: 1em;
}
#statsForm ul {
margin: 0;
padding-left: 2em;
@@ -21,17 +33,17 @@
}
</style>
<h1>Download Stats</h1>
<p>Download usage statistics to a Markdown document. You can paste this into a service like Rentry.org to share it.</p>
<p>
Download usage statistics to a Markdown document. You can paste this into a service like Rentry.org to share it.
</p>
<div>
<h3>Options</h3>
<form
id="statsForm"
action="/admin/manage/generate-stats"
method="post"
style="display: flex; flex-direction: column">
<form id="statsForm" action="/admin/manage/generate-stats" method="post"
style="display: flex; flex-direction: column;">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<div>
<label for="anon"><input id="anon" type="checkbox" name="anon" value="true" /> <span>Anonymize</span></label>
<label for="anon">Anonymize</label>
<input id="anon" type="checkbox" name="anon" value="true" />
</div>
<div>
<label for="sort">Sort</label>
@@ -52,12 +64,11 @@
</select>
</div>
<div>
<label for="format">Custom Format</label>
<ul>
<li><code>{{header}}</code></li>
<li><code>{{stats}}</code></li>
<li><code>{{time}}</code></li>
</ul>
<label for="format">Custom Format <ul>
<li><code>{{header}}</code></li>
<li><code>{{stats}}</code></li>
<li><code>{{time}}</code></li>
</ul></label>
<textarea id="format" name="format" rows="10" cols="50" placeholder="{{stats}}">
# Stats
{{header}}
@@ -104,35 +115,33 @@
loadDefaults();
async function fetchAndCopy() {
const form = document.getElementById("statsForm");
const form = document.getElementById('statsForm');
const formData = new FormData(form);
const response = await fetch(form.action, {
method: "POST",
headers: { "Content-Type": "application/x-www-form-urlencoded" },
credentials: "same-origin",
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
credentials: 'same-origin',
body: new URLSearchParams(formData),
});
if (response.ok) {
const content = await response.text();
copyToClipboard(content);
} else {
throw new Error("Failed to fetch generated stats. Try reloading the page.");
throw new Error('Failed to fetch generated stats. Try reloading the page.');
}
}
function copyToClipboard(text) {
navigator.clipboard
.writeText(text)
.then(() => {
alert("Copied to clipboard");
})
.catch((err) => {
alert("Failed to copy to clipboard. Try downloading the file instead.");
});
navigator.clipboard.writeText(text).then(() => {
alert('Copied to clipboard');
}).catch(err => {
alert('Failed to copy to clipboard. Try downloading the file instead.');
});
}
document.getElementById("copyButton").addEventListener("click", fetchAndCopy);
document.getElementById('copyButton').addEventListener('click', fetchAndCopy);
</script>
<%- include("partials/admin-footer") %>
+2 -3
View File
@@ -25,14 +25,13 @@
<li><a href="/admin/manage/import-users">Import Users</a></li>
<li><a href="/admin/manage/export-users">Export Users</a></li>
<li><a href="/admin/manage/download-stats">Download Rentry Stats</a>
<li><a href="/admin/manage/anti-abuse">Abuse Mitigation Settings</a></li>
<li><a href="/admin/service-info">Service Info</a></li>
</ul>
<h3>Maintenance</h3>
<form id="maintenanceForm" action="/admin/manage/maintenance" method="post">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
<input id="hiddenAction" type="hidden" name="action" value="" />
<div>
<div display="flex" flex-direction="column">
<fieldset>
<legend>Key Recheck</legend>
<button id="recheck-keys" type="button" onclick="submitForm('recheck')">Force Key Recheck</button>
@@ -43,7 +42,7 @@
<legend>Bulk Quota Management</legend>
<p>
<button id="refresh-quotas" type="button" onclick="submitForm('resetQuotas')">Refresh All Quotas</button>
Immediately refreshes all users' quotas by the configured amounts.
Resets all users' quotas to the values set in the <code>TOKEN_QUOTA_*</code> environment variables.
</p>
<p>
<button id="clear-token-counts" type="button" onclick="submitForm('resetCounts')">Clear All Token Counts</button>
+3 -2
View File
@@ -4,8 +4,9 @@
<% if (users.length === 0) { %>
<p>No users found.</p>
<% } else { %>
<label for="toggle-nicknames"><input type="checkbox" id="toggle-nicknames" onchange="toggleNicknames()" /> Show Nicknames</label>
<table class="striped full-width">
<input type="checkbox" id="toggle-nicknames" onchange="toggleNicknames()" />
<label for="toggle-nicknames">Show Nicknames</label>
<table class="striped">
<thead>
<tr>
<th>User</th>
+14 -33
View File
@@ -55,9 +55,8 @@
<td><%- user.disabledReason %></td>
<% if (user.disabledAt) { %>
<td class="actions">
<a title="Edit" id="edit-disabledReason" href="#" data-field="disabledReason" data-token="<%= user.token %>"
>✏️</a
>
<a title="Edit" id="edit-disabledReason" href="#" data-field="disabledReason"
data-token="<%= user.token %>">✏️</a>
</td>
<% } %>
</tr>
@@ -73,8 +72,7 @@
<td colspan="2"><%- include("partials/shared_user_ip_list", { user, shouldRedact: false }) %></td>
</tr>
<tr>
<th scope="row">
Admin Note <span title="Unlike nickname, this is not visible to or editable by the user">🔒</span>
<th scope="row">Admin Note <span title="Unlike nickname, this is not visible to or editable by the user">🔒</span>
</th>
<td><%- user.adminNote ?? "none" %></td>
<td class="actions">
@@ -87,24 +85,14 @@
<td colspan="2"><%- user.expiresAt %></td>
</tr>
<% } %>
<% if (user.meta) { %>
<tr>
<th scope="row">Meta</th>
<td colspan="2"><%- JSON.stringify(user.meta) %></td>
</tr>
<% } %>
</tbody>
</table>
<form style="display: none" id="current-values">
<form style="display:none" id="current-values">
<input type="hidden" name="token" value="<%- user.token %>" />
<% ["nickname", "type", "disabledAt", "disabledReason", "maxIps", "adminNote"].forEach(function (key) { %>
<input type="hidden" name="<%- key %>" value="<%- user[key] %>" />
<% }); %>
<!-- tokenRefresh_ keys are dynamically generated -->
<% Object.entries(quota).forEach(([family]) => { %>
<input type="hidden" name="tokenRefresh_<%- family %>" value="<%- user.tokenRefresh[family] || quota[family] %>" />
<% }); %>
</form>
<h3>Quota Information</h3>
@@ -114,8 +102,7 @@
<input type="hidden" name="_csrf" value="<%- csrfToken %>" />
<button type="submit" class="btn btn-primary">Refresh Quotas for User</button>
</form>
<% } %>
<%- include("partials/shared_quota-info", { quota, user, showRefreshEdit: true }) %>
<% } %> <%- include("partials/shared_quota-info", { quota, user }) %>
<p><a href="/admin/manage/list-users">Back to User List</a></p>
@@ -126,25 +113,18 @@
const token = a.dataset.token;
const field = a.dataset.field;
const existingValue = document.querySelector(`#current-values input[name=${field}]`).value;
let value = prompt(`Enter new value for '${field}':`, existingValue);
let value = prompt(`Enter new value for '${field}':`, existingValue);
if (value !== null) {
if (value === "") {
value = null;
}
const payload = { _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") };
if (field.startsWith("tokenRefresh_")) {
const family = field.slice("tokenRefresh_".length);
payload.tokenRefresh = { [family]: Number(value) };
} else {
payload[field] = value;
}
fetch(`/admin/manage/edit-user/${token}`, {
method: "POST",
credentials: "same-origin",
body: JSON.stringify(payload),
body: JSON.stringify({
[field]: value,
_csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content"),
}),
headers: { "Content-Type": "application/json", Accept: "application/json" },
})
.then((res) => Promise.all([res.ok, res.json()]))
@@ -152,7 +132,9 @@
const url = new URL(window.location.href);
const params = new URLSearchParams();
if (!ok) {
alert(`Failed to edit user: ${json.message}`);
params.set("flash", `error: ${json.error.message}`);
} else {
params.set("flash", `success: User's ${field} updated.`);
}
url.search = params.toString();
window.location.assign(url);
@@ -162,5 +144,4 @@
});
</script>
<%- include("partials/admin-ban-xhr-script") %>
<%- include("partials/admin-footer") %>
<%- include("partials/admin-ban-xhr-script") %> <%- include("partials/admin-footer") %>
@@ -1,13 +0,0 @@
<h3>
<%= list.name %>
(<%= list.mode %>)
</h3>
<ul>
<% list.ranges.forEach(function(mask) { %>
<li>
<%= mask %>
<button class="remove" data-mode="<%= list.mode %>" data-name="<%= list.name %>" data-mask="<%= mask %>" onclick="onRemoveCidr(event)">Remove</button>
</li>
<% }); %>
</ul>
<button class="add" data-mode="<%= list.mode %>" data-name="<%= list.name %>" onclick="onAddCidr(event)">Add</button>
+33 -238
View File
@@ -1,9 +1,8 @@
import crypto from "crypto";
import dotenv from "dotenv";
import type firebase from "firebase-admin";
import path from "path";
import pino from "pino";
import type { LLMService, ModelFamily } from "./shared/models";
import type { ModelFamily } from "./shared/models";
import { MODEL_FAMILIES } from "./shared/models";
dotenv.config();
@@ -45,13 +44,6 @@ type Config = {
* @example `AWS_CREDENTIALS=access_key_1:secret_key_1:us-east-1,access_key_2:secret_key_2:us-west-2`
*/
awsCredentials?: string;
/**
* Comma-delimited list of GCP credentials. Each credential item should be a
* colon-delimited list of access key, secret key, and GCP region.
*
* @example `GCP_CREDENTIALS=project1:1@1.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----,project2:2@2.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----`
*/
gcpCredentials?: string;
/**
* Comma-delimited list of Azure OpenAI credentials. Each credential item
* should be a colon-delimited list of Azure resource name, deployment ID, and
@@ -115,70 +107,9 @@ type Config = {
* `maxIpsPerUser` limit, or if only connections from new IPs are to be rejected.
*/
maxIpsAutoBan: boolean;
/**
* Which captcha verification mode to use. Requires `user_token` gatekeeper.
* Allows users to automatically obtain a token by solving a captcha.
* - `none`: No captcha verification; tokens are issued manually.
* - `proof_of_work`: Users must solve an Argon2 proof of work to obtain a
* temporary usertoken valid for a limited period.
*/
captchaMode: "none" | "proof_of_work";
/**
* Duration (in hours) for which a PoW-issued temporary user token is valid.
*/
powTokenHours: number;
/**
* The maximum number of IPs from which a single temporary user token can be
* used. Upon reaching the limit, the `maxIpsAutoBan` behavior is triggered.
*/
powTokenMaxIps: number;
/**
* Difficulty level for the proof-of-work challenge.
* - `low`: 200 iterations
* - `medium`: 900 iterations
* - `high`: 1900 iterations
* - `extreme`: 4000 iterations
* - `number`: A custom number of iterations to use.
*
* Difficulty level only affects the number of iterations used in the PoW,
* not the complexity of the hash itself. Therefore, the average time-to-solve
* will scale linearly with the number of iterations.
*
* Refer to docs/proof-of-work.md for guidance and hashrate benchmarks.
*/
powDifficultyLevel: "low" | "medium" | "high" | "extreme" | number;
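  // A rough sketch (illustrative only, not part of this codebase) of how the
  // levels above could map to iteration counts, using the numbers documented
  // in the comment:
  //
  //   function iterationsFor(level: "low" | "medium" | "high" | "extreme" | number): number {
  //     if (typeof level === "number") return level;
  //     return { low: 200, medium: 900, high: 1900, extreme: 4000 }[level];
  //   }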
/**
* Duration (in minutes) before a PoW challenge expires. Users' browsers must
* solve the challenge within this time frame or it will be rejected. Should
* be kept somewhat low to prevent abusive clients from working on many
* challenges in parallel, but you may need to increase this value for higher
* difficulty levels, or else older devices will not be able to solve the
* challenge in time.
*
* Defaults to 30 minutes.
*/
powChallengeTimeout: number;
/**
* Duration (in hours) before expired temporary user tokens are purged from
* the user database. Users can refresh expired tokens by solving a faster PoW
* challenge as long as the original token has not been purged. Once purged,
* the user must solve a full PoW challenge to obtain a new token.
*
* Defaults to 48 hours. At 0, tokens are purged immediately upon expiry.
*/
powTokenPurgeHours: number;
/**
* Maximum number of active temporary user tokens that can be associated with
* a single IP address. Note that this may impact users sending requests from
* hosted AI chat clients such as Agnaistic or RisuAI, as they may share IPs.
*
* When the limit is reached, the oldest token with the same IP will be
* expired. At 0, no limit is enforced. Defaults to 0.
*/
// powMaxTokensPerIp: number;
/** Per-user limit for requests per minute to text and chat models. */
/** Per-IP limit for requests per minute to text and chat models. */
textModelRateLimit: number;
/** Per-user limit for requests per minute to image generation models. */
/** Per-IP limit for requests per minute to image generation models. */
imageModelRateLimit: number;
/**
* For OpenAI, the maximum number of context tokens (prompt + max output) a
@@ -215,38 +146,10 @@ type Config = {
* key and can't attach the policy, you can set this to true.
*/
allowAwsLogging?: boolean;
/**
* Path to the SQLite database file for storing data such as event logs. By
* default, the database will be stored at `data/database.sqlite`.
*
* Ensure target is writable by the server process, and be careful not to
* select a path that is served publicly. The default path is safe.
*/
sqliteDataPath?: string;
/**
* Whether to log events, such as generated completions, to the database.
* Events are associated with IP+user token pairs. If user_token mode is
* disabled, no events will be logged.
*
* Currently there is no pruning mechanism for the events table, so it will
* grow indefinitely. You may want to periodically prune the table manually.
*/
eventLogging?: boolean;
/**
* When hashing prompt histories, how many messages to trim from the end.
* If zero, only the full prompt hash will be stored.
* If greater than zero, then for each N from 1 up to this value, a hash of
* the prompt with the last N messages removed will also be stored.
*
* Experimental function, config may change in future versions.
*/
eventLoggingTrim?: number;
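  // Illustrative sketch of the trimming scheme described above (hypothetical
  // helper names, not part of this codebase), for a prompt `messages` array:
  //
  //   import { createHash } from "crypto";
  //   const hashOf = (msgs: unknown[]) =>
  //     createHash("sha256").update(JSON.stringify(msgs)).digest("hex");
  //   // hashes[0] covers the full prompt; hashes[n] drops the last n messages.
  //   const hashes = [hashOf(messages)];
  //   for (let n = 1; n <= trim; n++) hashes.push(hashOf(messages.slice(0, -n)));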
/** Whether prompts and responses should be logged to persistent storage. */
promptLogging?: boolean;
/** Which prompt logging backend to use. */
promptLoggingBackend?: "google_sheets" | "file";
/** Prefix for prompt logging files when using the file backend. */
promptLoggingFilePrefix?: string;
promptLoggingBackend?: "google_sheets";
/** Base64-encoded Google Sheets API key. */
googleSheetsKey?: string;
/** Google Sheets spreadsheet ID. */
@@ -346,38 +249,11 @@ type Config = {
* risk.
*/
allowOpenAIToolUsage?: boolean;
/**
* Which services will accept prompts containing images, for use with
* multimodal models. Users with `special` role are exempt from this
* restriction.
*
* Do not enable this feature for untrusted users, as malicious users could
* send images which violate your provider's terms of service or local laws.
*
* Defaults to no services, meaning image prompts are disabled. Use a comma-
* separated list. Available services are:
* openai,anthropic,google-ai,mistral-ai,aws,gcp,azure
*/
allowedVisionServices: LLMService[];
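  // For illustration: ALLOWED_VISION_SERVICES=openai,anthropic would parse
  // (via parseCsv) to ["openai", "anthropic"].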
/**
* Allows overriding the default proxy endpoint route. Defaults to /proxy.
* A leading slash is required.
*/
proxyEndpointRoute: string;
/**
* If set, only requests from these IP addresses will be permitted to use the
* admin API and UI. Provide a comma-separated list of IP addresses or CIDR
* ranges. If not set, the admin API and UI will be open to all requests.
*/
adminWhitelist: string[];
/**
* If set, requests from these IP addresses will be blocked from using the
* application. Provide a comma-separated list of IP addresses or CIDR ranges.
* If not set, no IP addresses will be blocked.
*
* Takes precedence over the adminWhitelist.
*/
ipBlacklist: string[];
};
// To change configs, create a file called .env in the root directory.
@@ -390,27 +266,14 @@ export const config: Config = {
googleAIKey: getEnvWithDefault("GOOGLE_AI_KEY", ""),
mistralAIKey: getEnvWithDefault("MISTRAL_AI_KEY", ""),
awsCredentials: getEnvWithDefault("AWS_CREDENTIALS", ""),
gcpCredentials: getEnvWithDefault("GCP_CREDENTIALS", ""),
azureCredentials: getEnvWithDefault("AZURE_CREDENTIALS", ""),
proxyKey: getEnvWithDefault("PROXY_KEY", ""),
adminKey: getEnvWithDefault("ADMIN_KEY", ""),
serviceInfoPassword: getEnvWithDefault("SERVICE_INFO_PASSWORD", ""),
sqliteDataPath: getEnvWithDefault(
"SQLITE_DATA_PATH",
path.join(DATA_DIR, "database.sqlite")
),
eventLogging: getEnvWithDefault("EVENT_LOGGING", false),
eventLoggingTrim: getEnvWithDefault("EVENT_LOGGING_TRIM", 5),
gatekeeper: getEnvWithDefault("GATEKEEPER", "none"),
gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory"),
maxIpsPerUser: getEnvWithDefault("MAX_IPS_PER_USER", 0),
maxIpsAutoBan: getEnvWithDefault("MAX_IPS_AUTO_BAN", false),
captchaMode: getEnvWithDefault("CAPTCHA_MODE", "none"),
powTokenHours: getEnvWithDefault("POW_TOKEN_HOURS", 24),
powTokenMaxIps: getEnvWithDefault("POW_TOKEN_MAX_IPS", 2),
powDifficultyLevel: getEnvWithDefault("POW_DIFFICULTY_LEVEL", "low"),
powChallengeTimeout: getEnvWithDefault("POW_CHALLENGE_TIMEOUT", 30),
powTokenPurgeHours: getEnvWithDefault("POW_TOKEN_PURGE_HOURS", 48),
maxIpsAutoBan: getEnvWithDefault("MAX_IPS_AUTO_BAN", true),
firebaseRtdbUrl: getEnvWithDefault("FIREBASE_RTDB_URL", undefined),
firebaseKey: getEnvWithDefault("FIREBASE_KEY", undefined),
textModelRateLimit: getEnvWithDefault("TEXT_MODEL_RATE_LIMIT", 4),
@@ -428,10 +291,24 @@ export const config: Config = {
["MAX_OUTPUT_TOKENS_ANTHROPIC", "MAX_OUTPUT_TOKENS"],
400
),
allowedModelFamilies: getEnvWithDefault(
"ALLOWED_MODEL_FAMILIES",
getDefaultModelFamilies()
),
allowedModelFamilies: getEnvWithDefault("ALLOWED_MODEL_FAMILIES", [
"turbo",
"gpt4",
"gpt4-32k",
"gpt4-turbo",
"claude",
"claude-opus",
"gemini-pro",
"mistral-tiny",
"mistral-small",
"mistral-medium",
"mistral-large",
"aws-claude",
"azure-turbo",
"azure-gpt4",
"azure-gpt4-turbo",
"azure-gpt4-32k",
]),
rejectPhrases: parseCsv(getEnvWithDefault("REJECT_PHRASES", "")),
rejectMessage: getEnvWithDefault(
"REJECT_MESSAGE",
@@ -443,10 +320,6 @@ export const config: Config = {
allowAwsLogging: getEnvWithDefault("ALLOW_AWS_LOGGING", false),
promptLogging: getEnvWithDefault("PROMPT_LOGGING", false),
promptLoggingBackend: getEnvWithDefault("PROMPT_LOGGING_BACKEND", undefined),
promptLoggingFilePrefix: getEnvWithDefault(
"PROMPT_LOGGING_FILE_PREFIX",
"prompt-logs"
),
googleSheetsKey: getEnvWithDefault("GOOGLE_SHEETS_KEY", undefined),
googleSheetsSpreadsheetId: getEnvWithDefault(
"GOOGLE_SHEETS_SPREADSHEET_ID",
@@ -475,51 +348,20 @@ export const config: Config = {
staticServiceInfo: getEnvWithDefault("STATIC_SERVICE_INFO", false),
trustedProxies: getEnvWithDefault("TRUSTED_PROXIES", 1),
allowOpenAIToolUsage: getEnvWithDefault("ALLOW_OPENAI_TOOL_USAGE", false),
allowedVisionServices: parseCsv(
getEnvWithDefault("ALLOWED_VISION_SERVICES", "")
) as LLMService[],
proxyEndpointRoute: getEnvWithDefault("PROXY_ENDPOINT_ROUTE", "/proxy"),
adminWhitelist: parseCsv(
getEnvWithDefault("ADMIN_WHITELIST", "0.0.0.0/0,::/0")
),
ipBlacklist: parseCsv(getEnvWithDefault("IP_BLACKLIST", "")),
} as const;
function generateSigningKey() {
function generateCookieSecret() {
if (process.env.COOKIE_SECRET !== undefined) {
// legacy, replaced by SIGNING_KEY
return process.env.COOKIE_SECRET;
} else if (process.env.SIGNING_KEY !== undefined) {
return process.env.SIGNING_KEY;
}
const secrets = [
config.adminKey,
config.openaiKey,
config.anthropicKey,
config.googleAIKey,
config.mistralAIKey,
config.awsCredentials,
config.gcpCredentials,
config.azureCredentials,
];
if (secrets.filter((s) => s).length === 0) {
startupLogger.warn(
"No SIGNING_KEY or secrets are set. All sessions, cookies, and proofs of work will be invalidated on restart."
);
return crypto.randomBytes(32).toString("hex");
}
startupLogger.info("No SIGNING_KEY set; one will be generated from secrets.");
startupLogger.info(
"It's recommended to set SIGNING_KEY explicitly to ensure users' sessions and cookies always persist across restarts."
);
const seed = secrets.map((s) => s || "n/a").join("");
const seed = "" + config.adminKey + config.openaiKey + config.anthropicKey;
return crypto.createHash("sha256").update(seed).digest("hex");
}
const signingKey = generateSigningKey();
export const COOKIE_SECRET = signingKey;
export const COOKIE_SECRET = generateCookieSecret();
export async function assertConfigIsValid() {
if (process.env.MODEL_RATE_LIMIT !== undefined) {
@@ -535,23 +377,6 @@ export async function assertConfigIsValid() {
);
}
if (process.env.ALLOW_IMAGE_PROMPTS === "true") {
const hasAllowedServices = config.allowedVisionServices.length > 0;
if (!hasAllowedServices) {
config.allowedVisionServices = ["openai", "anthropic"];
startupLogger.warn(
{ allowedVisionServices: config.allowedVisionServices },
"ALLOW_IMAGE_PROMPTS is deprecated. Use ALLOWED_VISION_SERVICES instead."
);
}
}
if (config.promptLogging && !config.promptLoggingBackend) {
throw new Error(
"Prompt logging is enabled but no backend is configured. Set PROMPT_LOGGING_BACKEND to 'google_sheets' or 'file'."
);
}
if (!["none", "proxy_key", "user_token"].includes(config.gatekeeper)) {
throw new Error(
`Invalid gatekeeper mode: ${config.gatekeeper}. Must be one of: none, proxy_key, user_token.`
@@ -564,35 +389,18 @@ export async function assertConfigIsValid() {
);
}
if (
config.captchaMode === "proof_of_work" &&
config.gatekeeper !== "user_token"
) {
throw new Error(
"Captcha mode 'proof_of_work' requires gatekeeper mode 'user_token'."
);
}
if (config.captchaMode === "proof_of_work") {
const val = config.powDifficultyLevel;
const isDifficulty =
typeof val === "string" &&
["low", "medium", "high", "extreme"].includes(val);
const isIterations =
typeof val === "number" && Number.isInteger(val) && val > 0;
if (!isDifficulty && !isIterations) {
throw new Error(
"Invalid POW_DIFFICULTY_LEVEL. Must be one of: low, medium, high, extreme, or a positive integer."
);
}
}
if (config.gatekeeper === "proxy_key" && !config.proxyKey) {
throw new Error(
"`proxy_key` gatekeeper mode requires a `PROXY_KEY` to be set."
);
}
if (config.gatekeeper !== "proxy_key" && config.proxyKey) {
throw new Error(
"`PROXY_KEY` is set, but gatekeeper mode is not `proxy_key`. Make sure to set `GATEKEEPER=proxy_key`."
);
}
if (
config.gatekeeperStore === "firebase_rtdb" &&
(!config.firebaseKey || !config.firebaseRtdbUrl)
@@ -638,7 +446,6 @@ export const OMITTED_KEYS = [
"googleAIKey",
"mistralAIKey",
"awsCredentials",
"gcpCredentials",
"azureCredentials",
"proxyKey",
"adminKey",
@@ -646,13 +453,9 @@ export const OMITTED_KEYS = [
"rejectPhrases",
"rejectMessage",
"showTokenCosts",
"promptLoggingFilePrefix",
"googleSheetsKey",
"firebaseKey",
"firebaseRtdbUrl",
"sqliteDataPath",
"eventLogging",
"eventLoggingTrim",
"gatekeeperStore",
"maxIpsPerUser",
"blockedOrigins",
@@ -666,9 +469,6 @@ export const OMITTED_KEYS = [
"allowedModelFamilies",
"trustedProxies",
"proxyEndpointRoute",
"adminWhitelist",
"ipBlacklist",
"powTokenPurgeHours",
] satisfies (keyof Config)[];
type OmitKeys = (typeof OMITTED_KEYS)[number];
@@ -729,7 +529,6 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
"ANTHROPIC_KEY",
"GOOGLE_AI_KEY",
"AWS_CREDENTIALS",
"GCP_CREDENTIALS",
"AZURE_CREDENTIALS",
].includes(String(env))
) {
@@ -780,7 +579,3 @@ function parseCsv(val: string): string[] {
const matches = val.match(regex) || [];
return matches.map((item) => item.replace(/^"|"$/g, "").trim());
}
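// Example of the quoting behavior parseCsv is intended to support (see the
// REJECT_PHRASES documentation): parseCsv('one,"two, with comma",three')
// should yield ["one", "two, with comma", "three"].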
function getDefaultModelFamilies(): ModelFamily[] {
return MODEL_FAMILIES.filter((f) => !f.includes("dall-e")) as ModelFamily[];
}
+18 -45
View File
@@ -12,40 +12,29 @@ import { checkCsrfToken, injectCsrfToken } from "./shared/inject-csrf";
const INFO_PAGE_TTL = 2000;
const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
turbo: "GPT-4o Mini / 3.5 Turbo",
turbo: "GPT-3.5 Turbo",
gpt4: "GPT-4",
"gpt4-32k": "GPT-4 32k",
"gpt4-turbo": "GPT-4 Turbo",
gpt4o: "GPT-4o",
"dall-e": "DALL-E",
claude: "Claude (Sonnet)",
"claude-opus": "Claude (Opus)",
"gemini-flash": "Gemini Flash",
"gemini-pro": "Gemini Pro",
"gemini-ultra": "Gemini Ultra",
"mistral-tiny": "Mistral 7B",
"mistral-small": "Mistral Nemo",
"mistral-small": "Mixtral Small", // Originally 8x7B, but that now refers to the older open-weight version. Mixtral Small is a newer closed-weight update to the 8x7B model.
"mistral-medium": "Mistral Medium",
"mistral-large": "Mistral Large",
"aws-claude": "AWS Claude (Sonnet)",
"aws-claude-opus": "AWS Claude (Opus)",
"aws-mistral-tiny": "AWS Mistral 7B",
"aws-mistral-small": "AWS Mistral Nemo",
"aws-mistral-medium": "AWS Mistral Medium",
"aws-mistral-large": "AWS Mistral Large",
"gcp-claude": "GCP Claude (Sonnet)",
"gcp-claude-opus": "GCP Claude (Opus)",
"azure-turbo": "Azure GPT-3.5 Turbo",
"azure-gpt4": "Azure GPT-4",
"azure-gpt4-32k": "Azure GPT-4 32k",
"azure-gpt4-turbo": "Azure GPT-4 Turbo",
"azure-gpt4o": "Azure GPT-4o",
"azure-dall-e": "Azure DALL-E",
};
const converter = new showdown.Converter();
const customGreeting = fs.existsSync("greeting.md")
? `<div id="servergreeting">${fs.readFileSync("greeting.md", "utf8")}</div>`
? `\n## Server Greeting\n${fs.readFileSync("greeting.md", "utf8")}`
: "";
let infoPageHtml: string | undefined;
let infoPageLastUpdated = 0;
@@ -71,42 +60,36 @@ export function renderPage(info: ServiceInfo) {
const title = getServerTitle();
const headerHtml = buildInfoPageHeader(info);
return `<!doctype html>
return `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="robots" content="noindex" />
<title>${title}</title>
<link rel="stylesheet" href="/res/css/reset.css" media="screen" />
<link rel="stylesheet" href="/res/css/sakura.css" media="screen" />
<link rel="stylesheet" href="/res/css/sakura-dark.css" media="screen and (prefers-color-scheme: dark)" />
<style>
body {
font-family: sans-serif;
background-color: #f0f0f0;
padding: 1em;
max-width: 900px;
margin: 0;
}
.self-service-links {
display: flex;
justify-content: center;
margin-bottom: 1em;
padding: 0.5em;
font-size: 0.8em;
}
.self-service-links a {
margin: 0 0.5em;
@media (prefers-color-scheme: dark) {
body {
background-color: #222;
color: #eee;
}
a:link, a:visited {
color: #bbe;
}
}
</style>
</head>
<body>
${headerHtml}
<hr />
${getSelfServiceLinks()}
<h2>Service Info</h2>
<pre>${JSON.stringify(info, null, 2)}</pre>
${getSelfServiceLinks()}
</body>
</html>`;
}
@@ -160,15 +143,7 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon
function getSelfServiceLinks() {
if (config.gatekeeper !== "user_token") return "";
const links = [["Check your user token", "/user/lookup"]];
if (config.captchaMode !== "none") {
links.unshift(["Request a user token", "/user/captcha"]);
}
return `<div class="self-service-links">${links
.map(([text, link]) => `<a target="_blank" href="${link}">${text}</a>`)
.join(" | ")}</div>`;
return `<footer style="font-size: 0.8em;"><hr /><a target="_blank" href="/user/lookup">Check your user token info</a></footer>`;
}
function getServerTitle() {
@@ -215,7 +190,7 @@ function buildRecentImageSection() {
</div>`;
}
html += `</div>`;
html += `<p style="clear: both; text-align: center;"><a href="/user/image-history">View all recent images</a></p>`;
html += `<p style="clear: both; text-align: center;"><a href="/user/image-history">View all recent images</a></p>`
return html;
}
@@ -226,9 +201,7 @@ function escapeHtml(unsafe: string) {
.replace(/</g, "&lt;")
.replace(/>/g, "&gt;")
.replace(/"/g, "&quot;")
.replace(/'/g, "&#39;")
.replace(/\[/g, "&#91;")
.replace(/]/g, "&#93;");
.replace(/'/g, "&#39;");
}
function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
-9
View File
@@ -1,9 +0,0 @@
import { NextFunction, Request, Response } from "express";
export function addV1(req: Request, res: Response, next: NextFunction) {
// Clients don't consistently use the /v1 prefix so we'll add it for them.
if (!req.path.startsWith("/v1/") && !req.path.startsWith("/v1beta/")) {
req.url = `/v1${req.url}`;
}
next();
}
+2 -3
View File
@@ -46,7 +46,6 @@ const getModelsResponse = () => {
"claude-3-haiku-20240307",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-5-sonnet-20240620"
];
const models = claudeVariants.map((id) => ({
@@ -129,7 +128,7 @@ export function transformAnthropicChatResponseToAnthropicText(
* is only used for non-streaming requests as streaming requests are handled
* on-the-fly.
*/
export function transformAnthropicTextResponseToOpenAI(
function transformAnthropicTextResponseToOpenAI(
anthropicBody: Record<string, any>,
req: Request
): Record<string, any> {
@@ -157,7 +156,7 @@ export function transformAnthropicTextResponseToOpenAI(
};
}
export function transformAnthropicChatResponseToOpenAI(
function transformAnthropicChatResponseToOpenAI(
anthropicBody: Record<string, any>
): Record<string, any> {
return {
-253
View File
@@ -1,253 +0,0 @@
import { Request, RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
createPreprocessorMiddleware,
signAwsRequest,
finalizeSignedRequest,
createOnProxyReqHandler,
} from "./middleware/request";
import {
ProxyResHandlerWithBody,
createOnProxyResHandler,
} from "./middleware/response";
import {
transformAnthropicChatResponseToAnthropicText,
transformAnthropicChatResponseToOpenAI,
} from "./anthropic";
/** Only used for non-streaming requests. */
const awsResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
switch (`${req.inboundApi}<-${req.outboundApi}`) {
case "openai<-anthropic-text":
req.log.info("Transforming Anthropic Text back to OpenAI format");
newBody = transformAwsTextResponseToOpenAI(body, req);
break;
case "openai<-anthropic-chat":
req.log.info("Transforming AWS Anthropic Chat back to OpenAI format");
newBody = transformAnthropicChatResponseToOpenAI(body);
break;
case "anthropic-text<-anthropic-chat":
req.log.info("Transforming AWS Anthropic Chat back to Text format");
newBody = transformAnthropicChatResponseToAnthropicText(body);
break;
}
// AWS does not always confirm the model in the response, so we have to add it
if (!newBody.model && req.body.model) {
newBody.model = req.body.model;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
/**
* Transforms a model response from the Anthropic API to match those from the
* OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
* is only used for non-streaming requests as streaming requests are handled
* on-the-fly.
*/
function transformAwsTextResponseToOpenAI(
awsBody: Record<string, any>,
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
return {
id: "aws-" + v4(),
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage: {
prompt_tokens: req.promptTokens,
completion_tokens: req.outputTokens,
total_tokens: totalTokens,
},
choices: [
{
message: {
role: "assistant",
content: awsBody.completion?.trim(),
},
finish_reason: awsBody.stop_reason,
index: 0,
},
],
};
}
const awsClaudeProxy = createQueueMiddleware({
beforeProxy: signAwsRequest,
proxyMiddleware: createProxyMiddleware({
target: "bad-target-will-be-rewritten",
router: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([awsResponseHandler]),
error: handleProxyError,
},
}),
});
const nativeTextPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
const textToChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes text completion prompts to aws anthropic-chat if they need translation
* (claude-3 based models do not support the old text completion endpoint).
*/
const preprocessAwsTextRequest: RequestHandler = (req, res, next) => {
if (req.body.model?.includes("claude-3")) {
textToChatPreprocessor(req, res, next);
} else {
nativeTextPreprocessor(req, res, next);
}
};
const oaiToAwsTextPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
const oaiToAwsChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes an OpenAI prompt to either the legacy Claude text completion endpoint
* or the new Claude chat completion endpoint, based on the requested model.
*/
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
if (req.body.model?.includes("claude-3")) {
oaiToAwsChatPreprocessor(req, res, next);
} else {
oaiToAwsTextPreprocessor(req, res, next);
}
};
const awsClaudeRouter = Router();
// Native(ish) Anthropic text completion endpoint.
awsClaudeRouter.post(
"/v1/complete",
ipLimiter,
preprocessAwsTextRequest,
awsClaudeProxy
);
// Native Anthropic chat completion endpoint.
awsClaudeRouter.post(
"/v1/messages",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
),
awsClaudeProxy
);
// OpenAI-to-AWS Anthropic compatibility endpoint.
awsClaudeRouter.post(
"/v1/chat/completions",
ipLimiter,
preprocessOpenAICompatRequest,
awsClaudeProxy
);
/**
* Tries to deal with:
* - frontends sending AWS model names even when they want to use the OpenAI-
* compatible endpoint
* - frontends sending Anthropic model names that AWS doesn't recognize
* - frontends sending OpenAI model names because they expect the proxy to
* translate them
*
* If client sends AWS model ID it will be used verbatim. Otherwise, various
* strategies are used to try to map a non-AWS model name to AWS model ID.
*/
function maybeReassignModel(req: Request) {
const model = req.body.model;
// If it looks like an AWS model, use it as-is
if (model.includes("anthropic.claude")) {
return;
}
// Anthropic model names can look like:
// - claude-v1
// - claude-2.1
// - claude-3-5-sonnet-20240620-v1:0
const pattern =
/^(claude-)?(instant-)?(v)?(\d+)([.-](\d))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(\d*)/i;
const match = model.match(pattern);
// If there's no match, fallback to Claude v2 as it is most likely to be
// available on AWS.
if (!match) {
req.body.model = `anthropic.claude-v2:1`;
return;
}
const [_, _cl, instant, _v, major, _sep, minor, _ctx, name, _rev] = match;
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
}
const ver = minor ? `${major}.${minor}` : major;
switch (ver) {
case "1":
case "1.0":
req.body.model = "anthropic.claude-v1";
return;
case "2":
case "2.0":
req.body.model = "anthropic.claude-v2";
return;
case "3":
case "3.0":
if (name.includes("opus")) {
req.body.model = "anthropic.claude-3-opus-20240229-v1:0";
} else if (name.includes("haiku")) {
req.body.model = "anthropic.claude-3-haiku-20240307-v1:0";
} else {
req.body.model = "anthropic.claude-3-sonnet-20240229-v1:0";
}
return;
case "3.5":
req.body.model = "anthropic.claude-3-5-sonnet-20240620-v1:0";
return;
}
// Fallback to Claude 2.1
req.body.model = `anthropic.claude-v2:1`;
return;
}
export const awsClaude = awsClaudeRouter;
-110
View File
@@ -1,110 +0,0 @@
import { Request } from "express";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
import { createQueueMiddleware } from "./queue";
import {
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeSignedRequest,
signAwsRequest,
} from "./middleware/request";
import { createProxyMiddleware } from "http-proxy-middleware";
import { logger } from "../logger";
import { handleProxyError } from "./middleware/common";
import { Router } from "express";
import { ipLimiter } from "./rate-limit";
import { detectMistralInputApi, transformMistralTextToMistralChat } from "./mistral-ai";
const awsMistralBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
if (req.inboundApi === "mistral-ai" && req.outboundApi === "mistral-text") {
newBody = transformMistralTextToMistralChat(body);
}
// AWS does not always confirm the model in the response, so we have to add it
if (!newBody.model && req.body.model) {
newBody.model = req.body.model;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const awsMistralProxy = createQueueMiddleware({
beforeProxy: signAwsRequest,
proxyMiddleware: createProxyMiddleware({
target: "bad-target-will-be-rewritten",
router: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([awsMistralBlockingResponseHandler]),
error: handleProxyError,
},
}),
});
function maybeReassignModel(req: Request) {
const model = req.body.model;
// If it looks like an AWS model, use it as-is
if (model.startsWith("mistral.")) {
return;
}
// Mistral 7B Instruct
else if (model.includes("7b")) {
req.body.model = "mistral.mistral-7b-instruct-v0:2";
}
// Mistral 8x7B Instruct
else if (model.includes("8x7b")) {
req.body.model = "mistral.mixtral-8x7b-instruct-v0:1";
}
// Mistral Large (Feb 2024)
else if (model.includes("large-2402")) {
req.body.model = "mistral.mistral-large-2402-v1:0";
}
// Mistral Large 2 (July 2024)
else if (model.includes("large")) {
req.body.model = "mistral.mistral-large-2407-v1:0";
}
// Mistral Small (Feb 2024)
else if (model.includes("small")) {
req.body.model = "mistral.mistral-small-2402-v1:0";
} else {
throw new Error(
`Can't map '${model}' to a supported AWS model ID; make sure you are requesting a Mistral model supported by Amazon Bedrock`
);
}
}
const nativeMistralChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "mistral-ai", outApi: "mistral-ai", service: "aws" },
{
beforeTransform: [detectMistralInputApi],
afterTransform: [maybeReassignModel],
}
);
const awsMistralRouter = Router();
awsMistralRouter.post(
"/v1/chat/completions",
ipLimiter,
nativeMistralChatPreprocessor,
awsMistralProxy
);
export const awsMistral = awsMistralRouter;
+291 -59
View File
@@ -1,75 +1,307 @@
/* Shared code between AWS Claude and AWS Mistral endpoints. */
import { Request, Response, Router } from "express";
import { Request, RequestHandler, Response, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { config } from "../config";
import { addV1 } from "./add-v1";
import { awsClaude } from "./aws-claude";
import { awsMistral } from "./aws-mistral";
import { AwsBedrockKey, keyPool } from "../shared/key-management";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
createPreprocessorMiddleware,
signAwsRequest,
finalizeSignedRequest,
createOnProxyReqHandler,
} from "./middleware/request";
import {
ProxyResHandlerWithBody,
createOnProxyResHandler,
} from "./middleware/response";
import { transformAnthropicChatResponseToAnthropicText } from "./anthropic";
import { sendErrorToClient } from "./middleware/response/error-generator";
const awsRouter = Router();
awsRouter.get(["/:vendor?/v1/models", "/:vendor?/models"], handleModelsRequest);
awsRouter.use("/claude", addV1, awsClaude);
awsRouter.use("/mistral", addV1, awsMistral);
const LATEST_AWS_V2_MINOR_VERSION = "1";
let modelsCache: any = null;
let modelsCacheTime = 0;
const getModelsResponse = () => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
const MODELS_CACHE_TTL = 10000;
let modelsCache: Record<string, any> = {};
let modelsCacheTime: Record<string, number> = {};
function handleModelsRequest(req: Request, res: Response) {
if (!config.awsCredentials) return { object: "list", data: [] };
const vendor = req.params.vendor?.length
? req.params.vendor === "claude"
? "anthropic"
: req.params.vendor
: "all";
const cacheTime = modelsCacheTime[vendor] || 0;
if (new Date().getTime() - cacheTime < MODELS_CACHE_TTL) {
return res.json(modelsCache[vendor]);
}
const availableModelIds = new Set<string>();
for (const key of keyPool.list()) {
if (key.isDisabled || key.service !== "aws") continue;
(key as AwsBedrockKey).modelIds.forEach((id) => availableModelIds.add(id));
}
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
const models = [
const variants = [
"anthropic.claude-v2",
"anthropic.claude-v2:1",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-sonnet-20240229-v1:0",
"anthropic.claude-3-5-sonnet-20240620-v1:0",
"anthropic.claude-3-opus-20240229-v1:0",
"mistral.mistral-7b-instruct-v0:2",
"mistral.mixtral-8x7b-instruct-v0:1",
"mistral.mistral-large-2402-v1:0",
"mistral.mistral-large-2407-v1:0",
"mistral.mistral-small-2402-v1:0",
]
.filter((id) => availableModelIds.has(id))
.map((id) => {
const vendor = id.match(/^(.*)\./)?.[1];
return {
id,
object: "model",
created: new Date().getTime(),
owned_by: vendor,
permission: [],
root: vendor,
parent: null,
};
});
];
modelsCache[vendor] = {
object: "list",
data: models.filter((m) => vendor === "all" || m.root === vendor),
const models = variants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "anthropic",
permission: [],
root: "claude",
parent: null,
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
/** Only used for non-streaming requests. */
const awsResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
switch (`${req.inboundApi}<-${req.outboundApi}`) {
case "openai<-anthropic-text":
req.log.info("Transforming Anthropic Text back to OpenAI format");
newBody = transformAwsTextResponseToOpenAI(body, req);
break;
// case "openai<-anthropic-chat":
// todo: implement this
case "anthropic-text<-anthropic-chat":
req.log.info("Transforming AWS Anthropic Chat back to Text format");
newBody = transformAnthropicChatResponseToAnthropicText(body);
break;
}
// AWS does not always confirm the model in the response, so we have to add it
if (!newBody.model && req.body.model) {
newBody.model = req.body.model;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
/**
* Transforms a model response from the Anthropic API to match those from the
* OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
* is only used for non-streaming requests as streaming requests are handled
* on-the-fly.
*/
function transformAwsTextResponseToOpenAI(
awsBody: Record<string, any>,
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
return {
id: "aws-" + v4(),
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage: {
prompt_tokens: req.promptTokens,
completion_tokens: req.outputTokens,
total_tokens: totalTokens,
},
choices: [
{
message: {
role: "assistant",
content: awsBody.completion?.trim(),
},
finish_reason: awsBody.stop_reason,
index: 0,
},
],
};
modelsCacheTime[vendor] = new Date().getTime();
}
return res.json(modelsCache[vendor]);
const awsProxy = createQueueMiddleware({
beforeProxy: signAwsRequest,
proxyMiddleware: createProxyMiddleware({
target: "bad-target-will-be-rewritten",
router: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([awsResponseHandler]),
error: handleProxyError,
},
}),
});
const nativeTextPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
const textToChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes text completion prompts to aws anthropic-chat if they need translation
* (claude-3 based models do not support the old text completion endpoint).
*/
const awsTextCompletionRouter: RequestHandler = (req, res, next) => {
if (req.body.model?.includes("claude-3")) {
textToChatPreprocessor(req, res, next);
} else {
nativeTextPreprocessor(req, res, next);
}
};
const awsRouter = Router();
awsRouter.get("/v1/models", handleModelRequest);
// Native(ish) Anthropic text completion endpoint.
awsRouter.post("/v1/complete", ipLimiter, awsTextCompletionRouter, awsProxy);
// Native Anthropic chat completion endpoint.
awsRouter.post(
"/v1/messages",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
),
awsProxy
);
// Temporary force-Claude3 endpoint
awsRouter.post(
"/v1/sonnet/:action(complete|messages)",
ipLimiter,
handleCompatibilityRequest,
createPreprocessorMiddleware({
inApi: "anthropic-text",
outApi: "anthropic-chat",
service: "aws",
}),
awsProxy
);
// OpenAI-to-AWS Anthropic compatibility endpoint.
awsRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
),
awsProxy
);
/**
* Tries to deal with:
* - frontends sending AWS model names even when they want to use the OpenAI-
* compatible endpoint
* - frontends sending Anthropic model names that AWS doesn't recognize
* - frontends sending OpenAI model names because they expect the proxy to
* translate them
*/
function maybeReassignModel(req: Request) {
const model = req.body.model;
// If client already specified an AWS Claude model ID, use it
if (model.includes("anthropic.claude")) {
return;
}
const pattern =
    /^(claude-)?(instant-)?(v)?(\d+)(\.(\d+))?(-\d+k)?(-sonnet-?|-opus-?)?(\d*)/i;
const match = model.match(pattern);
// If there's no match, return the latest v2 model
if (!match) {
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
}
const instant = match[2];
const major = match[4];
const minor = match[6];
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
}
// There's only one v1 model
if (major === "1") {
req.body.model = "anthropic.claude-v1";
return;
}
// Try to map Anthropic API v2 models to AWS v2 models
if (major === "2") {
if (minor === "0") {
req.body.model = "anthropic.claude-v2";
return;
}
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
}
// AWS currently only supports one v3 model.
const variant = match[8]; // sonnet or opus
const variantVersion = match[9];
if (major === "3") {
req.body.model = "anthropic.claude-3-sonnet-20240229-v1:0";
return;
}
// Fallback to latest v2 model
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
}
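// Illustrative mappings produced by the logic above (assuming the variant
// group in the pattern is optional, per the fix above):
//   "claude-instant-v1"      -> "anthropic.claude-instant-v1"
//   "claude-v1"              -> "anthropic.claude-v1"
//   "claude-2.0"             -> "anthropic.claude-v2"
//   "claude-2.1"             -> "anthropic.claude-v2:1"
//   "claude-3-opus-20240229" -> "anthropic.claude-3-sonnet-20240229-v1:0"
//   "gpt-4" (no match)       -> "anthropic.claude-v2:1"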
export function handleCompatibilityRequest(
req: Request,
res: Response,
next: any
) {
const action = req.params.action;
const alreadyInChatFormat = Boolean(req.body.messages);
const compatModel = "anthropic.claude-3-sonnet-20240229-v1:0";
req.log.info(
{ inputModel: req.body.model, compatModel, alreadyInChatFormat },
"Handling AWS compatibility request"
);
if (action === "messages" || alreadyInChatFormat) {
return sendErrorToClient({
req,
res,
options: {
title: "Unnecessary usage of compatibility endpoint",
message: `Your client seems to already support the new Claude API format. This endpoint is intended for clients that do not yet support the new format.\nUse the normal \`/aws/claude\` proxy endpoint instead.`,
format: "unknown",
statusCode: 400,
reqId: req.id,
obj: {
requested_endpoint: "/aws/claude/sonnet",
correct_endpoint: "/aws/claude",
},
},
});
}
req.body.model = compatModel;
next();
}
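// For example, a legacy text-completion client POSTing to the /sonnet/complete
// route with model "claude-v2" would have its prompt translated to the chat
// API and forced onto the Claude 3 Sonnet model above, while clients that
// already send chat-format requests are told to use the normal endpoint.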
export const aws = awsRouter;
+10 -51
View File
@@ -1,7 +1,6 @@
import type { Request, Response, RequestHandler } from "express";
import type { Request, RequestHandler } from "express";
import { config } from "../config";
import { authenticate, getUser } from "../shared/users/user-store";
import { sendErrorToClient } from "./middleware/response/error-generator";
const GATEKEEPER = config.gatekeeper;
const PROXY_KEY = config.proxyKey;
@@ -12,7 +11,6 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
// pass the _proxy_ key in this header too, instead of providing it as a
// Bearer token in the Authorization header. So we need to check both.
// Prefer the Authorization header if both are present.
// Google AI uses a key querystring parameter.
if (req.headers.authorization) {
const token = req.headers.authorization?.slice("Bearer ".length);
@@ -25,12 +23,6 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
delete req.headers["x-api-key"];
return token;
}
if (req.query.key) {
const token = req.query.key?.toString();
delete req.query.key;
return token;
}
return undefined;
}
@@ -58,9 +50,9 @@ export const gatekeeper: RequestHandler = (req, res, next) => {
// IP alone to distinguish between them and prevent usertoken sharing.
// Risu sends a signed token in the request headers with an anonymous user
// ID that we can instead use to associate requests with an individual.
const ip = req.risuToken?.length
? `risu${req.risuToken}-${req.ip}`
: req.ip;
const ip = req.risuToken?.length ?
`risu${req.risuToken}-${req.ip}` :
req.ip;
const { user, result } = authenticate(token, ip);
@@ -69,50 +61,17 @@ export const gatekeeper: RequestHandler = (req, res, next) => {
req.user = user;
return next();
case "limited":
return sendError(
req,
res,
403,
`Forbidden: no more IP addresses allowed for this user token`,
{ currentIp: ip, maxIps: user?.maxIps }
);
return res.status(403).json({
error: `Forbidden: no more IPs can authenticate with this token`,
});
case "disabled":
const bannedUser = getUser(token);
if (bannedUser?.disabledAt) {
const reason = bannedUser.disabledReason || "User token disabled";
return sendError(req, res, 403, `Forbidden: ${reason}`);
const reason = bannedUser.disabledReason || "Token disabled";
return res.status(403).json({ error: `Forbidden: ${reason}` });
}
}
}
sendError(req, res, 401, "Unauthorized");
res.status(401).json({ error: "Unauthorized" });
};
function sendError(
req: Request,
res: Response,
status: number,
message: string,
data: any = {}
) {
const isPost = req.method === "POST";
const hasBody = isPost && req.body;
const hasModel = hasBody && req.body.model;
if (!hasModel) {
return res.status(status).json({ error: message });
}
sendErrorToClient({
req,
res,
options: {
title: `Proxy gatekeeper error (HTTP ${status})`,
message,
format: "unknown",
statusCode: status,
reqId: req.id,
obj: data,
},
});
}
-196
View File
@@ -1,196 +0,0 @@
import { Request, RequestHandler, Response, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
createPreprocessorMiddleware,
signGcpRequest,
finalizeSignedRequest,
createOnProxyReqHandler,
} from "./middleware/request";
import {
ProxyResHandlerWithBody,
createOnProxyResHandler,
} from "./middleware/response";
import { transformAnthropicChatResponseToOpenAI } from "./anthropic";
import { sendErrorToClient } from "./middleware/response/error-generator";
const LATEST_GCP_SONNET_MINOR_VERSION = "20240229";
let modelsCache: any = null;
let modelsCacheTime = 0;
const getModelsResponse = () => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
if (!config.gcpCredentials) return { object: "list", data: [] };
// https://docs.anthropic.com/en/docs/about-claude/models
const variants = [
"claude-3-haiku@20240307",
"claude-3-sonnet@20240229",
"claude-3-opus@20240229",
"claude-3-5-sonnet@20240620",
];
const models = variants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "anthropic",
permission: [],
root: "claude",
parent: null,
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
/** Only used for non-streaming requests. */
const gcpResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
switch (`${req.inboundApi}<-${req.outboundApi}`) {
case "openai<-anthropic-chat":
req.log.info("Transforming Anthropic Chat back to OpenAI format");
newBody = transformAnthropicChatResponseToOpenAI(body);
break;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const gcpProxy = createQueueMiddleware({
beforeProxy: signGcpRequest,
proxyMiddleware: createProxyMiddleware({
target: "bad-target-will-be-rewritten",
router: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([gcpResponseHandler]),
error: handleProxyError,
},
}),
});
const oaiToChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-chat", service: "gcp" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes an OpenAI prompt to either the legacy Claude text completion endpoint
* or the new Claude chat completion endpoint, based on the requested model.
*/
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
oaiToChatPreprocessor(req, res, next);
};
const gcpRouter = Router();
gcpRouter.get("/v1/models", handleModelRequest);
// Native Anthropic chat completion endpoint.
gcpRouter.post(
"/v1/messages",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "gcp" },
{ afterTransform: [maybeReassignModel] }
),
gcpProxy
);
// OpenAI-to-GCP Anthropic compatibility endpoint.
gcpRouter.post(
"/v1/chat/completions",
ipLimiter,
preprocessOpenAICompatRequest,
gcpProxy
);
/**
* Tries to deal with:
* - frontends sending GCP model names even when they want to use the OpenAI-
* compatible endpoint
* - frontends sending Anthropic model names that GCP doesn't recognize
* - frontends sending OpenAI model names because they expect the proxy to
* translate them
*
* If client sends GCP model ID it will be used verbatim. Otherwise, various
* strategies are used to try to map a non-GCP model name to GCP model ID.
*/
function maybeReassignModel(req: Request) {
const model = req.body.model;
// If it looks like an GCP model, use it as-is
// if (model.includes("anthropic.claude")) {
if (model.startsWith("claude-") && model.includes("@")) {
return;
}
// Anthropic model names can look like:
// - claude-v1
// - claude-2.1
// - claude-3-5-sonnet-20240620-v1:0
const pattern =
/^(claude-)?(instant-)?(v)?(\d+)([.-](\d{1}))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(\d*)/i;
const match = model.match(pattern);
// If there's no match, fallback to Claude3 Sonnet as it is most likely to be
// available on GCP.
if (!match) {
req.body.model = `claude-3-sonnet@${LATEST_GCP_SONNET_MINOR_VERSION}`;
return;
}
const [_, _cl, instant, _v, major, _sep, minor, _ctx, name, _rev] = match;
const ver = minor ? `${major}.${minor}` : major;
switch (ver) {
case "3":
case "3.0":
if (name.includes("opus")) {
req.body.model = "claude-3-opus@20240229";
} else if (name.includes("haiku")) {
req.body.model = "claude-3-haiku@20240307";
} else {
req.body.model = "claude-3-sonnet@20240229";
}
return;
case "3.5":
req.body.model = "claude-3-5-sonnet@20240620";
return;
}
// Fall back to Claude 3 Sonnet.
req.body.model = `claude-3-sonnet@${LATEST_GCP_SONNET_MINOR_VERSION}`;
return;
}
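// A few illustrative mappings for maybeReassignModel (assumed inputs; not part
// of this diff):
//   "claude-3-sonnet@20240229"   -> unchanged (already a GCP model ID)
//   "claude-3-opus-20240229"     -> "claude-3-opus@20240229"
//   "claude-3-5-sonnet-20240620" -> "claude-3-5-sonnet@20240620"
//   "gpt-4" (no regex match)     -> `claude-3-sonnet@${LATEST_GCP_SONNET_MINOR_VERSION}`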
export const gcp = gcpRouter;
+8 -80
@@ -16,7 +16,6 @@ import {
ProxyResHandlerWithBody,
} from "./middleware/response";
import { addGoogleAIKey } from "./middleware/request/preprocessors/add-google-ai-key";
import { GoogleAIKey, keyPool } from "../shared/key-management";
let modelsCache: any = null;
let modelsCacheTime = 0;
@@ -31,19 +30,9 @@ const getModelsResponse = () => {
if (!config.googleAIKey) return { object: "list", data: [] };
const keys = keyPool
.list()
.filter((k) => k.service === "google-ai") as GoogleAIKey[];
if (keys.length === 0) {
modelsCache = { object: "list", data: [] };
modelsCacheTime = new Date().getTime();
return modelsCache;
}
const googleAIVariants = ["gemini-pro", "gemini-1.0-pro", "gemini-1.5-pro"];
const modelIds = Array.from(
new Set(keys.map((k) => k.modelIds).flat())
).filter((id) => id.startsWith("models/gemini"));
const models = modelIds.map((id) => ({
const models = googleAIVariants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
@@ -120,17 +109,7 @@ const googleAIProxy = createQueueMiddleware({
},
changeOrigin: true,
selfHandleResponse: true,
// Prevent logging of the API key by HPM
logger: logger.child(
{},
{
redact: {
paths: ["*"],
censor: (v) =>
typeof v === "string" ? v.replace(/key=\S+/g, "key=xxxxxxx") : v,
},
}
),
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([googleAIResponseHandler]),
@@ -141,67 +120,16 @@ const googleAIProxy = createQueueMiddleware({
const googleAIRouter = Router();
googleAIRouter.get("/v1/models", handleModelRequest);
// Native Google AI chat completion endpoint
googleAIRouter.post(
"/v1beta/models/:modelId:(generateContent|streamGenerateContent)",
ipLimiter,
createPreprocessorMiddleware(
{
inApi: "google-ai",
outApi: "google-ai",
service: "google-ai",
},
{ afterTransform: [maybeReassignModel, setStreamFlag] }
),
googleAIProxy
);
// OpenAI-to-Google AI compatibility endpoint.
googleAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "google-ai", service: "google-ai" },
{ afterTransform: [maybeReassignModel] }
),
createPreprocessorMiddleware({
inApi: "openai",
outApi: "google-ai",
service: "google-ai",
}),
googleAIProxy
);
function setStreamFlag(req: Request) {
const isStreaming = req.url.includes("streamGenerateContent");
if (isStreaming) {
req.body.stream = true;
req.isStreaming = true;
} else {
req.body.stream = false;
req.isStreaming = false;
}
}
/**
* Replaces requests for non-Google AI models with gemini-pro-1.5-latest.
* Also strips models/ from the beginning of the model IDs.
*/
function maybeReassignModel(req: Request) {
// Ensure model is on body as a lot of middleware will expect it.
const model = req.body.model || req.url.split("/").pop()?.split(":").shift();
if (!model) {
throw new Error("You must specify a model with your request.");
}
req.body.model = model;
const requested = model;
if (requested.startsWith("models/")) {
req.body.model = requested.slice("models/".length);
}
if (requested.includes("gemini")) {
return;
}
req.log.info({ requested }, "Reassigning model to gemini-pro-1.5-latest");
req.body.model = "gemini-pro-1.5-latest";
}
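// Illustrative behavior (assumed inputs; not part of this diff):
//   "models/gemini-1.5-pro" -> "gemini-1.5-pro" (prefix stripped, model kept)
//   "gemini-pro"            -> "gemini-pro" (kept as-is)
//   "gpt-4"                 -> "gemini-pro-1.5-latest" (reassigned)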
export const googleAI = googleAIRouter;
+12 -15
@@ -3,10 +3,10 @@ import http from "http";
import httpProxy from "http-proxy";
import { ZodError } from "zod";
import { generateErrorMessage } from "zod-error";
import { HttpError } from "../../shared/errors";
import { assertNever } from "../../shared/utils";
import { QuotaExceededError } from "./request/preprocessors/apply-quota-limits";
import { sendErrorToClient } from "./response/error-generator";
import { HttpError } from "../../shared/errors";
const OPENAI_CHAT_COMPLETION_ENDPOINT = "/v1/chat/completions";
const OPENAI_TEXT_COMPLETION_ENDPOINT = "/v1/completions";
@@ -54,13 +54,13 @@ export function sendProxyError(
const msg =
statusCode === 500
? `The proxy encountered an error while trying to process your prompt.`
: `The proxy encountered an error while trying to send your prompt to the API.`;
: `The proxy encountered an error while trying to send your prompt to the upstream service.`;
sendErrorToClient({
options: {
format: req.inboundApi,
title: `Proxy error (HTTP ${statusCode} ${statusMessage})`,
message: `${msg} Further details are provided below.`,
message: `${msg} Further technical details are provided below.`,
obj: errorPayload,
reqId: req.id,
model: req.body?.model,
@@ -221,12 +221,9 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
switch (format) {
case "openai":
case "mistral-ai":
// Few possible values:
// - choices[0].message.content
// - choices[0].message with no content if model is invoking a tool
return body.choices?.[0]?.message?.content || "";
case "mistral-text":
return body.outputs?.[0]?.text || "";
// Can be null if the model wants to invoke tools rather than return a
// completion.
return body.choices[0].message.content || "";
case "openai-text":
return body.choices[0].text;
case "anthropic-chat":
@@ -263,22 +260,22 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
}
}
export function getModelFromBody(req: Request, resBody: Record<string, any>) {
export function getModelFromBody(req: Request, body: Record<string, any>) {
const format = req.outboundApi;
switch (format) {
case "openai":
case "openai-text":
return resBody.model;
case "mistral-ai":
case "mistral-text":
return body.model;
case "openai-image":
case "google-ai":
// These formats don't have a model in the response body.
return req.body.model;
case "anthropic-chat":
case "anthropic-text":
// Anthropic confirms the model in the response, but AWS Claude doesn't.
return resBody.model || req.body.model;
return body.model || req.body.model;
case "google-ai":
// Google doesn't confirm the model in the response.
return req.body.model;
default:
assertNever(format);
}
+2 -4
@@ -11,18 +11,16 @@ export {
// Express middleware (runs before http-proxy-middleware, can be async)
export { addAzureKey } from "./preprocessors/add-azure-key";
export { applyQuotaLimits } from "./preprocessors/apply-quota-limits";
export { validateContextSize } from "./preprocessors/validate-context-size";
export { countPromptTokens } from "./preprocessors/count-prompt-tokens";
export { languageFilter } from "./preprocessors/language-filter";
export { setApiFormat } from "./preprocessors/set-api-format";
export { signAwsRequest } from "./preprocessors/sign-aws-request";
export { signGcpRequest } from "./preprocessors/sign-vertex-ai-request";
export { transformOutboundPayload } from "./preprocessors/transform-outbound-payload";
export { validateContextSize } from "./preprocessors/validate-context-size";
export { validateVision } from "./preprocessors/validate-vision";
// http-proxy-middleware callbacks (runs on onProxyReq, cannot be async)
export { addAnthropicPreamble } from "./onproxyreq/add-anthropic-preamble";
export { addKey, addKeyForEmbeddingsRequest } from "./onproxyreq/add-key";
export { addAnthropicPreamble } from "./onproxyreq/add-anthropic-preamble";
export { blockZoomerOrigins } from "./onproxyreq/block-zoomer-origins";
export { checkModelFamily } from "./onproxyreq/check-model-family";
export { finalizeBody } from "./onproxyreq/finalize-body";
@@ -1,5 +1,3 @@
import { AnthropicChatMessage } from "../../../../shared/api-schemas";
import { containsImageContent } from "../../../../shared/api-schemas/anthropic";
import { Key, OpenAIKey, keyPool } from "../../../../shared/key-management";
import { isEmbeddingsRequest } from "../../common";
import { HPMRequestCallback } from "../index";
@@ -21,27 +19,17 @@ export const addKey: HPMRequestCallback = (proxyReq, req) => {
throw new Error("You must specify a model with your request.");
}
let needsMultimodal = false;
if (outboundApi === "anthropic-chat") {
needsMultimodal = containsImageContent(
body.messages as AnthropicChatMessage[]
);
}
if (inboundApi === outboundApi) {
assignedKey = keyPool.get(body.model, service, needsMultimodal);
assignedKey = keyPool.get(body.model, service);
} else {
switch (outboundApi) {
// If we are translating between API formats we may need to select a model
// for the user, because the provided model is for the inbound API.
// TODO: This whole else condition is probably no longer needed since API
// translation now reassigns the model earlier in the request pipeline.
case "anthropic-text":
case "anthropic-chat":
case "mistral-ai":
case "mistral-text":
case "google-ai":
assignedKey = keyPool.get(body.model, service);
case "anthropic-text":
assignedKey = keyPool.get("claude-v1", service);
break;
case "openai-text":
assignedKey = keyPool.get("gpt-3.5-turbo-instruct", service);
@@ -50,8 +38,10 @@ export const addKey: HPMRequestCallback = (proxyReq, req) => {
assignedKey = keyPool.get("dall-e-3", service);
break;
case "openai":
case "google-ai":
case "mistral-ai":
throw new Error(
`Outbound API ${outboundApi} is not supported for ${inboundApi}`
`add-key should not be called for outbound API ${outboundApi}`
);
default:
assertNever(outboundApi);
@@ -84,7 +74,6 @@ export const addKey: HPMRequestCallback = (proxyReq, req) => {
proxyReq.setHeader("api-key", azureKey);
break;
case "aws":
case "gcp":
case "google-ai":
throw new Error("add-key should not be used for this service.");
default:
@@ -1,7 +1,7 @@
import type { HPMRequestCallback } from "../index";
/**
* For AWS/GCP/Azure/Google requests, the body is signed earlier in the request
* For AWS/Azure/Google requests, the body is signed earlier in the request
* pipeline, before the proxy middleware. This function just assigns the path
* and headers to the proxy request.
*/
@@ -7,15 +7,10 @@ import { HPMRequestCallback } from "../index";
export const stripHeaders: HPMRequestCallback = (proxyReq) => {
proxyReq.setHeader("origin", "");
proxyReq.setHeader("referer", "");
proxyReq.removeHeader("tailscale-user-login");
proxyReq.removeHeader("tailscale-user-name");
proxyReq.removeHeader("tailscale-headers-info");
proxyReq.removeHeader("tailscale-user-profile-pic")
proxyReq.removeHeader("cf-connecting-ip");
proxyReq.removeHeader("forwarded");
proxyReq.removeHeader("true-client-ip");
proxyReq.removeHeader("x-forwarded-for");
proxyReq.removeHeader("x-forwarded-host");
proxyReq.removeHeader("x-forwarded-proto");
proxyReq.removeHeader("x-real-ip");
};
@@ -4,12 +4,11 @@ import { initializeSseStream } from "../../../shared/streaming";
import { classifyErrorAndSend } from "../common";
import {
RequestPreprocessor,
validateContextSize,
countPromptTokens,
languageFilter,
setApiFormat,
transformOutboundPayload,
validateContextSize,
validateVision,
languageFilter,
} from ".";
type RequestPreprocessorOptions = {
@@ -51,7 +50,6 @@ export const createPreprocessorMiddleware = (
languageFilter,
...(afterTransform ?? []),
validateContextSize,
validateVision,
];
return async (...args) => executePreprocessors(preprocessors, args);
};
@@ -86,7 +84,7 @@ async function executePreprocessors(
const msg = error?.issues
?.map((issue: ZodIssue) => issue.message)
.join("; ");
req.log.warn({ issues: msg }, "Prompt validation failed.");
req.log.info(msg, "Prompt validation failed.");
} else {
req.log.error(error, "Error while executing request preprocessor");
}
@@ -143,7 +141,7 @@ const handleTestMessage: RequestHandler = (req, res) => {
};
function isTestMessage(body: any) {
const { messages, prompt, contents } = body;
const { messages, prompt } = body;
if (messages) {
return (
@@ -151,11 +149,6 @@ function isTestMessage(body: any) {
messages[0].role === "user" &&
messages[0].content === "Hi"
);
} else if (contents) {
return (
contents.length === 1 &&
contents[0].parts[0]?.text === "Hi"
);
} else {
return (
prompt?.trim() === "Human: Hi\n\nAssistant:" ||
@@ -2,38 +2,39 @@ import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
export const addGoogleAIKey: RequestPreprocessor = (req) => {
const inboundValid =
req.inboundApi === "openai" || req.inboundApi === "google-ai";
const outboundValid = req.outboundApi === "google-ai";
const apisValid = req.inboundApi === "openai" && req.outboundApi === "google-ai";
const serviceValid = req.service === "google-ai";
if (!inboundValid || !outboundValid || !serviceValid) {
if (!apisValid || !serviceValid) {
throw new Error("addGoogleAIKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const model = req.body.model;
req.isStreaming = req.isStreaming || req.body.stream;
req.key = keyPool.get(model, "google-ai");
req.log.info(
{ key: req.key.hash, model, stream: req.isStreaming },
{ key: req.key.hash, model },
"Assigned Google AI API key to request"
);
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:generateContent?key=$API_KEY
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}
const payload = { ...req.body, stream: undefined, model: undefined };
req.isStreaming = req.isStreaming || req.body.stream;
delete req.body.stream;
req.signedRequest = {
method: "POST",
protocol: "https:",
hostname: "generativelanguage.googleapis.com",
path: `/v1beta/models/${model}:${
req.isStreaming ? "streamGenerateContent" : "generateContent"
}?key=${req.key.key}`,
path: `/v1beta/models/${model}:${req.isStreaming ? "streamGenerateContent" : "generateContent"}?key=${req.key.key}`,
headers: {
["host"]: `generativelanguage.googleapis.com`,
["content-type"]: "application/json",
},
body: JSON.stringify(payload),
body: JSON.stringify(req.body),
};
};
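// A sketch of the signed request produced above (assumed key and model values;
// the body shape is the transformed Google AI payload, not shown in this diff):
// {
//   method: "POST",
//   hostname: "generativelanguage.googleapis.com",
//   path: "/v1beta/models/gemini-pro:streamGenerateContent?key=AIza...",
//   body: '{"contents":[...]}'
// }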
@@ -2,10 +2,11 @@ import { RequestPreprocessor } from "../index";
import { countTokens } from "../../../../shared/tokenization";
import { assertNever } from "../../../../shared/utils";
import {
AnthropicChatMessage,
GoogleAIChatMessage,
MistralAIChatMessage,
OpenAIChatMessage,
} from "../../../../shared/api-schemas";
} from "../../../../shared/api-support";
/**
* Given a request with an already-transformed body, counts the number of
@@ -30,10 +31,7 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
}
case "anthropic-chat": {
req.outputTokens = req.body.max_tokens;
const prompt = {
system: req.body.system ?? "",
messages: req.body.messages,
};
const prompt: AnthropicChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
@@ -49,11 +47,9 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
result = await countTokens({ req, prompt, service });
break;
}
case "mistral-ai":
case "mistral-text": {
case "mistral-ai": {
req.outputTokens = req.body.max_tokens;
const prompt: string | MistralAIChatMessage[] =
req.body.messages ?? req.body.prompt;
const prompt: MistralAIChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
@@ -7,7 +7,7 @@ import {
MistralAIChatMessage,
OpenAIChatMessage,
flattenAnthropicMessages,
} from "../../../../shared/api-schemas";
} from "../../../../shared/api-support";
const rejectedClients = new Map<string, number>();
@@ -56,6 +56,8 @@ function getPromptFromRequest(req: Request) {
switch (service) {
case "anthropic-chat":
return flattenAnthropicMessages(body.messages);
case "anthropic-text":
return body.prompt;
case "openai":
case "mistral-ai":
return body.messages
@@ -70,10 +72,8 @@ function getPromptFromRequest(req: Request) {
return `${msg.role}: ${text}`;
})
.join("\n\n");
case "anthropic-text":
case "openai-text":
case "openai-image":
case "mistral-text":
return body.prompt;
case "google-ai":
return body.prompt.text;
@@ -1,17 +1,13 @@
import express, { Request } from "express";
import express from "express";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
import {
AnthropicV1TextSchema,
AnthropicV1MessagesSchema,
} from "../../../../shared/api-schemas";
} from "../../../../shared/api-support";
import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
import {
AWSMistralV1ChatCompletionsSchema,
AWSMistralV1TextCompletionsSchema,
} from "../../../../shared/api-schemas/mistral-ai";
const AMZ_HOST =
process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";
@@ -33,6 +29,37 @@ export const signAwsRequest: RequestPreprocessor = async (req) => {
req.body.prompt = preamble + req.body.prompt;
}
// AWS uses mostly the same parameters as Anthropic, with a few params removed
// and much stricter validation that rejects unrecognized parameters. Rather
// than treating it as a separate schema, we use the Anthropic schemas and
// strip the unsupported parameters.
// TODO: This should happen in transform-outbound-payload.ts
let strippedParams: Record<string, unknown>;
if (req.outboundApi === "anthropic-chat") {
strippedParams = AnthropicV1MessagesSchema.pick({
messages: true,
max_tokens: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
})
.strip()
.parse(req.body);
strippedParams.anthropic_version = "bedrock-2023-05-31";
} else {
strippedParams = AnthropicV1TextSchema.pick({
prompt: true,
max_tokens_to_sample: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
})
.strip()
.parse(req.body);
}
const credential = getCredentialParts(req);
const host = AMZ_HOST.replace("%REGION%", credential.region);
// AWS only uses 2023-06-01 and does not actually check this header, but we
@@ -50,7 +77,7 @@ export const signAwsRequest: RequestPreprocessor = async (req) => {
["Host"]: host,
["content-type"]: "application/json",
},
body: JSON.stringify(applyAwsStrictValidation(req)),
body: JSON.stringify(strippedParams),
});
if (stream) {
@@ -100,48 +127,3 @@ async function sign(request: HttpRequest, credential: Credential) {
return signer.sign(request);
}
function applyAwsStrictValidation(req: Request): unknown {
// AWS uses vendor API formats but imposes additional (more strict) validation
// rules, namely that extraneous parameters are not allowed. We will validate
// using the vendor's zod schema but apply `.strip` to ensure that any
// extraneous parameters are removed.
let strippedParams: Record<string, unknown> = {};
switch (req.outboundApi) {
case "anthropic-text":
strippedParams = AnthropicV1TextSchema.pick({
prompt: true,
max_tokens_to_sample: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
})
.strip()
.parse(req.body);
break;
case "anthropic-chat":
strippedParams = AnthropicV1MessagesSchema.pick({
messages: true,
system: true,
max_tokens: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
})
.strip()
.parse(req.body);
strippedParams.anthropic_version = "bedrock-2023-05-31";
break;
case "mistral-ai":
strippedParams = AWSMistralV1ChatCompletionsSchema.parse(req.body);
break;
case "mistral-text":
strippedParams = AWSMistralV1TextCompletionsSchema.parse(req.body);
break;
default:
throw new Error("Unexpected outbound API for AWS.");
}
return strippedParams;
}
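// Illustrative effect of the .pick(...).strip().parse(...) calls above
// (assumed input; not part of this diff) -- unrecognized parameters are
// removed so AWS's strict validation does not reject the request:
//   { prompt: "...", max_tokens_to_sample: 256, top_a: 0.2, model: "claude-v2" }
// parses to
//   { prompt: "...", max_tokens_to_sample: 256 }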
@@ -1,201 +0,0 @@
import express from "express";
import crypto from "crypto";
import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
import { AnthropicV1MessagesSchema } from "../../../../shared/api-schemas";
const GCP_HOST = process.env.GCP_HOST || "%REGION%-aiplatform.googleapis.com";
export const signGcpRequest: RequestPreprocessor = async (req) => {
const serviceValid = req.service === "gcp";
if (!serviceValid) {
throw new Error("addVertexAIKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const { model, stream } = req.body;
req.key = keyPool.get(model, "gcp");
req.log.info({ key: req.key.hash, model }, "Assigned GCP key to request");
req.isStreaming = String(stream) === "true";
// TODO: This should happen in transform-outbound-payload.ts
// TODO: Support tools
let strippedParams: Record<string, unknown>;
strippedParams = AnthropicV1MessagesSchema.pick({
messages: true,
system: true,
max_tokens: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
stream: true,
})
.strip()
.parse(req.body);
strippedParams.anthropic_version = "vertex-2023-10-16";
const [accessToken, credential] = await getAccessToken(req);
const host = GCP_HOST.replace("%REGION%", credential.region);
// GCP doesn't use the anthropic-version header, but we set it to ensure the
// stream adapter selects the correct transformer.
req.headers["anthropic-version"] = "2023-06-01";
req.signedRequest = {
method: "POST",
protocol: "https:",
hostname: host,
path: `/v1/projects/${credential.projectId}/locations/${credential.region}/publishers/anthropic/models/${model}:streamRawPredict`,
headers: {
["host"]: host,
["content-type"]: "application/json",
["authorization"]: `Bearer ${accessToken}`,
},
body: JSON.stringify(strippedParams),
};
};
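// The resulting upstream path (assumed credential and model values; not part
// of this diff) looks like:
// /v1/projects/my-project/locations/us-east5/publishers/anthropic/models/claude-3-sonnet@20240229:streamRawPredict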
async function getAccessToken(
req: express.Request
): Promise<[string, Credential]> {
// TODO: access token caching to reduce latency
const credential = getCredentialParts(req);
const signedJWT = await createSignedJWT(
credential.clientEmail,
credential.privateKey
);
const [accessToken, jwtError] = await exchangeJwtForAccessToken(signedJWT);
if (accessToken === null) {
req.log.warn(
{ key: req.key!.hash, jwtError },
"Unable to get the access token"
);
throw new Error("The access token is invalid.");
}
return [accessToken, credential];
}
async function createSignedJWT(email: string, pkey: string): Promise<string> {
let cryptoKey = await crypto.subtle.importKey(
"pkcs8",
str2ab(atob(pkey)),
{
name: "RSASSA-PKCS1-v1_5",
hash: { name: "SHA-256" },
},
false,
["sign"]
);
const authUrl = "https://www.googleapis.com/oauth2/v4/token";
const issued = Math.floor(Date.now() / 1000);
const expires = issued + 600;
const header = {
alg: "RS256",
typ: "JWT",
};
const payload = {
iss: email,
aud: authUrl,
iat: issued,
exp: expires,
scope: "https://www.googleapis.com/auth/cloud-platform",
};
const encodedHeader = urlSafeBase64Encode(JSON.stringify(header));
const encodedPayload = urlSafeBase64Encode(JSON.stringify(payload));
const unsignedToken = `${encodedHeader}.${encodedPayload}`;
const signature = await crypto.subtle.sign(
"RSASSA-PKCS1-v1_5",
cryptoKey,
str2ab(unsignedToken)
);
const encodedSignature = urlSafeBase64Encode(signature);
return `${unsignedToken}.${encodedSignature}`;
}
async function exchangeJwtForAccessToken(
signedJwt: string
): Promise<[string | null, string]> {
const authUrl = "https://www.googleapis.com/oauth2/v4/token";
const params = {
grant_type: "urn:ietf:params:oauth:grant-type:jwt-bearer",
assertion: signedJwt,
};
const r = await fetch(authUrl, {
method: "POST",
headers: { "Content-Type": "application/x-www-form-urlencoded" },
body: Object.entries(params)
.map(([k, v]) => `${k}=${v}`)
.join("&"),
}).then((res) => res.json());
if (r.access_token) {
return [r.access_token, ""];
}
return [null, JSON.stringify(r)];
}
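// The form-encoded exchange body produced above looks like (illustrative):
// grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion=<signedJwt>
// A successful response is JSON containing an access_token field; anything
// else is returned to the caller as the serialized error.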
function str2ab(str: string): ArrayBuffer {
const buffer = new ArrayBuffer(str.length);
const bufferView = new Uint8Array(buffer);
for (let i = 0; i < str.length; i++) {
bufferView[i] = str.charCodeAt(i);
}
return buffer;
}
function urlSafeBase64Encode(data: string | ArrayBuffer): string {
let base64: string;
if (typeof data === "string") {
base64 = btoa(
encodeURIComponent(data).replace(/%([0-9A-F]{2})/g, (match, p1) =>
String.fromCharCode(parseInt("0x" + p1, 16))
)
);
} else {
base64 = btoa(String.fromCharCode(...new Uint8Array(data)));
}
return base64.replace(/\+/g, "-").replace(/\//g, "_").replace(/=+$/, "");
}
type Credential = {
projectId: string;
clientEmail: string;
region: string;
privateKey: string;
};
function getCredentialParts(req: express.Request): Credential {
const [projectId, clientEmail, region, rawPrivateKey] =
req.key!.key.split(":");
if (!projectId || !clientEmail || !region || !rawPrivateKey) {
req.log.error(
{ key: req.key!.hash },
"GCP_CREDENTIALS isn't correctly formatted; refer to the docs"
);
throw new Error("The key assigned to this request is invalid.");
}
const privateKey = rawPrivateKey
.replace(
/-----BEGIN PRIVATE KEY-----|-----END PRIVATE KEY-----|\r|\n|\\n/g,
""
)
.trim();
return { projectId, clientEmail, region, privateKey };
}
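// The GCP_CREDENTIALS format implied by the split(":") above (illustrative
// values; the private key is the base64 PKCS#8 body, PEM armor optional):
// "my-project:sa@my-project.iam.gserviceaccount.com:us-east5:MIIEvQIBADAN..."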
@@ -1,35 +1,40 @@
import { Request } from "express";
import {
API_REQUEST_VALIDATORS,
API_REQUEST_TRANSFORMERS,
} from "../../../../shared/api-schemas";
} from "../../../../shared/api-support";
import { BadRequestError } from "../../../../shared/errors";
import { fixMistralPrompt } from "../../../../shared/api-schemas/mistral-ai";
import {
isImageGenerationRequest,
isTextGenerationRequest,
} from "../../common";
import { RequestPreprocessor } from "../index";
import { fixMistralPrompt } from "../../../../shared/api-support/kits/mistral-ai/request-transformers";
/** Transforms an incoming request body to one that matches the target API. */
export const transformOutboundPayload: RequestPreprocessor = async (req) => {
const sameService = req.inboundApi === req.outboundApi;
const alreadyTransformed = req.retryCount > 0;
const notTransformable =
!isTextGenerationRequest(req) && !isImageGenerationRequest(req);
if (alreadyTransformed || notTransformable) return;
applyMistralPromptFixes(req);
// TODO: this should be an APIFormatTransformer
if (req.inboundApi === "mistral-ai") {
const messages = req.body.messages;
req.body.messages = fixMistralPrompt(messages);
req.log.info(
{ old: messages.length, new: req.body.messages.length },
"Fixed Mistral prompt"
);
}
// Native prompts are those which were already provided by the client in the
// target API format. We don't need to transform them.
const isNativePrompt = req.inboundApi === req.outboundApi;
if (isNativePrompt) {
if (sameService) {
const result = API_REQUEST_VALIDATORS[req.inboundApi].safeParse(req.body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body: req.body },
"Native prompt request validation failed."
"Request validation failed"
);
throw result.error;
}
@@ -37,12 +42,11 @@ export const transformOutboundPayload: RequestPreprocessor = async (req) => {
return;
}
// Prompt requires translation from one API format to another.
const transformation = `${req.inboundApi}->${req.outboundApi}` as const;
const transFn = API_REQUEST_TRANSFORMERS[transformation];
if (transFn) {
req.log.info({ transformation }, "Transforming request...");
req.log.info({ transformation }, "Transforming request");
req.body = await transFn(req);
return;
}
@@ -51,34 +55,3 @@ export const transformOutboundPayload: RequestPreprocessor = async (req) => {
`${transformation} proxying is not supported. Make sure your client is configured to send requests in the correct format and to the correct endpoint.`
);
};
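// Illustrative lookup (assumed transformer map contents; not part of this
// diff): an OpenAI-format request bound for Anthropic Chat resolves
// API_REQUEST_TRANSFORMERS["openai->anthropic-chat"], which rewrites the
// OpenAI messages array into Anthropic's { system, messages } shape.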
// handles weird cases that don't fit into our abstractions
function applyMistralPromptFixes(req: Request): void {
if (req.inboundApi === "mistral-ai") {
// Mistral Chat is very similar to OpenAI but not identical, and many clients
// don't properly handle the differences. We validate the Mistral prompt and
// fix common issues where possible. It will be re-validated after this
// function returns.
const result = API_REQUEST_VALIDATORS["mistral-ai"].parse(req.body);
req.body.messages = fixMistralPrompt(result.messages);
req.log.info(
{ n: req.body.messages.length, prev: result.messages.length },
"Applied Mistral chat prompt fixes."
);
// If the prompt relies on `prefix: true` for the last message, we need to
// convert it to a text completions request because Mistral support for
// this feature is limited (and completely broken on AWS Mistral).
const { messages } = req.body;
const lastMessage = messages && messages[messages.length - 1];
if (lastMessage && lastMessage.role === "assistant") {
// Enable prefix if the client forgot to, otherwise the template will insert
// an EOS token, which is very unlikely to be what the client wants.
lastMessage.prefix = true;
req.outboundApi = "mistral-text";
req.log.info(
"Native Mistral chat prompt relies on assistant message prefix. Converting to text completions request."
);
}
}
}
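// Illustrative case for the prefix conversion above (assumed input; not part
// of this diff):
//   messages: [..., { role: "assistant", content: "Sure, here" }]
//   -> lastMessage.prefix = true; req.outboundApi = "mistral-text"
// so the model continues the assistant's text instead of ending the turn.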
@@ -6,9 +6,8 @@ import { RequestPreprocessor } from "../index";
const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
// TODO: make configurable
const GOOGLE_AI_MAX_CONTEXT = 1024000;
const MISTRAL_AI_MAX_CONTENT = 131072;
const GOOGLE_AI_MAX_CONTEXT = 32000;
const MISTRAL_AI_MAX_CONTENT = 32768;
/**
* Assigns `req.promptTokens` and `req.outputTokens` based on the request body
@@ -38,7 +37,6 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
proxyMax = GOOGLE_AI_MAX_CONTEXT;
break;
case "mistral-ai":
case "mistral-text":
proxyMax = MISTRAL_AI_MAX_CONTENT;
break;
case "openai-image":
@@ -48,18 +46,9 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
}
proxyMax ||= Number.MAX_SAFE_INTEGER;
if (req.user?.type === "special") {
req.log.debug("Special user, not enforcing proxy context limit.");
proxyMax = Number.MAX_SAFE_INTEGER;
}
let modelMax: number;
if (model.match(/gpt-3.5-turbo-16k/)) {
modelMax = 16384;
} else if (model.match(/^gpt-4o/)) {
modelMax = 128000;
} else if (model.match(/gpt-4-turbo(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 131072;
} else if (model.match(/gpt-4-turbo(-preview)?$/)) {
modelMax = 131072;
} else if (model.match(/gpt-4-(0125|1106)(-preview)?$/)) {
@@ -67,7 +56,7 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
} else if (model.match(/^gpt-4(-\d{4})?-vision(-preview)?$/)) {
modelMax = 131072;
} else if (model.match(/gpt-3.5-turbo/)) {
modelMax = 16384;
modelMax = 4096;
} else if (model.match(/gpt-4-32k/)) {
modelMax = 32768;
} else if (model.match(/gpt-4/)) {
@@ -82,19 +71,17 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
modelMax = 200000;
} else if (model.match(/^claude-3/)) {
modelMax = 200000;
} else if (model.match(/^gemini-/)) {
modelMax = 1024000;
} else if (model.match(/^anthropic\.claude-3/)) {
} else if (model.match(/^gemini-\d{3}$/)) {
modelMax = GOOGLE_AI_MAX_CONTEXT;
} else if (model.match(/^mistral-(tiny|small|medium)$/)) {
modelMax = MISTRAL_AI_MAX_CONTENT;
} else if (model.match(/^anthropic\.claude-3-sonnet/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude-v2:\d/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude/)) {
// Not sure if AWS Claude has the same context limit as Anthropic Claude.
modelMax = 100000;
} else if (model.match(/tral/)) {
// Catches mistral, mixtral, codestral, mathstral, etc. Mistral models have no
// naming convention and wildly different context windows, so this is a
// catch-all.
modelMax = MISTRAL_AI_MAX_CONTENT;
} else {
req.log.warn({ model }, "Unknown model, using 200k token limit.");
modelMax = 200000;
@@ -1,44 +0,0 @@
import { config } from "../../../../config";
import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
import { containsImageContent as containsImageContentOpenAI } from "../../../../shared/api-schemas/openai";
import { containsImageContent as containsImageContentAnthropic } from "../../../../shared/api-schemas/anthropic";
import { ForbiddenError } from "../../../../shared/errors";
/**
* Rejects prompts containing images if multimodal prompts are disabled.
*/
export const validateVision: RequestPreprocessor = async (req) => {
if (req.service === undefined) {
throw new Error("Request service must be set before validateVision");
}
if (req.user?.type === "special") return;
if (config.allowedVisionServices.includes(req.service)) return;
// vision not allowed for req's service, block prompts with images
let hasImage = false;
switch (req.outboundApi) {
case "openai":
hasImage = containsImageContentOpenAI(req.body.messages);
break;
case "anthropic-chat":
hasImage = containsImageContentAnthropic(req.body.messages);
break;
case "anthropic-text":
case "google-ai":
case "mistral-ai":
case "mistral-text":
case "openai-image":
case "openai-text":
return;
default:
assertNever(req.outboundApi);
}
if (hasImage) {
throw new ForbiddenError(
"Prompts containing images are not permitted. Disable 'Send Inline Images' in your client and try again."
);
}
};
@@ -31,41 +31,24 @@ function getMessageContent({
}
```
*/
const note = obj?.proxy_note || obj?.error?.message || "";
const header = `### **${title}**`;
const friendlyMessage = note ? `${message}\n\n----\n\n*${note}*` : message;
const serializedObj = obj
? ["```", JSON.stringify(obj, null, 2), "```"].join("\n")
: "";
const { stack } = JSON.parse(JSON.stringify(obj ?? {}));
let prettyTrace = "";
if (stack && obj) {
prettyTrace = [
"Include this trace when reporting an issue.",
"```",
stack,
"```",
].join("\n");
delete obj.stack;
const friendlyMessage = note ? `${message}\n\n***\n\n*${note}*` : message;
const details = JSON.parse(JSON.stringify(obj ?? {}));
let stack = "";
if (details.stack) {
stack = `\n\nInclude this trace when reporting an issue.\n\`\`\`\n${details.stack}\n\`\`\``;
delete details.stack;
}
return [
header,
friendlyMessage,
serializedObj,
prettyTrace,
"<!-- oai-proxy-error -->",
].join("\n\n");
return `\n\n**${title}**\n${friendlyMessage}${
obj ? `\n\`\`\`\n${JSON.stringify(obj, null, 2)}\n\`\`\`\n${stack}` : ""
}`;
}
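// Illustrative output of the template above (assumed values; not part of this
// diff):
//
// **Proxy error (HTTP 502 Bad Gateway)**
// The proxy encountered an error while trying to send your prompt to the
// upstream service. Further technical details are provided below.
//
// ***
//
// *upstream proxy_note, if any*
// ```
// { "error": { "message": "..." } }
// ```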
type ErrorGeneratorOptions = {
format: APIFormat | "unknown";
title: string;
message: string;
obj?: Record<string, any>;
obj?: object;
reqId: string | number | object;
model?: string;
statusCode?: number;
@@ -95,23 +78,6 @@ export function tryInferFormat(body: any): APIFormat | "unknown" {
return "unknown";
}
// avoid leaking upstream hostname on dns resolution error
function redactHostname(options: ErrorGeneratorOptions): ErrorGeneratorOptions {
if (!options.message.includes("getaddrinfo")) return options;
const redacted = { ...options };
redacted.message = "Could not resolve hostname";
if (typeof redacted.obj?.error === "object") {
redacted.obj = {
...redacted.obj,
error: { message: "Could not resolve hostname" },
};
}
return redacted;
}
export function sendErrorToClient({
options,
req,
@@ -121,28 +87,24 @@ export function sendErrorToClient({
req: express.Request;
res: express.Response;
}) {
const redactedOpts = redactHostname(options);
const { format: inputFormat } = redactedOpts;
const { format: inputFormat } = options;
// This is an error thrown before we know the format of the request, so we
// can't send a response in the format the client expects.
const format =
inputFormat === "unknown" ? tryInferFormat(req.body) : inputFormat;
if (format === "unknown") {
return res.status(redactedOpts.statusCode || 400).json({
error: redactedOpts.message,
details: redactedOpts.obj,
return res.status(options.statusCode || 400).json({
error: options.message,
details: options.obj,
});
}
const completion = buildSpoofedCompletion({ ...redactedOpts, format });
const event = buildSpoofedSSE({ ...redactedOpts, format });
const completion = buildSpoofedCompletion({ ...options, format });
const event = buildSpoofedSSE({ ...options, format });
const isStreaming =
req.isStreaming || req.body.stream === true || req.body.stream === "true";
if (!res.headersSent) {
res.setHeader("x-oai-proxy-error", redactedOpts.title);
res.setHeader("x-oai-proxy-error-status", redactedOpts.statusCode || 500);
}
if (isStreaming) {
if (!res.headersSent) {
initializeSseStream(res);
@@ -189,11 +151,6 @@ export function buildSpoofedCompletion({
},
],
};
case "mistral-text":
return {
outputs: [{ text: content, stop_reason: title }],
model,
};
case "openai-text":
return {
id: "error-" + id,
@@ -225,7 +182,13 @@ export function buildSpoofedCompletion({
stop_sequence: null,
};
case "google-ai":
// TODO: Native Google AI non-streaming responses are not supported, this
// is an untested guess at what the response should look like.
return {
id: "error-" + id,
object: "chat.completion",
created: Date.now(),
model,
candidates: [
{
content: { parts: [{ text: content }], role: "model" },
@@ -272,11 +235,6 @@ export function buildSpoofedSSE({
choices: [{ delta: { content }, index: 0, finish_reason: title }],
};
break;
case "mistral-text":
event = {
outputs: [{ text: content, stop_reason: title }],
};
break;
case "openai-text":
event = {
id: "cmpl-" + id,
@@ -306,10 +264,7 @@ export function buildSpoofedSSE({
};
break;
case "google-ai":
// TODO: Google AI supports two streaming transports, SSE and JSON;
// we currently only support SSE.
// return JSON.stringify({
event = {
return JSON.stringify({
candidates: [
{
content: { parts: [{ text: content }], role: "model" },
@@ -319,8 +274,7 @@ export function buildSpoofedSSE({
safetyRatings: [],
},
],
};
break;
});
case "openai-image":
return JSON.stringify(obj);
default:
@@ -1,76 +0,0 @@
import util from "util";
import zlib from "zlib";
import { sendProxyError } from "../common";
import type { RawResponseBodyHandler } from "./index";
const DECODER_MAP = {
gzip: util.promisify(zlib.gunzip),
deflate: util.promisify(zlib.inflate),
br: util.promisify(zlib.brotliDecompress),
};
const isSupportedContentEncoding = (
contentEncoding: string
): contentEncoding is keyof typeof DECODER_MAP => {
return contentEncoding in DECODER_MAP;
};
/**
* Handles the response from the upstream service and decodes the body if
* necessary. If the response is JSON, it will be parsed and returned as an
* object. Otherwise, it will be returned as a string. Does not handle streaming
* responses.
* @throws {Error} Unsupported content-encoding or invalid application/json body
*/
export const handleBlockingResponse: RawResponseBodyHandler = async (
proxyRes,
req,
res
) => {
if (req.isStreaming) {
const err = new Error(
"handleBlockingResponse called for a streaming request."
);
req.log.error({ stack: err.stack, api: req.inboundApi }, err.message);
throw err;
}
return new Promise<string>((resolve, reject) => {
let chunks: Buffer[] = [];
proxyRes.on("data", (chunk) => chunks.push(chunk));
proxyRes.on("end", async () => {
let body = Buffer.concat(chunks);
const contentEncoding = proxyRes.headers["content-encoding"];
if (contentEncoding) {
if (isSupportedContentEncoding(contentEncoding)) {
const decoder = DECODER_MAP[contentEncoding];
// @ts-ignore - started failing after upgrading TypeScript, don't care
// as it was never a problem.
body = await decoder(body);
} else {
const error = `Proxy received response with unsupported content-encoding: ${contentEncoding}`;
req.log.warn({ contentEncoding, key: req.key?.hash }, error);
sendProxyError(req, res, 500, "Internal Server Error", {
error,
contentEncoding,
});
return reject(error);
}
}
try {
if (proxyRes.headers["content-type"]?.includes("application/json")) {
const json = JSON.parse(body.toString());
return resolve(json);
}
return resolve(body.toString());
} catch (e) {
const msg = `Proxy received response with invalid JSON: ${e.message}`;
req.log.warn({ error: e.stack, key: req.key?.hash }, msg);
sendProxyError(req, res, 500, "Internal Server Error", { error: msg });
return reject(msg);
}
});
});
};
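// Minimal usage sketch of the decode path above (assumed buffer contents; not
// part of this diff):
// const raw = Buffer.concat(chunks); // e.g. gzip-compressed JSON
// const body = await DECODER_MAP["gzip"](raw);
// const json = JSON.parse(body.toString());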
@@ -3,38 +3,36 @@ import { pipeline, Readable, Transform } from "stream";
import StreamArray from "stream-json/streamers/StreamArray";
import { StringDecoder } from "string_decoder";
import { promisify } from "util";
import type { logger } from "../../../logger";
import { BadRequestError, RetryableError } from "../../../shared/errors";
import { APIFormat, keyPool } from "../../../shared/key-management";
import {
copySseResponseHeaders,
initializeSseStream,
} from "../../../shared/streaming";
import { reenqueueRequest } from "../../queue";
import type { RawResponseBodyHandler } from ".";
import { handleBlockingResponse } from "./handle-blocking-response";
import { buildSpoofedSSE, sendErrorToClient } from "./error-generator";
import type { logger } from "../../../logger";
import { enqueue } from "../../queue";
import { decodeResponseBody, RawResponseBodyHandler, RetryableError } from ".";
import { getAwsEventStreamDecoder } from "./streaming/aws-event-stream-decoder";
import { EventAggregator } from "./streaming/event-aggregator";
import { SSEMessageTransformer } from "./streaming/sse-message-transformer";
import { SSEStreamAdapter } from "./streaming/sse-stream-adapter";
import { buildSpoofedSSE, sendErrorToClient } from "./error-generator";
import { BadRequestError } from "../../../shared/errors";
const pipelineAsync = promisify(pipeline);
/**
* `handleStreamedResponse` consumes a streamed response from the upstream API,
* decodes chunk-by-chunk into a stream of events, transforms those events into
* the client's requested format, and forwards the result to the client.
*
* `handleStreamedResponse` consumes and transforms a streamed response from the
* upstream service, forwarding events to the client in their requested format.
* After the entire stream has been consumed, it resolves with the full response
* body so that subsequent middleware in the chain can process it as if it were
* a non-streaming response (to count output tokens, track usage, etc).
* a non-streaming response.
*
* In the event of an error, the request's streaming flag is unset and the
* request is bounced back to the non-streaming response handler. If the error
* is retryable, that handler will re-enqueue the request and also reset the
* streaming flag. Unfortunately the streaming flag is set and unset in multiple
* places, so it's hard to keep track of.
* In the event of an error, the request's streaming flag is unset and the non-
* streaming response handler is called instead.
*
* If the error is retryable, that handler will re-enqueue the request and also
* reset the streaming flag. Unfortunately the streaming flag is set and unset
* in multiple places, so it's hard to keep track of.
*/
export const handleStreamedResponse: RawResponseBodyHandler = async (
proxyRes,
@@ -52,7 +50,7 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
{ statusCode: proxyRes.statusCode, key: hash },
`Streaming request returned error status code. Falling back to non-streaming response handler.`
);
return handleBlockingResponse(proxyRes, req, res);
return decodeResponseBody(proxyRes, req, res);
}
req.log.debug({ headers: proxyRes.headers }, `Starting to proxy SSE stream.`);
@@ -71,21 +69,13 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
logger: req.log,
};
// While the request is streaming, aggregator collects all events so that we
// can compile them into a single response object and publish that to the
// remaining middleware. Because we have an OpenAI transformer for every
// supported format, EventAggregator always consumes OpenAI events so that we
// only have to write one aggregator (OpenAI input) for each output format.
const aggregator = new EventAggregator(req);
// Decoder reads from the raw response buffer and produces a stream of
// discrete events in some format (text/event-stream, vnd.amazon.event-stream,
// streaming JSON, etc).
// Decoder turns the raw response stream into a stream of events in some
// format (text/event-stream, vnd.amazon.event-stream, streaming JSON, etc).
const decoder = getDecoder({ ...streamOptions, input: proxyRes });
// Adapter consumes the decoded events and produces server-sent events so we
// have a standard event format for the client and to translate between API
// message formats.
// Adapter transforms the decoded events into server-sent events.
const adapter = new SSEStreamAdapter(streamOptions);
// Aggregator compiles all events into a single response object.
const aggregator = new EventAggregator({ format: req.outboundApi });
// Transformer converts server-sent events from one vendor's API message
// format to another.
const transformer = new SSEMessageTransformer({
@@ -115,7 +105,12 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
} catch (err) {
if (err instanceof RetryableError) {
keyPool.markRateLimited(req.key!);
await reenqueueRequest(req);
req.log.warn(
{ key: req.key!.hash, retryCount: req.retryCount },
`Re-enqueueing request due to retryable error during streaming response.`
);
req.retryCount++;
await enqueue(req);
} else if (err instanceof BadRequestError) {
sendErrorToClient({
req,
@@ -143,17 +138,7 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
res.write(`data: [DONE]\n\n`);
res.end();
}
// At this point the response is closed. If the request resulted in any
// tokens being consumed (suggesting a mid-stream error), we will resolve
// and continue the middleware chain so tokens can be counted.
if (aggregator.hasEvents()) {
return aggregator.getFinalResponse();
} else {
// If there is nothing, then this was a completely failed prompt that
// will not have billed any tokens. Throw to stop the middleware chain.
throw err;
}
throw err;
}
};
+153 -162
@@ -1,9 +1,10 @@
/* This file is fucking horrendous, sorry */
// TODO: extract all per-service error response handling into its own modules
import { Request, Response } from "express";
import * as http from "http";
import { config } from "../../../config";
import { HttpError, RetryableError } from "../../../shared/errors";
import util from "util";
import zlib from "zlib";
import { enqueue, trackWaitTime } from "../../queue";
import { HttpError } from "../../../shared/errors";
import { keyPool } from "../../../shared/key-management";
import { getOpenAIModelFamily } from "../../../shared/models";
import { countTokens } from "../../../shared/tokenization";
@@ -12,7 +13,6 @@ import {
incrementTokenCount,
} from "../../../shared/users/user-store";
import { assertNever } from "../../../shared/utils";
import { reenqueueRequest, trackWaitTime } from "../../queue";
import { refundLastAttempt } from "../../rate-limit";
import {
getCompletionFromBody,
@@ -20,23 +20,39 @@ import {
isTextGenerationRequest,
sendProxyError,
} from "../common";
import { handleBlockingResponse } from "./handle-blocking-response";
import { handleStreamedResponse } from "./handle-streamed-response";
import { logPrompt } from "./log-prompt";
import { logEvent } from "./log-event";
import { saveImage } from "./save-image";
import { config } from "../../../config";
const DECODER_MAP = {
gzip: util.promisify(zlib.gunzip),
deflate: util.promisify(zlib.inflate),
br: util.promisify(zlib.brotliDecompress),
};
const isSupportedContentEncoding = (
contentEncoding: string
): contentEncoding is keyof typeof DECODER_MAP => {
return contentEncoding in DECODER_MAP;
};
export class RetryableError extends Error {
constructor(message: string) {
super(message);
this.name = "RetryableError";
}
}
/**
* Either decodes or streams the entire response body and then resolves with it.
* @returns The response body as a string or parsed JSON object depending on the
* response's content-type.
* Either decodes or streams the entire response body and then passes it as the
* last argument to the rest of the middleware stack.
*/
export type RawResponseBodyHandler = (
proxyRes: http.IncomingMessage,
req: Request,
res: Response
) => Promise<string | Record<string, any>>;
export type ProxyResHandlerWithBody = (
proxyRes: http.IncomingMessage,
req: Request,
@@ -60,10 +76,6 @@ export type ProxyResMiddleware = ProxyResHandlerWithBody[];
* middleware from executing as it consumes the stream and forwards events to
* the client. Once the stream is closed, the finalized body will be attached
* to res.body and the remaining middleware will execute.
*
* @param apiMiddleware - Custom middleware to execute after the common response
* handlers. These *only* execute for non-streaming responses, so should be used
* to transform non-streaming responses into the desired format.
*/
export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
return async (
@@ -71,35 +83,36 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
req: Request,
res: Response
) => {
const initialHandler: RawResponseBodyHandler = req.isStreaming
const initialHandler = req.isStreaming
? handleStreamedResponse
: handleBlockingResponse;
: decodeResponseBody;
let lastMiddleware = initialHandler.name;
try {
const body = await initialHandler(proxyRes, req, res);
const middlewareStack: ProxyResMiddleware = [];
if (req.isStreaming) {
// Handlers for streaming requests must never write to the response.
// `handleStreamedResponse` writes to the response and ends it, so
// we can only execute middleware that doesn't write to the response.
middlewareStack.push(
trackKeyRateLimit,
trackRateLimit,
countResponseTokens,
incrementUsage,
logPrompt,
logEvent
logPrompt
);
} else {
middlewareStack.push(
trackKeyRateLimit,
injectProxyInfo,
trackRateLimit,
addProxyInfo,
handleUpstreamErrors,
countResponseTokens,
incrementUsage,
copyHttpHeaders,
saveImage,
logPrompt,
logEvent,
...apiMiddleware
);
}
@@ -141,6 +154,72 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
};
};
async function reenqueueRequest(req: Request) {
req.log.info(
{ key: req.key?.hash, retryCount: req.retryCount },
`Re-enqueueing request due to retryable error`
);
req.retryCount++;
await enqueue(req);
}
/**
* Handles the response from the upstream service and decodes the body if
* necessary. If the response is JSON, it will be parsed and returned as an
* object. Otherwise, it will be returned as a string.
* @throws {Error} Unsupported content-encoding or invalid application/json body
*/
export const decodeResponseBody: RawResponseBodyHandler = async (
proxyRes,
req,
res
) => {
if (req.isStreaming) {
const err = new Error("decodeResponseBody called for a streaming request.");
req.log.error({ stack: err.stack, api: req.inboundApi }, err.message);
throw err;
}
return new Promise<string>((resolve, reject) => {
let chunks: Buffer[] = [];
proxyRes.on("data", (chunk) => chunks.push(chunk));
proxyRes.on("end", async () => {
let body = Buffer.concat(chunks);
const contentEncoding = proxyRes.headers["content-encoding"];
if (contentEncoding) {
if (isSupportedContentEncoding(contentEncoding)) {
const decoder = DECODER_MAP[contentEncoding];
// @ts-ignore - started failing after upgrading TypeScript, don't care
// as it was never a problem.
body = await decoder(body);
} else {
const error = `Proxy received response with unsupported content-encoding: ${contentEncoding}`;
req.log.warn({ contentEncoding, key: req.key?.hash }, error);
sendProxyError(req, res, 500, "Internal Server Error", {
error,
contentEncoding,
});
return reject(error);
}
}
try {
if (proxyRes.headers["content-type"]?.includes("application/json")) {
const json = JSON.parse(body.toString());
return resolve(json);
}
return resolve(body.toString());
} catch (e) {
const msg = `Proxy received response with invalid JSON: ${e.message}`;
req.log.warn({ error: e.stack, key: req.key?.hash }, msg);
sendProxyError(req, res, 500, "Internal Server Error", { error: msg });
return reject(msg);
}
});
});
};
type ProxiedErrorPayload = {
error?: Record<string, any>;
message?: string;
@@ -163,9 +242,15 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
) => {
const statusCode = proxyRes.statusCode || 500;
const statusMessage = proxyRes.statusMessage || "Internal Server Error";
let errorPayload: ProxiedErrorPayload;
if (statusCode < 400) return;
if (statusCode < 400) {
return;
}
let errorPayload: ProxiedErrorPayload;
const tryAgainMessage = keyPool.available(req.body?.model)
? `There may be more keys available for this model; try again in a few seconds.`
: "There are no more keys available for this model.";
try {
assertJsonResponse(body);
@@ -186,13 +271,6 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
throw new HttpError(statusCode, parseError.message);
}
const service = req.key!.service;
if (service === "gcp") {
if (Array.isArray(errorPayload)) {
errorPayload = errorPayload[0];
}
}
const errorType =
errorPayload.error?.code ||
errorPayload.error?.type ||
@@ -203,23 +281,19 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
`Received error response from upstream. (${proxyRes.statusMessage})`
);
// TODO: split upstream error handling into separate modules for each service,
// this is out of control.
const service = req.key!.service;
if (service === "aws") {
// Try to standardize the error format for AWS
errorPayload.error = { message: errorPayload.message, type: errorType };
delete errorPayload.message;
} else if (service === "gcp") {
// Try to standardize the error format for GCP
if (errorPayload.error?.code) {
// GCP error
errorPayload.error = {
message: errorPayload.error.message,
type: errorPayload.error.status || errorPayload.error.code,
};
}
}
if (statusCode === 400) {
// Bad request. For OpenAI, this is usually due to prompt length.
// For Anthropic, this is usually due to missing preamble.
switch (service) {
case "openai":
case "google-ai":
case "mistral-ai":
case "azure":
const filteredCodes = ["content_policy_violation", "content_filter"];
@@ -229,18 +303,14 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
} else if (errorPayload.error?.code === "billing_hard_limit_reached") {
// For some reason, some models return this 400 error instead of the
// same 429 billing error that other models return.
await handleOpenAIRateLimitError(req, errorPayload);
await handleOpenAIRateLimitError(req, tryAgainMessage, errorPayload);
} else {
errorPayload.proxy_note = `The upstream API rejected the request. Your prompt may be too long for ${req.body?.model}.`;
}
break;
case "anthropic":
case "aws":
case "gcp":
await handleAnthropicAwsBadRequestError(req, errorPayload);
break;
case "google-ai":
await handleGoogleAIBadRequestError(req, errorPayload);
await handleAnthropicBadRequestError(req, errorPayload);
break;
default:
assertNever(service);
@@ -248,61 +318,38 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
} else if (statusCode === 401) {
// Key is invalid or was revoked
keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
errorPayload.proxy_note = `API key is invalid or revoked. ${tryAgainMessage}`;
} else if (statusCode === 403) {
switch (service) {
case "anthropic":
if (
errorType === "permission_error" &&
errorPayload.error?.message?.toLowerCase().includes("multimodal")
) {
req.log.warn(
{ key: req.key?.hash },
"This Anthropic key does not support multimodal prompts."
);
keyPool.update(req.key!, { allowsMultimodality: false });
await reenqueueRequest(req);
throw new RetryableError(
"Claude request re-enqueued because key does not support multimodality."
);
} else {
keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
}
return;
case "aws":
switch (errorType) {
case "UnrecognizedClientException":
// Key is invalid.
keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
break;
case "AccessDeniedException":
const isModelAccessError =
errorPayload.error?.message?.includes(`specified model ID`);
if (!isModelAccessError) {
req.log.error(
{ key: req.key?.hash, model: req.body?.model },
"Disabling key due to AccessDeniedException when invoking model. If credentials are valid, check IAM permissions."
);
keyPool.disable(req.key!, "revoked");
}
errorPayload.proxy_note = `API key doesn't have access to the requested resource. Model ID: ${req.body?.model}`;
break;
default:
errorPayload.proxy_note = `Received 403 error. Key may be invalid.`;
}
return;
case "mistral-ai":
case "gcp":
if (service === "anthropic") {
keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `API key is invalid or revoked. ${tryAgainMessage}`;
return;
}
switch (errorType) {
case "UnrecognizedClientException":
// Key is invalid.
keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
return;
errorPayload.proxy_note = `API key is invalid or revoked. ${tryAgainMessage}`;
break;
case "AccessDeniedException":
const isModelAccessError =
errorPayload.error?.message?.includes(`specified model ID`);
if (!isModelAccessError) {
req.log.error(
{ key: req.key?.hash, model: req.body?.model },
"Disabling key due to AccessDeniedException when invoking model. If credentials are valid, check IAM permissions."
);
keyPool.disable(req.key!, "revoked");
}
errorPayload.proxy_note = `API key doesn't have access to the requested resource. Model ID: ${req.body?.model}`;
break;
default:
errorPayload.proxy_note = `Received 403 error. Key may be invalid.`;
}
} else if (statusCode === 429) {
switch (service) {
case "openai":
await handleOpenAIRateLimitError(req, errorPayload);
await handleOpenAIRateLimitError(req, tryAgainMessage, errorPayload);
break;
case "anthropic":
await handleAnthropicRateLimitError(req, errorPayload);
@@ -310,9 +357,6 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
case "aws":
await handleAwsRateLimitError(req, errorPayload);
break;
case "gcp":
await handleGcpRateLimitError(req, errorPayload);
break;
case "azure":
case "mistral-ai":
await handleAzureRateLimitError(req, errorPayload);
@@ -349,9 +393,6 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
case "aws":
errorPayload.proxy_note = `The requested AWS resource might not exist, or the key might not have access to it.`;
break;
case "gcp":
errorPayload.proxy_note = `The requested GCP resource might not exist, or the key might not have access to it.`;
break;
case "azure":
errorPayload.proxy_note = `The assigned Azure deployment does not support the requested model.`;
break;
@@ -376,7 +417,7 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
throw new HttpError(statusCode, errorPayload.error?.message);
};
async function handleAnthropicAwsBadRequestError(
async function handleAnthropicBadRequestError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
@@ -411,16 +452,14 @@ async function handleAnthropicAwsBadRequestError(
return;
}
const isDisabled =
error?.message?.match(/organization has been disabled/i) ||
error?.message?.match(/^operation not allowed/i);
const isDisabled = error?.message?.match(/organization has been disabled/i);
if (isDisabled) {
req.log.warn(
{ key: req.key?.hash, message: error?.message },
"Anthropic/AWS key has been disabled."
"Anthropic key has been disabled."
);
keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `Assigned key has been disabled. (${error?.message})`;
errorPayload.proxy_note = `Assigned key has been disabled. ${error?.message}`;
return;
}
@@ -458,21 +497,9 @@ async function handleAwsRateLimitError(
}
}
async function handleGcpRateLimitError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
if (errorPayload.error?.type === "RESOURCE_EXHAUSTED") {
keyPool.markRateLimited(req.key!);
await reenqueueRequest(req);
throw new RetryableError("GCP rate-limited request re-enqueued.");
} else {
errorPayload.proxy_note = `Unrecognized 429 Too Many Requests error from GCP.`;
}
}
async function handleOpenAIRateLimitError(
req: Request,
tryAgainMessage: string,
errorPayload: ProxiedErrorPayload
): Promise<Record<string, any>> {
const type = errorPayload.error?.type;
@@ -481,17 +508,17 @@ async function handleOpenAIRateLimitError(
case "invalid_request_error": // this is the billing_hard_limit_reached error seen in some cases
// Billing quota exceeded (key is dead, disable it)
keyPool.disable(req.key!, "quota");
errorPayload.proxy_note = `Assigned key's quota has been exceeded. Please try again.`;
errorPayload.proxy_note = `Assigned key's quota has been exceeded. ${tryAgainMessage}`;
break;
case "access_terminated":
// Account banned (key is dead, disable it)
keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `Assigned key has been banned by OpenAI for policy violations. Please try again.`;
errorPayload.proxy_note = `Assigned key has been banned by OpenAI for policy violations. ${tryAgainMessage}`;
break;
case "billing_not_active":
// Key valid but account billing is delinquent
keyPool.disable(req.key!, "quota");
errorPayload.proxy_note = `Assigned key has been disabled due to delinquent billing. Please try again.`;
errorPayload.proxy_note = `Assigned key has been disabled due to delinquent billing. ${tryAgainMessage}`;
break;
case "requests":
case "tokens":
@@ -556,7 +583,7 @@ async function handleOpenAIRateLimitError(
// keyPool.markRateLimited(req.key!);
// break;
default:
errorPayload.proxy_note = `This is likely a temporary error with the API. Try again in a few seconds.`;
errorPayload.proxy_note = `This is likely a temporary error with OpenAI. Try again in a few seconds.`;
break;
}
return errorPayload;
@@ -578,42 +605,6 @@ async function handleAzureRateLimitError(
}
}
//{"error":{"code":400,"message":"API Key not found. Please pass a valid API key.","status":"INVALID_ARGUMENT","details":[{"@type":"type.googleapis.com/google.rpc.ErrorInfo","reason":"API_KEY_INVALID","domain":"googleapis.com","metadata":{"service":"generativelanguage.googleapis.com"}}]}}
//{"error":{"code":400,"message":"Gemini API free tier is not available in your country. Please enable billing on your project in Google AI Studio.","status":"FAILED_PRECONDITION"}}
async function handleGoogleAIBadRequestError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
const error = errorPayload.error || {};
const { message, status, details } = error;
if (status === "INVALID_ARGUMENT") {
const reason = details?.[0]?.reason;
if (reason === "API_KEY_INVALID") {
req.log.warn(
{ key: req.key?.hash, status, reason, msg: error.message },
"Received `API_KEY_INVALID` error from Google AI. Check the configured API key."
);
keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `Assigned API key is invalid.`;
}
} else if (status === "FAILED_PRECONDITION") {
if (message.match(/please enable billing/i)) {
req.log.warn(
{ key: req.key?.hash, status, msg: error.message },
"Cannot use key due to billing restrictions."
);
keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `Assigned API key cannot be used.`;
}
} else {
req.log.warn(
{ key: req.key?.hash, status, msg: error.message },
"Received unexpected 400 error from Google AI."
);
}
}
//{"error":{"code":429,"message":"Resource has been exhausted (e.g. check quota).","status":"RESOURCE_EXHAUSTED"}
async function handleGoogleAIRateLimitError(
req: Request,
@@ -693,7 +684,7 @@ const countResponseTokens: ProxyResHandlerWithBody = async (
}
};
const trackKeyRateLimit: ProxyResHandlerWithBody = async (proxyRes, req) => {
const trackRateLimit: ProxyResHandlerWithBody = async (proxyRes, req) => {
keyPool.updateRateLimits(req.key!, proxyRes.headers);
};
@@ -723,7 +714,7 @@ const copyHttpHeaders: ProxyResHandlerWithBody = async (
* or transformed.
* Only used for non-streaming requests.
*/
const injectProxyInfo: ProxyResHandlerWithBody = async (
const addProxyInfo: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
@@ -1,81 +0,0 @@
import { createHash } from "crypto";
import { config } from "../../../config";
import { eventLogger } from "../../../shared/prompt-logging";
import { getModelFromBody, isTextGenerationRequest } from "../common";
import { ProxyResHandlerWithBody } from ".";
import {
OpenAIChatMessage,
AnthropicChatMessage,
} from "../../../shared/api-schemas";
/** If event logging is enabled, logs a chat completion event. */
export const logEvent: ProxyResHandlerWithBody = async (
_proxyRes,
req,
_res,
responseBody
) => {
if (!config.eventLogging) {
return;
}
if (typeof responseBody !== "object") {
throw new Error("Expected body to be an object");
}
if (!["openai", "anthropic-chat"].includes(req.outboundApi)) {
// only chat apis are supported
return;
}
if (!req.user) {
return;
}
const loggable = isTextGenerationRequest(req);
if (!loggable) return;
const messages = req.body.messages as
| OpenAIChatMessage[]
| AnthropicChatMessage[];
let hashes = [];
hashes.push(hashMessages(messages));
for (
let i = 1;
i <= Math.min(config.eventLoggingTrim!, messages.length);
i++
) {
hashes.push(hashMessages(messages.slice(0, -i)));
}
const model = getModelFromBody(req, responseBody);
const userToken = req.user!.token;
const family = req.modelFamily!;
eventLogger.logEvent({
ip: req.ip,
type: "chat_completion",
model,
family,
hashes,
userToken,
inputTokens: req.promptTokens ?? 0,
outputTokens: req.outputTokens ?? 0,
});
};
const hashMessages = (
messages: OpenAIChatMessage[] | AnthropicChatMessage[]
): string => {
let hasher = createHash("sha256");
let messageTexts = [];
for (const msg of messages) {
if (!["system", "user", "assistant"].includes(msg.role)) continue;
if (typeof msg.content === "string") {
messageTexts.push(msg.content);
} else if (Array.isArray(msg.content)) {
if (msg.content[0].type === "text") {
messageTexts.push(msg.content[0].text);
}
}
}
hasher.update(messageTexts.join("<|im_sep|>"));
return hasher.digest("hex");
};
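
// A minimal sketch of how the trimmed-prefix hashing above behaves, assuming
// an `eventLoggingTrim` of 2. A later continuation of the same chat shares a
// prefix with the earlier request, so matching any prefix hash links the two
// logged events.
const demoConvo = [
  { role: "system", content: "You are a helpful assistant." },
  { role: "user", content: "Hello!" },
  { role: "assistant", content: "Hi there." },
];
const demoHashes = [
  hashMessages(demoConvo as OpenAIChatMessage[]), // full conversation
  hashMessages(demoConvo.slice(0, -1) as OpenAIChatMessage[]), // minus last message
  hashMessages(demoConvo.slice(0, -2) as OpenAIChatMessage[]), // minus last two
];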
+13 -41
@@ -12,10 +12,10 @@ import { assertNever } from "../../../shared/utils";
import {
AnthropicChatMessage,
flattenAnthropicMessages,
GoogleAIChatMessage,
MistralAIChatMessage,
OpenAIChatMessage,
} from "../../../shared/api-schemas";
} from "../../../shared/api-support";
import { APIFormat } from "../../../shared/key-management";
/** If prompt logging is enabled, enqueues the prompt for logging. */
export const logPrompt: ProxyResHandlerWithBody = async (
@@ -36,7 +36,7 @@ export const logPrompt: ProxyResHandlerWithBody = async (
if (!loggable) return;
const promptPayload = getPromptForRequest(req, responseBody);
const promptFlattened = flattenMessages(promptPayload);
const promptFlattened = flattenMessages(promptPayload, req.outboundApi);
const response = getCompletionFromBody(req, responseBody);
const model = getModelFromBody(req, responseBody);
@@ -63,8 +63,7 @@ const getPromptForRequest = (
):
| string
| OpenAIChatMessage[]
| { contents: GoogleAIChatMessage[] }
| { system: string; messages: AnthropicChatMessage[] }
| AnthropicChatMessage[]
| MistralAIChatMessage[]
| OaiImageResult => {
// Since the prompt logger only runs after the request has been proxied, we
@@ -73,12 +72,9 @@ const getPromptForRequest = (
switch (req.outboundApi) {
case "openai":
case "mistral-ai":
return req.body.messages;
case "anthropic-chat":
return { system: req.body.system, messages: req.body.messages };
return req.body.messages;
case "openai-text":
case "anthropic-text":
case "mistral-text":
return req.body.prompt;
case "openai-image":
return {
@@ -88,8 +84,10 @@ const getPromptForRequest = (
quality: req.body.quality,
revisedPrompt: responseBody.data[0].revised_prompt,
};
case "anthropic-text":
return req.body.prompt;
case "google-ai":
return { contents: req.body.contents };
return req.body.prompt.text;
default:
assertNever(req.outboundApi);
}
@@ -100,24 +98,15 @@ const flattenMessages = (
| string
| OaiImageResult
| OpenAIChatMessage[]
| { contents: GoogleAIChatMessage[] }
| { system: string; messages: AnthropicChatMessage[] }
| MistralAIChatMessage[]
| AnthropicChatMessage[]
| MistralAIChatMessage[],
format: APIFormat
): string => {
if (typeof val === "string") {
return val.trim();
}
if (isAnthropicChatPrompt(val)) {
const { system, messages } = val;
return `System: ${system}\n\n${flattenAnthropicMessages(messages)}`;
}
if (isGoogleAIChatPrompt(val)) {
return val.contents
.map(({ parts, role }) => {
const text = parts.map((p) => p.text).join("\n");
return `${role}: ${text}`;
})
.join("\n");
if (format === "anthropic-chat") {
return flattenAnthropicMessages(val as AnthropicChatMessage[]);
}
if (Array.isArray(val)) {
return val
@@ -138,20 +127,3 @@ const flattenMessages = (
}
return val.prompt.trim();
};
function isGoogleAIChatPrompt(
val: unknown
): val is { contents: GoogleAIChatMessage[] } {
return typeof val === "object" && val !== null && "contents" in val;
}
function isAnthropicChatPrompt(
val: unknown
): val is { system: string; messages: AnthropicChatMessage[] } {
return (
typeof val === "object" &&
val !== null &&
"system" in val &&
"messages" in val
);
}
@@ -1,39 +0,0 @@
import { OpenAIChatCompletionStreamEvent } from "../index";
export type MistralChatCompletionResponse = {
choices: {
index: number;
message: { role: string; content: string };
finish_reason: string | null;
}[];
};
/**
* Given a list of OpenAI chat completion events, compiles them into a single
* finalized Mistral chat completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForMistralChat(
events: OpenAIChatCompletionStreamEvent[]
): MistralChatCompletionResponse {
let merged: MistralChatCompletionResponse = {
choices: [
{ index: 0, message: { role: "", content: "" }, finish_reason: "" },
],
};
merged = events.reduce((acc, event, i) => {
// The first event will only contain role assignment and response metadata
if (i === 0) {
acc.choices[0].message.role = event.choices[0].delta.role ?? "assistant";
return acc;
}
acc.choices[0].finish_reason = event.choices[0].finish_reason ?? "";
if (event.choices[0].delta.content) {
acc.choices[0].message.content += event.choices[0].delta.content;
}
return acc;
}, merged);
return merged;
}
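
// A quick illustration with hypothetical OpenAI-style stream chunks, showing
// only the fields the reducer above actually reads:
const demoChatEvents = [
  { choices: [{ index: 0, delta: { role: "assistant" }, finish_reason: null }] },
  { choices: [{ index: 0, delta: { content: "Hello" }, finish_reason: null }] },
  { choices: [{ index: 0, delta: { content: ", world." }, finish_reason: "stop" }] },
] as unknown as OpenAIChatCompletionStreamEvent[];
// mergeEventsForMistralChat(demoChatEvents) yields:
// { choices: [{ index: 0, message: { role: "assistant", content: "Hello, world." },
//   finish_reason: "stop" }] }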
@@ -1,33 +0,0 @@
import { OpenAIChatCompletionStreamEvent } from "../index";
export type MistralTextCompletionResponse = {
outputs: {
text: string;
stop_reason: string | null;
}[];
};
/**
* Given a list of OpenAI chat completion events, compiles them into a single
* finalized Mistral text completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForMistralText(
events: OpenAIChatCompletionStreamEvent[]
): MistralTextCompletionResponse {
let merged: MistralTextCompletionResponse = {
outputs: [{ text: "", stop_reason: "" }],
};
merged = events.reduce((acc, event, i) => {
// The first event will only contain role assignment and response metadata
if (i === 0) {
return acc;
}
acc.outputs[0].text += event.choices[0].delta.content ?? "";
acc.outputs[0].stop_reason = event.choices[0].finish_reason ?? "";
return acc;
}, merged);
return merged;
}
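
// Given the same three chunks as in the chat aggregator's example, the reducer
// above would produce { outputs: [{ text: "Hello, world.", stop_reason: "stop" }] },
// since the first chunk carries only the role assignment and is skipped.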
@@ -24,7 +24,7 @@ export function getAwsEventStreamDecoder(params: {
if (eventType === "chunk") {
result = input[eventType];
} else {
// AWS unmarshaller treats non-chunk events (errors and exceptions) oddly.
// The AWS unmarshaller treats non-chunk events (errors and exceptions) oddly.
result = { [eventType]: input[eventType] } as any;
}
return result;
@@ -1,4 +1,3 @@
import express from "express";
import { APIFormat } from "../../../../shared/key-management";
import { assertNever } from "../../../../shared/utils";
import {
@@ -7,13 +6,8 @@ import {
mergeEventsForAnthropicText,
mergeEventsForOpenAIChat,
mergeEventsForOpenAIText,
mergeEventsForMistralChat,
mergeEventsForMistralText,
AnthropicV2StreamEvent,
OpenAIChatCompletionStreamEvent,
mistralAIToOpenAI,
MistralAIStreamEvent,
MistralChatCompletionEvent,
} from "./index";
/**
@@ -21,70 +15,45 @@ import {
* compiles them into a single finalized response for downstream middleware.
*/
export class EventAggregator {
private readonly model: string;
private readonly requestFormat: APIFormat;
private readonly responseFormat: APIFormat;
private readonly format: APIFormat;
private readonly events: OpenAIChatCompletionStreamEvent[];
constructor({ body, inboundApi, outboundApi }: express.Request) {
constructor({ format }: { format: APIFormat }) {
this.events = [];
this.requestFormat = inboundApi;
this.responseFormat = outboundApi;
this.model = body.model;
this.format = format;
}
addEvent(
event:
| OpenAIChatCompletionStreamEvent
| AnthropicV2StreamEvent
| MistralAIStreamEvent
) {
addEvent(event: OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent) {
if (eventIsOpenAIEvent(event)) {
this.events.push(event);
} else {
// horrible special case. previously all transformers' target format was
// openai, so the event aggregator could conveniently assume all incoming
// events were in openai format.
// now we have added some transformers that convert between non-openai
// formats, so aggregator needs to know how to collapse for more than
// just openai.
// because writing aggregation logic for every possible output format is
// annoying, we will just transform any non-openai output events to openai
// format (even if the client did not request openai at all) so that we
// still only need to write aggregators for openai SSEs.
let openAIEvent: OpenAIChatCompletionStreamEvent | undefined;
switch (this.requestFormat) {
case "anthropic-text":
assertIsAnthropicV2Event(event);
openAIEvent = anthropicV2ToOpenAI({
data: `event: completion\ndata: ${JSON.stringify(event)}\n\n`,
lastPosition: -1,
index: 0,
fallbackId: event.log_id || "fallback-" + Date.now(),
fallbackModel: event.model || this.model || "fallback-claude-3",
})?.event;
break;
case "mistral-ai":
assertIsMistralChatEvent(event);
openAIEvent = mistralAIToOpenAI({
data: `data: ${JSON.stringify(event)}\n\n`,
lastPosition: -1,
index: 0,
fallbackId: "fallback-" + Date.now(),
fallbackModel: this.model || "fallback-mistral",
})?.event;
break;
}
if (openAIEvent) {
this.events.push(openAIEvent);
// now we have added anthropic-chat-to-text, so aggregator needs to know
// how to collapse events from two formats.
// because that is annoying, we will simply transform anthropic events to
// openai (even if the client didn't ask for openai) so we don't have to
// write aggregation logic for anthropic chat (which is also a troublesome
// stateful format).
const openAIEvent = anthropicV2ToOpenAI({
data: `event: completion\ndata: ${JSON.stringify(event)}\n\n`,
lastPosition: -1,
index: 0,
fallbackId: event.log_id || "event-aggregator-fallback",
fallbackModel: event.model || "claude-3-fallback",
});
if (openAIEvent.event) {
this.events.push(openAIEvent.event);
}
}
}
getFinalResponse() {
switch (this.responseFormat) {
switch (this.format) {
case "openai":
case "google-ai": // TODO: this is probably wrong now that we support native Google Makersuite prompts
case "google-ai":
case "mistral-ai":
return mergeEventsForOpenAIChat(this.events);
case "openai-text":
return mergeEventsForOpenAIText(this.events);
@@ -92,22 +61,12 @@ export class EventAggregator {
return mergeEventsForAnthropicText(this.events);
case "anthropic-chat":
return mergeEventsForAnthropicChat(this.events);
case "mistral-ai":
return mergeEventsForMistralChat(this.events);
case "mistral-text":
return mergeEventsForMistralText(this.events);
case "openai-image":
throw new Error(
`SSE aggregation not supported for ${this.responseFormat}`
);
throw new Error(`SSE aggregation not supported for ${this.format}`);
default:
assertNever(this.responseFormat);
assertNever(this.format);
}
}
hasEvents() {
return this.events.length > 0;
}
}
function eventIsOpenAIEvent(
@@ -115,17 +74,3 @@ function eventIsOpenAIEvent(
): event is OpenAIChatCompletionStreamEvent {
return event?.object === "chat.completion.chunk";
}
function assertIsAnthropicV2Event(event: any): asserts event is AnthropicV2StreamEvent {
if (!event?.completion) {
throw new Error(`Bad event for Anthropic V2 SSE aggregation`);
}
}
function assertIsMistralChatEvent(
event: any
): asserts event is MistralChatCompletionEvent {
if (!event?.choices) {
throw new Error(`Bad event for Mistral SSE aggregation`);
}
}
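
// A sketch of the aggregator's intended call pattern in a streaming response
// handler. The event source below is illustrative; in practice events come
// from the SSE message transformer.
declare const transformedEvents: (
  | OpenAIChatCompletionStreamEvent
  | AnthropicV2StreamEvent
)[];
const aggregator = new EventAggregator({ format: "openai" });
for (const event of transformedEvents) {
  // Anthropic V2 events are converted to OpenAI chunks on the way in, so only
  // OpenAI-format aggregation logic is ever needed.
  aggregator.addEvent(event);
}
// Once the stream ends, collapse everything into a blocking-style body for the
// non-streaming middleware chain.
const finalBody = aggregator.hasEvents() ? aggregator.getFinalResponse() : null;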
@@ -7,25 +7,6 @@ export type SSEResponseTransformArgs<S = Record<string, any>> = {
state?: S;
};
export type MistralChatCompletionEvent = {
choices: {
index: number;
message: { role: string; content: string };
stop_reason: string | null;
}[];
};
export type MistralTextCompletionEvent = {
outputs: { text: string; stop_reason: string | null }[];
};
export type MistralAIStreamEvent = {
"amazon-bedrock-invocationMetrics"?: {
inputTokenCount: number;
outputTokenCount: number;
invocationLatency: number;
firstByteLatency: number;
};
} & (MistralChatCompletionEvent | MistralTextCompletionEvent);
export type AnthropicV2StreamEvent = {
log_id?: string;
model?: string;
@@ -60,12 +41,8 @@ export { anthropicV2ToOpenAI } from "./transformers/anthropic-v2-to-openai";
export { anthropicChatToAnthropicV2 } from "./transformers/anthropic-chat-to-anthropic-v2";
export { anthropicChatToOpenAI } from "./transformers/anthropic-chat-to-openai";
export { googleAIToOpenAI } from "./transformers/google-ai-to-openai";
export { mistralAIToOpenAI } from "./transformers/mistral-ai-to-openai";
export { mistralTextToMistralChat } from "./transformers/mistral-text-to-mistral-chat";
export { passthroughToOpenAI } from "./transformers/passthrough-to-openai";
export { mergeEventsForOpenAIChat } from "./aggregators/openai-chat";
export { mergeEventsForOpenAIText } from "./aggregators/openai-text";
export { mergeEventsForAnthropicText } from "./aggregators/anthropic-text";
export { mergeEventsForAnthropicChat } from "./aggregators/anthropic-chat";
export { mergeEventsForMistralChat } from "./aggregators/mistral-chat";
export { mergeEventsForMistralText } from "./aggregators/mistral-text";
@@ -11,11 +11,8 @@ import {
googleAIToOpenAI,
OpenAIChatCompletionStreamEvent,
openAITextToOpenAIChat,
mistralAIToOpenAI,
mistralTextToMistralChat,
passthroughToOpenAI,
StreamingCompletionTransformer,
MistralChatCompletionEvent,
} from "./index";
type SSEMessageTransformerOptions = TransformOptions & {
@@ -38,9 +35,7 @@ export class SSEMessageTransformer extends Transform {
private readonly inputFormat: APIFormat;
private readonly transformFn: StreamingCompletionTransformer<
// TODO: Refactor transformers to not assume only OpenAI events as output
| OpenAIChatCompletionStreamEvent
| AnthropicV2StreamEvent
| MistralChatCompletionEvent
OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent
>;
private readonly log;
private readonly fallbackId: string;
@@ -126,17 +121,16 @@ function eventIsOpenAIEvent(
function getTransformer(
responseApi: APIFormat,
version?: string,
// In most cases, we are transforming back to OpenAI. Some responses can be
// translated between two non-OpenAI formats, eg Anthropic Chat -> Anthropic
// Text, or Mistral Text -> Mistral Chat.
// There's only one case where we're not transforming back to OpenAI, which is
// Anthropic Chat response -> Anthropic Text request. This parameter is only
// used for that case.
requestApi: APIFormat = "openai"
): StreamingCompletionTransformer<
| OpenAIChatCompletionStreamEvent
| AnthropicV2StreamEvent
| MistralChatCompletionEvent
OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent
> {
switch (responseApi) {
case "openai":
case "mistral-ai":
return passthroughToOpenAI;
case "openai-text":
return openAITextToOpenAIChat;
@@ -146,16 +140,10 @@ function getTransformer(
: anthropicV2ToOpenAI;
case "anthropic-chat":
return requestApi === "anthropic-text"
? anthropicChatToAnthropicV2 // User's legacy text prompt was converted to chat, and response must be converted back to text
? anthropicChatToAnthropicV2
: anthropicChatToOpenAI;
case "google-ai":
return googleAIToOpenAI;
case "mistral-ai":
return mistralAIToOpenAI;
case "mistral-text":
return requestApi === "mistral-ai"
? mistralTextToMistralChat // User's chat request was converted to text, and response must be converted back to chat
: mistralAIToOpenAI;
case "openai-image":
throw new Error(`SSE transformation not supported for ${responseApi}`);
default:
@@ -2,8 +2,9 @@ import pino from "pino";
import { Transform, TransformOptions } from "stream";
import { Message } from "@smithy/eventstream-codec";
import { APIFormat } from "../../../../shared/key-management";
import { RetryableError } from "../index";
import { buildSpoofedSSE } from "../error-generator";
import { BadRequestError, RetryableError } from "../../../../shared/errors";
import { BadRequestError } from "../../../../shared/errors";
type SSEStreamAdapterOptions = TransformOptions & {
contentType?: string;
@@ -55,10 +56,8 @@ export class SSEStreamAdapter extends Transform {
if ("completion" in eventObj) {
return ["event: completion", `data: ${event}`].join(`\n`);
} else if (eventObj.type) {
return [`event: ${eventObj.type}`, `data: ${event}`].join(`\n`);
} else {
return `data: ${event}`;
return [`event: ${eventObj.type}`, `data: ${event}`].join(`\n`);
}
}
// noinspection FallThroughInSwitchStatementJS -- non-JSON data is unexpected
@@ -118,7 +117,7 @@ export class SSEStreamAdapter extends Transform {
try {
const hasParts = candidates[0].content?.parts?.length > 0;
if (hasParts) {
return `data: ${JSON.stringify(data.value ?? data)}`;
return `data: ${JSON.stringify(data)}`;
} else {
this.log.error({ event: data }, "Received bad Google AI event");
return `data: ${buildSpoofedSSE({
@@ -34,7 +34,7 @@ export const anthropicChatToOpenAI: StreamingCompletionTransformer = (
model: params.fallbackModel,
choices: [
{
index: 0,
index: params.index,
delta: { content: deltaEvent.delta.text },
finish_reason: null,
},
@@ -1,76 +0,0 @@
import { logger } from "../../../../../logger";
import { MistralAIStreamEvent, SSEResponseTransformArgs } from "../index";
import { parseEvent, ServerSentEvent } from "../parse-sse";
const log = logger.child({
module: "sse-transformer",
transformer: "mistral-ai-to-openai",
});
export const mistralAIToOpenAI = (params: SSEResponseTransformArgs) => {
const { data } = params;
const rawEvent = parseEvent(data);
if (!rawEvent.data || rawEvent.data === "[DONE]") {
return { position: -1 };
}
const completionEvent = asCompletion(rawEvent);
if (!completionEvent) {
return { position: -1 };
}
if ("choices" in completionEvent) {
const newChatEvent = {
id: params.fallbackId,
object: "chat.completion.chunk" as const,
created: Date.now(),
model: params.fallbackModel,
choices: [
{
index: completionEvent.choices[0].index,
delta: { content: completionEvent.choices[0].message.content },
finish_reason: completionEvent.choices[0].stop_reason,
},
],
};
return { position: -1, event: newChatEvent };
} else if ("outputs" in completionEvent) {
const newTextEvent = {
id: params.fallbackId,
object: "chat.completion.chunk" as const,
created: Date.now(),
model: params.fallbackModel,
choices: [
{
index: 0,
delta: { content: completionEvent.outputs[0].text },
finish_reason: completionEvent.outputs[0].stop_reason,
},
],
};
return { position: -1, event: newTextEvent };
}
// should never happen
return { position: -1 };
};
function asCompletion(event: ServerSentEvent): MistralAIStreamEvent | null {
try {
const parsed = JSON.parse(event.data);
if (
(Array.isArray(parsed.choices) &&
parsed.choices[0].message !== undefined) ||
(Array.isArray(parsed.outputs) && parsed.outputs[0].text !== undefined)
) {
return parsed;
} else {
// noinspection ExceptionCaughtLocallyJS
throw new Error("Missing required fields");
}
} catch (error) {
log.warn({ error: error.stack, event }, "Received invalid data event");
}
return null;
}
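
// A usage sketch with a hypothetical raw SSE payload from a Mistral chat
// stream; the transformer emits an equivalent OpenAI chat.completion.chunk.
const demo = mistralAIToOpenAI({
  data: 'data: {"choices":[{"index":0,"message":{"role":"assistant","content":"Hi"},"stop_reason":null}]}',
  lastPosition: -1,
  index: 0,
  fallbackId: "demo-id",
  fallbackModel: "mistral-large-latest",
});
// demo.event.choices[0].delta.content === "Hi"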
@@ -1,63 +0,0 @@
import {
MistralChatCompletionEvent,
MistralTextCompletionEvent,
StreamingCompletionTransformer,
} from "../index";
import { parseEvent, ServerSentEvent } from "../parse-sse";
import { logger } from "../../../../../logger";
const log = logger.child({
module: "sse-transformer",
transformer: "mistral-text-to-mistral-chat",
});
/**
* Transforms an incoming Mistral Text SSE to an equivalent Mistral Chat SSE.
* This is generally used when a client sends a Mistral Chat prompt, but we
* convert it to Mistral Text before sending it to the API to work around
* some bugs in Mistral/AWS prompt templating. In these cases we need to convert
* the response back to Mistral Chat.
*/
export const mistralTextToMistralChat: StreamingCompletionTransformer<
MistralChatCompletionEvent
> = (params) => {
const { data } = params;
const rawEvent = parseEvent(data);
if (!rawEvent.data) {
return { position: -1 };
}
const textCompletion = asTextCompletion(rawEvent);
if (!textCompletion) {
return { position: -1 };
}
const chatEvent: MistralChatCompletionEvent = {
choices: [
{
index: 0,
message: { role: "assistant", content: textCompletion.outputs[0].text },
stop_reason: textCompletion.outputs[0].stop_reason,
},
],
};
return { position: -1, event: chatEvent };
};
function asTextCompletion(
event: ServerSentEvent
): MistralTextCompletionEvent | null {
try {
const parsed = JSON.parse(event.data);
if (Array.isArray(parsed.outputs) && parsed.outputs[0].text !== undefined) {
return parsed;
} else {
// noinspection ExceptionCaughtLocallyJS
throw new Error("Missing required fields");
}
} catch (error: any) {
log.warn({ error: error.stack, event }, "Received invalid data event");
}
return null;
}
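
// This closes the loop with the request-side chat-to-text conversion: e.g. a
// raw event of data: {"outputs":[{"text":"Hi","stop_reason":null}]} becomes
// { choices: [{ index: 0, message: { role: "assistant", content: "Hi" },
//   stop_reason: null }] } for clients that originally sent a chat request.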
+20 -74
@@ -1,4 +1,4 @@
import express, { Request, RequestHandler, Router } from "express";
import { RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { keyPool } from "../shared/key-management";
@@ -22,46 +22,27 @@ import {
ProxyResHandlerWithBody,
} from "./middleware/response";
// Mistral can't settle on a single naming scheme and deprecates models within
// months of releasing them, so this list is hard to keep up to date. 2024-07-28
// https://docs.mistral.ai/platform/endpoints
export const KNOWN_MISTRAL_AI_MODELS = [
/*
Mistral Nemo
"A 12B model built with the partnership with Nvidia. It is easy to use and a
drop-in replacement in any system using Mistral 7B that it supersedes."
*/
"open-mistral-nemo",
"open-mistral-nemo-2407",
/*
Mistral Large
"Our flagship model with state-of-the-art reasoning, knowledge, and coding
capabilities."
*/
"mistral-large-latest",
"mistral-large-2407",
"mistral-large-2402", // deprecated
/*
Codestral
"A cutting-edge generative model that has been specifically designed and
optimized for code generation tasks, including fill-in-the-middle and code
completion."
note: this uses a separate bidi completion endpoint that is not implemented
*/
"codestral-latest",
"codestral-2405",
/* So-called "Research Models" */
// Mistral 7b (open weight, legacy)
"open-mistral-7b",
"mistral-tiny-2312",
// Mixtral 8x7b (open weight, legacy)
"open-mixtral-8x7b",
"open-mistral-8x22b",
"open-codestral-mamba",
/* Deprecated production models */
"mistral-small-2312",
// Mixtral Small (newer 8x7b, closed weight)
"mistral-small-latest",
"mistral-small-2402",
// Mistral Medium
"mistral-medium-latest",
"mistral-medium-2312",
// Mistral Large
"mistral-large-latest",
"mistral-large-2402",
// Deprecated identifiers (2024-05-01)
"mistral-tiny",
"mistral-tiny-2312",
"mistral-small",
"mistral-medium",
];
let modelsCache: any = null;
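
// For reference, a sketch of the OpenAI-style list shape this cache is assumed
// to hold (the exact fields are defined by generateModelList below):
// {
//   object: "list",
//   data: [{ id: "mistral-large-latest", object: "model", owned_by: "mistralai", ... }, ...]
// }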
@@ -89,7 +70,7 @@ export function generateModelList(models = KNOWN_MISTRAL_AI_MODELS) {
}
const handleModelRequest: RequestHandler = (_req, res) => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return res.status(200).json(modelsCache);
}
const result = generateModelList();
@@ -108,24 +89,9 @@ const mistralAIResponseHandler: ProxyResHandlerWithBody = async (
throw new Error("Expected body to be an object");
}
let newBody = body;
if (req.inboundApi === "mistral-text" && req.outboundApi === "mistral-ai") {
newBody = transformMistralTextToMistralChat(body);
}
res.status(200).json({ ...newBody, proxy: body.proxy });
res.status(200).json({ ...body, proxy: body.proxy });
};
export function transformMistralTextToMistralChat(textBody: any) {
return {
...textBody,
choices: [
{ message: { content: textBody.outputs[0].text, role: "assistant" } },
],
outputs: undefined,
};
}
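
// Shape-wise, a text completion body such as
//   { model: "mistral-large-latest", outputs: [{ text: "Hi", stop_reason: "stop" }] }
// comes back to the chat client as
//   { model: "mistral-large-latest", choices: [{ message: { content: "Hi", role: "assistant" } }], outputs: undefined }
const demoChatBody = transformMistralTextToMistralChat({
  model: "mistral-large-latest",
  outputs: [{ text: "Hi", stop_reason: "stop" }],
});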
const mistralAIProxy = createQueueMiddleware({
proxyMiddleware: createProxyMiddleware({
target: "https://api.mistral.ai",
@@ -148,32 +114,12 @@ mistralAIRouter.get("/v1/models", handleModelRequest);
mistralAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{
inApi: "mistral-ai",
outApi: "mistral-ai",
service: "mistral-ai",
},
{ beforeTransform: [detectMistralInputApi] }
),
createPreprocessorMiddleware({
inApi: "mistral-ai",
outApi: "mistral-ai",
service: "mistral-ai",
}),
mistralAIProxy
);
/**
* We can't determine if a request is Mistral text or chat just from the path
* because they both use the same endpoint. We need to check the request body
* for either `messages` or `prompt`.
* @param req
*/
export function detectMistralInputApi(req: Request) {
const { messages, prompt } = req.body;
if (messages) {
req.inboundApi = "mistral-ai";
req.outboundApi = "mistral-ai";
} else if (prompt) {
req.inboundApi = "mistral-text";
req.outboundApi = "mistral-text";
}
}
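
// Usage sketch with illustrative stubs: a body containing `messages` is
// treated as a chat request, while a body containing `prompt` is treated as a
// raw text completion.
const chatStub = { body: { messages: [{ role: "user", content: "Hi" }] } } as unknown as Request;
detectMistralInputApi(chatStub);
// chatStub.inboundApi === "mistral-ai"
const textStub = { body: { prompt: "[INST] Hi [/INST]" } } as unknown as Request;
detectMistralInputApi(textStub);
// textStub.inboundApi === "mistral-text"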
export const mistralAI = mistralAIRouter;
+12 -30
@@ -28,42 +28,24 @@ import {
// https://platform.openai.com/docs/models/overview
export const KNOWN_OPENAI_MODELS = [
// GPT4o
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
// GPT4o Mini
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
// GPT4 Turbo (superseded by GPT4o)
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09", // gpt4-turbo stable, with vision
"gpt-4-turbo-preview", // alias for latest turbo preview
"gpt-4-0125-preview", // gpt4-turbo preview 2
"gpt-4-1106-preview", // gpt4-turbo preview 1
// Launch GPT4
"gpt-4-turbo-preview",
"gpt-4-0125-preview",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4",
"gpt-4-0613",
"gpt-4-0314", // legacy
// GPT3.5 Turbo (superseded by GPT4o Mini)
"gpt-4-0314", // EOL 2024-06-13
"gpt-4-32k",
"gpt-4-32k-0314", // EOL 2024-06-13
"gpt-4-32k-0613",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125", // latest turbo
"gpt-3.5-turbo-1106", // older turbo
// Text Completion
"gpt-3.5-turbo-0301", // EOL 2024-06-13
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-instruct",
"gpt-3.5-turbo-instruct-0914",
// Embeddings
"text-embedding-ada-002",
// Known deprecated models
"gpt-4-32k", // alias for 0613
"gpt-4-32k-0314", // EOL 2025-06-06
"gpt-4-32k-0613", // EOL 2025-06-06
"gpt-4-vision-preview", // EOL 2024-12-06
"gpt-4-1106-vision-preview", // EOL 2024-12-06
"gpt-3.5-turbo-0613", // EOL 2024-09-13
"gpt-3.5-turbo-0301", // not on the website anymore, maybe unavailable
"gpt-3.5-turbo-16k", // alias for 0613
"gpt-3.5-turbo-16k-0613", // EOL 2024-09-13
];
let modelsCache: any = null;
+5 -14
@@ -12,7 +12,7 @@
*/
import crypto from "crypto";
import { Handler, Request } from "express";
import type { Handler, Request } from "express";
import { BadRequestError, TooManyRequestsError } from "../shared/errors";
import { keyPool } from "../shared/key-management";
import {
@@ -30,10 +30,10 @@ import { sendErrorToClient } from "./middleware/response/error-generator";
const queue: Request[] = [];
const log = logger.child({ module: "request-queue" });
/** Maximum number of queue slots for individual users. */
const USER_CONCURRENCY_LIMIT = parseInt(process.env.USER_CONCURRENCY_LIMIT ?? "1");
/** Maximum number of queue slots for Agnai.chat requests. */
const AGNAI_CONCURRENCY_LIMIT = USER_CONCURRENCY_LIMIT * 5;
const AGNAI_CONCURRENCY_LIMIT = 5;
/** Maximum number of queue slots for individual users. */
const USER_CONCURRENCY_LIMIT = 1;
const MIN_HEARTBEAT_SIZE = parseInt(process.env.MIN_HEARTBEAT_SIZE_B ?? "512");
const MAX_HEARTBEAT_SIZE =
1024 * parseInt(process.env.MAX_HEARTBEAT_SIZE_KB ?? "1024");
@@ -67,7 +67,7 @@ const sharesIdentifierWith = (incoming: Request) => (queued: Request) =>
const isFromSharedIp = (req: Request) => SHARED_IP_ADDRESSES.has(req.ip);
async function enqueue(req: Request) {
export async function enqueue(req: Request) {
const enqueuedRequestCount = queue.filter(sharesIdentifierWith(req)).length;
let isGuest = req.user?.token === undefined;
@@ -136,15 +136,6 @@ async function enqueue(req: Request) {
}
}
export async function reenqueueRequest(req: Request) {
req.log.info(
{ key: req.key?.hash, retryCount: req.retryCount },
`Re-enqueueing request due to retryable error`
);
req.retryCount++;
await enqueue(req);
}
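
// The counterpart pattern in the response handlers (as in the 403 handling
// above): put the request back in the queue, then throw a RetryableError so
// the current attempt unwinds and the re-enqueued copy is retried, typically
// with a different key:
//
//   keyPool.markRateLimited(req.key!);
//   await reenqueueRequest(req);
//   throw new RetryableError("rate-limited request re-enqueued.");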
function getQueueForPartition(partition: ModelFamily): Request[] {
return queue
.filter((req) => getModelFamilyForRequest(req) === partition)
+19 -25
@@ -1,55 +1,42 @@
import express from "express";
import { addV1 } from "./add-v1";
import { anthropic } from "./anthropic";
import { aws } from "./aws";
import { azure } from "./azure";
import { checkRisuToken } from "./check-risu-token";
import express, { Request, Response, NextFunction } from "express";
import { gatekeeper } from "./gatekeeper";
import { gcp } from "./gcp";
import { googleAI } from "./google-ai";
import { mistralAI } from "./mistral-ai";
import { checkRisuToken } from "./check-risu-token";
import { openai } from "./openai";
import { openaiImage } from "./openai-image";
import { anthropic } from "./anthropic";
import { googleAI } from "./google-ai";
import { mistralAI } from "./mistral-ai";
import { aws } from "./aws";
import { azure } from "./azure";
import { sendErrorToClient } from "./middleware/response/error-generator";
const proxyRouter = express.Router();
// Remove `expect: 100-continue` header from requests due to incompatibility
// with node-http-proxy.
proxyRouter.use((req, _res, next) => {
if (req.headers.expect) {
// node-http-proxy does not like it when clients send `expect: 100-continue`
// and will stall. none of the upstream APIs use this header anyway.
delete req.headers.expect;
}
next();
});
// Apply body parsers.
proxyRouter.use(
express.json({ limit: "100mb" }),
express.urlencoded({ extended: true, limit: "100mb" })
);
// Apply auth/rate limits.
proxyRouter.use(gatekeeper);
proxyRouter.use(checkRisuToken);
// Initialize request queue metadata.
proxyRouter.use((req, _res, next) => {
req.startTime = Date.now();
req.retryCount = 0;
next();
});
// Proxy endpoints.
proxyRouter.use("/openai", addV1, openai);
proxyRouter.use("/openai-image", addV1, openaiImage);
proxyRouter.use("/anthropic", addV1, anthropic);
proxyRouter.use("/google-ai", addV1, googleAI);
proxyRouter.use("/mistral-ai", addV1, mistralAI);
proxyRouter.use("/aws", aws);
proxyRouter.use("/gcp/claude", addV1, gcp);
proxyRouter.use("/aws/claude", addV1, aws);
proxyRouter.use("/azure/openai", addV1, azure);
// Redirect browser requests to the homepage.
proxyRouter.get("*", (req, res, next) => {
const isBrowser = req.headers["user-agent"]?.includes("Mozilla");
@@ -59,8 +46,7 @@ proxyRouter.get("*", (req, res, next) => {
next();
}
});
// Send a fake client error if user specifies an invalid proxy endpoint.
// Handle 404s.
proxyRouter.use((req, res) => {
sendErrorToClient({
req,
@@ -81,3 +67,11 @@ proxyRouter.use((req, res) => {
});
export { proxyRouter as proxyRouter };
function addV1(req: Request, res: Response, next: NextFunction) {
// Clients don't consistently use the /v1 prefix so we'll add it for them.
if (!req.path.startsWith("/v1/")) {
req.url = `/v1${req.url}`;
}
next();
}
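
// For example, a client posting to `${proxyEndpointRoute}/openai/chat/completions`
// reaches this router at "/chat/completions", which addV1 rewrites to
// "/v1/chat/completions"; a request already prefixed with /v1/ passes through
// unchanged.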
+1 -45
@@ -8,7 +8,6 @@ import pinoHttp from "pino-http";
import os from "os";
import childProcess from "child_process";
import { logger } from "./logger";
import { createBlacklistMiddleware } from "./shared/cidr";
import { setupAssetsDir } from "./shared/file-storage/setup-assets-dir";
import { keyPool } from "./shared/key-management";
import { adminRouter } from "./admin/routes";
@@ -22,7 +21,6 @@ import { init as initUserStore } from "./shared/users/user-store";
import { init as initTokenizers } from "./shared/tokenization";
import { checkOrigin } from "./proxy/check-origin";
import { sendErrorToClient } from "./proxy/middleware/response/error-generator";
import { initializeDatabase, getDatabase } from "./shared/database";
const PORT = config.port;
const BIND_ADDRESS = config.bindAddress;
@@ -33,19 +31,13 @@ app.use(
pinoHttp({
quietReqLogger: true,
logger,
autoLogging: {
ignore: ({ url }) => {
const ignoreList = ["/health", "/res", "/user_content"];
return ignoreList.some((path) => (url as string).startsWith(path));
},
},
autoLogging: { ignore: ({ url }) => ["/health"].includes(url as string) },
redact: {
paths: [
"req.headers.cookie",
'res.headers["set-cookie"]',
"req.headers.authorization",
'req.headers["x-api-key"]',
'req.headers["api-key"]',
// Don't log the prompt text on transform errors
"body.messages",
"body.prompt",
@@ -70,32 +62,12 @@ app.set("views", [
]);
app.use("/user_content", express.static(USER_ASSETS_DIR, { maxAge: "2h" }));
app.use(
"/res",
express.static(path.join(__dirname, "..", "public"), {
maxAge: "2h",
etag: false,
})
);
app.get("/health", (_req, res) => res.sendStatus(200));
app.use(cors());
const blacklist = createBlacklistMiddleware("IP_BLACKLIST", config.ipBlacklist);
app.use(blacklist);
app.use(checkOrigin);
app.use("/admin", adminRouter);
app.use((req, _, next) => {
// For whatever reason SillyTavern just ignores the path a user provides
// when using Google AI with reverse proxy. We'll fix it here.
if (req.path.startsWith("/v1beta/models/")) {
req.url = `${config.proxyEndpointRoute}/google-ai${req.url}`;
return next();
}
next();
});
app.use(config.proxyEndpointRoute, proxyRouter);
app.use("/user", userRouter);
if (config.staticServiceInfo) {
@@ -153,8 +125,6 @@ async function start() {
await logQueue.start();
}
await initializeDatabase();
logger.info("Starting request queue...");
startRequestQueue();
@@ -176,20 +146,6 @@ async function start() {
);
}
function cleanup() {
console.log("Shutting down...");
if (config.eventLogging) {
try {
const db = getDatabase();
db.close();
console.log("Closed sqlite database.");
} catch (error) {}
}
process.exit(0);
}
process.on("SIGINT", cleanup);
function registerUncaughtExceptionHandler() {
process.on("uncaughtException", (err: any) => {
logger.error(
+133 -128
@@ -2,7 +2,8 @@ import { config, listConfig } from "./config";
import {
AnthropicKey,
AwsBedrockKey,
GcpKey,
AzureOpenAIKey,
GoogleAIKey,
keyPool,
OpenAIKey,
} from "./shared/key-management";
@@ -10,7 +11,6 @@ import {
AnthropicModelFamily,
assertIsKnownModelFamily,
AwsBedrockModelFamily,
GcpModelFamily,
AzureOpenAIModelFamily,
GoogleAIModelFamily,
LLM_SERVICES,
@@ -24,16 +24,22 @@ import { getCostSuffix, getTokenCostUsd, prettyTokens } from "./shared/stats";
import { getUniqueIps } from "./proxy/rate-limit";
import { assertNever } from "./shared/utils";
import { getEstimatedWaitTime, getQueueLength } from "./proxy/queue";
import { MistralAIKey } from "./shared/key-management/mistral-ai/provider";
const CACHE_TTL = 2000;
type KeyPoolKey = ReturnType<typeof keyPool.list>[0];
const keyIsOpenAIKey = (k: KeyPoolKey): k is OpenAIKey =>
k.service === "openai";
const keyIsAzureKey = (k: KeyPoolKey): k is AzureOpenAIKey =>
k.service === "azure";
const keyIsAnthropicKey = (k: KeyPoolKey): k is AnthropicKey =>
k.service === "anthropic";
const keyIsGoogleAIKey = (k: KeyPoolKey): k is GoogleAIKey =>
k.service === "google-ai";
const keyIsMistralAIKey = (k: KeyPoolKey): k is MistralAIKey =>
k.service === "mistral-ai";
const keyIsAwsKey = (k: KeyPoolKey): k is AwsBedrockKey => k.service === "aws";
const keyIsGcpKey = (k: KeyPoolKey): k is GcpKey => k.service === "gcp";
/** Stats aggregated across all keys for a given service. */
type ServiceAggregate = "keys" | "uncheckedKeys" | "orgs";
@@ -45,15 +51,10 @@ type ModelAggregates = {
overQuota?: number;
pozzed?: number;
awsLogged?: number;
// needed to disambiguate aws-claude family's variants
awsClaude2?: number;
awsSonnet3?: number;
awsSonnet3_5?: number;
awsHaiku: number;
gcpSonnet?: number;
gcpSonnet35?: number;
gcpHaiku?: number;
awsSonnet?: number;
awsHaiku?: number;
queued: number;
queueTime: string;
tokens: number;
};
/** All possible combinations of model family and aggregate type. */
@@ -79,16 +80,13 @@ type OpenAIInfo = BaseFamilyInfo & {
overQuotaKeys?: number;
};
type AnthropicInfo = BaseFamilyInfo & {
trialKeys?: number;
prefilledKeys?: number;
overQuotaKeys?: number;
};
type AwsInfo = BaseFamilyInfo & {
privacy?: string;
enabledVariants?: string;
};
type GcpInfo = BaseFamilyInfo & {
enabledVariants?: string;
sonnetKeys?: number;
haikuKeys?: number;
};
// prettier-ignore
@@ -96,11 +94,12 @@ export type ServiceInfo = {
uptime: number;
endpoints: {
openai?: string;
openai2?: string;
anthropic?: string;
"anthropic-claude-3"?: string;
"google-ai"?: string;
"mistral-ai"?: string;
"aws"?: string;
gcp?: string;
aws?: string;
azure?: string;
"openai-image"?: string;
"azure-image"?: string;
@@ -114,7 +113,6 @@ export type ServiceInfo = {
} & { [f in OpenAIModelFamily]?: OpenAIInfo }
& { [f in AnthropicModelFamily]?: AnthropicInfo; }
& { [f in AwsBedrockModelFamily]?: AwsInfo }
& { [f in GcpModelFamily]?: GcpInfo }
& { [f in AzureOpenAIModelFamily]?: BaseFamilyInfo; }
& { [f in GoogleAIModelFamily]?: BaseFamilyInfo }
& { [f in MistralAIModelFamily]?: BaseFamilyInfo };
@@ -137,10 +135,13 @@ export type ServiceInfo = {
const SERVICE_ENDPOINTS: { [s in LLMService]: Record<string, string> } = {
openai: {
openai: `%BASE%/openai`,
openai2: `%BASE%/openai/turbo-instruct`,
"openai-image": `%BASE%/openai-image`,
},
anthropic: {
anthropic: `%BASE%/anthropic`,
"anthropic-sonnet (⚠️Temporary: for Claude 3 Sonnet)": `%BASE%/anthropic/sonnet`,
"anthropic-opus (⚠️Temporary: for Claude 3 Opus)": `%BASE%/anthropic/opus`,
},
"google-ai": {
"google-ai": `%BASE%/google-ai`,
@@ -149,11 +150,8 @@ const SERVICE_ENDPOINTS: { [s in LLMService]: Record<string, string> } = {
"mistral-ai": `%BASE%/mistral-ai`,
},
aws: {
"aws-claude": `%BASE%/aws/claude`,
"aws-mistral": `%BASE%/aws/mistral`,
},
gcp: {
gcp: `%BASE%/gcp/claude`,
aws: `%BASE%/aws/claude`,
"aws-sonnet (⚠️Temporary: for AWS Claude 3 Sonnet)": `%BASE%/aws/claude/sonnet`,
},
azure: {
azure: `%BASE%/azure/openai`,
@@ -161,7 +159,7 @@ const SERVICE_ENDPOINTS: { [s in LLMService]: Record<string, string> } = {
},
};
const familyStats = new Map<ModelAggregateKey, number>();
const modelStats = new Map<ModelAggregateKey, number>();
const serviceStats = new Map<keyof AllStats, number>();
let cachedInfo: ServiceInfo | undefined;
@@ -178,7 +176,7 @@ export function buildInfo(baseUrl: string, forAdmin = false): ServiceInfo {
.concat("turbo")
);
familyStats.clear();
modelStats.clear();
serviceStats.clear();
keys.forEach(addKeyToAggregates);
@@ -213,8 +211,7 @@ export function buildInfo(baseUrl: string, forAdmin = false): ServiceInfo {
}
function getStatus() {
if (!config.checkKeys)
return "Key checking is disabled. The data displayed are not reliable.";
if (!config.checkKeys) return "Key checking is disabled.";
let unchecked = 0;
for (const service of LLM_SERVICES) {
@@ -297,102 +294,129 @@ function increment<T extends keyof AllStats | ModelAggregateKey>(
) {
map.set(key, (map.get(key) || 0) + delta);
}
const addToService = increment.bind(null, serviceStats);
const addToFamily = increment.bind(null, familyStats);
function addKeyToAggregates(k: KeyPoolKey) {
addToService("proompts", k.promptCount);
addToService("openai__keys", k.service === "openai" ? 1 : 0);
addToService("anthropic__keys", k.service === "anthropic" ? 1 : 0);
addToService("google-ai__keys", k.service === "google-ai" ? 1 : 0);
addToService("mistral-ai__keys", k.service === "mistral-ai" ? 1 : 0);
addToService("aws__keys", k.service === "aws" ? 1 : 0);
addToService("gcp__keys", k.service === "gcp" ? 1 : 0);
addToService("azure__keys", k.service === "azure" ? 1 : 0);
increment(serviceStats, "proompts", k.promptCount);
increment(serviceStats, "openai__keys", k.service === "openai" ? 1 : 0);
increment(serviceStats, "anthropic__keys", k.service === "anthropic" ? 1 : 0);
increment(serviceStats, "google-ai__keys", k.service === "google-ai" ? 1 : 0);
increment(
serviceStats,
"mistral-ai__keys",
k.service === "mistral-ai" ? 1 : 0
);
increment(serviceStats, "aws__keys", k.service === "aws" ? 1 : 0);
increment(serviceStats, "azure__keys", k.service === "azure" ? 1 : 0);
let sumTokens = 0;
let sumCost = 0;
const incrementGenericFamilyStats = (f: ModelFamily) => {
const tokens = (k as any)[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
addToFamily(`${f}__tokens`, tokens);
addToFamily(`${f}__revoked`, k.isRevoked ? 1 : 0);
addToFamily(`${f}__active`, k.isDisabled ? 0 : 1);
};
switch (k.service) {
case "openai":
if (!keyIsOpenAIKey(k)) throw new Error("Invalid key type");
addToService("openai__uncheckedKeys", Boolean(k.lastChecked) ? 0 : 1);
k.modelFamilies.forEach((f) => {
incrementGenericFamilyStats(f);
addToFamily(`${f}__trial`, k.isTrial ? 1 : 0);
addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
});
break;
case "anthropic":
if (!keyIsAnthropicKey(k)) throw new Error("Invalid key type");
addToService("anthropic__uncheckedKeys", Boolean(k.lastChecked) ? 0 : 1);
k.modelFamilies.forEach((f) => {
incrementGenericFamilyStats(f);
addToFamily(`${f}__trial`, k.tier === "free" ? 1 : 0);
addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
addToFamily(`${f}__pozzed`, k.isPozzed ? 1 : 0);
});
break;
increment(
serviceStats,
"openai__uncheckedKeys",
Boolean(k.lastChecked) ? 0 : 1
);
k.modelFamilies.forEach((f) => {
const tokens = k[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
increment(modelStats, `${f}__tokens`, tokens);
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${f}__trial`, k.isTrial ? 1 : 0);
increment(modelStats, `${f}__overQuota`, k.isOverQuota ? 1 : 0);
});
break;
case "azure":
if (!keyIsAzureKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
const tokens = k[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
increment(modelStats, `${f}__tokens`, tokens);
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
});
break;
case "anthropic": {
if (!keyIsAnthropicKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
const tokens = k[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
increment(modelStats, `${f}__tokens`, tokens);
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${f}__overQuota`, k.isOverQuota ? 1 : 0);
increment(modelStats, `${f}__pozzed`, k.isPozzed ? 1 : 0);
});
increment(
serviceStats,
"anthropic__uncheckedKeys",
Boolean(k.lastChecked) ? 0 : 1
);
break;
}
case "google-ai": {
if (!keyIsGoogleAIKey(k)) throw new Error("Invalid key type");
const family = "gemini-pro";
sumTokens += k["gemini-proTokens"];
sumCost += getTokenCostUsd(family, k["gemini-proTokens"]);
increment(modelStats, `${family}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${family}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${family}__tokens`, k["gemini-proTokens"]);
break;
}
case "mistral-ai": {
if (!keyIsMistralAIKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
const tokens = k[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
increment(modelStats, `${f}__tokens`, tokens);
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
});
break;
}
case "aws": {
if (!keyIsAwsKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach(incrementGenericFamilyStats);
if (!k.isDisabled) {
// Don't add revoked keys to available AWS variants
k.modelIds.forEach((id) => {
if (id.includes("claude-3-sonnet")) {
addToFamily(`aws-claude__awsSonnet3`, 1);
} else if (id.includes("claude-3-5-sonnet")) {
addToFamily(`aws-claude__awsSonnet3_5`, 1);
} else if (id.includes("claude-3-haiku")) {
addToFamily(`aws-claude__awsHaiku`, 1);
} else if (id.includes("claude-v2")) {
addToFamily(`aws-claude__awsClaude2`, 1);
}
});
}
const family = "aws-claude";
sumTokens += k["aws-claudeTokens"];
sumCost += getTokenCostUsd(family, k["aws-claudeTokens"]);
increment(modelStats, `${family}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${family}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${family}__tokens`, k["aws-claudeTokens"]);
increment(modelStats, `${family}__awsSonnet`, k.sonnetEnabled ? 1 : 0);
increment(modelStats, `${family}__awsHaiku`, k.haikuEnabled ? 1 : 0);
// Ignore revoked keys for aws logging stats, but include keys where the
// logging status is unknown.
const countAsLogged =
k.lastChecked && !k.isDisabled && k.awsLoggingStatus === "enabled";
addToFamily(`aws-claude__awsLogged`, countAsLogged ? 1 : 0);
k.lastChecked && !k.isDisabled && k.awsLoggingStatus !== "disabled";
increment(modelStats, `${family}__awsLogged`, countAsLogged ? 1 : 0);
break;
}
case "gcp":
if (!keyIsGcpKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach(incrementGenericFamilyStats);
// TODO: add modelIds to GcpKey
break;
// These services don't have any additional stats to track.
case "azure":
case "google-ai":
case "mistral-ai":
k.modelFamilies.forEach(incrementGenericFamilyStats);
break;
default:
assertNever(k.service);
}
addToService("tokens", sumTokens);
addToService("tokenCost", sumCost);
increment(serviceStats, "tokens", sumTokens);
increment(serviceStats, "tokenCost", sumCost);
}
function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
const tokens = familyStats.get(`${family}__tokens`) || 0;
const tokens = modelStats.get(`${family}__tokens`) || 0;
const cost = getTokenCostUsd(family, tokens);
let info: BaseFamilyInfo & OpenAIInfo & AnthropicInfo & AwsInfo & GcpInfo = {
let info: BaseFamilyInfo & OpenAIInfo & AnthropicInfo & AwsInfo = {
usage: `${prettyTokens(tokens)} tokens${getCostSuffix(cost)}`,
activeKeys: familyStats.get(`${family}__active`) || 0,
revokedKeys: familyStats.get(`${family}__revoked`) || 0,
activeKeys: modelStats.get(`${family}__active`) || 0,
revokedKeys: modelStats.get(`${family}__revoked`) || 0,
};
// Add service-specific stats to the info object.
@@ -400,8 +424,8 @@ function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
const service = MODEL_FAMILY_SERVICE[family];
switch (service) {
case "openai":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
info.trialKeys = familyStats.get(`${family}__trial`) || 0;
info.overQuotaKeys = modelStats.get(`${family}__overQuota`) || 0;
info.trialKeys = modelStats.get(`${family}__trial`) || 0;
// Delete trial/revoked keys for non-turbo families.
// Trials are turbo 99% of the time, and if a key is invalid we don't
@@ -412,36 +436,17 @@ function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
}
break;
case "anthropic":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
info.trialKeys = familyStats.get(`${family}__trial`) || 0;
info.prefilledKeys = familyStats.get(`${family}__pozzed`) || 0;
info.overQuotaKeys = modelStats.get(`${family}__overQuota`) || 0;
info.prefilledKeys = modelStats.get(`${family}__pozzed`) || 0;
break;
case "aws":
if (family === "aws-claude") {
const logged = familyStats.get(`${family}__awsLogged`) || 0;
const variants = new Set<string>();
if (familyStats.get(`${family}__awsClaude2`) || 0)
variants.add("claude2");
if (familyStats.get(`${family}__awsSonnet3`) || 0)
variants.add("sonnet3");
if (familyStats.get(`${family}__awsSonnet3_5`) || 0)
variants.add("sonnet3.5");
if (familyStats.get(`${family}__awsHaiku`) || 0)
variants.add("haiku");
info.enabledVariants = variants.size
? `${Array.from(variants).join(",")}`
: undefined;
if (logged > 0) {
info.privacy = config.allowAwsLogging
? `AWS logging verification inactive. Prompts could be logged.`
: `${logged} active keys are potentially logged and can't be used. Set ALLOW_AWS_LOGGING=true to override.`;
}
}
break;
case "gcp":
if (family === "gcp-claude") {
// TODO: implement
info.enabledVariants = "not implemented";
info.sonnetKeys = modelStats.get(`${family}__awsSonnet`) || 0;
info.haikuKeys = modelStats.get(`${family}__awsHaiku`) || 0;
const logged = modelStats.get(`${family}__awsLogged`) || 0;
if (logged > 0) {
info.privacy = config.allowAwsLogging
? `${logged} active keys are potentially logged.`
: `${logged} active keys are potentially logged and can't be used. Set ALLOW_AWS_LOGGING=true to override.`;
}
break;
}
-68
@@ -1,68 +0,0 @@
import type { Request } from "express";
import { z } from "zod";
import { APIFormat } from "../key-management";
import {
AnthropicV1TextSchema,
AnthropicV1MessagesSchema,
transformAnthropicTextToAnthropicChat,
transformOpenAIToAnthropicText,
transformOpenAIToAnthropicChat,
} from "./anthropic";
import { OpenAIV1ChatCompletionSchema } from "./openai";
import {
OpenAIV1TextCompletionSchema,
transformOpenAIToOpenAIText,
} from "./openai-text";
import {
OpenAIV1ImagesGenerationSchema,
transformOpenAIToOpenAIImage,
} from "./openai-image";
import {
GoogleAIV1GenerateContentSchema,
transformOpenAIToGoogleAI,
} from "./google-ai";
import {
MistralAIV1ChatCompletionsSchema,
MistralAIV1TextCompletionsSchema,
transformMistralChatToText,
} from "./mistral-ai";
export { OpenAIChatMessage } from "./openai";
export {
AnthropicChatMessage,
AnthropicV1TextSchema,
AnthropicV1MessagesSchema,
flattenAnthropicMessages,
} from "./anthropic";
export { GoogleAIChatMessage } from "./google-ai";
export { MistralAIChatMessage } from "./mistral-ai";
type APIPair = `${APIFormat}->${APIFormat}`;
type TransformerMap = {
[key in APIPair]?: APIFormatTransformer<any>;
};
export type APIFormatTransformer<Z extends z.ZodType<any, any>> = (
req: Request
) => Promise<z.infer<Z>>;
export const API_REQUEST_TRANSFORMERS: TransformerMap = {
"anthropic-text->anthropic-chat": transformAnthropicTextToAnthropicChat,
"openai->anthropic-chat": transformOpenAIToAnthropicChat,
"openai->anthropic-text": transformOpenAIToAnthropicText,
"openai->openai-text": transformOpenAIToOpenAIText,
"openai->openai-image": transformOpenAIToOpenAIImage,
"openai->google-ai": transformOpenAIToGoogleAI,
"mistral-ai->mistral-text": transformMistralChatToText,
};
export const API_REQUEST_VALIDATORS: Record<APIFormat, z.ZodSchema<any>> = {
"anthropic-chat": AnthropicV1MessagesSchema,
"anthropic-text": AnthropicV1TextSchema,
openai: OpenAIV1ChatCompletionSchema,
"openai-text": OpenAIV1TextCompletionSchema,
"openai-image": OpenAIV1ImagesGenerationSchema,
"google-ai": GoogleAIV1GenerateContentSchema,
"mistral-ai": MistralAIV1ChatCompletionsSchema,
"mistral-text": MistralAIV1TextCompletionsSchema,
};
-164
View File
@@ -1,164 +0,0 @@
import { z } from "zod";
import { OPENAI_OUTPUT_MAX } from "./openai";
import { Template } from "@huggingface/jinja";
import { APIFormatTransformer } from "./index";
import { logger } from "../../logger";
const MistralChatMessageSchema = z.object({
role: z.enum(["system", "user", "assistant", "tool"]), // TODO: implement tools
content: z.string(),
prefix: z.boolean().optional(),
});
const MistralMessagesSchema = z.array(MistralChatMessageSchema).refine(
(input) => {
const prefixIdx = input.findIndex((msg) => Boolean(msg.prefix));
if (prefixIdx === -1) return true; // no prefix messages
const lastIdx = input.length - 1;
const lastMsg = input[lastIdx];
return prefixIdx === lastIdx && lastMsg.role === "assistant";
},
{
message:
"`prefix` can only be set to `true` on the last message, and only for an assistant message.",
}
);
// https://docs.mistral.ai/api#operation/createChatCompletion
const BaseMistralAIV1CompletionsSchema = z.object({
model: z.string(),
messages: MistralMessagesSchema.optional(),
prompt: z.string().optional(),
temperature: z.number().optional().default(0.7),
top_p: z.number().optional().default(1),
max_tokens: z.coerce
.number()
.int()
.nullish()
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
stream: z.boolean().optional().default(false),
// Mistral docs say that `stop` can be a string or array but AWS Mistral
// blows up if a string is passed. We must convert it to an array.
stop: z
.union([z.string(), z.array(z.string())])
.optional()
.default([])
.transform((v) => (Array.isArray(v) ? v : [v])),
random_seed: z.number().int().min(0).optional(),
response_format: z.enum(["text", "json_object"]).optional().default("text"),
safe_prompt: z.boolean().optional().default(false),
});
export const MistralAIV1ChatCompletionsSchema =
BaseMistralAIV1CompletionsSchema.and(
z.object({ messages: MistralMessagesSchema })
);
export const MistralAIV1TextCompletionsSchema =
BaseMistralAIV1CompletionsSchema.and(z.object({ prompt: z.string() }));
/*
Slightly more strict version that only allows a subset of the parameters. AWS
Mistral helpfully returns no details if unsupported parameters are passed so
this list comes from trial and error as of 2024-08-12.
*/
const BaseAWSMistralAIV1CompletionsSchema =
BaseMistralAIV1CompletionsSchema.pick({
temperature: true,
top_p: true,
max_tokens: true,
stop: true,
random_seed: true,
// response_format: true,
// safe_prompt: true,
}).strip();
export const AWSMistralV1ChatCompletionsSchema =
BaseAWSMistralAIV1CompletionsSchema.and(
z.object({ messages: MistralMessagesSchema })
);
export const AWSMistralV1TextCompletionsSchema =
BaseAWSMistralAIV1CompletionsSchema.and(z.object({ prompt: z.string() }));
export type MistralAIChatMessage = z.infer<typeof MistralChatMessageSchema>;
export function fixMistralPrompt(
messages: MistralAIChatMessage[]
): MistralAIChatMessage[] {
// Mistral uses OpenAI format but has some additional requirements:
// - Only one system message per request, and it must be the first message if
// present.
// - Final message must be a user message, unless it has `prefix: true`.
// - Cannot have multiple messages from the same role in a row.
// While frontends should be able to handle this, we can fix it here in the
// meantime.
const fixed = messages.reduce<MistralAIChatMessage[]>((acc, msg) => {
if (acc.length === 0) {
acc.push(msg);
return acc;
}
const copy = { ...msg };
// Reattribute subsequent system messages to the user
if (msg.role === "system") {
copy.role = "user";
}
// Consolidate multiple messages from the same role
const last = acc[acc.length - 1];
if (last.role === copy.role) {
last.content += "\n\n" + copy.content;
} else {
acc.push(copy);
}
return acc;
}, []);
// If the last message is an assistant message, mark it as a prefix. An
// assistant message at the end of the conversation without `prefix: true`
// results in an error.
if (fixed[fixed.length - 1].role === "assistant") {
fixed[fixed.length - 1].prefix = true;
}
return fixed;
}
let jinjaTemplate: Template;
let renderTemplate: (messages: MistralAIChatMessage[]) => string;
function renderMistralPrompt(messages: MistralAIChatMessage[]) {
if (!jinjaTemplate) {
logger.warn("Lazy loading mistral chat template...");
const { chatTemplate, bosToken, eosToken } =
require("./templates/mistral-template").MISTRAL_TEMPLATE;
jinjaTemplate = new Template(chatTemplate);
renderTemplate = (messages) =>
jinjaTemplate.render({
messages,
bos_token: bosToken,
eos_token: eosToken,
});
}
return renderTemplate(messages);
}
/**
* Attempts to convert a Mistral chat completions request to a text completions,
* using the official prompt template published by Mistral.
*/
export const transformMistralChatToText: APIFormatTransformer<
typeof MistralAIV1TextCompletionsSchema
> = async (req) => {
const { body } = req;
const result = MistralAIV1ChatCompletionsSchema.safeParse(body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid Mistral chat completions request"
);
throw result.error;
}
const { messages, ...rest } = result.data;
const prompt = renderMistralPrompt(messages);
return { ...rest, prompt, messages: undefined };
};
-58
View File
@@ -1,58 +0,0 @@
import { z } from "zod";
import {
flattenOpenAIChatMessages,
OpenAIV1ChatCompletionSchema,
} from "./openai";
import { APIFormatTransformer } from "./index";
export const OpenAIV1TextCompletionSchema = z
.object({
model: z
.string()
.max(100)
.regex(
/^gpt-3.5-turbo-instruct/,
"Model must start with 'gpt-3.5-turbo-instruct'"
),
prompt: z.string({
required_error:
"No `prompt` found. Ensure you've set the correct completion endpoint.",
}),
logprobs: z.number().int().nullish().default(null),
echo: z.boolean().optional().default(false),
best_of: z.literal(1).optional(),
stop: z
.union([z.string().max(500), z.array(z.string().max(500)).max(4)])
.optional(),
suffix: z.string().max(1000).optional(),
})
.strip()
.merge(OpenAIV1ChatCompletionSchema.omit({ messages: true, logprobs: true }));
export const transformOpenAIToOpenAIText: APIFormatTransformer<
typeof OpenAIV1TextCompletionSchema
> = async (req) => {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-OpenAI-text request"
);
throw result.error;
}
const { messages, ...rest } = result.data;
const prompt = flattenOpenAIChatMessages(messages);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
stops.push("\n\nUser:");
stops = [...new Set(stops)];
const transformed = { ...rest, prompt: prompt, stop: stops };
return OpenAIV1TextCompletionSchema.parse(transformed);
};
@@ -1,36 +0,0 @@
export const MISTRAL_TEMPLATE = {
bosToken: "<s>",
eosToken: "</s>",
chatTemplate: `"{%- if messages[0]["role"] == "system" %}
{%- set system_message = messages[0]["content"] %}
{%- set loop_messages = messages[1:] %}
{%- else %}
{%- set loop_messages = messages %}
{%- endif %}
{%- set user_messages = loop_messages | selectattr("role", "equalto", "user") | list %}
{%- for message in loop_messages %}
{%- if (message["role"] == "user") != (loop.index0 % 2 == 0) %}
{{- raise_exception("After the optional system message, conversation roles must alternate user/assistant/user/assistant/...") }}
{%- endif %}
{%- endfor %}
{{- bos_token }}
{%- for message in loop_messages %}
{%- if message["role"] == "user" %}
{%- if loop.last and system_message is defined %}
{{- "[INST] " + system_message + "\\n\\n" + message["content"] + "[/INST]" }}
{%- else %}
{{- "[INST] " + message["content"] + "[/INST]" }}
{%- endif %}
{%- elif message["role"] == "assistant" %}
{%- if loop.last and message.prefix is defined and message.prefix %}
{{- " " + message["content"] }}
{%- else %}
{{- " " + message["content"] + eos_token}}
{%- endif %}
{%- else %}
{{- raise_exception("Only user and assistant roles are supported, with the exception of an initial optional system message!") }}
{%- endif %}
{%- endfor %}`,
};
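Hand-tracing the template above on a small conversation (illustrative; actual output depends on the @huggingface/jinja renderer):

const example = [
  { role: "user", content: "Hi" },
  { role: "assistant", content: "Hello!" },
  { role: "user", content: "How are you?" },
];
// Expected rendering: user turns are wrapped in [INST]...[/INST], assistant
// turns are suffixed with the EOS token, and a system message (if present)
// would be prepended to the final user turn:
// "<s>[INST] Hi[/INST] Hello!</s>[INST] How are you?[/INST]"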
+84
View File
@@ -0,0 +1,84 @@
import type { Request, Response } from "express";
import { z } from "zod";
import { APIFormat } from "../key-management";
import { AnthropicV1MessagesSchema } from "./kits/anthropic-chat/schema";
import { AnthropicV1TextSchema } from "./kits/anthropic-text/schema";
import { transformOpenAIToAnthropicText } from "./kits/anthropic-text/request-transformers";
import {
transformAnthropicTextToAnthropicChat,
transformOpenAIToAnthropicChat,
} from "./kits/anthropic-chat/request-transformers";
import { GoogleAIV1GenerateContentSchema } from "./kits/google-ai/schema";
import { transformOpenAIToGoogleAI } from "./kits/google-ai/request-transformers";
import { MistralAIV1ChatCompletionsSchema } from "./kits/mistral-ai/schema";
import { OpenAIV1ChatCompletionSchema } from "./kits/openai/schema";
import { OpenAIV1ImagesGenerationSchema } from "./kits/openai-image/schema";
import { transformOpenAIToOpenAIImage } from "./kits/openai-image/request-transformers";
import { OpenAIV1TextCompletionSchema } from "./kits/openai-text/schema";
import { transformOpenAIToOpenAIText } from "./kits/openai-text/request-transformers";
export type APIRequestTransformer<Z extends z.ZodType<any, any>> = (
req: Request
) => Promise<z.infer<Z>>;
export type APIResponseTransformer<Z extends z.ZodType<any, any>> = (
res: Response
) => Promise<z.infer<Z>>;
/** Represents a transformation from one API format to another. */
type APITransformation = `${APIFormat}->${APIFormat}`;
type APIRequestTransformerMap = {
[key in APITransformation]?: APIRequestTransformer<any>;
};
type APIResponseTransformerMap = {
[key in APITransformation]?: APIResponseTransformer<any>;
};
export const API_REQUEST_TRANSFORMERS: APIRequestTransformerMap = {
"anthropic-text->anthropic-chat": transformAnthropicTextToAnthropicChat,
"openai->anthropic-chat": transformOpenAIToAnthropicChat,
"openai->anthropic-text": transformOpenAIToAnthropicText,
"openai->openai-text": transformOpenAIToOpenAIText,
"openai->openai-image": transformOpenAIToOpenAIImage,
"openai->google-ai": transformOpenAIToGoogleAI,
};
export const API_REQUEST_VALIDATORS: Record<APIFormat, z.ZodSchema<any>> = {
"anthropic-chat": AnthropicV1MessagesSchema,
"anthropic-text": AnthropicV1TextSchema,
openai: OpenAIV1ChatCompletionSchema,
"openai-text": OpenAIV1TextCompletionSchema,
"openai-image": OpenAIV1ImagesGenerationSchema,
"google-ai": GoogleAIV1GenerateContentSchema,
"mistral-ai": MistralAIV1ChatCompletionsSchema,
};
export { AnthropicChatMessage } from "./kits/anthropic-chat/schema";
export { AnthropicV1MessagesSchema } from "./kits/anthropic-chat/schema";
export { AnthropicV1TextSchema } from "./kits/anthropic-text/schema";
export interface APIFormatKit<T extends APIFormat, P> {
name: T;
/** Zod schema for validating requests in this format. */
requestValidator: z.ZodSchema<any>;
/** Flattens non-string prompts (such as message arrays) into a single string. */
promptStringifier: (prompt: P) => string;
/** Counts the number of tokens in a prompt. */
promptTokenCounter: (prompt: P, model: string) => Promise<number>;
/** Counts the number of tokens in a completion. */
completionTokenCounter: (
completion: string,
model: string
) => Promise<number>;
/** Functions which transform requests from other formats into this format. */
requestTransformers: APIRequestTransformerMap;
/** Functions which transform responses from this format into other formats. */
responseTransformers: APIResponseTransformerMap;
}
export { GoogleAIChatMessage } from "./kits/google-ai";
export { MistralAIChatMessage } from "./kits/mistral-ai";
export { OpenAIChatMessage } from "./kits/openai/schema";
export { flattenAnthropicMessages } from "./kits/anthropic-chat/stringifier";
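A minimal sketch (not from the commit) of how a caller might use the maps above to route a request between formats; `translateRequest` and its arguments are hypothetical names for illustration:

import type { Request } from "express";

async function translateRequest(
  req: Request,
  from: APIFormat,
  to: APIFormat
): Promise<unknown> {
  // Same-format requests only need validation against the target schema.
  if (from === to) {
    return API_REQUEST_VALIDATORS[from].parse(req.body);
  }
  // Cross-format requests go through the registered transformer, if any.
  const transform = API_REQUEST_TRANSFORMERS[`${from}->${to}`];
  if (!transform) {
    throw new Error(`No request transformer for ${from}->${to}`);
  }
  return transform(req);
}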
+4
View File
@@ -0,0 +1,4 @@
# API Kits
This directory contains "kits" for each supported language model API. Each kit implements the `APIFormatKit` interface and provides functionality that the proxy application needs to be able to validate requests, transform prompts and responses, tokenize text, and so forth.
## Structure
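The layout below is inferred from the import paths elsewhere in this commit and is illustrative only:

src/shared/api-schemas/kits/
  <api-format>/
    index.ts                 # re-exports the kit's public surface
    schema.ts                # zod request schema and message types
    stringifier.ts           # flattens message arrays into plain text
    request-transformers.ts  # converts requests from other formats into this one
    tokenizer.ts             # token counting for prompts and completions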
@@ -1,98 +1,23 @@
-import { z } from "zod";
-import { config } from "../../config";
-import { BadRequestError } from "../errors";
-import {
-flattenOpenAIMessageContent,
-OpenAIChatMessage,
-OpenAIV1ChatCompletionSchema,
-} from "./openai";
-import { APIFormatTransformer } from "./index";
+import { AnthropicChatMessage, AnthropicV1MessagesSchema } from "./schema";
+import { AnthropicV1TextSchema, APIRequestTransformer, OpenAIChatMessage } from "../../index";
+import { BadRequestError } from "../../../errors";
-const CLAUDE_OUTPUT_MAX = config.maxOutputTokensAnthropic;
+import { OpenAIV1ChatCompletionSchema } from "../openai/schema";
-const AnthropicV1BaseSchema = z
-.object({
-model: z.string().max(100),
-stop_sequences: z.array(z.string().max(500)).optional(),
-stream: z.boolean().optional().default(false),
-temperature: z.coerce.number().optional().default(1),
-top_k: z.coerce.number().optional(),
-top_p: z.coerce.number().optional(),
-metadata: z.object({ user_id: z.string().optional() }).optional(),
-})
-.strip();
+/**
+* Represents the union of all content types without the `string` shorthand
+* for `text` content.
+*/
+type AnthropicChatMessageContentWithoutString = Exclude<
+AnthropicChatMessage["content"],
+string
+>;
+/** Represents a message with all shorthand `string` content expanded. */
+type ConvertedAnthropicChatMessage = AnthropicChatMessage & {
+content: AnthropicChatMessageContentWithoutString;
+};
-// https://docs.anthropic.com/claude/reference/complete_post [deprecated]
-export const AnthropicV1TextSchema = AnthropicV1BaseSchema.merge(
-z.object({
-prompt: z.string(),
-max_tokens_to_sample: z.coerce
-.number()
-.int()
-.transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
-})
-);
-const AnthropicV1MessageMultimodalContentSchema = z.array(
-z.union([
-z.object({ type: z.literal("text"), text: z.string() }),
-z.object({
-type: z.literal("image"),
-source: z.object({
-type: z.literal("base64"),
-media_type: z.string().max(100),
-data: z.string(),
-}),
-}),
-])
-);
-// https://docs.anthropic.com/claude/reference/messages_post
-export const AnthropicV1MessagesSchema = AnthropicV1BaseSchema.merge(
-z.object({
-messages: z.array(
-z.object({
-role: z.enum(["user", "assistant"]),
-content: z.union([
-z.string(),
-AnthropicV1MessageMultimodalContentSchema,
-]),
-})
-),
-max_tokens: z
-.number()
-.int()
-.transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
-system: z.string().optional(),
-})
-);
-export type AnthropicChatMessage = z.infer<
-typeof AnthropicV1MessagesSchema
->["messages"][0];
-function openAIMessagesToClaudeTextPrompt(messages: OpenAIChatMessage[]) {
-return (
-messages
-.map((m) => {
-let role: string = m.role;
-if (role === "assistant") {
-role = "Assistant";
-} else if (role === "system") {
-role = "System";
-} else if (role === "user") {
-role = "Human";
-}
-const name = m.name?.trim();
-const content = flattenOpenAIMessageContent(m.content);
-// https://console.anthropic.com/docs/prompt-design
-// `name` isn't supported by Anthropic but we can still try to use it.
-return `\n\n${role}: ${name ? `(as ${name}) ` : ""}${content}`;
-})
-.join("") + "\n\nAssistant:"
-);
-}
-export const transformOpenAIToAnthropicChat: APIFormatTransformer<
+export const transformOpenAIToAnthropicChat: APIRequestTransformer<
typeof AnthropicV1MessagesSchema
> = async (req) => {
const { body } = req;
@@ -119,8 +44,7 @@ export const transformOpenAIToAnthropicChat: APIFormatTransformer<
stream: rest.stream,
temperature: rest.temperature,
top_p: rest.top_p,
-stop_sequences:
-typeof rest.stop === "string" ? [rest.stop] : rest.stop || undefined,
+stop_sequences: typeof rest.stop === "string" ? [rest.stop] : rest.stop,
...(rest.user ? { metadata: { user_id: rest.user } } : {}),
// Anthropic supports top_k, but OpenAI does not
// OpenAI supports frequency_penalty, presence_penalty, logit_bias, n, seed,
@@ -128,53 +52,11 @@ export const transformOpenAIToAnthropicChat: APIFormatTransformer<
};
};
-export const transformOpenAIToAnthropicText: APIFormatTransformer<
-typeof AnthropicV1TextSchema
-> = async (req) => {
-const { body } = req;
-const result = OpenAIV1ChatCompletionSchema.safeParse(body);
-if (!result.success) {
-req.log.warn(
-{ issues: result.error.issues, body },
-"Invalid OpenAI-to-Anthropic Text request"
-);
-throw result.error;
-}
-req.headers["anthropic-version"] = "2023-06-01";
-const { messages, ...rest } = result.data;
-const prompt = openAIMessagesToClaudeTextPrompt(messages);
-let stops = rest.stop
-? Array.isArray(rest.stop)
-? rest.stop
-: [rest.stop]
-: [];
-// Recommended by Anthropic
-stops.push("\n\nHuman:");
-// Helps with jailbreak prompts that send fake system messages and multi-bot
-// chats that prefix bot messages with "System: Respond as <bot name>".
-stops.push("\n\nSystem:");
-// Remove duplicates
-stops = [...new Set(stops)];
-return {
-model: rest.model,
-prompt: prompt,
-max_tokens_to_sample: rest.max_tokens,
-stop_sequences: stops,
-stream: rest.stream,
-temperature: rest.temperature,
-top_p: rest.top_p,
-};
-};
/**
* Converts an older Anthropic Text Completion prompt to the newer Messages API
* by splitting the flat text into messages.
*/
-export const transformAnthropicTextToAnthropicChat: APIFormatTransformer<
+export const transformAnthropicTextToAnthropicChat: APIRequestTransformer<
typeof AnthropicV1MessagesSchema
> = async (req) => {
const { body } = req;
@@ -256,39 +138,6 @@ function validateAnthropicTextPrompt(prompt: string) {
}
}
-export function flattenAnthropicMessages(
-messages: AnthropicChatMessage[]
-): string {
-return messages
-.map((msg) => {
-const name = msg.role === "user" ? "Human" : "Assistant";
-const parts = Array.isArray(msg.content)
-? msg.content
-: [{ type: "text", text: msg.content }];
-return `${name}: ${parts
-.map((part) =>
-part.type === "text"
-? part.text
-: `[Omitted multimodal content of type ${part.type}]`
-)
-.join("\n")}`;
-})
-.join("\n\n");
-}
-/**
-* Represents the union of all content types without the `string` shorthand
-* for `text` content.
-*/
-type AnthropicChatMessageContentWithoutString = Exclude<
-AnthropicChatMessage["content"],
-string
->;
-/** Represents a message with all shorthand `string` content expanded. */
-type ConvertedAnthropicChatMessage = AnthropicChatMessage & {
-content: AnthropicChatMessageContentWithoutString;
-};
function openAIMessagesToClaudeChatPrompt(messages: OpenAIChatMessage[]): {
messages: AnthropicChatMessage[];
system: string;
@@ -439,10 +288,3 @@ function convertOpenAIContent(
}
});
}
-export function containsImageContent(messages: AnthropicChatMessage[]) {
-return messages.some(
-({ content }) =>
-typeof content !== "string" && content.some((c) => c.type === "image")
-);
-}
@@ -0,0 +1,52 @@
import { z } from "zod";
import { config } from "../../../../config";
const CLAUDE_OUTPUT_MAX = config.maxOutputTokensAnthropic;
export const AnthropicV1BaseSchema = z
.object({
model: z.string().max(100),
stop_sequences: z.array(z.string().max(500)).optional(),
stream: z.boolean().optional().default(false),
temperature: z.coerce.number().optional().default(1),
top_k: z.coerce.number().optional(),
top_p: z.coerce.number().optional(),
metadata: z.object({ user_id: z.string().optional() }).optional(),
})
.strip();
const AnthropicV1MessageMultimodalContentSchema = z.array(
z.union([
z.object({ type: z.literal("text"), text: z.string() }),
z.object({
type: z.literal("image"),
source: z.object({
type: z.literal("base64"),
media_type: z.string().max(100),
data: z.string(),
}),
}),
])
);
// https://docs.anthropic.com/claude/reference/messages_post
export const AnthropicV1MessagesSchema = AnthropicV1BaseSchema.merge(
z.object({
messages: z.array(
z.object({
role: z.enum(["user", "assistant"]),
content: z.union([
z.string(),
AnthropicV1MessageMultimodalContentSchema,
]),
})
),
max_tokens: z
.number()
.int()
.transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
system: z.string().optional(),
})
);
export type AnthropicChatMessage = z.infer<
typeof AnthropicV1MessagesSchema
>["messages"][0];
@@ -0,0 +1,21 @@
import { AnthropicChatMessage } from "./schema";
export function flattenAnthropicMessages(
messages: AnthropicChatMessage[]
): string {
return messages
.map((msg) => {
const name = msg.role === "user" ? "Human" : "Assistant";
const parts = Array.isArray(msg.content)
? msg.content
: [{ type: "text", text: msg.content }];
return `${name}: ${parts
.map((part) =>
part.type === "text"
? part.text
: `[Omitted multimodal content of type ${part.type}]`
)
.join("\n")}`;
})
.join("\n\n");
}
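For reference, the expected flattening (given the role-label fix above):

flattenAnthropicMessages([
  { role: "user", content: "Hi" },
  { role: "assistant", content: [{ type: "text", text: "Hello!" }] },
]);
// => "Human: Hi\n\nAssistant: Hello!"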
@@ -0,0 +1,73 @@
import {
AnthropicV1TextSchema,
APIRequestTransformer,
OpenAIChatMessage,
} from "../../index";
import { OpenAIV1ChatCompletionSchema } from "../openai/schema";
import { flattenOpenAIMessageContent } from "../openai/stringifier";
export const transformOpenAIToAnthropicText: APIRequestTransformer<
typeof AnthropicV1TextSchema
> = async (req) => {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-Anthropic Text request"
);
throw result.error;
}
req.headers["anthropic-version"] = "2023-06-01";
const { messages, ...rest } = result.data;
const prompt = openAIMessagesToClaudeTextPrompt(messages);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
// Recommended by Anthropic
stops.push("\n\nHuman:");
// Helps with jailbreak prompts that send fake system messages and multi-bot
// chats that prefix bot messages with "System: Respond as <bot name>".
stops.push("\n\nSystem:");
// Remove duplicates
stops = [...new Set(stops)];
return {
model: rest.model,
prompt: prompt,
max_tokens_to_sample: rest.max_tokens,
stop_sequences: stops,
stream: rest.stream,
temperature: rest.temperature,
top_p: rest.top_p,
};
};
function openAIMessagesToClaudeTextPrompt(messages: OpenAIChatMessage[]) {
return (
messages
.map((m) => {
let role: string = m.role;
if (role === "assistant") {
role = "Assistant";
} else if (role === "system") {
role = "System";
} else if (role === "user") {
role = "Human";
}
const name = m.name?.trim();
const content = flattenOpenAIMessageContent(m.content);
// https://console.anthropic.com/docs/prompt-design
// `name` isn't supported by Anthropic but we can still try to use it.
return `\n\n${role}: ${name ? `(as ${name}) ` : ""}${content}`;
})
.join("") + "\n\nAssistant:"
);
}
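The resulting prompt shape, traced from the helper above:

// openAIMessagesToClaudeTextPrompt([
//   { role: "system", content: "Be terse." },
//   { role: "user", content: "Hi", name: "Anon" },
// ])
// => "\n\nSystem: Be terse.\n\nHuman: (as Anon) Hi\n\nAssistant:"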
@@ -0,0 +1,16 @@
import { z } from "zod";
import { AnthropicV1BaseSchema } from "../anthropic-chat/schema";
import { config } from "../../../../config";
const CLAUDE_OUTPUT_MAX = config.maxOutputTokensAnthropic;
// https://docs.anthropic.com/claude/reference/complete_post [deprecated]
export const AnthropicV1TextSchema = AnthropicV1BaseSchema.merge(
z.object({
prompt: z.string(),
max_tokens_to_sample: z.coerce
.number()
.int()
.transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
})
);
@@ -0,0 +1 @@
export { GoogleAIChatMessage } from "./schema";
@@ -1,44 +1,11 @@
-import { z } from "zod";
-import {
-flattenOpenAIMessageContent,
-OpenAIV1ChatCompletionSchema,
-} from "./openai";
-import { APIFormatTransformer } from "./index";
+import { APIRequestTransformer, GoogleAIChatMessage } from "../../index";
+import { GoogleAIV1GenerateContentSchema } from "./schema";
-const GoogleAIV1ContentSchema = z.object({
-parts: z.array(z.object({ text: z.string() })), // TODO: add other media types
-role: z.enum(["user", "model"]).optional(),
-});
+import { OpenAIV1ChatCompletionSchema } from "../openai/schema";
-// https://developers.generativeai.google/api/rest/generativelanguage/models/generateContent
-export const GoogleAIV1GenerateContentSchema = z
-.object({
-model: z.string().max(100), //actually specified in path but we need it for the router
-stream: z.boolean().optional().default(false), // also used for router
-contents: z.array(GoogleAIV1ContentSchema),
-tools: z.array(z.object({})).max(0).optional(),
-safetySettings: z.array(z.object({})).optional(),
-systemInstruction: GoogleAIV1ContentSchema.optional(),
-generationConfig: z.object({
-temperature: z.number().optional(),
-maxOutputTokens: z.coerce
-.number()
-.int()
-.optional()
-.default(16)
-.transform((v) => Math.min(v, 4096)), // TODO: Add config
-candidateCount: z.literal(1).optional(),
-topP: z.number().optional(),
-topK: z.number().optional(),
-stopSequences: z.array(z.string().max(500)).max(5).optional(),
-}),
-})
-.strip();
-export type GoogleAIChatMessage = z.infer<
-typeof GoogleAIV1GenerateContentSchema
->["contents"][0];
+import { flattenOpenAIMessageContent } from "../openai/stringifier";
-export const transformOpenAIToGoogleAI: APIFormatTransformer<
+export const transformOpenAIToGoogleAI: APIRequestTransformer<
typeof GoogleAIV1GenerateContentSchema
> = async (req) => {
const { body } = req;
@@ -104,7 +71,7 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
stops = [...new Set(stops)].slice(0, 5);
return {
-model: req.body.model,
+model: "gemini-pro",
stream: rest.stream,
contents,
tools: [],
@@ -0,0 +1,34 @@
import { z } from "zod";
// https://developers.generativeai.google/api/rest/generativelanguage/models/generateContent
export const GoogleAIV1GenerateContentSchema = z
.object({
model: z.string().max(100), //actually specified in path but we need it for the router
stream: z.boolean().optional().default(false), // also used for router
contents: z.array(
z.object({
parts: z.array(z.object({ text: z.string() })),
role: z.enum(["user", "model"]),
})
),
tools: z.array(z.object({})).max(0).optional(),
safetySettings: z.array(z.object({})).max(0).optional(),
generationConfig: z.object({
temperature: z.number().optional(),
maxOutputTokens: z.coerce
.number()
.int()
.optional()
.default(16)
.transform((v) => Math.min(v, 1024)), // TODO: Add config
candidateCount: z.literal(1).optional(),
topP: z.number().optional(),
topK: z.number().optional(),
stopSequences: z.array(z.string().max(500)).max(5).optional(),
}),
})
.strip();
export type GoogleAIChatMessage = z.infer<
typeof GoogleAIV1GenerateContentSchema
>["contents"][0];
@@ -0,0 +1 @@
export { MistralAIChatMessage } from "./schema";
@@ -0,0 +1,35 @@
import { MistralAIChatMessage } from "./schema";
export function fixMistralPrompt(
messages: MistralAIChatMessage[]
): MistralAIChatMessage[] {
// Mistral uses OpenAI format but has some additional requirements:
// - Only one system message per request, and it must be the first message if
// present.
// - Final message must be a user message.
// - Cannot have multiple messages from the same role in a row.
// While frontends should be able to handle this, we can fix it here in the
// meantime.
return messages.reduce<MistralAIChatMessage[]>((acc, msg) => {
if (acc.length === 0) {
acc.push(msg);
return acc;
}
const copy = { ...msg };
// Reattribute subsequent system messages to the user
if (msg.role === "system") {
copy.role = "user";
}
// Consolidate multiple messages from the same role
const last = acc[acc.length - 1];
if (last.role === copy.role) {
last.content += "\n\n" + copy.content;
} else {
acc.push(copy);
}
return acc;
}, []);
}
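A worked example of the normalization, traced directly from the reducer above:

fixMistralPrompt([
  { role: "system", content: "Be terse." },
  { role: "system", content: "Use emoji." },
  { role: "user", content: "Hi" },
  { role: "user", content: "Hello?" },
]);
// => [
//   { role: "system", content: "Be terse." },
//   { role: "user", content: "Use emoji.\n\nHi\n\nHello?" },
// ]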
@@ -0,0 +1,28 @@
// https://docs.mistral.ai/api#operation/createChatCompletion
import { z } from "zod";
import { OPENAI_OUTPUT_MAX } from "../openai/schema";
export const MistralAIV1ChatCompletionsSchema = z.object({
model: z.string(),
messages: z.array(
z.object({
role: z.enum(["system", "user", "assistant"]),
content: z.string(),
})
),
temperature: z.number().optional().default(0.7),
top_p: z.number().optional().default(1),
max_tokens: z.coerce
.number()
.int()
.nullish()
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
stream: z.boolean().optional().default(false),
safe_prompt: z.boolean().optional().default(false),
random_seed: z.number().int().optional(),
});
export type MistralAIChatMessage = z.infer<
typeof MistralAIV1ChatCompletionsSchema
>["messages"][0];
@@ -1,26 +1,9 @@
-import { z } from "zod";
-import { OpenAIV1ChatCompletionSchema } from "./openai";
-import { APIFormatTransformer } from "./index";
-/* Takes the last chat message and uses it verbatim as the image prompt. */
+import { APIRequestTransformer } from "../../index";
+import { OpenAIV1ImagesGenerationSchema } from "./schema";
+import { OpenAIV1ChatCompletionSchema } from "../openai/schema";
-// https://platform.openai.com/docs/api-reference/images/create
-export const OpenAIV1ImagesGenerationSchema = z
-.object({
-prompt: z.string().max(4000),
-model: z.string().max(100).optional(),
-quality: z.enum(["standard", "hd"]).optional().default("standard"),
-n: z.number().int().min(1).max(4).optional().default(1),
-response_format: z.enum(["url", "b64_json"]).optional(),
-size: z
-.enum(["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"])
-.optional()
-.default("1024x1024"),
-style: z.enum(["vivid", "natural"]).optional().default("vivid"),
-user: z.string().max(500).optional(),
-})
-.strip();
+// Takes the last chat message and uses it verbatim as the image prompt.
-export const transformOpenAIToOpenAIImage: APIFormatTransformer<
+export const transformOpenAIToOpenAIImage: APIRequestTransformer<
typeof OpenAIV1ImagesGenerationSchema
> = async (req) => {
const { body } = req;
@@ -0,0 +1,18 @@
// https://platform.openai.com/docs/api-reference/images/create
import { z } from "zod";
export const OpenAIV1ImagesGenerationSchema = z
.object({
prompt: z.string().max(4000),
model: z.string().max(100).optional(),
quality: z.enum(["standard", "hd"]).optional().default("standard"),
n: z.number().int().min(1).max(4).optional().default(1),
response_format: z.enum(["url", "b64_json"]).optional(),
size: z
.enum(["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"])
.optional()
.default("1024x1024"),
style: z.enum(["vivid", "natural"]).optional().default("vivid"),
user: z.string().max(500).optional(),
})
.strip();
@@ -0,0 +1,33 @@
import { APIRequestTransformer } from "../../index";
import { OpenAIV1TextCompletionSchema } from "./schema";
import { OpenAIV1ChatCompletionSchema } from "../openai/schema";
import { flattenOpenAIChatMessages } from "../openai/stringifier";
export const transformOpenAIToOpenAIText: APIRequestTransformer<
typeof OpenAIV1TextCompletionSchema
> = async (req) => {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-OpenAI-text request"
);
throw result.error;
}
const { messages, ...rest } = result.data;
const prompt = flattenOpenAIChatMessages(messages);
let stops = rest.stop
? Array.isArray(rest.stop)
? rest.stop
: [rest.stop]
: [];
stops.push("\n\nUser:");
stops = [...new Set(stops)];
const transformed = { ...rest, prompt: prompt, stop: stops };
return OpenAIV1TextCompletionSchema.parse(transformed);
};
@@ -0,0 +1,26 @@
import { z } from "zod";
import { OpenAIV1ChatCompletionSchema } from "../openai/schema";
export const OpenAIV1TextCompletionSchema = z
.object({
model: z
.string()
.max(100)
.regex(
/^gpt-3.5-turbo-instruct/,
"Model must start with 'gpt-3.5-turbo-instruct'"
),
prompt: z.string({
required_error:
"No `prompt` found. Ensure you've set the correct completion endpoint.",
}),
logprobs: z.number().int().nullish().default(null),
echo: z.boolean().optional().default(false),
best_of: z.literal(1).optional(),
stop: z
.union([z.string().max(500), z.array(z.string().max(500)).max(4)])
.optional(),
suffix: z.string().max(1000).optional(),
})
.strip()
.merge(OpenAIV1ChatCompletionSchema.omit({ messages: true, logprobs: true }));
@@ -0,0 +1,13 @@
import { APIFormatKit } from "../../index";
import { OpenAIChatMessage, OpenAIV1ChatCompletionSchema } from "./schema";
import { flattenOpenAIChatMessages } from "./stringifier";
import { getOpenAITokenCount } from "./tokenizer";
const kit: APIFormatKit<"openai", OpenAIChatMessage[]> = {
name: "openai",
requestValidator: OpenAIV1ChatCompletionSchema,
// We never transform from other formats into OpenAI format.
requestTransformers: {},
promptStringifier: flattenOpenAIChatMessages,
promptTokenCounter: getOpenAITokenCount,
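// TODO: completionTokenCounter and responseTransformers are required by
// APIFormatKit but are not wired up yet, and the kit is not exported.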
};

Some files were not shown because too many files have changed in this diff.