Compare commits
167 Commits
SHA1s: 94d4efe9bb, 12276a1f59, fdd824f0e4, fbdea30264, cd1b9d0e0c, 9e61d9029f, f95e24afbb, f29049f993, 7f2f324e26, dc61291933, 6c02e9b265, e018672968, bfd7e23124, 6aa6bebf08, 6acdf35914, 3de79873e9, 3aca9e90f0, 5fabe1d1f8, 4a68c14477, 20c064394a, 3ea23760c3, 5db07404f2, c453a5f2ad, c7a095d345, e9110611fa, 79e1fe09e4, 08b2196bfb, 350d6542cf, c9c24f86bb, b6f8f15a1f, 5467136c1a, 0d5dfeccf8, b615ffa433, a27163a629, 5a8fb3aff6, 51dd0c71ba, 89e1ed46d5, 26dc79c8f1, 89e9b67f3f, 52ec2ec265, 8bd2f749c1, ff27ca3780, 41a463d2c8, 3f7e50f87e, f6cfc6e882, af4d8dae40, 725fd6e6f1, c87484f1ff, 15a2cb5a26, c8182cea17, b06d48e1f8, 140bdea14e, 12f78fa1f2, daf6a123d5, 4e05b01e90, 5033d00444, ba0b20617e, 4a5fd91da3, ecf897e685, 6a3d753f0d, 0bf2f5c123, ede274c117, d2267beb18, 0837c89a42, f67560a17b, e13361a323, fa4bf468d2, 7e681a7bef, 1b0106a1ea, f5521aa6c3, f8b480f4c2, 1f35fe1ae1, 35b44e1c6b, 075e415343, ec4f7e845b, 8923bb76a0, 35a6c393ed, ef554f8e06, 624973fc82, c6453638e9, 40e71435f0, 5e57dbb8f1, 201f71a989, 66f1d809ec, 437fe1e720, 404ce4fc80, 95d2369acc, 2a453ab657, 5728e235dc, 7b3d6efb02, 63542bfabb, a558920ccf, 6afb62fef6, 0e325e89e0, f05e196994, 435b46ad4d, 980abcc01f, fe0f04ceb8, 4b32130eaa, ffc0c6472e, 2c0a659b2d, bed275a195, 7cab0a5c52, 27a1181752, 85aeeb2c05, 8d557c844e, 0a52ec478f, e462ad585e, 4d781e1720, 3c56103de0, bb78a399eb, 09416c0b90, abb30d3608, 6833736392, 7c9c3a640c, cb780e85da, 785b1f69f3, c05bfefba4, 9b184ab245, 6bb67281d9, 5d3fb6af3a, 268165e2be, 6f4e581bf2, 358339d48b, c8d8e2e58f, d1d83b41fa, 81ceee7897, dc32e41ab5, 21ee00f057, 97a2b6b479, 61d90f3f3a, bb230469b2, 125bbe6441, d29c304d5a, addfa7c57b, e5b4c7bc9e, 51503dec14, 00346360af, e2bd8a6b86, b8534dafae, 56a4902599, 7634afeea4, 77c2309b52, aa5380d2ef, cbf9f16108, 576423d1f8, c31540e54e, 120b7da340, d7a4829d13, c749e2d57d, efa1b03570, f6f13f7955, 7478112077, aee382c84e, 32605fff53, 71882b18ae, 561c063d90, 2a7efc8d42, 327e860967, 6598b4df0d, 6a7f64b037, c8b3238398, 602931bf7f, db034a51b3, 43359779e7, c0ac69df27, 3a2a6e96fd
```diff
@@ -1,59 +1,132 @@
-# Copy this file to .env and fill in the values you wish to change. Most already
-# have sensible defaults. See config.ts for more details.
+# To customize your server, make a copy of this file to `.env` and edit any
+# values you want to change. Be sure to remove the `#` at the beginning of each
+# line you want to modify.
 
-# PORT=7860
-# SERVER_TITLE=Coom Tunnel
-# MODEL_RATE_LIMIT=4
-# MAX_OUTPUT_TOKENS_OPENAI=300
-# MAX_OUTPUT_TOKENS_ANTHROPIC=900
-# LOG_LEVEL=info
-# REJECT_DISALLOWED=false
-# REJECT_MESSAGE="This content violates /aicg/'s acceptable use policy."
-# CHECK_KEYS=true
-# QUOTA_DISPLAY_MODE=full
-# QUEUE_MODE=fair
-# BLOCKED_ORIGINS=reddit.com,9gag.com
-# BLOCK_MESSAGE="You must be over the age of majority in your country to use this service."
-# BLOCK_REDIRECT="https://roblox.com/"
-
-# Note: CHECK_KEYS is disabled by default in local development mode, but enabled
-# by default in production mode.
-
-# Optional settings for user management. See docs/user-management.md.
-# GATEKEEPER=none
-# GATEKEEPER_STORE=memory
-# MAX_IPS_PER_USER=20
-
-# Optional settings for prompt logging. See docs/logging-sheets.md.
-# PROMPT_LOGGING=false
+# All values have reasonable defaults, so you only need to change the ones you
+# want to override.
 
 # ------------------------------------------------------------------------------
-# The values below are secret -- make sure they are set securely.
+# General settings:
+
+# The title displayed on the info page.
+# SERVER_TITLE=Coom Tunnel
+
+# Text model requests allowed per minute per user.
+# TEXT_MODEL_RATE_LIMIT=4
+# Image model requests allowed per minute per user.
+# IMAGE_MODEL_RATE_LIMIT=2
+
+# Max number of context tokens a user can request at once.
+# Increase this if your proxy allows GPT 32k or 128k context.
+# MAX_CONTEXT_TOKENS_OPENAI=16384
+
+# Max number of output tokens a user can request at once.
+# MAX_OUTPUT_TOKENS_OPENAI=400
+# MAX_OUTPUT_TOKENS_ANTHROPIC=400
+
+# Whether to show the estimated cost of consumed tokens on the info page.
+# SHOW_TOKEN_COSTS=false
+
+# Whether to automatically check API keys for validity.
+# Note: CHECK_KEYS is disabled by default in local development mode, but enabled
+# by default in production mode.
+# CHECK_KEYS=true
+
+# Which model types users are allowed to access.
+# The following model families are recognized:
+# turbo | gpt4 | gpt4-32k | gpt4-turbo | dall-e | claude | bison | aws-claude | azure-turbo | azure-gpt4 | azure-gpt4-32k | azure-gpt4-turbo
+# By default, all models are allowed except for 'dall-e'. To allow DALL-E image
+# generation, uncomment the line below and add 'dall-e' to the list.
+# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,claude,bison,aws-claude,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo
+
+# URLs from which requests will be blocked.
+# BLOCKED_ORIGINS=reddit.com,9gag.com
+# Message to show when requests are blocked.
+# BLOCK_MESSAGE="You must be over the age of majority in your country to use this service."
+# Destination to redirect blocked requests to.
+# BLOCK_REDIRECT="https://roblox.com/"
+
+# Comma-separated list of phrases that will be rejected. Only whole words are matched.
+# Surround phrases with quotes if they contain commas.
+# Avoid short or common phrases as this tests the entire prompt.
+# REJECT_PHRASES="phrase one,phrase two,"phrase three, which has a comma",phrase four"
+# Message to show when requests are rejected.
+# REJECT_MESSAGE="This content violates /aicg/'s acceptable use policy."
+
+# Whether prompts should be logged to Google Sheets.
+# Requires additional setup. See `docs/google-sheets.md` for more information.
+# PROMPT_LOGGING=false
+
+# The port to listen on.
+# PORT=7860
+
+# Whether cookies should be set without the Secure flag, for hosts that don't support SSL.
+# USE_INSECURE_COOKIES=false
+
+# Detail level of logging. (trace | debug | info | warn | error)
+# LOG_LEVEL=info
+
+# ------------------------------------------------------------------------------
+# Optional settings for user management, access control, and quota enforcement:
+# See `docs/user-management.md` for more information and setup instructions.
+# See `docs/user-quotas.md` to learn how to set up quotas.
+
+# Which access control method to use. (none | proxy_key | user_token)
+# GATEKEEPER=none
+# Which persistence method to use. (memory | firebase_rtdb)
+# GATEKEEPER_STORE=memory
+
+# Maximum number of unique IPs a user can connect from. (0 for unlimited)
+# MAX_IPS_PER_USER=0
+# Whether user_tokens should be automatically disabled when reaching the IP limit.
+# MAX_IPS_AUTO_BAN=true
+
+# With user_token gatekeeper, whether to allow users to change their nickname.
+# ALLOW_NICKNAME_CHANGES=true
+
+# Default token quotas for each model family. (0 for unlimited)
+# DALL-E "tokens" are counted at a rate of 100000 tokens per US$1.00 generated,
+# which is similar to the cost of GPT-4 Turbo.
+# DALL-E 3 costs around US$0.10 per image (10000 tokens).
+# See `docs/dall-e-configuration.md` for more information.
+# TOKEN_QUOTA_TURBO=0
+# TOKEN_QUOTA_GPT4=0
+# TOKEN_QUOTA_GPT4_32K=0
+# TOKEN_QUOTA_GPT4_TURBO=0
+# TOKEN_QUOTA_DALL_E=0
+# TOKEN_QUOTA_CLAUDE=0
+# TOKEN_QUOTA_BISON=0
+# TOKEN_QUOTA_AWS_CLAUDE=0
+
+# How often to refresh token quotas. (hourly | daily)
+# Leave unset to never automatically refresh quotas.
+# QUOTA_REFRESH_PERIOD=daily
+
+# ------------------------------------------------------------------------------
+# Secrets and keys:
+# Do not put any passwords or API keys directly in this file.
 # For Huggingface, set them via the Secrets section in your Space's config UI.
 # For Render, create a "secret file" called .env using the Environment tab.
 
-# You can add multiple keys by separating them with a comma.
+# You can add multiple API keys by separating them with a comma.
+# For AWS credentials, separate the access key ID, secret key, and region with a colon.
 OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 ANTHROPIC_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+# See `docs/aws-configuration.md` for more information; there may be additional steps required to set up AWS.
+AWS_CREDENTIALS=myaccesskeyid:mysecretkey:us-east-1,anotheraccesskeyid:anothersecretkey:us-west-2
+# See `docs/azure-configuration.md` for more information; there may be additional steps required to set up Azure.
+AZURE_CREDENTIALS=azure-resource-name:deployment-id:api-key,another-azure-resource-name:another-deployment-id:another-api-key
 
-# TEMPORARY: This will eventually be replaced by a more robust system.
-# You can adjust the models used when sending OpenAI prompts to /anthropic.
-# Refer to Anthropic's docs for more info (note that they don't list older
-# versions of the models, but they still work).
-# CLAUDE_SMALL_MODEL=claude-v1.2
-# CLAUDE_BIG_MODEL=claude-v1-100k
-
-# You can require a Bearer token for requests when using proxy_token gatekeeper.
+# With proxy_key gatekeeper, the password users must provide to access the API.
 # PROXY_KEY=your-secret-key
 
-# You can set an admin key for user management when using user_token gatekeeper.
+# With user_token gatekeeper, the admin password used to manage users.
 # ADMIN_KEY=your-very-secret-key
 
-# These are used for various persistence features. Refer to the docs for more
-# info.
+# With firebase_rtdb gatekeeper storage, the Firebase project credentials.
 # FIREBASE_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 # FIREBASE_RTDB_URL=https://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.firebaseio.com
 
-# This is only relevant if you want to use the prompt logging feature.
+# With prompt logging, the Google Sheets credentials.
 # GOOGLE_SHEETS_SPREADSHEET_ID=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 # GOOGLE_SHEETS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
```
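For illustration, a minimal `.env` derived from this template might look like the following (the values here are arbitrary examples, not recommended settings; every variable is optional):

```
SERVER_TITLE=My Proxy
TEXT_MODEL_RATE_LIMIT=2
IMAGE_MODEL_RATE_LIMIT=1
MAX_CONTEXT_TOKENS_OPENAI=16384
LOG_LEVEL=debug
```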
```diff
@@ -1,6 +1,8 @@
 .env
 .venv
 .vscode
+.idea
 build
 greeting.md
 node_modules
+http-client.private.env.json
```
```diff
@@ -0,0 +1,4 @@
+#!/usr/bin/env sh
+. "$(dirname -- "$0")/_/husky.sh"
+
+npm run type-check
```
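The pre-commit hook above runs the project's type check before each commit. Assuming the standard Husky v8 workflow (the `prepare` script added to package.json in this changeset runs `husky install`), the hooks are registered automatically:

```
npm install        # the "prepare" script runs `husky install`, which registers the hooks
npx husky install  # or register them explicitly
```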
```diff
@@ -0,0 +1,14 @@
+{
+  "overrides": [
+    {
+      "files": [
+        "*.ejs"
+      ],
+      "options": {
+        "printWidth": 160,
+        "bracketSameLine": true
+      }
+    }
+  ],
+  "trailingComma": "es5"
+}
```
```diff
@@ -0,0 +1,2 @@
+*
+!.gitkeep
```
```diff
@@ -3,6 +3,8 @@ RUN apt-get update && \
     apt-get install -y git
 RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
 WORKDIR /app
+RUN chown -R 1000:1000 /app
+USER 1000
 RUN npm install
 COPY Dockerfile greeting.md* .env* ./
 RUN npm run build
```
Binary image changes:
- New image (Size: 4.2 KiB)
- Image changed (Before: 153 KiB → After: 153 KiB)
- Image changed (Before: 22 KiB → After: 22 KiB)
- Image changed (Before: 36 KiB → After: 36 KiB)
```diff
@@ -1,4 +1,4 @@
-# Shat out by GPT-4, I did not check for correctness beyond a cursory glance
 openapi: 3.0.0
 info:
   version: 1.0.0
@@ -26,6 +26,26 @@ paths:
     post:
       summary: Create a new user
       operationId: createUser
+      requestBody:
+        content:
+          application/json:
+            schema:
+              oneOf:
+                - type: object
+                  properties:
+                    type:
+                      type: string
+                      enum: ["normal", "special"]
+                - type: object
+                  properties:
+                    type:
+                      type: string
+                      enum: ["temporary"]
+                    expiresAt:
+                      type: integer
+                      format: int64
+                    tokenLimits:
+                      $ref: "#/components/schemas/TokenCount"
       responses:
         "200":
           description: The created user's token
@@ -170,9 +190,24 @@ paths:
             type: object
             properties:
              error:
                type: string
 components:
   schemas:
+    TokenCount:
+      type: object
+      properties:
+        turbo:
+          type: integer
+          format: int32
+        gpt4:
+          type: integer
+          format: int32
+        "gpt4-32k":
+          type: integer
+          format: int32
+        claude:
+          type: integer
+          format: int32
     User:
       type: object
       properties:
@@ -182,15 +217,18 @@ components:
           type: array
           items:
            type: string
+        nickname:
+          type: string
         type:
           type: string
           enum: ["normal", "special"]
         promptCount:
           type: integer
           format: int32
-        tokenCount:
-          type: integer
-          format: int32
+        tokenLimits:
+          $ref: "#/components/schemas/TokenCount"
+        tokenCounts:
+          $ref: "#/components/schemas/TokenCount"
         createdAt:
           type: integer
           format: int64
@@ -202,3 +240,6 @@ components:
           format: int64
         disabledReason:
           type: string
+        expiresAt:
+          type: integer
+          format: int64
```
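Based on the request body added above, a call that creates a temporary user might look like this sketch (the `{{admin-key}}` variable and the millisecond timestamp for `expiresAt` are assumptions, not confirmed by this excerpt):

```
POST {{proxy-host}}/admin/users
Authorization: Bearer {{admin-key}}
Content-Type: application/json

{
  "type": "temporary",
  "expiresAt": 1735689600000,
  "tokenLimits": { "turbo": 100000, "claude": 50000 }
}
```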
````diff
@@ -0,0 +1,58 @@
+# Configuring the proxy for AWS Bedrock
+
+The proxy supports AWS Bedrock models via the `/proxy/aws/claude` endpoint. There are a few extra steps necessary to use AWS Bedrock compared to the other supported APIs.
+
+- [Setting keys](#setting-keys)
+- [Attaching policies](#attaching-policies)
+- [Provisioning models](#provisioning-models)
+- [Note regarding logging](#note-regarding-logging)
+
+## Setting keys
+
+Use the `AWS_CREDENTIALS` environment variable to set the AWS API keys.
+
+Like other APIs, you can provide multiple keys separated by commas. Each AWS key, however, is a set of credentials including the access key, secret key, and region. These are separated by a colon (`:`).
+
+For example:
+
+```
+AWS_CREDENTIALS=AKIA000000000000000:somesecretkey:us-east-1,AKIA111111111111111:anothersecretkey:us-west-2
+```
+
+## Attaching policies
+
+Unless your credentials belong to the root account, the principal will need to be granted the following permissions:
+
+- `bedrock:InvokeModel`
+- `bedrock:InvokeModelWithResponseStream`
+- `bedrock:GetModelInvocationLoggingConfiguration`
+  - The proxy needs this to determine whether prompt/response logging is enabled. By default, the proxy won't use credentials unless it can conclusively determine that logging is disabled, for privacy reasons.
+
+Use the IAM console or the AWS CLI to attach these policies to the principal associated with the credentials.
+
+## Provisioning models
+
+AWS does not automatically provide accounts with access to every model. You will need to provision the models you want to use, in the regions you want to use them in. You can do this from the AWS console.
+
+⚠️ **Models are region-specific.** Currently AWS only offers Claude in a small number of regions. Switch to the AWS region you want to use, then go to the models page and request access to **Anthropic / Claude**.
+
+![]()
+
+Access is generally granted more or less instantly. Once your account has access, you can enable the model by checking the box next to it.
+
+You can also request Claude Instant, but support for this isn't fully implemented yet.
+
+### Supported model IDs
+Users can send these model IDs to the proxy to invoke the corresponding models.
+- **Claude**
+  - `anthropic.claude-v1` (~18k context, Claude 1.3)
+  - `anthropic.claude-v2` (~100k context, Claude 2.0)
+  - `anthropic.claude-v2:1` (~200k context, Claude 2.1)
+- **Claude Instant**
+  - `anthropic.claude-instant-v1` (~100k context, Claude Instant 1.2)
+
+## Note regarding logging
+
+By default, the proxy will refuse to use keys if it finds that logging is enabled, or if it doesn't have permission to check logging status.
+
+If you can't attach the `bedrock:GetModelInvocationLoggingConfiguration` policy to the principal, you can set the `ALLOW_AWS_LOGGING` environment variable to `true` to force the proxy to use the keys anyway. A warning will appear on the info page when this is enabled.
````
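For reference, a minimal IAM policy granting the three permissions listed above might look like this (a sketch only; you may want to scope `Resource` more narrowly than `*`):

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "bedrock:InvokeModel",
        "bedrock:InvokeModelWithResponseStream",
        "bedrock:GetModelInvocationLoggingConfiguration"
      ],
      "Resource": "*"
    }
  ]
}
```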
````diff
@@ -0,0 +1,30 @@
+# Configuring the proxy for Azure
+
+The proxy supports Azure OpenAI Service via the `/proxy/azure/openai` endpoint. The process of setting it up is slightly different from regular OpenAI.
+
+- [Setting keys](#setting-keys)
+- [Model assignment](#model-assignment)
+
+## Setting keys
+
+Use the `AZURE_CREDENTIALS` environment variable to set the Azure API keys.
+
+Like other APIs, you can provide multiple keys separated by commas. Each Azure key, however, is a set of values including the resource name, deployment ID, and API key. These are separated by a colon (`:`).
+
+For example:
+```
+AZURE_CREDENTIALS=contoso-ml:gpt4-8k:0123456789abcdef0123456789abcdef,northwind-corp:testdeployment:0123456789abcdef0123456789abcdef
+```
+
+## Model assignment
+Note that each Azure deployment is assigned a model when you create it in the Azure OpenAI Service portal. If you want to use a different model, you'll need to create a new deployment, and therefore a new key to be added to the `AZURE_CREDENTIALS` environment variable. Each credential only grants access to one model.
+
+### Supported model IDs
+Users can send normal OpenAI model IDs to the proxy to invoke the corresponding models. For the most part they work the same with Azure. GPT-3.5 Turbo has an ID of "gpt-35-turbo" because Azure doesn't allow periods in model names, but the proxy should automatically convert this to the correct ID.
+
+As noted above, you can only use model IDs for which a deployment has been created and added to the proxy.
+
+## On content filtering
+Be aware that all Azure OpenAI Service deployments have content filtering enabled by default at a Medium level. Prompts or responses deemed inappropriate will be rejected by the API. This is a feature of the Azure OpenAI Service, not the proxy.
+
+You can disable this from the deployment's settings within Azure, but you would first need to request an exemption from Microsoft for your organization. See [this page](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/content-filters) for more information.
````
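Once a credential is configured, users call the proxy with ordinary OpenAI-style requests; a sketch (the path matches the one used by the test script later in this document):

```
POST {{proxy-host}}/proxy/azure/openai/v1/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json

{
  "model": "gpt-4",
  "max_tokens": 20,
  "messages": [{ "role": "user", "content": "This is a test prompt." }]
}
```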
````diff
@@ -0,0 +1,71 @@
+# Configuring the proxy for DALL-E
+
+The proxy supports DALL-E 2 and DALL-E 3 image generation via the `/proxy/openai-images` endpoint. By default it is disabled, as it is somewhat expensive and potentially more open to abuse than text generation.
+
+- [Updating your Dockerfile](#updating-your-dockerfile)
+- [Enabling DALL-E](#enabling-dall-e)
+- [Setting quotas](#setting-quotas)
+- [Rate limiting](#rate-limiting)
+
+## Updating your Dockerfile
+If you are using a previous version of the Dockerfile supplied with the proxy, it doesn't have the necessary permissions to let the proxy save temporary files.
+
+You can replace the entire thing with the new Dockerfile at [./docker/huggingface/Dockerfile](../docker/huggingface/Dockerfile) (or the equivalent for Render deployments).
+
+You can also modify your existing Dockerfile; just add the following lines after the `WORKDIR` line:
+
+```Dockerfile
+# Existing
+RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
+WORKDIR /app
+
+# Take ownership of the app directory and switch to the non-root user
+RUN chown -R 1000:1000 /app
+USER 1000
+
+# Existing
+RUN npm install
+```
+
+## Enabling DALL-E
+Add `dall-e` to the `ALLOWED_MODEL_FAMILIES` environment variable to enable DALL-E. For example:
+
+```
+# GPT-3.5 Turbo, GPT-4, GPT-4 Turbo, and DALL-E
+ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-turbo,dall-e
+
+# All models as of this writing
+ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,claude,bison,aws-claude,dall-e
+```
+
+Refer to [.env.example](../.env.example) for a full list of supported model families. You can add `dall-e` to that list to enable all models.
+
+## Setting quotas
+DALL-E doesn't bill by token like text generation models. Instead there is a fixed cost per image generated, depending on the model, image size, and selected quality.
+
+The proxy still uses tokens to set quotas for users. The cost for each generated image will be converted to "tokens" at a rate of 100000 tokens per US$1.00. This works out to a similar cost-per-token as GPT-4 Turbo, so you can use similar token quotas for both.
+
+Use `TOKEN_QUOTA_DALL_E` to set the default quota for image generation. Otherwise it works the same as token quotas for other models.
+
+```
+# ~50 standard DALL-E images per refresh period, or US$2.00
+TOKEN_QUOTA_DALL_E=200000
+```
+
+Refer to [https://openai.com/pricing](https://openai.com/pricing) for the latest pricing information. As of this writing, the cheapest DALL-E 3 image costs $0.04 per generation, which works out to 4000 tokens. Higher resolution and quality settings can cost up to $0.12 per image, or 12000 tokens.
+
+## Rate limiting
+The old `MODEL_RATE_LIMIT` setting has been split into `TEXT_MODEL_RATE_LIMIT` and `IMAGE_MODEL_RATE_LIMIT`. Whatever value you previously set for `MODEL_RATE_LIMIT` will be used for text models.
+
+If you don't specify an `IMAGE_MODEL_RATE_LIMIT`, it defaults to half of the `TEXT_MODEL_RATE_LIMIT`, with a minimum of 1 image per minute.
+
+```
+# 4 text generations per minute, 2 images per minute
+TEXT_MODEL_RATE_LIMIT=4
+IMAGE_MODEL_RATE_LIMIT=2
+```
+
+If a prompt is filtered by OpenAI's content filter, it won't count towards the rate limit.
+
+## Hiding recent images
+By default, the proxy shows the last 12 images recently generated by users. You can hide this section by setting `SHOW_RECENT_IMAGES` to `false`.
````
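Assuming the endpoint mirrors OpenAI's image-generation route (an assumption; only the `/proxy/openai-images` prefix is confirmed above), a request might look like:

```
POST {{proxy-host}}/proxy/openai-images/v1/images/generations
Authorization: Bearer {{proxy-key}}
Content-Type: application/json

{
  "model": "dall-e-3",
  "prompt": "A watercolor painting of a lighthouse",
  "n": 1,
  "size": "1024x1024"
}
```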
````diff
@@ -12,12 +12,12 @@ This repository can be deployed to a [Huggingface Space](https://huggingface.co/
 - Provide a name for your Space and select "Docker" as the SDK. Select "Blank" for the template.
 - Click "Create Space" and wait for the Space to be created.
 
 ![]()
 
 ### 3. Create an empty Dockerfile
 - Once your Space is created, you'll see an option to "Create the Dockerfile in your browser". Click that link.
 
 ![]()
 - Paste the following into the text editor and click "Save".
 ```dockerfile
 FROM node:18-bullseye-slim
@@ -25,6 +25,8 @@ RUN apt-get update && \
     apt-get install -y git
 RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
 WORKDIR /app
+RUN chown -R 1000:1000 /app
+USER 1000
 RUN npm install
 COPY Dockerfile greeting.md* .env* ./
 RUN npm run build
@@ -34,7 +36,7 @@ CMD [ "npm", "start" ]
 ```
 - Click "Commit new file to `main`" to save the Dockerfile.
 
 ![]()
 
 ### 4. Set your API key as a secret
 - Click the Settings button in the top right corner of your repository.
@@ -82,14 +84,18 @@ MAX_OUTPUT_TOKENS_ANTHROPIC=512
 # Block prompts containing disallowed characters
 REJECT_DISALLOWED=false
 REJECT_MESSAGE="This content violates /aicg/'s acceptable use policy."
-# Show exact quota usage on the Server Info page
-QUOTA_DISPLAY_MODE=full
 ```
 
 See `.env.example` for a full list of available settings, or check `config.ts` for details on what each setting does.
 
 ## Restricting access to the server
 
-If you want to restrict access to the server, you can set a `PROXY_KEY` secret. This key will need to be passed in the Authentication header of every request to the server, just like an OpenAI API key.
+If you want to restrict access to the server, you can set a `PROXY_KEY` secret. This key will need to be passed in the Authentication header of every request to the server, just like an OpenAI API key. Set the `GATEKEEPER` mode to `proxy_key`, and then set the `PROXY_KEY` variable to whatever password you want.
 
 Add this using the same method as the OPENAI_KEY secret above. Don't add this to your `.env` file because that file is public and anyone can see it.
 
+Example:
+```
+GATEKEEPER=proxy_key
+PROXY_KEY=your_secret_password
+```
````
```diff
@@ -1,5 +1,5 @@
 # Deploy to Render.com
-Render.com offers a free tier that includes 750 hours of compute time per month. This is enough to run a single proxy instance 24/7. Instances shut down after 15 minutes without traffic but start up again automatically when a request is received.
+Render.com offers a free tier that includes 750 hours of compute time per month. This is enough to run a single proxy instance 24/7. Instances shut down after 15 minutes without traffic but start up again automatically when a request is received. You can use something like https://app.checklyhq.com/ to ping your proxy every 15 minutes to keep it alive.
 
 ### 1. Create account
 - [Sign up for Render.com](https://render.com/) to create an account and access the dashboard.
```
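As a sketch of that keep-alive idea, any scheduler that can issue an HTTP request works; for example, a cron entry on another machine (the URL is a placeholder for your own deployment):

```
*/10 * * * * curl -fsS https://your-proxy.onrender.com/ > /dev/null
```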
```diff
@@ -1,10 +1,11 @@
 # User Management
 
 The proxy supports several different user management strategies. You can choose the one that best fits your needs by setting the `GATEKEEPER` environment variable.
 
 Several of these features require you to set secrets in your environment. If using Huggingface Spaces to deploy, do not set these in your `.env` file because that file is public and anyone can see it.
 
 ## Table of Contents
 
 - [No user management](#no-user-management-gatekeepernone)
 - [Single-password authentication](#single-password-authentication-gatekeeperproxy_key)
 - [Per-user authentication](#per-user-authentication-gatekeeperuser_token)
@@ -18,29 +19,30 @@ This is the default mode. The proxy will not require any authentication to acces
 
 ## Single-password authentication (`GATEKEEPER=proxy_key`)
 
 This mode allows you to set a password that must be passed in the `Authentication` header of every request to the server as a bearer token. This is useful if you want to restrict access to the server, but don't want to create a separate account for every user.
 
 To set the password, create a `PROXY_KEY` secret in your environment.
 
 ## Per-user authentication (`GATEKEEPER=user_token`)
 
-This mode allows you to provision separate Bearer tokens for each user. You can manage users via the /admin/users REST API, which itself requires an admin Bearer token.
+This mode allows you to provision separate Bearer tokens for each user. You can manage users via the /admin/users REST API or through the admin interface at `/admin`.
 
-To begin, set `ADMIN_KEY` to a secret value. This will be used to authenticate requests to the /admin/users REST API.
+To begin, set `ADMIN_KEY` to a secret value. This will be used to authenticate requests to the REST API or to log in to the UI.
 
 [You can find an OpenAPI specification for the /admin/users REST API here.](openapi-admin-users.yaml)
 
-By default, the proxy will store user data in memory. Naturally, this means that user data will be lost when the proxy is restarted, though you can use the bulk user import/export feature to save and restore user data manually or via a script. However, the proxy also supports persisting user data to an external data store with some additional configuration.
+By default, the proxy will store user data in memory. Naturally, this means that user data will be lost when the proxy is restarted, though you can use the user import/export feature to save and restore user data manually or via a script. However, the proxy also supports persisting user data to an external data store with some additional configuration.
 
 Below are the supported data stores and their configuration options.
 
 ### Memory
 
-This is the default data store (`GATEKEEPER_STORE=memory`). User data will be stored in memory and will be lost when the proxy is restarted. You are responsible for downloading and re-uploading user data via the REST API if you want to persist it.
+This is the default data store (`GATEKEEPER_STORE=memory`). User data will be stored in memory and will be lost when the server is restarted. You are responsible for exporting and re-importing user data after a restart.
 
 ### Firebase Realtime Database
 
 To use Firebase Realtime Database to persist user data, set the following environment variables:
 
 - `GATEKEEPER_STORE`: Set this to `firebase_rtdb`
 - **Secret** `FIREBASE_RTDB_URL`: The URL of your Firebase Realtime Database, e.g. `https://my-project-default-rtdb.firebaseio.com`
 - **Secret** `FIREBASE_KEY`: A base-64 encoded service account key for your Firebase project. Refer to the instructions below for how to create this key.
@@ -49,17 +51,13 @@ To use Firebase Realtime Database to persist user data, set the following enviro
 
 1. Go to the [Firebase console](https://console.firebase.google.com/) and click "Add project", then follow the prompts to create a new project.
 2. From the **Project Overview** page, click **All products** in the left sidebar, then click **Realtime Database**.
 3. Click **Create database** and choose **Start in test mode**. Click **Enable**.
    - Test mode is fine for this use case as it still requires authentication to access the database. You may wish to set up more restrictive rules if you plan to use the database for other purposes.
    - The reference URL for the database will be displayed on the page. You will need this later.
 4. Click the gear icon next to **Project Overview** in the left sidebar, then click **Project settings**.
 5. Click the **Service accounts** tab, then click **Generate new private key**.
 6. The downloaded file contains your key. Encode it as base64 and set it as the `FIREBASE_KEY` secret in your environment.
 7. Set `FIREBASE_RTDB_URL` to the reference URL of your Firebase Realtime Database, e.g. `https://my-project-default-rtdb.firebaseio.com`.
 8. Set `GATEKEEPER_STORE` to `firebase_rtdb` in your environment if you haven't already.
 
-The proxy will attempt to connect to your Firebase Realtime Database at startup and will throw an error if it cannot connect. If you see this error, check that your `FIREBASE_RTDB_URL` and `FIREBASE_KEY` secrets are set correctly.
+The proxy server will attempt to connect to your Firebase Realtime Database at startup and will throw an error if it cannot connect. If you see this error, check that your `FIREBASE_RTDB_URL` and `FIREBASE_KEY` secrets are set correctly.
 
----
-
-Users are loaded from the database and changes are flushed periodically. You can use the PUT /admin/users API to bulk import users and force a flush to the database.
```
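For step 6 above, a sketch of encoding the downloaded service-account key (the filename is whatever Firebase generated for you):

```
base64 -w0 my-project-firebase-adminsdk.json   # Linux (GNU coreutils)
base64 -i my-project-firebase-adminsdk.json    # macOS
```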
```diff
@@ -0,0 +1,36 @@
+# User Quotas
+
+When using `user_token` authentication, you can set per-model token quotas for users. These quotas are enforced by the proxy server and are separate from the quotas enforced by OpenAI.
+
+You can set the default quota via environment variables. Quotas are enforced on a per-model basis, and count both prompt tokens and completion tokens. By default, all quotas are disabled.
+
+Set the following environment variables to set the default quotas:
+- `TOKEN_QUOTA_TURBO`
+- `TOKEN_QUOTA_GPT4`
+- `TOKEN_QUOTA_CLAUDE`
+
+Quotas only apply to `normal`-type users; `special`-type users are exempt from quotas. You can change users' types via the REST API.
+
+**Note that changes to these environment variables will only apply to newly created users.** To modify existing users' quotas, use the REST API or the admin UI.
+
+## Automatically refreshing quotas
+
+You can use the `QUOTA_REFRESH_PERIOD` environment variable to automatically refresh users' quotas periodically. This is useful if you want to give users a certain number of tokens per day, for example. The entire quota will be refreshed at the start of the specified period, and any tokens a user has not used will not be carried over.
+
+Quotas for all models and users will be refreshed. If you haven't set `TOKEN_QUOTA_*` for a particular model, quotas for that model will not be refreshed (so any manually set quotas will not be overwritten).
+
+Set the `QUOTA_REFRESH_PERIOD` environment variable to one of the following values:
+- `daily` (at midnight)
+- `hourly`
+- leave unset to disable automatic refreshing
+
+You can also use a cron expression, for example:
+- Every 45 seconds: `"*/45 * * * * *"`
+- Every 30 minutes: `"*/30 * * * *"`
+- Every 6 hours: `"0 */6 * * *"`
+- Every 3 days: `"0 0 */3 * *"`
+- Daily, but at mid-day: `"0 12 * * *"`
+
+Make sure to enclose the cron expression in quotation marks.
+
+All times are in the server's local time zone. Refer to [crontab.guru](https://crontab.guru/) for more examples.
```
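Putting these together, a configuration that grants each newly created user a daily allowance might look like the following (values are illustrative):

```
TOKEN_QUOTA_TURBO=500000
TOKEN_QUOTA_GPT4=50000
QUOTA_REFRESH_PERIOD=daily
```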
```diff
@@ -0,0 +1,9 @@
+{
+  "dev": {
+    "proxy-host": "http://localhost:7860",
+    "oai-key-1": "override in http-client.private.env.json",
+    "proxy-key": "override in http-client.private.env.json",
+    "azu-resource-name": "override in http-client.private.env.json",
+    "azu-deployment-id": "override in http-client.private.env.json"
+  }
+}
```
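The gitignored `http-client.private.env.json` companion file would hold the real values; a sketch reusing example values from elsewhere in this changeset:

```json
{
  "dev": {
    "oai-key-1": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
    "proxy-key": "your-secret-key",
    "azu-resource-name": "contoso-ml",
    "azu-deployment-id": "gpt4-8k"
  }
}
```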
```diff
@@ -3,13 +3,12 @@
   "version": "1.0.0",
   "description": "Reverse proxy for the OpenAI API",
   "scripts": {
-    "build:watch": "esbuild src/server.ts --outfile=build/server.js --platform=node --target=es2020 --format=cjs --bundle --sourcemap --watch",
-    "build": "tsc",
-    "start:dev": "concurrently \"npm run build:watch\" \"npm run start:watch\"",
-    "start:dev:tsc": "nodemon --watch src --exec ts-node --transpile-only src/server.ts",
-    "start:watch": "nodemon --require source-map-support/register build/server.js",
-    "start:replit": "tsc && node build/server.js",
+    "build": "tsc && copyfiles -u 1 src/**/*.ejs build",
+    "prepare": "husky install",
     "start": "node build/server.js",
+    "start:dev": "nodemon --watch src --exec ts-node --transpile-only src/server.ts",
+    "start:replit": "tsc && node build/server.js",
+    "start:watch": "nodemon --require source-map-support/register build/server.js",
     "type-check": "tsc --noEmit"
   },
   "engines": {
@@ -18,32 +17,62 @@
   "author": "",
   "license": "MIT",
   "dependencies": {
+    "@anthropic-ai/tokenizer": "^0.0.4",
+    "@aws-crypto/sha256-js": "^5.1.0",
+    "@smithy/protocol-http": "^3.0.6",
+    "@smithy/signature-v4": "^2.0.10",
+    "@smithy/types": "^2.3.4",
     "axios": "^1.3.5",
+    "check-disk-space": "^3.4.0",
+    "cookie-parser": "^1.4.6",
+    "copyfiles": "^2.4.1",
     "cors": "^2.8.5",
+    "csrf-csrf": "^2.3.0",
     "dotenv": "^16.0.3",
+    "ejs": "^3.1.9",
     "express": "^4.18.2",
-    "firebase-admin": "^11.8.0",
-    "googleapis": "^117.0.0",
+    "express-session": "^1.17.3",
+    "firebase-admin": "^11.10.1",
+    "googleapis": "^122.0.0",
     "http-proxy-middleware": "^3.0.0-beta.1",
-    "openai": "^3.2.1",
+    "lifion-aws-event-stream": "^1.0.7",
+    "memorystore": "^1.6.7",
+    "multer": "^1.4.5-lts.1",
+    "node-schedule": "^2.1.1",
     "pino": "^8.11.0",
     "pino-http": "^8.3.3",
+    "sanitize-html": "^2.11.0",
+    "sharp": "^0.32.6",
     "showdown": "^2.1.0",
+    "tiktoken": "^1.0.10",
     "uuid": "^9.0.0",
     "zlib": "^1.0.5",
-    "zod": "^3.21.4"
+    "zod": "^3.22.3",
+    "zod-error": "^1.5.0"
   },
   "devDependencies": {
+    "@types/cookie-parser": "^1.4.3",
     "@types/cors": "^2.8.13",
     "@types/express": "^4.17.17",
+    "@types/express-session": "^1.17.7",
+    "@types/multer": "^1.4.7",
+    "@types/node-schedule": "^2.1.0",
+    "@types/sanitize-html": "^2.9.0",
     "@types/showdown": "^2.0.0",
    "@types/uuid": "^9.0.1",
     "concurrently": "^8.0.1",
     "esbuild": "^0.17.16",
     "esbuild-register": "^3.4.2",
-    "nodemon": "^2.0.22",
+    "husky": "^8.0.3",
+    "nodemon": "^3.0.1",
+    "pino-pretty": "^10.2.3",
+    "prettier": "^3.0.3",
     "source-map-support": "^0.5.21",
     "ts-node": "^10.9.1",
-    "typescript": "^5.0.4"
+    "typescript": "^5.1.3"
+  },
+  "overrides": {
+    "google-gax": "^3.6.1",
+    "postcss": "^8.4.31"
   }
 }
```
```diff
@@ -0,0 +1,248 @@
+# OAI Reverse Proxy
+
+###
+# @name OpenAI -- Chat Completions
+POST https://api.openai.com/v1/chat/completions
+Authorization: Bearer {{oai-key-1}}
+Content-Type: application/json
+
+{
+  "model": "gpt-3.5-turbo",
+  "max_tokens": 30,
+  "stream": false,
+  "messages": [
+    {
+      "role": "user",
+      "content": "This is a test prompt."
+    }
+  ]
+}
+
+###
+# @name OpenAI -- Text Completions
+POST https://api.openai.com/v1/completions
+Authorization: Bearer {{oai-key-1}}
+Content-Type: application/json
+
+{
+  "model": "gpt-3.5-turbo-instruct",
+  "max_tokens": 30,
+  "stream": false,
+  "prompt": "This is a test prompt where"
+}
+
+###
+# @name OpenAI -- Create Embedding
+POST https://api.openai.com/v1/embeddings
+Authorization: Bearer {{oai-key-1}}
+Content-Type: application/json
+
+{
+  "model": "text-embedding-ada-002",
+  "input": "This is a test embedding input."
+}
+
+###
+# @name OpenAI -- Get Organizations
+GET https://api.openai.com/v1/organizations
+Authorization: Bearer {{oai-key-1}}
+
+###
+# @name OpenAI -- Get Models
+GET https://api.openai.com/v1/models
+Authorization: Bearer {{oai-key-1}}
+
+###
+# @name Azure OpenAI -- Chat Completions
+POST https://{{azu-resource-name}}.openai.azure.com/openai/deployments/{{azu-deployment-id}}/chat/completions?api-version=2023-09-01-preview
+api-key: {{azu-key-1}}
+Content-Type: application/json
+
+{
+  "max_tokens": 1,
+  "stream": false,
+  "messages": [
+    {
+      "role": "user",
+      "content": "This is a test prompt."
+    }
+  ]
+}
+
+###
+# @name Proxy / OpenAI -- Get Models
+GET {{proxy-host}}/proxy/openai/v1/models
+Authorization: Bearer {{proxy-key}}
+
+###
+# @name Proxy / OpenAI -- Native Chat Completions
+POST {{proxy-host}}/proxy/openai/chat/completions
+Authorization: Bearer {{proxy-key}}
+Content-Type: application/json
+
+{
+  "model": "gpt-3.5-turbo",
+  "max_tokens": 20,
+  "stream": true,
+  "temperature": 1,
+  "seed": 123,
+  "messages": [
+    {
+      "role": "user",
+      "content": "phrase one"
+    }
+  ]
+}
+
+###
+# @name Proxy / OpenAI -- Native Text Completions
+POST {{proxy-host}}/proxy/openai/v1/turbo-instruct/chat/completions
+Authorization: Bearer {{proxy-key}}
+Content-Type: application/json
+
+{
+  "model": "gpt-3.5-turbo-instruct",
+  "max_tokens": 20,
+  "temperature": 0,
+  "prompt": "Genshin Impact is a game about",
+  "stream": false
+}
+
+###
+# @name Proxy / OpenAI -- Chat-to-Text API Translation
+# Accepts a chat completion request and reformats it to work with the text completion API. `model` is ignored.
+POST {{proxy-host}}/proxy/openai/turbo-instruct/chat/completions
+Authorization: Bearer {{proxy-key}}
+Content-Type: application/json
+
+{
+  "model": "gpt-4",
+  "max_tokens": 20,
+  "stream": true,
+  "messages": [
+    {
+      "role": "user",
+      "content": "What is the name of the fourth president of the united states?"
+    },
+    {
+      "role": "assistant",
+      "content": "That would be George Washington."
+    },
+    {
+      "role": "user",
+      "content": "I don't think that's right..."
+    }
+  ]
+}
+
+###
+# @name Proxy / OpenAI -- Create Embedding
+POST {{proxy-host}}/proxy/openai/embeddings
+Authorization: Bearer {{proxy-key}}
+Content-Type: application/json
+
+{
+  "model": "text-embedding-ada-002",
+  "input": "This is a test embedding input."
+}
+
+###
+# @name Proxy / Anthropic -- Native Completion (old API)
+POST {{proxy-host}}/proxy/anthropic/v1/complete
+Authorization: Bearer {{proxy-key}}
+anthropic-version: 2023-01-01
+Content-Type: application/json
+
+{
+  "model": "claude-v1.3",
+  "max_tokens_to_sample": 20,
+  "temperature": 0.2,
+  "stream": true,
+  "prompt": "What is genshin impact\n\n:Assistant:"
+}
+
+###
+# @name Proxy / Anthropic -- Native Completion (2023-06-01 API)
+POST {{proxy-host}}/proxy/anthropic/v1/complete
+Authorization: Bearer {{proxy-key}}
+anthropic-version: 2023-06-01
+Content-Type: application/json
+
+{
+  "model": "claude-v1.3",
+  "max_tokens_to_sample": 20,
+  "temperature": 0.2,
+  "stream": true,
+  "prompt": "What is genshin impact\n\n:Assistant:"
+}
+
+###
+# @name Proxy / Anthropic -- OpenAI-to-Anthropic API Translation
+POST {{proxy-host}}/proxy/anthropic/v1/chat/completions
+Authorization: Bearer {{proxy-key}}
+#anthropic-version: 2023-06-01
+Content-Type: application/json
+
+{
+  "model": "gpt-3.5-turbo",
+  "max_tokens": 20,
+  "stream": false,
+  "temperature": 0,
+  "messages": [
+    {
+      "role": "user",
+      "content": "What is genshin impact"
+    }
+  ]
+}
+
+###
+# @name Proxy / AWS Claude -- Native Completion
+POST {{proxy-host}}/proxy/aws/claude/v1/complete
+Authorization: Bearer {{proxy-key}}
+anthropic-version: 2023-01-01
+Content-Type: application/json
+
+{
+  "model": "claude-v2",
+  "max_tokens_to_sample": 10,
+  "temperature": 0,
+  "stream": true,
+  "prompt": "What is genshin impact\n\n:Assistant:"
+}
+
+###
+# @name Proxy / AWS Claude -- OpenAI-to-Anthropic API Translation
+POST {{proxy-host}}/proxy/aws/claude/chat/completions
+Authorization: Bearer {{proxy-key}}
+Content-Type: application/json
+
+{
+  "model": "gpt-3.5-turbo",
+  "max_tokens": 50,
+  "stream": true,
+  "messages": [
+    {
+      "role": "user",
+      "content": "What is genshin impact?"
+    }
+  ]
+}
+
+###
+# @name Proxy / Google PaLM -- OpenAI-to-PaLM API Translation
+POST {{proxy-host}}/proxy/google-palm/v1/chat/completions
+Authorization: Bearer {{proxy-key}}
+Content-Type: application/json
+
+{
+  "model": "gpt-4",
+  "max_tokens": 42,
+  "messages": [
+    {
+      "role": "user",
+      "content": "Hi what is the name of the fourth president of the united states?"
+    }
+  ]
+}
```
@@ -0,0 +1,44 @@
const axios = require("axios");

const concurrentRequests = 5;
const headers = {
  Authorization: "Bearer test",
  "Content-Type": "application/json",
};

const payload = {
  model: "gpt-4",
  max_tokens: 1,
  stream: false,
  messages: [{ role: "user", content: "Hi" }],
};

const makeRequest = async (i) => {
  try {
    const response = await axios.post(
      "http://localhost:7860/proxy/azure/openai/v1/chat/completions",
      payload,
      { headers }
    );
    console.log(
      `Req ${i} finished with status code ${response.status} and response:`,
      response.data
    );
  } catch (error) {
    console.error(`Error in req ${i}:`, error.message);
  }
};

const executeRequestsConcurrently = () => {
  const promises = [];
  for (let i = 1; i <= concurrentRequests; i++) {
    console.log(`Starting request ${i}`);
    promises.push(makeRequest(i));
  }

  Promise.all(promises).then(() => {
    console.log("All requests finished");
  });
};

executeRequestsConcurrently();
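A note on running the script above: it assumes the proxy is listening locally on port 7860 and that a user token of `test` is provisioned (both values are baked into the script). Saved as, say, `load-test.js` (the filename is arbitrary), it runs with `npm install axios` followed by `node load-test.js`, and on success should print five "Req N finished" lines plus a final "All requests finished".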
@@ -1,37 +1,18 @@
 import { Router } from "express";
 import { z } from "zod";
-import * as userStore from "../proxy/auth/user-store";
+import * as userStore from "../../shared/users/user-store";
+import { parseSort, sortBy } from "../../shared/utils";
+import { UserPartialSchema, UserSchema } from "../../shared/users/schema";

-const usersRouter = Router();
+const router = Router();

-const UserSchema = z
-  .object({
-    ip: z.array(z.string()).optional(),
-    type: z.enum(["normal", "special"]).optional(),
-    promptCount: z.number().optional(),
-    tokenCount: z.number().optional(),
-    createdAt: z.number().optional(),
-    lastUsedAt: z.number().optional(),
-    disabledAt: z.number().optional(),
-    disabledReason: z.string().optional(),
-  })
-  .strict();
-
-const UserSchemaWithToken = UserSchema.extend({
-  token: z.string(),
-}).strict();
-
 /**
  * Returns a list of all users, sorted by prompt count and then last used time.
  * GET /admin/users
  */
-usersRouter.get("/", (_req, res) => {
-  const users = userStore.getUsers().sort((a, b) => {
-    if (a.promptCount !== b.promptCount) {
-      return b.promptCount - a.promptCount;
-    }
-    return (b.lastUsedAt ?? 0) - (a.lastUsedAt ?? 0);
-  });
+router.get("/", (req, res) => {
+  const sort = parseSort(req.query.sort) || ["promptCount", "lastUsedAt"];
+  const users = userStore.getUsers().sort(sortBy(sort, false));
   res.json({ users, count: users.length });
 });

@@ -39,7 +20,7 @@ usersRouter.get("/", (_req, res) => {
  * Returns the user with the given token.
  * GET /admin/users/:token
  */
-usersRouter.get("/:token", (req, res) => {
+router.get("/:token", (req, res) => {
   const user = userStore.getUser(req.params.token);
   if (!user) {
     return res.status(404).json({ error: "Not found" });
@@ -49,11 +30,33 @@ usersRouter.get("/:token", (req, res) => {

 /**
  * Creates a new user.
+ * Optionally accepts a JSON body containing `type`, and for temporary-type
+ * users, `tokenLimits` and `expiresAt` fields.
  * Returns the created user's token.
  * POST /admin/users
  */
-usersRouter.post("/", (_req, res) => {
-  res.json({ token: userStore.createUser() });
+router.post("/", (req, res) => {
+  const body = req.body;
+
+  const base = z.object({
+    type: UserSchema.shape.type.exclude(["temporary"]).default("normal"),
+  });
+  const tempUser = base
+    .extend({
+      type: z.literal("temporary"),
+      expiresAt: UserSchema.shape.expiresAt,
+      tokenLimits: UserSchema.shape.tokenLimits,
+    })
+    .required();
+
+  const schema = z.union([base, tempUser]);
+  const result = schema.safeParse(body);
+  if (!result.success) {
+    return res.status(400).json({ error: result.error });
+  }
+
+  const token = userStore.createUser({ ...result.data });
+  res.json({ token });
 });

 /**
@@ -62,12 +65,15 @@ usersRouter.post("/", (_req, res) => {
  * Returns the upserted user.
  * PUT /admin/users/:token
  */
-usersRouter.put("/:token", (req, res) => {
-  const result = UserSchema.safeParse(req.body);
+router.put("/:token", (req, res) => {
+  const result = UserPartialSchema.safeParse({
+    ...req.body,
+    token: req.params.token,
+  });
   if (!result.success) {
     return res.status(400).json({ error: result.error });
   }
-  userStore.upsertUser({ ...result.data, token: req.params.token });
+  userStore.upsertUser(result.data);
   res.json(userStore.getUser(req.params.token));
 });

@@ -77,16 +83,13 @@ usersRouter.put("/:token", (req, res) => {
  * Returns an object containing the upserted users and the number of upserts.
  * PUT /admin/users
  */
-usersRouter.put("/", (req, res) => {
-  const result = z.array(UserSchemaWithToken).safeParse(req.body.users);
+router.put("/", (req, res) => {
+  const result = z.array(UserPartialSchema).safeParse(req.body.users);
   if (!result.success) {
     return res.status(400).json({ error: result.error });
   }
   const upserts = result.data.map((user) => userStore.upsertUser(user));
-  res.json({
-    upserted_users: upserts,
-    count: upserts.length,
-  });
+  res.json({ upserted_users: upserts, count: upserts.length });
 });

 /**
@@ -95,7 +98,7 @@ usersRouter.put("/", (req, res) => {
  * Returns the disabled user.
  * DELETE /admin/users/:token
  */
-usersRouter.delete("/:token", (req, res) => {
+router.delete("/:token", (req, res) => {
   const user = userStore.getUser(req.params.token);
   const disabledReason = z
     .string()
@@ -111,4 +114,4 @@ usersRouter.delete("/:token", (req, res) => {
   res.json(userStore.getUser(req.params.token));
 });

-export { usersRouter };
+export { router as usersApiRouter };
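For reference, here is what a request against the new creation endpoint could look like, in the same style as the `.http` collection above. This is only a sketch: the `{{admin-key}}` variable is an assumption (defined like `{{proxy-key}}`, but holding the admin key), `expiresAt` is a millisecond epoch timestamp, and the `tokenLimits` keys shown are the `turbo`/`gpt4`/`claude` families described on the import page below (the actual set comes from `MODEL_FAMILIES`).

###
# @name Admin / Users -- Create Temporary User (sketch)
POST {{proxy-host}}/admin/users
Authorization: Bearer {{admin-key}}
Content-Type: application/json

{
  "type": "temporary",
  "expiresAt": 1735689600000,
  "tokenLimits": { "turbo": 100000, "gpt4": 50000, "claude": 100000 }
}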
@@ -0,0 +1,54 @@
import { Request, Response, RequestHandler } from "express";
import { config } from "../config";

const ADMIN_KEY = config.adminKey;
const failedAttempts = new Map<string, number>();

type AuthorizeParams = { via: "cookie" | "header" };

export const authorize: ({ via }: AuthorizeParams) => RequestHandler =
  ({ via }) =>
  (req, res, next) => {
    const bearerToken = req.headers.authorization?.slice("Bearer ".length);
    const cookieToken = req.session.adminToken;
    const token = via === "cookie" ? cookieToken : bearerToken;
    const attempts = failedAttempts.get(req.ip) ?? 0;

    if (!ADMIN_KEY) {
      req.log.warn(
        { ip: req.ip },
        `Blocked admin request because no admin key is configured`
      );
      return res.status(401).json({ error: "Unauthorized" });
    }

    if (attempts > 5) {
      req.log.warn(
        { ip: req.ip, token: bearerToken },
        `Blocked admin request due to too many failed attempts`
      );
      return res.status(401).json({ error: "Too many attempts" });
    }

    if (token && token === ADMIN_KEY) {
      return next();
    }

    req.log.warn(
      { ip: req.ip, attempts, invalidToken: String(token) },
      `Attempted admin request with invalid token`
    );
    return handleFailedLogin(req, res);
  };

function handleFailedLogin(req: Request, res: Response) {
  const attempts = failedAttempts.get(req.ip) ?? 0;
  const newAttempts = attempts + 1;
  failedAttempts.set(req.ip, newAttempts);
  if (req.accepts("json", "html") === "json") {
    return res.status(401).json({ error: "Unauthorized" });
  }
  delete req.session.adminToken;
  req.session.flash = { type: "error", message: `Invalid admin key.` };
  return res.redirect("/admin/login");
}
@@ -0,0 +1,26 @@
import { Router } from "express";

const loginRouter = Router();

loginRouter.get("/login", (_req, res) => {
  res.render("admin_login");
});

loginRouter.post("/login", (req, res) => {
  req.session.adminToken = req.body.token;
  res.redirect("/admin");
});

loginRouter.get("/logout", (req, res) => {
  delete req.session.adminToken;
  res.redirect("/admin/login");
});

loginRouter.get("/", (req, res) => {
  if (req.session.adminToken) {
    return res.redirect("/admin/manage");
  }
  res.redirect("/admin/login");
});

export { loginRouter };
@@ -1,36 +1,60 @@
-import { RequestHandler, Router } from "express";
-import { config } from "../config";
-import { usersRouter } from "./users";
-
-const ADMIN_KEY = config.adminKey;
-const failedAttempts = new Map<string, number>();
+import express, { Router } from "express";
+import { authorize } from "./auth";
+import { HttpError } from "../shared/errors";
+import { injectLocals } from "../shared/inject-locals";
+import { withSession } from "../shared/with-session";
+import { injectCsrfToken, checkCsrfToken } from "../shared/inject-csrf";
+import { buildInfoPageHtml } from "../info-page";
+import { loginRouter } from "./login";
+import { usersApiRouter as apiRouter } from "./api/users";
+import { usersWebRouter as webRouter } from "./web/manage";

 const adminRouter = Router();

-const auth: RequestHandler = (req, res, next) => {
-  const token = req.headers.authorization?.slice("Bearer ".length);
-  const attempts = failedAttempts.get(req.ip) ?? 0;
-  if (attempts > 5) {
-    req.log.warn(
-      { ip: req.ip, token },
-      `Blocked request to admin API due to too many failed attempts`
-    );
-    return res.status(401).json({ error: "Too many attempts" });
-  }
-
-  if (token !== ADMIN_KEY) {
-    const newAttempts = attempts + 1;
-    failedAttempts.set(req.ip, newAttempts);
-    req.log.warn(
-      { ip: req.ip, attempts: newAttempts, token },
-      `Attempted admin API request with invalid token`
-    );
-    return res.status(401).json({ error: "Unauthorized" });
-  }
-
-  next();
-};
-
-adminRouter.use(auth);
-adminRouter.use("/users", usersRouter);
+adminRouter.use(
+  express.json({ limit: "20mb" }),
+  express.urlencoded({ extended: true, limit: "20mb" })
+);
+adminRouter.use(withSession);
+adminRouter.use(injectCsrfToken);
+
+adminRouter.use("/users", authorize({ via: "header" }), apiRouter);
+
+adminRouter.use(checkCsrfToken);
+adminRouter.use(injectLocals);
+adminRouter.use("/", loginRouter);
+adminRouter.use("/manage", authorize({ via: "cookie" }), webRouter);
+adminRouter.use("/service-info", authorize({ via: "cookie" }), (req, res) => {
+  return res.send(
+    buildInfoPageHtml(req.protocol + "://" + req.get("host"), true)
+  );
+});
+
+adminRouter.use(
+  (
+    err: Error,
+    req: express.Request,
+    res: express.Response,
+    _next: express.NextFunction
+  ) => {
+    const data: any = { message: err.message, stack: err.stack };
+    if (err instanceof HttpError) {
+      data.status = err.status;
+      res.status(err.status);
+      if (req.accepts(["html", "json"]) === "json") {
+        return res.json({ error: data });
+      }
+      return res.render("admin_error", data);
+    } else if (err.name === "ForbiddenError") {
+      data.status = 403;
+      if (err.message === "invalid csrf token") {
+        data.message =
+          "Invalid CSRF token; try refreshing the previous page before submitting again.";
+      }
+      return res.status(403).render("admin_error", { ...data, flash: null });
+    }
+    res.status(500).json({ error: data });
+  }
+);

 export { adminRouter };
@@ -0,0 +1,358 @@
import { Router } from "express";
import multer from "multer";
import { z } from "zod";
import { config } from "../../config";
import { HttpError } from "../../shared/errors";
import * as userStore from "../../shared/users/user-store";
import { parseSort, sortBy, paginate } from "../../shared/utils";
import { keyPool } from "../../shared/key-management";
import { MODEL_FAMILIES } from "../../shared/models";
import { getTokenCostUsd, prettyTokens } from "../../shared/stats";
import {
  User,
  UserPartialSchema,
  UserSchema,
  UserTokenCounts,
} from "../../shared/users/schema";

const router = Router();

const upload = multer({
  storage: multer.memoryStorage(),
  fileFilter: (_req, file, cb) => {
    if (file.mimetype !== "application/json") {
      cb(new Error("Invalid file type"));
    } else {
      cb(null, true);
    }
  },
});

router.get("/create-user", (req, res) => {
  const recentUsers = userStore
    .getUsers()
    .sort(sortBy(["createdAt"], false))
    .slice(0, 5);
  res.render("admin_create-user", {
    recentUsers,
    newToken: !!req.query.created,
  });
});

router.post("/create-user", (req, res) => {
  const body = req.body;

  const base = z.object({ type: UserSchema.shape.type.default("normal") });
  const tempUser = base
    .extend({
      temporaryUserDuration: z.coerce
        .number()
        .int()
        .min(1)
        .max(10080 * 4),
    })
    .merge(
      MODEL_FAMILIES.reduce((schema, model) => {
        return schema.extend({
          [`temporaryUserQuota_${model}`]: z.coerce.number().int().min(0),
        });
      }, z.object({}))
    )
    .transform((data: any) => {
      const expiresAt = Date.now() + data.temporaryUserDuration * 60 * 1000;
      const tokenLimits = MODEL_FAMILIES.reduce((limits, model) => {
        limits[model] = data[`temporaryUserQuota_${model}`];
        return limits;
      }, {} as UserTokenCounts);
      return { ...data, expiresAt, tokenLimits };
    });

  const createSchema = body.type === "temporary" ? tempUser : base;
  const result = createSchema.safeParse(body);
  if (!result.success) {
    throw new HttpError(
      400,
      result.error.issues.flatMap((issue) => issue.message).join(", ")
    );
  }

  userStore.createUser({ ...result.data });
  return res.redirect(`/admin/manage/create-user?created=true`);
});

router.get("/view-user/:token", (req, res) => {
  const user = userStore.getUser(req.params.token);
  if (!user) throw new HttpError(404, "User not found");
  res.render("admin_view-user", { user });
});

router.get("/list-users", (req, res) => {
  const sort = parseSort(req.query.sort) || ["sumTokens", "createdAt"];
  const requestedPageSize =
    Number(req.query.perPage) || Number(req.cookies.perPage) || 20;
  const perPage = Math.max(1, Math.min(1000, requestedPageSize));
  const users = userStore
    .getUsers()
    .map((user) => {
      const sums = getSumsForUser(user);
      return { ...user, ...sums };
    })
    .sort(sortBy(sort, false));

  const page = Number(req.query.page) || 1;
  const { items, ...pagination } = paginate(users, page, perPage);

  return res.render("admin_list-users", {
    sort: sort.join(","),
    users: items,
    ...pagination,
  });
});

router.get("/import-users", (_req, res) => {
  res.render("admin_import-users");
});

router.post("/import-users", upload.single("users"), (req, res) => {
  if (!req.file) throw new HttpError(400, "No file uploaded");

  const data = JSON.parse(req.file.buffer.toString());
  const result = z.array(UserPartialSchema).safeParse(data.users);
  if (!result.success) throw new HttpError(400, result.error.toString());

  const upserts = result.data.map((user) => userStore.upsertUser(user));
  req.session.flash = {
    type: "success",
    message: `${upserts.length} users imported`,
  };
  res.redirect("/admin/manage/import-users");
});

router.get("/export-users", (_req, res) => {
  res.render("admin_export-users");
});

router.get("/export-users.json", (_req, res) => {
  const users = userStore.getUsers();
  res.setHeader("Content-Disposition", "attachment; filename=users.json");
  res.setHeader("Content-Type", "application/json");
  res.send(JSON.stringify({ users }, null, 2));
});

router.get("/", (_req, res) => {
  res.render("admin_index");
});

router.post("/edit-user/:token", (req, res) => {
  const result = UserPartialSchema.safeParse({
    ...req.body,
    token: req.params.token,
  });
  if (!result.success) {
    throw new HttpError(
      400,
      result.error.issues.flatMap((issue) => issue.message).join(", ")
    );
  }

  userStore.upsertUser(result.data);
  return res.status(200).json({ success: true });
});

router.post("/reactivate-user/:token", (req, res) => {
  const user = userStore.getUser(req.params.token);
  if (!user) throw new HttpError(404, "User not found");

  userStore.upsertUser({
    token: user.token,
    disabledAt: null,
    disabledReason: null,
  });
  return res.sendStatus(204);
});

router.post("/disable-user/:token", (req, res) => {
  const user = userStore.getUser(req.params.token);
  if (!user) throw new HttpError(404, "User not found");

  userStore.disableUser(req.params.token, req.body.reason);
  return res.sendStatus(204);
});

router.post("/refresh-user-quota", (req, res) => {
  const user = userStore.getUser(req.body.token);
  if (!user) throw new HttpError(404, "User not found");

  userStore.refreshQuota(user.token);
  req.session.flash = {
    type: "success",
    message: "User's quota was refreshed",
  };
  return res.redirect(`/admin/manage/view-user/${user.token}`);
});

router.post("/maintenance", (req, res) => {
  const action = req.body.action;
  let flash = { type: "", message: "" };
  switch (action) {
    case "recheck": {
      keyPool.recheck("openai");
      keyPool.recheck("anthropic");
      const size = keyPool
        .list()
        .filter((k) => k.service !== "google-palm").length;
      flash.type = "success";
      flash.message = `Scheduled recheck of ${size} keys for OpenAI and Anthropic.`;
      break;
    }
    case "resetQuotas": {
      const users = userStore.getUsers();
      users.forEach((user) => userStore.refreshQuota(user.token));
      const { claude, gpt4, turbo } = config.tokenQuota;
      flash.type = "success";
      flash.message = `All users' token quotas reset to ${turbo} (Turbo), ${gpt4} (GPT-4), ${claude} (Claude).`;
      break;
    }
    case "resetCounts": {
      const users = userStore.getUsers();
      users.forEach((user) => userStore.resetUsage(user.token));
      flash.type = "success";
      flash.message = `All users' token usage records reset.`;
      break;
    }
    default: {
      throw new HttpError(400, "Invalid action");
    }
  }

  req.session.flash = flash;

  return res.redirect(`/admin/manage`);
});

router.get("/download-stats", (_req, res) => {
  return res.render("admin_download-stats");
});

router.post("/generate-stats", (req, res) => {
  const body = req.body;

  const valid = z
    .object({
      anon: z.coerce.boolean().optional().default(false),
      sort: z.string().optional().default("prompts"),
      maxUsers: z.coerce
        .number()
        .int()
        .min(5)
        .max(1000)
        .optional()
        .default(1000),
      tableType: z.enum(["code", "markdown"]).optional().default("markdown"),
      format: z
        .string()
        .optional()
        .default("# Stats\n{{header}}\n{{stats}}\n{{time}}"),
    })
    .strict()
    .safeParse(body);

  if (!valid.success) {
    throw new HttpError(
      400,
      valid.error.issues.flatMap((issue) => issue.message).join(", ")
    );
  }

  const { anon, sort, format, maxUsers, tableType } = valid.data;
  const users = userStore.getUsers();

  let totalTokens = 0;
  let totalCost = 0;
  let totalPrompts = 0;
  let totalIps = 0;

  const lines = users
    .map((u) => {
      const sums = getSumsForUser(u);
      totalTokens += sums.sumTokens;
      totalCost += sums.sumCost;
      totalPrompts += u.promptCount;
      totalIps += u.ip.length;

      const getName = (u: User) => {
        const id = `...${u.token.slice(-5)}`;
        const banned = !!u.disabledAt;
        let nick = anon || !u.nickname ? "Anonymous" : u.nickname;

        if (tableType === "markdown") {
          nick = banned ? `~~${nick}~~` : nick;
          return `${nick.slice(0, 18)} | ${id}`;
        } else {
          // Strikethrough doesn't work within code blocks
          const dead = !!u.disabledAt ? "[dead] " : "";
          nick = `${dead}${nick}`;
          return `${nick.slice(0, 18).padEnd(18)} ${id}`.padEnd(27);
        }
      };

      const user = getName(u);
      const prompts = `${u.promptCount} proompts`.padEnd(14);
      const ips = `${u.ip.length} IPs`.padEnd(8);
      const tokens = `${sums.prettyUsage} tokens`.padEnd(30);
      const sortField = sort === "prompts" ? u.promptCount : sums.sumTokens;
      return { user, prompts, ips, tokens, sortField };
    })
    .sort((a, b) => b.sortField - a.sortField)
    .map(({ user, prompts, ips, tokens }, i) => {
      const pos = tableType === "markdown" ? (i + 1 + ".").padEnd(4) : "";
      return `${pos}${user} | ${prompts} | ${ips} | ${tokens}`;
    })
    .slice(0, maxUsers);

  const strTotalPrompts = `${totalPrompts} proompts`;
  const strTotalIps = `${totalIps} IPs`;
  const strTotalTokens = `${prettyTokens(totalTokens)} tokens`;
  const strTotalCost = `US$${totalCost.toFixed(2)} cost`;
  const header = `!!!Note ${users.length} users | ${strTotalPrompts} | ${strTotalIps} | ${strTotalTokens} | ${strTotalCost}`;
  const time = `\n-> *(as of ${new Date().toISOString()})* <-`;

  let table = [];
  table.push(lines.join("\n"));

  if (valid.data.tableType === "markdown") {
    table = ["User||Prompts|IPs|Usage", "---|---|---|---|---", ...table];
  } else {
    table = ["```text", ...table, "```"];
  }

  const result = format
    .replace("{{header}}", header)
    .replace("{{stats}}", table.join("\n"))
    .replace("{{time}}", time);

  res.setHeader(
    "Content-Disposition",
    `attachment; filename=proxy-stats-${new Date().toISOString()}.md`
  );
  res.setHeader("Content-Type", "text/markdown");
  res.send(result);
});

function getSumsForUser(user: User) {
  const sums = MODEL_FAMILIES.reduce(
    (s, model) => {
      const tokens = user.tokenCounts[model] ?? 0;
      s.sumTokens += tokens;
      s.sumCost += getTokenCostUsd(model, tokens);
      return s;
    },
    { sumTokens: 0, sumCost: 0, prettyUsage: "" }
  );
  sums.prettyUsage = `${prettyTokens(sums.sumTokens)} ($${sums.sumCost.toFixed(
    2
  )})`;
  return sums;
}

export { router as usersWebRouter };
@@ -0,0 +1,133 @@
<%- include("partials/shared_header", { title: "Create User - OAI Reverse Proxy Admin" }) %>

<style>
  #temporaryUserOptions {
    margin-top: 1em;
    max-width: 30em;
  }

  #temporaryUserOptions h3 {
    margin-bottom: -0.4em;
  }

  input[type="number"] {
    max-width: 10em;
  }

  .temporary-user-fieldset {
    display: grid;
    grid-template-columns: repeat(4, 1fr); /* Four equal-width columns */
    column-gap: 1em;
    row-gap: 0.2em;
  }

  .full-width {
    grid-column: 1 / -1;
  }

  .quota-label {
    text-align: right;
  }
</style>

<h1>Create User Token</h1>
<p>User token types:</p>
<ul>
  <li><strong>Normal</strong> - Standard users.</li>
  <li><strong>Special</strong> - Exempt from token quotas and <code>MAX_IPS_PER_USER</code> enforcement.</li>
  <li><strong>Temporary</strong> - Disabled after a specified duration. Quotas never refresh.</li>
</ul>

<form action="/admin/manage/create-user" method="post">
  <input type="hidden" name="_csrf" value="<%= csrfToken %>" />
  <label for="type">Type</label>
  <select name="type">
    <option value="normal">Normal</option>
    <option value="special">Special</option>
    <option value="temporary">Temporary</option>
  </select>
  <input type="submit" value="Create" />
  <fieldset id="temporaryUserOptions" style="display: none">
    <legend>Temporary User Options</legend>
    <div class="temporary-user-fieldset">
      <p class="full-width">
        Temporary users will be disabled after the specified duration, and their records will be deleted 72 hours after that.
        These options apply only to new temporary users; existing ones use whatever options were in effect when they were created.
      </p>
      <label for="temporaryUserDuration" class="full-width">Access duration (in minutes)</label>
      <input type="number" name="temporaryUserDuration" id="temporaryUserDuration" value="60" class="full-width" />
      <!-- convenience calculations -->
      <span>6 hours:</span><code>360</code>
      <span>12 hours:</span><code>720</code>
      <span>1 day:</span><code>1440</code>
      <span>1 week:</span><code>10080</code>
      <h3 class="full-width">Token Quotas</h3>
      <p class="full-width">Temporary users' quotas are never refreshed.</p>
      <% Object.entries(quota).forEach(function([model, tokens]) { %>
        <label class="quota-label" for="temporaryUserQuota_<%= model %>"><%= model %></label>
        <input
          type="number"
          name="temporaryUserQuota_<%= model %>"
          id="temporaryUserQuota_<%= model %>"
          value="0"
          data-fieldtype="tokenquota"
          data-default="<%= tokens %>" />
      <% }) %>
    </div>
  </fieldset>
</form>
<% if (newToken) { %>
  <p>Just created <code><%= recentUsers[0].token %></code>.</p>
<% } %>
<h2>Recent Tokens</h2>
<ul>
  <% recentUsers.forEach(function(user) { %>
    <li><a href="/admin/manage/view-user/<%= user.token %>"><%= user.token %></a></li>
  <% }) %>
</ul>

<script>
  const typeInput = document.querySelector("select[name=type]");
  const temporaryUserOptions = document.querySelector("#temporaryUserOptions");
  typeInput.addEventListener("change", function () {
    localStorage.setItem("admin__create-user__type", typeInput.value);
    if (typeInput.value === "temporary") {
      temporaryUserOptions.style.display = "block";
    } else {
      temporaryUserOptions.style.display = "none";
    }
  });

  function loadDefaults() {
    const defaultType = localStorage.getItem("admin__create-user__type");
    if (defaultType) {
      typeInput.value = defaultType;
      typeInput.dispatchEvent(new Event("change"));
    }

    const durationInput = document.querySelector("input[name=temporaryUserDuration]");
    const defaultDuration = localStorage.getItem("admin__create-user__duration");
    durationInput.addEventListener("change", function () {
      localStorage.setItem("admin__create-user__duration", durationInput.value);
    });
    if (defaultDuration) {
      durationInput.value = defaultDuration;
    }

    const tokenQuotaInputs = document.querySelectorAll("input[data-fieldtype=tokenquota]");
    tokenQuotaInputs.forEach(function (input) {
      const defaultQuota = localStorage.getItem("admin__create-user__quota__" + input.id);
      input.addEventListener("change", function () {
        localStorage.setItem("admin__create-user__quota__" + input.id, input.value);
      });
      if (defaultQuota) {
        input.value = defaultQuota;
      }
    });
  }

  loadDefaults();
</script>

<%- include("partials/admin-footer") %>
@@ -0,0 +1,147 @@
<%- include("partials/shared_header", { title: "Download Stats - OAI Reverse Proxy Admin" }) %>
<style>
  #statsForm {
    display: flex;
    flex-direction: column;
  }

  #statsForm div {
    display: flex;
    flex-direction: row;
    margin-bottom: 0.5em;
  }

  #statsForm div label {
    width: 6em;
    text-align: right;
    margin-right: 1em;
  }

  #statsForm ul {
    margin: 0;
    padding-left: 2em;
    font-size: 0.8em;
  }

  #statsForm li {
    list-style: none;
  }

  #statsForm textarea {
    font-family: monospace;
    flex-grow: 1;
  }
</style>
<h1>Download Stats</h1>
<p>
  Download usage statistics as a Markdown document. You can paste it into a service like Rentry.org to share it.
</p>
<div>
  <h3>Options</h3>
  <form id="statsForm" action="/admin/manage/generate-stats" method="post"
    style="display: flex; flex-direction: column;">
    <input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
    <div>
      <label for="anon">Anonymize</label>
      <input id="anon" type="checkbox" name="anon" value="true" />
    </div>
    <div>
      <label for="sort">Sort</label>
      <select id="sort" name="sort">
        <option value="tokens" selected>By Token Count</option>
        <option value="prompts">By Prompt Count</option>
      </select>
    </div>
    <div>
      <label for="maxUsers">Max Users</label>
      <input id="maxUsers" type="number" name="maxUsers" value="1000" />
    </div>
    <div>
      <label for="tableType">Table Type</label>
      <select id="tableType" name="tableType">
        <option value="markdown" selected>Markdown Table</option>
        <option value="code">Code Block</option>
      </select>
    </div>
    <div>
      <label for="format">Custom Format
        <ul>
          <li><code>{{header}}</code></li>
          <li><code>{{stats}}</code></li>
          <li><code>{{time}}</code></li>
        </ul>
      </label>
      <textarea id="format" name="format" rows="10" cols="50" placeholder="{{stats}}">
# Stats
{{header}}
{{stats}}
{{time}}
</textarea>
    </div>
    <div>
      <button type="submit">Download</button>
      <button id="copyButton" type="button">Copy to Clipboard</button>
    </div>
  </form>
</div>

<script>
  function loadDefaults() {
    const getState = (key) => localStorage.getItem("admin__download-stats__" + key);
    const setState = (key, value) => localStorage.setItem("admin__download-stats__" + key, value);

    const checkboxes = ["anon"];
    const values = ["sort", "format", "tableType", "maxUsers"];

    checkboxes.forEach((key) => {
      const value = getState(key);
      if (value) {
        document.getElementById(key).checked = value == "true";
      }
      document.getElementById(key).addEventListener("change", (e) => {
        setState(key, e.target.checked);
      });
    });

    values.forEach((key) => {
      const value = getState(key);
      if (value) {
        document.getElementById(key).value = value;
      }
      document.getElementById(key).addEventListener("change", (e) => {
        setState(key, e.target.value?.trim());
      });
    });
  }

  loadDefaults();

  async function fetchAndCopy() {
    const form = document.getElementById('statsForm');
    const formData = new FormData(form);

    const response = await fetch(form.action, {
      method: 'POST',
      headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
      credentials: 'same-origin',
      body: new URLSearchParams(formData),
    });

    if (response.ok) {
      const content = await response.text();
      copyToClipboard(content);
    } else {
      throw new Error('Failed to fetch generated stats. Try reloading the page.');
    }
  }

  function copyToClipboard(text) {
    navigator.clipboard.writeText(text).then(() => {
      alert('Copied to clipboard');
    }).catch(err => {
      alert('Failed to copy to clipboard. Try downloading the file instead.');
    });
  }

  document.getElementById('copyButton').addEventListener('click', fetchAndCopy);
</script>
<%- include("partials/admin-footer") %>
@@ -0,0 +1,8 @@
<%- include("partials/shared_header", { title: "Error" }) %>
<div id="error-content" style="color: red; background-color: #eedddd; padding: 1em">
  <p><strong>⚠️ Error <%= status %>:</strong> <%= message %></p>
  <pre><%= stack %></pre>
  <a href="#" onclick="window.history.back()">Go Back</a> | <a href="/admin">Go Home</a>
</div>
</body>
</html>
@@ -0,0 +1,28 @@
<%- include("partials/shared_header", { title: "Export Users - OAI Reverse Proxy Admin" }) %>
<h1>Export Users</h1>
<p>
  Export users to JSON. The JSON will be an array of objects under the key
  <code>users</code>. You can use this JSON to import users later.
</p>
<script>
  function exportUsers() {
    var xhr = new XMLHttpRequest();
    xhr.open("GET", "/admin/manage/export-users.json", true);
    xhr.responseType = "blob";
    xhr.onload = function() {
      if (this.status === 200) {
        var blob = new Blob([this.response], { type: "application/json" });
        var url = URL.createObjectURL(blob);
        var a = document.createElement("a");
        a.href = url;
        a.download = "users.json";
        document.body.appendChild(a);
        a.click();
        a.remove();
      }
    };
    xhr.send();
  }
</script>
<button onclick="exportUsers()">Export</button>
<%- include("partials/admin-footer") %>
@@ -0,0 +1,48 @@
<%- include("partials/shared_header", { title: "Import Users - OAI Reverse Proxy Admin" }) %>
<h1>Import Users</h1>
<p>
  Import users from JSON. The JSON should be an array of objects under the key
  <code>users</code>. Each object should have the following fields:
</p>
<ul>
  <li><code>token</code> (required): a unique identifier for the user</li>
  <li><code>nickname</code> (optional): a nickname for the user, max 80 chars</li>
  <li><code>ip</code> (optional): IP addresses the user has connected from</li>
  <li>
    <code>type</code> (optional): either <code>normal</code> or
    <code>special</code>
  </li>
  <li>
    <code>promptCount</code> (optional): the number of times the user has sent a
    prompt
  </li>
  <li>
    <code>tokenCounts</code> (optional): the number of tokens the user has
    consumed. This should be an object with keys <code>turbo</code>,
    <code>gpt4</code>, and <code>claude</code>.
  </li>
  <li>
    <code>tokenLimits</code> (optional): the number of tokens the user can
    consume. This should be an object with keys <code>turbo</code>,
    <code>gpt4</code>, and <code>claude</code>.
  </li>
  <li>
    <code>createdAt</code> (optional): the timestamp when the user was created
  </li>
  <li>
    <code>disabledAt</code> (optional): the timestamp when the user was disabled
  </li>
  <li>
    <code>disabledReason</code> (optional): the reason the user was disabled
  </li>
</ul>
<p>
  If a user with the same token already exists, the existing user will be
  updated with the new values.
</p>
<form action="/admin/manage/import-users?_csrf=<%= csrfToken %>" method="post" enctype="multipart/form-data">
  <input type="file" name="users" />
  <input type="submit" value="Import" />
</form>
<%- include("partials/admin-footer") %>
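To make the expected shape concrete, a minimal `users.json` that the form above would accept might look like the following. This is only a sketch: every value here is hypothetical, and only `token` is required.

{
  "users": [
    {
      "token": "example-token-12345",
      "nickname": "alice",
      "type": "normal",
      "promptCount": 42,
      "tokenCounts": { "turbo": 1000, "gpt4": 0, "claude": 500 },
      "tokenLimits": { "turbo": 100000, "gpt4": 50000, "claude": 100000 }
    }
  ]
}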
@@ -0,0 +1,71 @@
<%- include("partials/shared_header", { title: "OAI Reverse Proxy Admin" }) %>
<h1>OAI Reverse Proxy Admin</h1>
<% if (!usersEnabled) { %>
  <p style="color: red; background-color: #eedddd; padding: 1em">
    <strong>🚨 <code>user_token</code> gatekeeper is not enabled.</strong><br />
    <br />None of the user management features will do anything.
  </p>
<% } %>
<% if (!persistenceEnabled) { %>
  <p style="color: red; background-color: #eedddd; padding: 1em">
    <strong>⚠️ Users will be lost when the server restarts because persistence is not configured.</strong><br />
    <br />Be sure to export your users and import them again after restarting the server if you want to keep them.<br />
    <br />
    See the
    <a target="_blank" href="https://gitgud.io/khanon/oai-reverse-proxy/-/blob/main/docs/user-management.md#firebase-realtime-database">
      user management documentation</a>
    to learn how to set up persistence.
  </p>
<% } %>
<h3>Users</h3>
<ul>
  <li><a href="/admin/manage/list-users">List Users</a></li>
  <li><a href="/admin/manage/create-user">Create User</a></li>
  <li><a href="/admin/manage/import-users">Import Users</a></li>
  <li><a href="/admin/manage/export-users">Export Users</a></li>
  <li><a href="/admin/manage/download-stats">Download Rentry Stats</a></li>
  <li><a href="/admin/service-info">Service Info</a></li>
</ul>
<h3>Maintenance</h3>
<form id="maintenanceForm" action="/admin/manage/maintenance" method="post">
  <input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
  <input id="hiddenAction" type="hidden" name="action" value="" />
  <div style="display: flex; flex-direction: column">
    <fieldset>
      <legend>Key Recheck</legend>
      <button id="recheck-keys" type="button" onclick="submitForm('recheck')">Force Key Recheck</button>
      <label for="recheck-keys">Triggers a recheck of all keys without restarting the server.</label>
    </fieldset>
    <% if (quotasEnabled) { %>
      <fieldset>
        <legend>Bulk Quota Management</legend>
        <p>
          <button id="refresh-quotas" type="button" onclick="submitForm('resetQuotas')">Refresh All Quotas</button>
          Resets all users' quotas to the values set in the <code>TOKEN_QUOTA_*</code> environment variables.
        </p>
        <p>
          <button id="clear-token-counts" type="button" onclick="submitForm('resetCounts')">Clear All Token Counts</button>
          Resets all users' token records to zero.
        </p>
      </fieldset>
    <% } %>
  </div>
</form>

<script>
  let confirmed = false;
  function submitForm(action) {
    if (action === "resetCounts" && !confirmed) {
      document.getElementById("clear-token-counts").innerText = "💣 Confirm Clear All Token Counts";
      alert("⚠️ This will permanently clear token records for all users. If you only want to refresh quotas, use the other button.");
      confirmed = true;
      return;
    }

    document.getElementById("hiddenAction").value = action;
    document.getElementById("maintenanceForm").submit();
  }
</script>

<%- include("partials/admin-footer") %>
|
|||||||
|
<%- include("partials/shared_header", { title: "Users - OAI Reverse Proxy Admin" }) %>
|
||||||
|
<h1>User Token List</h1>
|
||||||
|
|
||||||
|
<% if (users.length === 0) { %>
|
||||||
|
<p>No users found.</p>
|
||||||
|
<% } else { %>
|
||||||
|
<input type="checkbox" id="toggle-nicknames" onchange="toggleNicknames()" />
|
||||||
|
<label for="toggle-nicknames">Show Nicknames</label>
|
||||||
|
<table>
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>User</th>
|
||||||
|
<th <% if (sort.includes("ip")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=ip">IPs</a></th>
|
||||||
|
<th <% if (sort.includes("promptCount")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=promptCount">Prompts</a></th>
|
||||||
|
<th <% if (sort.includes("sumCost")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=sumCost">Usage</a></th>
|
||||||
|
<th>Type</th>
|
||||||
|
<th <% if (sort.includes("createdAt")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=createdAt">Created (UTC)</a></th>
|
||||||
|
<th <% if (sort.includes("lastUsedAt")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=lastUsedAt">Last Used (UTC)</a></th>
|
||||||
|
<th colspan="2">Banned?</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
<% users.forEach(function(user){ %>
|
||||||
|
<tr>
|
||||||
|
<td>
|
||||||
|
<a href="/admin/manage/view-user/<%= user.token %>">
|
||||||
|
<code class="usertoken"><%= user.token %></code>
|
||||||
|
<% if (user.nickname) { %>
|
||||||
|
<span class="nickname" style="display: none"><%= user.nickname %></span>
|
||||||
|
<% } else { %>
|
||||||
|
<code class="nickname" style="display: none"><%= "..." + user.token.slice(-5) %></code>
|
||||||
|
<% } %>
|
||||||
|
</a>
|
||||||
|
</td>
|
||||||
|
<td><%= user.ip.length %></td>
|
||||||
|
<td><%= user.promptCount %></td>
|
||||||
|
<td><%= user.prettyUsage %></td>
|
||||||
|
<td><%= user.type %></td>
|
||||||
|
<td><%= user.createdAt %></td>
|
||||||
|
<td><%= user.lastUsedAt ?? "never" %></td>
|
||||||
|
<td class="actions">
|
||||||
|
<% if (user.disabledAt) { %>
|
||||||
|
<a title="Unban" href="#" class="unban" data-token="<%= user.token %>">🔄️</a>
|
||||||
|
<% } else { %>
|
||||||
|
<a title="Ban" href="#" class="ban" data-token="<%= user.token %>">🚫</a>
|
||||||
|
<% } %>
|
||||||
|
<td><%= user.disabledAt ? "Yes" : "No" %> <%= user.disabledReason ? `(${user.disabledReason})` : "" %></td>
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
<% }); %>
|
||||||
|
</table>
|
||||||
|
<ul class="pagination">
|
||||||
|
<% if (page > 1) { %>
|
||||||
|
<li><a href="/admin/manage/list-users?sort=<%= sort %>&page=<%= page - 1 %>">«</a></li>
|
||||||
|
<% } %> <% for (var i = 1; i <= pageCount; i++) { %>
|
||||||
|
<li <% if (i === page) { %>class="active"<% } %>><a href="/admin/manage/list-users?sort=<%= sort %>&page=<%= i %>"><%= i %></a></li>
|
||||||
|
<% } %> <% if (page < pageCount) { %>
|
||||||
|
<li><a href="/admin/manage/list-users?sort=<%= sort %>&page=<%= page + 1 %>">»</a></li>
|
||||||
|
<% } %>
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
<p>Showing <%= page * pageSize - pageSize + 1 %> to <%= users.length + page * pageSize - pageSize %> of <%= totalCount %> users.</p>
|
||||||
|
<%- include("partials/shared_pagination") %>
|
||||||
|
<% } %>
|
||||||
|
|
||||||
|
<script>
|
||||||
|
function toggleNicknames() {
|
||||||
|
const checked = document.getElementById("toggle-nicknames").checked;
|
||||||
|
const visibleSelector = checked ? ".nickname" : ".usertoken";
|
||||||
|
const hiddenSelector = checked ? ".usertoken" : ".nickname";
|
||||||
|
document.querySelectorAll(visibleSelector).forEach(function (el) {
|
||||||
|
el.style.display = "inline";
|
||||||
|
});
|
||||||
|
document.querySelectorAll(hiddenSelector).forEach(function (el) {
|
||||||
|
el.style.display = "none";
|
||||||
|
});
|
||||||
|
localStorage.setItem("showNicknames", checked);
|
||||||
|
}
|
||||||
|
|
||||||
|
const state = localStorage.getItem("showNicknames") === "true";
|
||||||
|
document.getElementById("toggle-nicknames").checked = state;
|
||||||
|
toggleNicknames();
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<%- include("partials/admin-ban-xhr-script") %>
|
||||||
|
|
||||||
|
<%- include("partials/admin-footer") %>
|
||||||
@@ -0,0 +1,10 @@
<%- include("partials/shared_header", { title: "Login" }) %>
<h1>Login</h1>
<form action="/admin/login" method="post">
  <input type="hidden" name="_csrf" value="<%= csrfToken %>" />
  <label for="token">Admin Key</label>
  <input type="password" name="token" />
  <input type="submit" value="Login" />
</form>
</body>
</html>
@@ -0,0 +1,147 @@
|
|||||||
|
<%- include("partials/shared_header", { title: "View User - OAI Reverse Proxy Admin" }) %>
|
||||||
|
<h1>View User</h1>
|
||||||
|
|
||||||
|
<table class="striped">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th scope="col">Key</th>
|
||||||
|
<th scope="col" colspan="2">Value</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
<tr>
|
||||||
|
<th scope="row">Token</th>
|
||||||
|
<td colspan="2"><%- user.token %></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<th scope="row">Nickname</th>
|
||||||
|
<td><%- user.nickname ?? "none" %></td>
|
||||||
|
<td class="actions">
|
||||||
|
<a title="Edit" id="edit-nickname" href="#" data-field="nickname" data-token="<%= user.token %>">✏️</a>
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<th scope="row">Type</th>
|
||||||
|
<td><%- user.type %></td>
|
||||||
|
<td class="actions">
|
||||||
|
<a title="Edit" id="edit-type" href="#" data-field="type" data-token="<%= user.token %>">✏️</a>
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<th scope="row">Prompts</th>
|
||||||
|
<td colspan="2"><%- user.promptCount %></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<th scope="row">Created At</th>
|
||||||
|
<td colspan="2"><%- user.createdAt %></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<th scope="row">Last Used At</th>
|
||||||
|
<td colspan="2"><%- user.lastUsedAt || "never" %></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<th scope="row">Disabled At</th>
|
||||||
|
<td><%- user.disabledAt %></td>
|
||||||
|
<td class="actions">
|
||||||
|
<% if (user.disabledAt) { %>
|
||||||
|
<a title="Unban" href="#" class="unban" data-token="<%= user.token %>">🔄️</a>
|
||||||
|
<% } else { %>
|
||||||
|
<a title="Ban" href="#" class="ban" data-token="<%= user.token %>">🚫</a>
|
||||||
|
<% } %>
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<th scope="row">Disabled Reason</th>
|
||||||
|
<td><%- user.disabledReason %></td>
|
||||||
|
<% if (user.disabledAt) { %>
|
||||||
|
<td class="actions">
|
||||||
|
<a title="Edit" id="edit-disabledReason" href="#" data-field="disabledReason"
|
||||||
|
data-token="<%= user.token %>">✏️</a>
|
||||||
|
</td>
|
||||||
|
<% } %>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<th scope="row">IP Address Limit</th>
|
||||||
|
<td><%- (user.maxIps ?? maxIps) || "Unlimited" %></td>
|
||||||
|
+          <td class="actions">
+            <a title="Edit" id="edit-maxIps" href="#" data-field="maxIps" data-token="<%= user.token %>">✏️</a>
+          </td>
+        </tr>
+        <tr>
+          <th scope="row">IPs</th>
+          <td colspan="2"><%- include("partials/shared_user_ip_list", { user, shouldRedact: false }) %></td>
+        </tr>
+        <tr>
+          <th scope="row">Admin Note <span title="Unlike nickname, this is not visible to or editable by the user">🔒</span>
+          </th>
+          <td><%- user.adminNote ?? "none" %></td>
+          <td class="actions">
+            <a title="Edit" id="edit-adminNote" href="#" data-field="adminNote" data-token="<%= user.token %>">✏️</a>
+          </td>
+        </tr>
+        <% if (user.type === "temporary") { %>
+        <tr>
+          <th scope="row">Expires At</th>
+          <td colspan="2"><%- user.expiresAt %></td>
+        </tr>
+        <% } %>
+      </tbody>
+    </table>
+
+    <form style="display:none" id="current-values">
+      <input type="hidden" name="token" value="<%- user.token %>" />
+      <% ["nickname", "type", "disabledAt", "disabledReason", "maxIps", "adminNote"].forEach(function (key) { %>
+      <input type="hidden" name="<%- key %>" value="<%- user[key] %>" />
+      <% }); %>
+    </form>
+
+    <h3>Quota Information</h3>
+    <% if (quotasEnabled) { %>
+    <form action="/admin/manage/refresh-user-quota" method="POST">
+      <input type="hidden" name="token" value="<%- user.token %>" />
+      <input type="hidden" name="_csrf" value="<%- csrfToken %>" />
+      <button type="submit" class="btn btn-primary">Refresh Quotas for User</button>
+    </form>
+    <% } %> <%- include("partials/shared_quota-info", { quota, user }) %>
+
+    <p><a href="/admin/manage/list-users">Back to User List</a></p>
+
+    <script>
+      document.querySelectorAll("td.actions a[data-field]").forEach(function (a) {
+        a.addEventListener("click", function (e) {
+          e.preventDefault();
+          const token = a.dataset.token;
+          const field = a.dataset.field;
+          const existingValue = document.querySelector(`#current-values input[name=${field}]`).value;
+          let value = prompt(`Enter new value for '${field}':`, existingValue);
+          if (value !== null) {
+            if (value === "") {
+              value = null;
+            }
+            fetch(`/admin/manage/edit-user/${token}`, {
+              method: "POST",
+              credentials: "same-origin",
+              body: JSON.stringify({
+                [field]: value,
+                _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content"),
+              }),
+              headers: { "Content-Type": "application/json", Accept: "application/json" },
+            })
+              .then((res) => Promise.all([res.ok, res.json()]))
+              .then(([ok, json]) => {
+                const url = new URL(window.location.href);
+                const params = new URLSearchParams();
+                if (!ok) {
+                  params.set("flash", `error: ${json.error.message}`);
+                } else {
+                  params.set("flash", `success: User's ${field} updated.`);
+                }
+                url.search = params.toString();
+                window.location.assign(url);
+              });
+          }
+        });
+      });
+    </script>
+
+<%- include("partials/admin-ban-xhr-script") %> <%- include("partials/admin-footer") %>
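The inline-edit flow above reports its result through a `flash` query parameter rather than a response body. A minimal sketch of how a page script could consume that parameter (the real consumer lives elsewhere in this repo and may differ; the banner element here is hypothetical):

```ts
// Sketch only: reads "flash" messages of the form "error: ..." / "success: ...".
const flash = new URLSearchParams(window.location.search).get("flash");
if (flash) {
  const [kind, ...rest] = flash.split(": ");
  const banner = document.createElement("p"); // hypothetical banner element
  banner.className = `flash-${kind}`;
  banner.textContent = rest.join(": ");
  document.body.prepend(banner);
}
```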
@@ -0,0 +1,32 @@
+<script>
+  document.querySelectorAll("td.actions a.ban").forEach(function (a) {
+    a.addEventListener("click", function (e) {
+      e.preventDefault();
+      var token = a.getAttribute("data-token");
+      if (confirm("Are you sure you want to ban this user?")) {
+        let reason = prompt("Reason for ban:");
+        fetch("/admin/manage/disable-user/" + token, {
+          method: "POST",
+          credentials: "same-origin",
+          body: JSON.stringify({ reason, _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") }),
+          headers: { "Content-Type": "application/json" },
+        }).then(() => window.location.reload());
+      }
+    });
+  });
+
+  document.querySelectorAll("td.actions a.unban").forEach(function (a) {
+    a.addEventListener("click", function (e) {
+      e.preventDefault();
+      var token = a.getAttribute("data-token");
+      if (confirm("Are you sure you want to unban this user?")) {
+        fetch("/admin/manage/reactivate-user/" + token, {
+          method: "POST",
+          credentials: "same-origin",
+          body: JSON.stringify({ _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") }),
+          headers: { "Content-Type": "application/json" },
+        }).then(() => window.location.reload());
+      }
+    });
+  });
+</script>
@@ -0,0 +1,15 @@
+<hr />
+<footer>
+  <a href="/admin">Index</a> | <a href="/admin/logout">Logout</a>
+</footer>
+<script>
+  document.querySelectorAll("td,time").forEach(function (td) {
+    if (td.innerText.match(/^\d{13}$/)) {
+      if (td.innerText == 0) { td.innerText = "never"; return; } // all-zero epoch means "never"
+      var date = new Date(parseInt(td.innerText));
+      td.innerText = date.toISOString().replace("T", " ").replace(/\.\d+Z$/, "Z");
+    }
+  });
+</script>
+</body>
+</html>
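A quick worked example of the timestamp rewrite in the footer script above, using a hypothetical cell value:

```ts
const raw = "1700000000000"; // hypothetical epoch-millisecond cell contents
const date = new Date(parseInt(raw));
console.log(date.toISOString().replace("T", " ").replace(/\.\d+Z$/, "Z"));
// -> "2023-11-14 22:13:20Z"
```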
@@ -1,15 +1,16 @@
 import dotenv from "dotenv";
 import type firebase from "firebase-admin";
+import path from "path";
 import pino from "pino";
+import type { ModelFamily } from "./shared/models";
+import { MODEL_FAMILIES } from "./shared/models";
 dotenv.config();

-// Can't import the usual logger here because it itself needs the config.
 const startupLogger = pino({ level: "debug" }).child({ module: "startup" });

 const isDev = process.env.NODE_ENV !== "production";

-type PromptLoggingBackend = "google_sheets";
-export type DequeueMode = "fair" | "random" | "none";
+export const DATA_DIR = path.join(__dirname, "..", "data");
+export const USER_ASSETS_DIR = path.join(DATA_DIR, "user-files");

 type Config = {
   /** The port the proxy server will listen on. */
@@ -18,107 +19,176 @@ type Config = {
   openaiKey?: string;
   /** Comma-delimited list of Anthropic API keys. */
   anthropicKey?: string;
+  /** Comma-delimited list of Google PaLM API keys. */
+  googlePalmKey?: string;
+  /**
+   * Comma-delimited list of AWS credentials. Each credential item should be a
+   * colon-delimited list of access key, secret key, and AWS region.
+   *
+   * The credentials must have access to the actions `bedrock:InvokeModel` and
+   * `bedrock:InvokeModelWithResponseStream`. You must also have already
+   * provisioned the necessary models in your AWS account, on the specific
+   * regions specified for each credential. Models are region-specific.
+   *
+   * @example `AWS_CREDENTIALS=access_key_1:secret_key_1:us-east-1,access_key_2:secret_key_2:us-west-2`
+   */
+  awsCredentials?: string;
+  /**
+   * Comma-delimited list of Azure OpenAI credentials. Each credential item
+   * should be a colon-delimited list of Azure resource name, deployment ID, and
+   * API key.
+   *
+   * The resource name is the subdomain in your Azure OpenAI deployment's URL,
+   * e.g. `https://resource-name.openai.azure.com`
+   *
+   * @example `AZURE_CREDENTIALS=resource_name_1:deployment_id_1:api_key_1,resource_name_2:deployment_id_2:api_key_2`
+   */
+  azureCredentials?: string;
   /**
    * The proxy key to require for requests. Only applicable if the user
    * management mode is set to 'proxy_key', and required if so.
-   **/
+   */
   proxyKey?: string;
   /**
-   * The admin key used to access the /admin API. Required if the user
+   * The admin key used to access the /admin API or UI. Required if the user
    * management mode is set to 'user_token'.
-   **/
+   */
   adminKey?: string;
   /**
    * Which user management mode to use.
-   *
-   * `none`: No user management. Proxy is open to all requests with basic
-   * abuse protection.
-   *
-   * `proxy_key`: A specific proxy key must be provided in the Authorization
-   * header to use the proxy.
-   *
-   * `user_token`: Users must be created via the /admin REST API and provide
-   * their personal access token in the Authorization header to use the proxy.
-   * Configure this function and add users via the /admin API.
+   * - `none`: No user management. Proxy is open to all requests with basic
+   *   abuse protection.
+   * - `proxy_key`: A specific proxy key must be provided in the Authorization
+   *   header to use the proxy.
+   * - `user_token`: Users must be created by admins and provide their
+   *   personal access token in the Authorization header to use the proxy.
+   *   Configure this function and add users via the admin API or UI.
    */
   gatekeeper: "none" | "proxy_key" | "user_token";
   /**
    * Persistence layer to use for user management.
-   *
-   * `memory`: Users are stored in memory and are lost on restart (default)
-   *
-   * `firebase_rtdb`: Users are stored in a Firebase Realtime Database; requires
-   * `firebaseKey` and `firebaseRtdbUrl` to be set.
-   **/
+   * - `memory`: Users are stored in memory and are lost on restart (default)
+   * - `firebase_rtdb`: Users are stored in a Firebase Realtime Database;
+   *   requires `firebaseKey` and `firebaseRtdbUrl` to be set.
+   */
   gatekeeperStore: "memory" | "firebase_rtdb";
   /** URL of the Firebase Realtime Database if using the Firebase RTDB store. */
   firebaseRtdbUrl?: string;
-  /** Base64-encoded Firebase service account key if using the Firebase RTDB store. */
+  /**
+   * Base64-encoded Firebase service account key if using the Firebase RTDB
+   * store. Note that you should encode the *entire* JSON key file, not just the
+   * `private_key` field inside it.
+   */
   firebaseKey?: string;
   /**
-   * Maximum number of IPs per user, after which their token is disabled.
+   * Maximum number of IPs allowed per user token.
    * Users with the manually-assigned `special` role are exempt from this limit.
-   * By default, this is 0, meaning that users are not IP-limited.
+   * - Defaults to 0, which means that users are not IP-limited.
    */
   maxIpsPerUser: number;
-  /** Per-IP limit for requests per minute to OpenAI's completions endpoint. */
-  modelRateLimit: number;
+  /**
+   * Whether a user token should be automatically disabled if it exceeds the
+   * `maxIpsPerUser` limit, or if only connections from new IPs are to be rejected.
+   */
+  maxIpsAutoBan: boolean;
+  /** Per-IP limit for requests per minute to text and chat models. */
+  textModelRateLimit: number;
+  /** Per-IP limit for requests per minute to image generation models. */
+  imageModelRateLimit: number;
+  /**
+   * For OpenAI, the maximum number of context tokens (prompt + max output) a
+   * user can request before their request is rejected.
+   * Context limits can help prevent excessive spend.
+   * - Defaults to 0, which means no limit beyond OpenAI's stated maximums.
+   */
+  maxContextTokensOpenAI: number;
+  /**
+   * For Anthropic, the maximum number of context tokens a user can request.
+   * Claude context limits can prevent requests from tying up concurrency slots
+   * for too long, which can lengthen queue times for other users.
+   * - Defaults to 0, which means no limit beyond Anthropic's stated maximums.
+   */
+  maxContextTokensAnthropic: number;
   /** For OpenAI, the maximum number of sampled tokens a user can request. */
   maxOutputTokensOpenAI: number;
   /** For Anthropic, the maximum number of sampled tokens a user can request. */
   maxOutputTokensAnthropic: number;
-  /** Whether requests containing disallowed characters should be rejected. */
-  rejectDisallowed?: boolean;
+  /** Whether requests containing the following phrases should be rejected. */
+  rejectPhrases: string[];
   /** Message to return when rejecting requests. */
-  rejectMessage?: string;
-  /** Pino log level. */
-  logLevel?: "debug" | "info" | "warn" | "error";
+  rejectMessage: string;
+  /** Verbosity level of diagnostic logging. */
+  logLevel: "trace" | "debug" | "info" | "warn" | "error";
+  /**
+   * Whether to allow the usage of AWS credentials which could be logging users'
+   * model invocations. By default, such keys are treated as if they were
+   * disabled because users may not be aware that their usage is being logged.
+   *
+   * Some credentials do not have the policy attached that allows the proxy to
+   * confirm logging status, in which case the proxy assumes that logging could
+   * be enabled and will refuse to use the key. If you still want to use such a
+   * key and can't attach the policy, you can set this to true.
+   */
+  allowAwsLogging?: boolean;
   /** Whether prompts and responses should be logged to persistent storage. */
   promptLogging?: boolean;
   /** Which prompt logging backend to use. */
-  promptLoggingBackend?: PromptLoggingBackend;
+  promptLoggingBackend?: "google_sheets";
   /** Base64-encoded Google Sheets API key. */
   googleSheetsKey?: string;
   /** Google Sheets spreadsheet ID. */
   googleSheetsSpreadsheetId?: string;
   /** Whether to periodically check keys for usage and validity. */
-  checkKeys?: boolean;
-  /**
-   * How to display quota information on the info page.
-   *
-   * `none`: Hide quota information
-   *
-   * `partial`: Display quota information only as a percentage
-   *
-   * `full`: Display quota information as usage against total capacity
-   */
-  quotaDisplayMode: "none" | "partial" | "full";
-  /**
-   * Which request queueing strategy to use when keys are over their rate limit.
-   *
-   * `fair`: Requests are serviced in the order they were received (default)
-   *
-   * `random`: Requests are serviced randomly
-   *
-   * `none`: Requests are not queued and users have to retry manually
-   */
-  queueMode: DequeueMode;
+  checkKeys: boolean;
+  /** Whether to publicly show total token costs on the info page. */
+  showTokenCosts: boolean;
   /**
    * Comma-separated list of origins to block. Requests matching any of these
    * origins or referers will be rejected.
-   * Partial matches are allowed, so `reddit` will match `www.reddit.com`.
-   * Include only the hostname, not the protocol or path, e.g:
+   * - Partial matches are allowed, so `reddit` will match `www.reddit.com`.
+   * - Include only the hostname, not the protocol or path, e.g:
    * `reddit.com,9gag.com,gaiaonline.com`
    */
   blockedOrigins?: string;
-  /**
-   * Message to return when rejecting requests from blocked origins.
-   */
+  /** Message to return when rejecting requests from blocked origins. */
   blockMessage?: string;
-  /**
-   * Desination URL to redirect blocked requests to, for non-JSON requests.
-   */
+  /** Destination URL to redirect blocked requests to, for non-JSON requests. */
   blockRedirect?: string;
+  /** Which model families to allow requests for. Applies only to OpenAI. */
+  allowedModelFamilies: ModelFamily[];
+  /**
+   * The number of (LLM) tokens a user can consume before requests are rejected.
+   * Limits include both prompt and response tokens. `special` users are exempt.
+   * - Defaults to 0, which means no limit.
+   * - Changes are not automatically applied to existing users. Use the
+   *   admin API or UI to update existing users, or use the QUOTA_REFRESH_PERIOD
+   *   setting to periodically set all users' quotas to these values.
+   */
+  tokenQuota: { [key in ModelFamily]: number };
+  /**
+   * The period over which to enforce token quotas. Quotas will be fully reset
+   * at the start of each period, server time. Unused quota does not roll over.
+   * You can also provide a cron expression for a custom schedule.
+   * - Defaults to unset, which means quotas will never automatically refresh.
+   */
+  quotaRefreshPeriod?: "hourly" | "daily" | string;
+  /** Whether to allow users to change their own nicknames via the UI. */
+  allowNicknameChanges: boolean;
+  /** Whether to show recent DALL-E image generations on the homepage. */
+  showRecentImages: boolean;
+  /**
+   * If true, cookies will be set without the `Secure` attribute, allowing
+   * the admin UI to be used over HTTP.
+   */
+  useInsecureCookies: boolean;
+  /**
+   * Whether to use a more minimal public Service Info page with static content.
+   * Disables all stats pertaining to traffic, prompt/token usage, and queues.
+   * The full info page will appear if you have signed in as an admin using the
+   * configured ADMIN_KEY and go to /admin/service-info.
+   **/
+  staticServiceInfo?: boolean;
 };

 // To change configs, create a file called .env in the root directory.
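The `awsCredentials` and `azureCredentials` comments above specify comma-delimited lists of colon-delimited items. A minimal sketch of a parser for the documented AWS format (the function and type names are assumptions; the proxy's actual key store may parse this differently):

```ts
// Sketch only: parses the documented AWS_CREDENTIALS format
// "accessKey:secretKey:region,accessKey2:secretKey2:region2".
type AwsCredential = { accessKeyId: string; secretAccessKey: string; region: string };

function parseAwsCredentials(raw: string): AwsCredential[] {
  if (!raw) return [];
  return raw.split(",").map((item) => {
    const [accessKeyId, secretAccessKey, region] = item.trim().split(":");
    if (!accessKeyId || !secretAccessKey || !region) {
      throw new Error(`Invalid AWS credential item: ${item}`);
    }
    return { accessKeyId, secretAccessKey, region };
  });
}
```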
@@ -127,27 +197,54 @@ export const config: Config = {
   port: getEnvWithDefault("PORT", 7860),
   openaiKey: getEnvWithDefault("OPENAI_KEY", ""),
   anthropicKey: getEnvWithDefault("ANTHROPIC_KEY", ""),
+  googlePalmKey: getEnvWithDefault("GOOGLE_PALM_KEY", ""),
+  awsCredentials: getEnvWithDefault("AWS_CREDENTIALS", ""),
+  azureCredentials: getEnvWithDefault("AZURE_CREDENTIALS", ""),
   proxyKey: getEnvWithDefault("PROXY_KEY", ""),
   adminKey: getEnvWithDefault("ADMIN_KEY", ""),
   gatekeeper: getEnvWithDefault("GATEKEEPER", "none"),
   gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory"),
   maxIpsPerUser: getEnvWithDefault("MAX_IPS_PER_USER", 0),
+  maxIpsAutoBan: getEnvWithDefault("MAX_IPS_AUTO_BAN", true),
   firebaseRtdbUrl: getEnvWithDefault("FIREBASE_RTDB_URL", undefined),
   firebaseKey: getEnvWithDefault("FIREBASE_KEY", undefined),
-  modelRateLimit: getEnvWithDefault("MODEL_RATE_LIMIT", 4),
-  maxOutputTokensOpenAI: getEnvWithDefault("MAX_OUTPUT_TOKENS_OPENAI", 300),
-  maxOutputTokensAnthropic: getEnvWithDefault(
-    "MAX_OUTPUT_TOKENS_ANTHROPIC",
-    600
+  textModelRateLimit: getEnvWithDefault("TEXT_MODEL_RATE_LIMIT", 4),
+  imageModelRateLimit: getEnvWithDefault("IMAGE_MODEL_RATE_LIMIT", 4),
+  maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 16384),
+  maxContextTokensAnthropic: getEnvWithDefault(
+    "MAX_CONTEXT_TOKENS_ANTHROPIC",
+    0
   ),
-  rejectDisallowed: getEnvWithDefault("REJECT_DISALLOWED", false),
+  maxOutputTokensOpenAI: getEnvWithDefault(
+    ["MAX_OUTPUT_TOKENS_OPENAI", "MAX_OUTPUT_TOKENS"],
+    400
+  ),
+  maxOutputTokensAnthropic: getEnvWithDefault(
+    ["MAX_OUTPUT_TOKENS_ANTHROPIC", "MAX_OUTPUT_TOKENS"],
+    400
+  ),
+  allowedModelFamilies: getEnvWithDefault("ALLOWED_MODEL_FAMILIES", [
+    "turbo",
+    "gpt4",
+    "gpt4-32k",
+    "gpt4-turbo",
+    "claude",
+    "bison",
+    "aws-claude",
+    "azure-turbo",
+    "azure-gpt4",
+    "azure-gpt4-turbo",
+    "azure-gpt4-32k",
+  ]),
+  rejectPhrases: parseCsv(getEnvWithDefault("REJECT_PHRASES", "")),
   rejectMessage: getEnvWithDefault(
     "REJECT_MESSAGE",
     "This content violates /aicg/'s acceptable use policy."
   ),
   logLevel: getEnvWithDefault("LOG_LEVEL", "info"),
   checkKeys: getEnvWithDefault("CHECK_KEYS", !isDev),
-  quotaDisplayMode: getEnvWithDefault("QUOTA_DISPLAY_MODE", "partial"),
+  showTokenCosts: getEnvWithDefault("SHOW_TOKEN_COSTS", false),
+  allowAwsLogging: getEnvWithDefault("ALLOW_AWS_LOGGING", false),
   promptLogging: getEnvWithDefault("PROMPT_LOGGING", false),
   promptLoggingBackend: getEnvWithDefault("PROMPT_LOGGING_BACKEND", undefined),
   googleSheetsKey: getEnvWithDefault("GOOGLE_SHEETS_KEY", undefined),
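Note the list form of `getEnvWithDefault` used above: the first environment variable in the list that is set wins, so the deprecated `MAX_OUTPUT_TOKENS` still works as a fallback. A minimal sketch of that lookup order (`firstEnv` is a hypothetical name):

```ts
function firstEnv(names: string[], fallback: number): number {
  // The first name that is actually set in the environment wins.
  const raw = names.map((n) => process.env[n]).find((v) => v !== undefined);
  return raw === undefined ? fallback : parseInt(raw, 10);
}

firstEnv(["MAX_OUTPUT_TOKENS_OPENAI", "MAX_OUTPUT_TOKENS"], 400);
// -> the new variable if set, else the deprecated one, else the default 400
```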
@@ -155,73 +252,79 @@ export const config: Config = {
     "GOOGLE_SHEETS_SPREADSHEET_ID",
     undefined
   ),
-  queueMode: getEnvWithDefault("QUEUE_MODE", "fair"),
   blockedOrigins: getEnvWithDefault("BLOCKED_ORIGINS", undefined),
   blockMessage: getEnvWithDefault(
     "BLOCK_MESSAGE",
     "You must be over the age of majority in your country to use this service."
   ),
   blockRedirect: getEnvWithDefault("BLOCK_REDIRECT", "https://www.9gag.com"),
+  tokenQuota: MODEL_FAMILIES.reduce(
+    (acc, family: ModelFamily) => {
+      acc[family] = getEnvWithDefault(
+        `TOKEN_QUOTA_${family.toUpperCase().replace(/-/g, "_")}`,
+        0
+      ) as number;
+      return acc;
+    },
+    {} as { [key in ModelFamily]: number }
+  ),
+  quotaRefreshPeriod: getEnvWithDefault("QUOTA_REFRESH_PERIOD", undefined),
+  allowNicknameChanges: getEnvWithDefault("ALLOW_NICKNAME_CHANGES", true),
+  showRecentImages: getEnvWithDefault("SHOW_RECENT_IMAGES", true),
+  useInsecureCookies: getEnvWithDefault("USE_INSECURE_COOKIES", isDev),
+  staticServiceInfo: getEnvWithDefault("STATIC_SERVICE_INFO", false),
 } as const;

-function migrateConfigs() {
-  let migrated = false;
-  const deprecatedMax = process.env.MAX_OUTPUT_TOKENS;
-
-  if (!process.env.MAX_OUTPUT_TOKENS_OPENAI && deprecatedMax) {
-    migrated = true;
-    config.maxOutputTokensOpenAI = parseInt(deprecatedMax);
-  }
-  if (!process.env.MAX_OUTPUT_TOKENS_ANTHROPIC && deprecatedMax) {
-    migrated = true;
-    config.maxOutputTokensAnthropic = parseInt(deprecatedMax);
-  }
-
-  if (migrated) {
-    startupLogger.warn(
-      {
-        MAX_OUTPUT_TOKENS: deprecatedMax,
-        MAX_OUTPUT_TOKENS_OPENAI: config.maxOutputTokensOpenAI,
-        MAX_OUTPUT_TOKENS_ANTHROPIC: config.maxOutputTokensAnthropic,
-      },
-      "`MAX_OUTPUT_TOKENS` has been replaced with separate `MAX_OUTPUT_TOKENS_OPENAI` and `MAX_OUTPUT_TOKENS_ANTHROPIC` configs. You should update your .env file to remove `MAX_OUTPUT_TOKENS` and set the new configs."
-    );
-  }
-}
+function generateCookieSecret() {
+  if (process.env.COOKIE_SECRET !== undefined) {
+    return process.env.COOKIE_SECRET;
+  }
+
+  const seed = "" + config.adminKey + config.openaiKey + config.anthropicKey;
+  const crypto = require("crypto");
+  return crypto.createHash("sha256").update(seed).digest("hex");
+}

-/** Prevents the server from starting if config state is invalid. */
-export async function assertConfigIsValid() {
-  migrateConfigs();
+export const COOKIE_SECRET = generateCookieSecret();
+
+export async function assertConfigIsValid() {
+  if (process.env.MODEL_RATE_LIMIT !== undefined) {
+    const limit =
+      parseInt(process.env.MODEL_RATE_LIMIT, 10) || config.textModelRateLimit;
+
+    config.textModelRateLimit = limit;
+    config.imageModelRateLimit = Math.max(Math.floor(limit / 2), 1);
+
+    startupLogger.warn(
+      { textLimit: limit, imageLimit: config.imageModelRateLimit },
+      "MODEL_RATE_LIMIT is deprecated. Use TEXT_MODEL_RATE_LIMIT and IMAGE_MODEL_RATE_LIMIT instead."
+    );
+  }

-  // Ensure gatekeeper mode is valid.
   if (!["none", "proxy_key", "user_token"].includes(config.gatekeeper)) {
     throw new Error(
       `Invalid gatekeeper mode: ${config.gatekeeper}. Must be one of: none, proxy_key, user_token.`
     );
   }

-  // Don't allow `user_token` mode without `ADMIN_KEY`.
   if (config.gatekeeper === "user_token" && !config.adminKey) {
     throw new Error(
       "`user_token` gatekeeper mode requires an `ADMIN_KEY` to be set."
     );
   }

-  // Don't allow `proxy_key` mode without `PROXY_KEY`.
   if (config.gatekeeper === "proxy_key" && !config.proxyKey) {
     throw new Error(
       "`proxy_key` gatekeeper mode requires a `PROXY_KEY` to be set."
     );
   }

-  // Don't allow `PROXY_KEY` to be set for other modes.
   if (config.gatekeeper !== "proxy_key" && config.proxyKey) {
     throw new Error(
       "`PROXY_KEY` is set, but gatekeeper mode is not `proxy_key`. Make sure to set `GATEKEEPER=proxy_key`."
     );
   }

-  // Require appropriate firebase config if using firebase store.
   if (
     config.gatekeeperStore === "firebase_rtdb" &&
     (!config.firebaseKey || !config.firebaseRtdbUrl)
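The `tokenQuota` reducer above derives one environment variable name per model family. A worked example of the name it produces:

```ts
const family = "gpt4-32k";
const envName = `TOKEN_QUOTA_${family.toUpperCase().replace(/-/g, "_")}`;
console.log(envName); // "TOKEN_QUOTA_GPT4_32K"
```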
@@ -235,7 +338,8 @@ export async function assertConfigIsValid() {
   // them to users.
   for (const key of getKeys(config)) {
     const maybeSensitive = ["key", "credentials", "secret", "password"].some(
-      (sensitive) => key.toLowerCase().includes(sensitive)
+      (sensitive) =>
+        key.toLowerCase().includes(sensitive) && !["checkKeys"].includes(key)
     );
     const secured = new Set([...SENSITIVE_KEYS, ...OMITTED_KEYS]);
     if (maybeSensitive && !secured.has(key))
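The added `checkKeys` exemption above matters because the substring heuristic would otherwise flag it. A small demonstration of the updated predicate:

```ts
const looksSensitive = (key: string) =>
  ["key", "credentials", "secret", "password"].some(
    (s) => key.toLowerCase().includes(s) && !["checkKeys"].includes(key)
  );
console.log(looksSensitive("openaiKey")); // true, contains "key"
console.log(looksSensitive("checkKeys")); // false, explicitly exempted
console.log(looksSensitive("port")); // false
```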
@@ -262,10 +366,13 @@ export const OMITTED_KEYS: (keyof Config)[] = [
   "logLevel",
   "openaiKey",
   "anthropicKey",
+  "googlePalmKey",
+  "awsCredentials",
+  "azureCredentials",
   "proxyKey",
   "adminKey",
-  "checkKeys",
-  "quotaDisplayMode",
+  "rejectPhrases",
+  "showTokenCosts",
   "googleSheetsKey",
   "firebaseKey",
   "firebaseRtdbUrl",
@@ -274,14 +381,20 @@ export const OMITTED_KEYS: (keyof Config)[] = [
   "blockedOrigins",
   "blockMessage",
   "blockRedirect",
+  "allowNicknameChanges",
+  "showRecentImages",
+  "useInsecureCookies",
+  "staticServiceInfo",
+  "checkKeys",
+  "allowedModelFamilies",
 ];

 const getKeys = Object.keys as <T extends object>(obj: T) => Array<keyof T>;

-export function listConfig(): Record<string, string> {
-  const result: Record<string, string> = {};
-  for (const key of getKeys(config)) {
-    const value = config[key]?.toString() || "";
+export function listConfig(obj: Config = config): Record<string, any> {
+  const result: Record<string, any> = {};
+  for (const key of getKeys(obj)) {
+    const value = obj[key]?.toString() || "";

     const shouldOmit =
       OMITTED_KEYS.includes(key) || value === "" || value === "undefined";
@@ -296,19 +409,43 @@ export function listConfig(): Record<string, string> {
     } else {
       result[key] = value;
     }
+
+    if (typeof obj[key] === "object" && !Array.isArray(obj[key])) {
+      result[key] = listConfig(obj[key] as unknown as Config);
+    }
   }
   return result;
 }

-function getEnvWithDefault<T>(name: string, defaultValue: T): T {
-  const value = process.env[name];
+/**
+ * Tries to get a config value from one or more environment variables (in
+ * order), falling back to a default value if none are set.
+ */
+function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
+  const value = Array.isArray(env)
+    ? env.map((e) => process.env[e]).find((v) => v !== undefined)
+    : process.env[env];
   if (value === undefined) {
     return defaultValue;
   }
   try {
-    if (name === "OPENAI_KEY" || name === "ANTHROPIC_KEY") {
+    if (
+      [
+        "OPENAI_KEY",
+        "ANTHROPIC_KEY",
+        "GOOGLE_PALM_KEY",
+        "AWS_CREDENTIALS",
+        "AZURE_CREDENTIALS",
+      ].includes(String(env))
+    ) {
       return value as unknown as T;
     }
+
+    // Intended to be used for comma-delimited lists
+    if (Array.isArray(defaultValue)) {
+      return value.split(",").map((v) => v.trim()) as T;
+    }
+
     return JSON.parse(value) as T;
   } catch (err) {
     return value as unknown as T;
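With the change above, `getEnvWithDefault` also splits comma-delimited values whenever the supplied default is an array, instead of attempting `JSON.parse`. For example (assuming the variable is set):

```ts
process.env.ALLOWED_MODEL_FAMILIES = "turbo, gpt4";
// getEnvWithDefault("ALLOWED_MODEL_FAMILIES", [...]) then resolves to:
console.log("turbo, gpt4".split(",").map((v) => v.trim())); // ["turbo", "gpt4"]
```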
@@ -340,3 +477,11 @@ export function getFirebaseApp(): firebase.app.App {
   }
   return firebaseApp;
 }
+
+function parseCsv(val: string): string[] {
+  if (!val) return [];
+
+  const regex = /(".*?"|[^",]+)(?=\s*,|\s*$)/g;
+  const matches = val.match(regex) || [];
+  return matches.map((item) => item.replace(/^"|"$/g, "").trim());
+}
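`parseCsv` above honors double quotes so that a reject phrase may itself contain commas. A quick trace of its regex:

```ts
const regex = /(".*?"|[^",]+)(?=\s*,|\s*$)/g;
const val = 'foo, "bar, baz", qux';
const matches = val.match(regex) || [];
console.log(matches.map((m) => m.replace(/^"|"$/g, "").trim()));
// -> ["foo", "bar, baz", "qux"]
```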
@@ -1,52 +1,159 @@
+/** This whole module really sucks */
 import fs from "fs";
 import { Request, Response } from "express";
 import showdown from "showdown";
 import { config, listConfig } from "./config";
-import { keyPool } from "./key-management";
-import { getUniqueIps } from "./proxy/rate-limit";
 import {
-  QueuePartition,
-  getEstimatedWaitTime,
-  getQueueLength,
-} from "./proxy/queue";
+  AnthropicKey,
+  AwsBedrockKey,
+  AzureOpenAIKey,
+  GooglePalmKey,
+  keyPool,
+  OpenAIKey,
+} from "./shared/key-management";
+import {
+  AzureOpenAIModelFamily,
+  ModelFamily,
+  OpenAIModelFamily,
+} from "./shared/models";
+import { getUniqueIps } from "./proxy/rate-limit";
+import { getEstimatedWaitTime, getQueueLength } from "./proxy/queue";
+import { getTokenCostUsd, prettyTokens } from "./shared/stats";
+import { assertNever } from "./shared/utils";
+import { getLastNImages } from "./shared/file-storage/image-history";

-const INFO_PAGE_TTL = 5000;
+const INFO_PAGE_TTL = 2000;
 let infoPageHtml: string | undefined;
 let infoPageLastUpdated = 0;

+type KeyPoolKey = ReturnType<typeof keyPool.list>[0];
+const keyIsOpenAIKey = (k: KeyPoolKey): k is OpenAIKey =>
+  k.service === "openai";
+const keyIsAzureKey = (k: KeyPoolKey): k is AzureOpenAIKey =>
+  k.service === "azure";
+const keyIsAnthropicKey = (k: KeyPoolKey): k is AnthropicKey =>
+  k.service === "anthropic";
+const keyIsGooglePalmKey = (k: KeyPoolKey): k is GooglePalmKey =>
+  k.service === "google-palm";
+const keyIsAwsKey = (k: KeyPoolKey): k is AwsBedrockKey => k.service === "aws";
+
+type ModelAggregates = {
+  active: number;
+  trial?: number;
+  revoked?: number;
+  overQuota?: number;
+  pozzed?: number;
+  awsLogged?: number;
+  queued: number;
+  queueTime: string;
+  tokens: number;
+};
+type ModelAggregateKey = `${ModelFamily}__${keyof ModelAggregates}`;
+type ServiceAggregates = {
+  status?: string;
+  openaiKeys?: number;
+  openaiOrgs?: number;
+  anthropicKeys?: number;
+  palmKeys?: number;
+  awsKeys?: number;
+  azureKeys?: number;
+  proompts: number;
+  tokens: number;
+  tokenCost: number;
+  openAiUncheckedKeys?: number;
+  anthropicUncheckedKeys?: number;
+} & {
+  [modelFamily in ModelFamily]?: ModelAggregates;
+};
+
+const modelStats = new Map<ModelAggregateKey, number>();
+const serviceStats = new Map<keyof ServiceAggregates, number>();
+
 export const handleInfoPage = (req: Request, res: Response) => {
   if (infoPageLastUpdated + INFO_PAGE_TTL > Date.now()) {
-    res.send(infoPageHtml);
-    return;
+    return res.send(infoPageHtml);
   }

-  // Sometimes huggingface doesn't send the host header and makes us guess.
   const baseUrl =
     process.env.SPACE_ID && !req.get("host")?.includes("hf.space")
       ? getExternalUrlForHuggingfaceSpaceId(process.env.SPACE_ID)
       : req.protocol + "://" + req.get("host");

-  res.send(cacheInfoPageHtml(baseUrl));
+  infoPageHtml = buildInfoPageHtml(baseUrl + "/proxy");
+  infoPageLastUpdated = Date.now();
+
+  res.send(infoPageHtml);
 };

-function cacheInfoPageHtml(baseUrl: string) {
+function getCostString(cost: number) {
+  if (!config.showTokenCosts) return "";
+  return ` ($${cost.toFixed(2)})`;
+}
+
+export function buildInfoPageHtml(baseUrl: string, asAdmin = false) {
   const keys = keyPool.list();
+  const hideFullInfo = config.staticServiceInfo && !asAdmin;

-  const openaiKeys = keys.filter((k) => k.service === "openai").length;
-  const anthropicKeys = keys.filter((k) => k.service === "anthropic").length;
-
-  const info = {
-    uptime: process.uptime(),
-    endpoints: {
-      ...(openaiKeys ? { openai: baseUrl + "/proxy/openai" } : {}),
-      ...(anthropicKeys ? { anthropic: baseUrl + "/proxy/anthropic" } : {}),
-    },
-    proompts: keys.reduce((acc, k) => acc + k.promptCount, 0),
-    ...(config.modelRateLimit ? { proomptersNow: getUniqueIps() } : {}),
-    openaiKeys,
-    anthropicKeys,
+  modelStats.clear();
+  serviceStats.clear();
+  keys.forEach(addKeyToAggregates);
+
+  const openaiKeys = serviceStats.get("openaiKeys") || 0;
+  const anthropicKeys = serviceStats.get("anthropicKeys") || 0;
+  const palmKeys = serviceStats.get("palmKeys") || 0;
+  const awsKeys = serviceStats.get("awsKeys") || 0;
+  const azureKeys = serviceStats.get("azureKeys") || 0;
+  const proompts = serviceStats.get("proompts") || 0;
+  const tokens = serviceStats.get("tokens") || 0;
+  const tokenCost = serviceStats.get("tokenCost") || 0;
+
+  const allowDalle = config.allowedModelFamilies.includes("dall-e");
+
+  const endpoints = {
+    ...(openaiKeys ? { openai: baseUrl + "/openai" } : {}),
+    ...(openaiKeys ? { openai2: baseUrl + "/openai/turbo-instruct" } : {}),
+    ...(openaiKeys && allowDalle
+      ? { ["openai-image"]: baseUrl + "/openai-image" }
+      : {}),
+    ...(anthropicKeys ? { anthropic: baseUrl + "/anthropic" } : {}),
+    ...(palmKeys ? { "google-palm": baseUrl + "/google-palm" } : {}),
+    ...(awsKeys ? { aws: baseUrl + "/aws/claude" } : {}),
+    ...(azureKeys ? { azure: baseUrl + "/azure/openai" } : {}),
+  };
+
+  const stats = {
+    proompts,
+    tookens: `${prettyTokens(tokens)}${getCostString(tokenCost)}`,
+    ...(config.textModelRateLimit ? { proomptersNow: getUniqueIps() } : {}),
+  };
+
+  const keyInfo = { openaiKeys, anthropicKeys, palmKeys, awsKeys, azureKeys };
+  for (const key of Object.keys(keyInfo)) {
+    if (!(keyInfo as any)[key]) delete (keyInfo as any)[key];
+  }
+
+  const providerInfo = {
     ...(openaiKeys ? getOpenAIInfo() : {}),
     ...(anthropicKeys ? getAnthropicInfo() : {}),
+    ...(palmKeys ? getPalmInfo() : {}),
+    ...(awsKeys ? getAwsInfo() : {}),
+    ...(azureKeys ? getAzureInfo() : {}),
+  };
+
+  if (hideFullInfo) {
+    for (const provider of Object.keys(providerInfo)) {
+      delete (providerInfo as any)[provider].proomptersInQueue;
+      delete (providerInfo as any)[provider].estimatedQueueTime;
+      delete (providerInfo as any)[provider].usage;
+    }
+  }
+
+  const info = {
+    uptime: Math.floor(process.uptime()),
+    endpoints,
+    ...(hideFullInfo ? {} : stats),
+    ...keyInfo,
+    ...providerInfo,
     config: listConfig(),
     build: process.env.BUILD_INFO || "dev",
   };
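The new aggregation layer above replaces per-provider filtering with two flat `Map`s whose keys are template-literal strings. A minimal self-contained sketch of the same pattern:

```ts
type Family = "turbo" | "claude";
type StatKey = `${Family}__${"active" | "tokens"}`;

const stats = new Map<StatKey, number>();
const bump = (key: StatKey, delta = 1) =>
  stats.set(key, (stats.get(key) || 0) + delta);

bump("turbo__active");
bump("turbo__tokens", 1250);
console.log(stats.get("turbo__tokens")); // 1250
```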
@@ -54,7 +161,7 @@ function cacheInfoPageHtml(baseUrl: string) {
   const title = getServerTitle();
   const headerHtml = buildInfoPageHeader(new showdown.Converter(), title);

-  const pageBody = `<!DOCTYPE html>
+  return `<!DOCTYPE html>
 <html lang="en">
 <head>
   <meta charset="utf-8" />
@@ -66,161 +173,400 @@ function cacheInfoPageHtml(baseUrl: string) {
|
|||||||
<hr />
|
<hr />
|
||||||
<h2>Service Info</h2>
|
<h2>Service Info</h2>
|
||||||
<pre>${JSON.stringify(info, null, 2)}</pre>
|
<pre>${JSON.stringify(info, null, 2)}</pre>
|
||||||
|
${getSelfServiceLinks()}
|
||||||
</body>
|
</body>
|
||||||
</html>`;
|
</html>`;
|
||||||
|
|
||||||
infoPageHtml = pageBody;
|
|
||||||
infoPageLastUpdated = Date.now();
|
|
||||||
|
|
||||||
return pageBody;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type ServiceInfo = {
|
function getUniqueOpenAIOrgs(keys: KeyPoolKey[]) {
|
||||||
activeKeys: number;
|
const orgIds = new Set(
|
||||||
trialKeys?: number;
|
keys.filter((k) => k.service === "openai").map((k: any) => k.organizationId)
|
||||||
quota: string;
|
);
|
||||||
proomptersInQueue: number;
|
return orgIds.size;
|
||||||
estimatedQueueTime: string;
|
}
|
||||||
};
|
|
||||||
|
|
||||||
// this has long since outgrown this awful "dump everything in a <pre> tag" approach
|
function increment<T extends keyof ServiceAggregates | ModelAggregateKey>(
|
||||||
// but I really don't want to spend time on a proper UI for this right now
|
map: Map<T, number>,
|
||||||
|
key: T,
|
||||||
|
delta = 1
|
||||||
|
) {
|
||||||
|
map.set(key, (map.get(key) || 0) + delta);
|
||||||
|
}
|
||||||
|
|
||||||
|
function addKeyToAggregates(k: KeyPoolKey) {
|
||||||
|
increment(serviceStats, "proompts", k.promptCount);
|
||||||
|
increment(serviceStats, "openaiKeys", k.service === "openai" ? 1 : 0);
|
||||||
|
increment(serviceStats, "anthropicKeys", k.service === "anthropic" ? 1 : 0);
|
||||||
|
increment(serviceStats, "palmKeys", k.service === "google-palm" ? 1 : 0);
|
||||||
|
increment(serviceStats, "awsKeys", k.service === "aws" ? 1 : 0);
|
||||||
|
increment(serviceStats, "azureKeys", k.service === "azure" ? 1 : 0);
|
||||||
|
|
||||||
|
let sumTokens = 0;
|
||||||
|
let sumCost = 0;
|
||||||
|
|
||||||
|
switch (k.service) {
|
||||||
|
case "openai":
|
||||||
|
if (!keyIsOpenAIKey(k)) throw new Error("Invalid key type");
|
||||||
|
increment(
|
||||||
|
serviceStats,
|
||||||
|
"openAiUncheckedKeys",
|
||||||
|
Boolean(k.lastChecked) ? 0 : 1
|
||||||
|
);
|
||||||
|
|
||||||
|
k.modelFamilies.forEach((f) => {
|
||||||
|
const tokens = k[`${f}Tokens`];
|
||||||
|
sumTokens += tokens;
|
||||||
|
sumCost += getTokenCostUsd(f, tokens);
|
||||||
|
increment(modelStats, `${f}__tokens`, tokens);
|
||||||
|
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
|
||||||
|
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
|
||||||
|
increment(modelStats, `${f}__trial`, k.isTrial ? 1 : 0);
|
||||||
|
increment(modelStats, `${f}__overQuota`, k.isOverQuota ? 1 : 0);
|
||||||
|
});
|
||||||
|
break;
|
||||||
|
case "azure":
|
||||||
|
if (!keyIsAzureKey(k)) throw new Error("Invalid key type");
|
||||||
|
k.modelFamilies.forEach((f) => {
|
||||||
|
const tokens = k[`${f}Tokens`];
|
||||||
|
sumTokens += tokens;
|
||||||
|
sumCost += getTokenCostUsd(f, tokens);
|
||||||
|
increment(modelStats, `${f}__tokens`, tokens);
|
||||||
|
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
|
||||||
|
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
|
||||||
|
});
|
||||||
|
break;
|
||||||
|
case "anthropic": {
|
||||||
|
if (!keyIsAnthropicKey(k)) throw new Error("Invalid key type");
|
||||||
|
const family = "claude";
|
||||||
|
sumTokens += k.claudeTokens;
|
||||||
|
sumCost += getTokenCostUsd(family, k.claudeTokens);
|
||||||
|
increment(modelStats, `${family}__active`, k.isDisabled ? 0 : 1);
|
||||||
|
increment(modelStats, `${family}__revoked`, k.isRevoked ? 1 : 0);
|
||||||
|
increment(modelStats, `${family}__tokens`, k.claudeTokens);
|
||||||
|
increment(modelStats, `${family}__pozzed`, k.isPozzed ? 1 : 0);
|
||||||
|
increment(
|
||||||
|
serviceStats,
|
||||||
|
"anthropicUncheckedKeys",
|
||||||
|
Boolean(k.lastChecked) ? 0 : 1
|
||||||
|
);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case "google-palm": {
|
||||||
|
if (!keyIsGooglePalmKey(k)) throw new Error("Invalid key type");
|
||||||
|
const family = "bison";
|
||||||
|
sumTokens += k.bisonTokens;
|
||||||
|
sumCost += getTokenCostUsd(family, k.bisonTokens);
|
||||||
|
increment(modelStats, `${family}__active`, k.isDisabled ? 0 : 1);
|
||||||
|
increment(modelStats, `${family}__revoked`, k.isRevoked ? 1 : 0);
|
||||||
|
increment(modelStats, `${family}__tokens`, k.bisonTokens);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case "aws": {
|
||||||
|
if (!keyIsAwsKey(k)) throw new Error("Invalid key type");
|
||||||
|
const family = "aws-claude";
|
||||||
|
sumTokens += k["aws-claudeTokens"];
|
||||||
|
sumCost += getTokenCostUsd(family, k["aws-claudeTokens"]);
|
||||||
|
increment(modelStats, `${family}__active`, k.isDisabled ? 0 : 1);
|
||||||
|
increment(modelStats, `${family}__revoked`, k.isRevoked ? 1 : 0);
|
||||||
|
increment(modelStats, `${family}__tokens`, k["aws-claudeTokens"]);
|
||||||
|
|
||||||
|
// Ignore revoked keys for aws logging stats, but include keys where the
|
||||||
|
// logging status is unknown.
|
||||||
|
const countAsLogged =
|
||||||
|
k.lastChecked && !k.isDisabled && k.awsLoggingStatus !== "disabled";
|
||||||
|
increment(modelStats, `${family}__awsLogged`, countAsLogged ? 1 : 0);
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
assertNever(k.service);
|
||||||
|
}
|
||||||
|
|
||||||
|
increment(serviceStats, "tokens", sumTokens);
|
||||||
|
increment(serviceStats, "tokenCost", sumCost);
|
||||||
|
}
|
||||||
|
|
||||||
function getOpenAIInfo() {
|
function getOpenAIInfo() {
|
||||||
const info: { [model: string]: Partial<ServiceInfo> } = {};
|
const info: { status?: string; openaiKeys?: number; openaiOrgs?: number } & {
|
||||||
const keys = keyPool.list().filter((k) => k.service === "openai");
|
[modelFamily in OpenAIModelFamily]?: {
|
||||||
const hasGpt4 = keys.some((k) => k.isGpt4);
|
usage?: string;
|
||||||
|
activeKeys: number;
|
||||||
|
trialKeys?: number;
|
||||||
|
revokedKeys?: number;
|
||||||
|
overQuotaKeys?: number;
|
||||||
|
proomptersInQueue?: number;
|
||||||
|
estimatedQueueTime?: string;
|
||||||
|
};
|
||||||
|
} = {};
|
||||||
|
|
||||||
if (keyPool.anyUnchecked()) {
|
const keys = keyPool.list().filter(keyIsOpenAIKey);
|
||||||
const uncheckedKeys = keys.filter((k) => !k.lastChecked);
|
const enabledFamilies = new Set(config.allowedModelFamilies);
|
||||||
info.status = `Still checking ${uncheckedKeys.length} keys...` as any;
|
const accessibleFamilies = keys
|
||||||
} else {
|
.flatMap((k) => k.modelFamilies)
|
||||||
delete info.status;
|
.filter((f) => enabledFamilies.has(f))
|
||||||
}
|
.concat("turbo");
|
||||||
|
const familySet = new Set(accessibleFamilies);
|
||||||
|
|
||||||
if (config.checkKeys) {
|
if (config.checkKeys) {
|
||||||
const turboKeys = keys.filter((k) => !k.isGpt4 && !k.isDisabled);
|
const unchecked = serviceStats.get("openAiUncheckedKeys") || 0;
|
||||||
const gpt4Keys = keys.filter((k) => k.isGpt4 && !k.isDisabled);
|
if (unchecked > 0) {
|
||||||
|
info.status = `Checking ${unchecked} keys...`;
|
||||||
const quota: Record<string, string> = { turbo: "", gpt4: "" };
|
|
||||||
const turboQuota = keyPool.remainingQuota("openai") * 100;
|
|
||||||
const gpt4Quota = keyPool.remainingQuota("openai", { gpt4: true }) * 100;
|
|
||||||
|
|
||||||
if (config.quotaDisplayMode === "full") {
|
|
||||||
const turboUsage = keyPool.usageInUsd("openai");
|
|
||||||
const gpt4Usage = keyPool.usageInUsd("openai", { gpt4: true });
|
|
||||||
quota.turbo = `${turboUsage} (${Math.round(turboQuota)}% remaining)`;
|
|
||||||
quota.gpt4 = `${gpt4Usage} (${Math.round(gpt4Quota)}% remaining)`;
|
|
||||||
} else {
|
|
||||||
quota.turbo = `${Math.round(turboQuota)}%`;
|
|
||||||
quota.gpt4 = `${Math.round(gpt4Quota * 100)}%`;
|
|
||||||
}
|
}
|
||||||
|
info.openaiKeys = keys.length;
|
||||||
|
info.openaiOrgs = getUniqueOpenAIOrgs(keys);
|
||||||
|
|
||||||
info.turbo = {
|
familySet.forEach((f) => {
|
||||||
activeKeys: turboKeys.filter((k) => !k.isDisabled).length,
|
const tokens = modelStats.get(`${f}__tokens`) || 0;
|
||||||
trialKeys: turboKeys.filter((k) => k.isTrial).length,
|
const cost = getTokenCostUsd(f, tokens);
|
||||||
quota: quota.turbo,
|
|
||||||
};
|
|
||||||
|
|
||||||
if (hasGpt4) {
|
info[f] = {
|
||||||
info.gpt4 = {
|
usage: `${prettyTokens(tokens)} tokens${getCostString(cost)}`,
|
||||||
activeKeys: gpt4Keys.filter((k) => !k.isDisabled).length,
|
activeKeys: modelStats.get(`${f}__active`) || 0,
|
||||||
trialKeys: gpt4Keys.filter((k) => k.isTrial).length,
|
trialKeys: modelStats.get(`${f}__trial`) || 0,
|
||||||
quota: quota.gpt4,
|
revokedKeys: modelStats.get(`${f}__revoked`) || 0,
|
||||||
|
overQuotaKeys: modelStats.get(`${f}__overQuota`) || 0,
|
||||||
};
|
};
|
||||||
}
|
|
||||||
|
|
||||||
if (config.quotaDisplayMode === "none") {
|
// Don't show trial/revoked keys for non-turbo families.
|
||||||
delete info.turbo?.quota;
|
// Generally those stats only make sense for the lowest-tier model.
|
||||||
delete info.gpt4?.quota;
|
if (f !== "turbo") {
|
||||||
}
|
delete info[f]!.trialKeys;
|
||||||
|
delete info[f]!.revokedKeys;
|
||||||
|
}
|
||||||
|
});
|
||||||
} else {
|
} else {
|
||||||
info.status = "Key checking is disabled." as any;
|
info.status = "Key checking is disabled.";
|
||||||
info.turbo = { activeKeys: keys.filter((k) => !k.isDisabled).length };
|
info.turbo = { activeKeys: keys.filter((k) => !k.isDisabled).length };
|
||||||
|
info.gpt4 = {
|
||||||
|
activeKeys: keys.filter(
|
||||||
|
(k) => !k.isDisabled && k.modelFamilies.includes("gpt4")
|
||||||
|
).length,
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
if (config.queueMode !== "none") {
|
familySet.forEach((f) => {
|
||||||
const turboQueue = getQueueInformation("turbo");
|
if (enabledFamilies.has(f)) {
|
||||||
|
if (!info[f]) info[f] = { activeKeys: 0 }; // may occur if checkKeys is disabled
|
||||||
info.turbo.proomptersInQueue = turboQueue.proomptersInQueue;
|
const { estimatedQueueTime, proomptersInQueue } = getQueueInformation(f);
|
||||||
info.turbo.estimatedQueueTime = turboQueue.estimatedQueueTime;
|
info[f]!.proomptersInQueue = proomptersInQueue;
|
||||||
|
info[f]!.estimatedQueueTime = estimatedQueueTime;
|
||||||
if (hasGpt4) {
|
} else {
|
||||||
const gpt4Queue = getQueueInformation("gpt-4");
|
(info[f]! as any).status = "GPT-3.5-Turbo is disabled on this proxy.";
|
||||||
info.gpt4.proomptersInQueue = gpt4Queue.proomptersInQueue;
|
|
||||||
info.gpt4.estimatedQueueTime = gpt4Queue.estimatedQueueTime;
|
|
||||||
}
|
}
|
||||||
}
|
});
|
||||||
|
|
||||||
return info;
|
return info;
|
||||||
}
|
}
|
||||||
|
|
||||||
function getAnthropicInfo() {
|
function getAnthropicInfo() {
|
||||||
const claudeInfo: Partial<ServiceInfo> = {};
|
const claudeInfo: Partial<ModelAggregates> = {
|
||||||
const keys = keyPool.list().filter((k) => k.service === "anthropic");
|
active: modelStats.get("claude__active") || 0,
|
||||||
claudeInfo.activeKeys = keys.filter((k) => !k.isDisabled).length;
|
pozzed: modelStats.get("claude__pozzed") || 0,
|
||||||
if (config.queueMode !== "none") {
|
revoked: modelStats.get("claude__revoked") || 0,
|
||||||
const queue = getQueueInformation("claude");
|
};
|
||||||
claudeInfo.proomptersInQueue = queue.proomptersInQueue;
|
|
||||||
claudeInfo.estimatedQueueTime = queue.estimatedQueueTime;
|
const queue = getQueueInformation("claude");
|
||||||
}
|
claudeInfo.queued = queue.proomptersInQueue;
|
||||||
return { claude: claudeInfo };
|
claudeInfo.queueTime = queue.estimatedQueueTime;
|
||||||
|
|
||||||
|
const tokens = modelStats.get("claude__tokens") || 0;
|
||||||
|
const cost = getTokenCostUsd("claude", tokens);
|
||||||
|
|
||||||
|
const unchecked =
|
||||||
|
(config.checkKeys && serviceStats.get("anthropicUncheckedKeys")) || 0;
|
||||||
|
|
||||||
|
return {
|
||||||
|
claude: {
|
||||||
|
usage: `${prettyTokens(tokens)} tokens${getCostString(cost)}`,
|
||||||
|
...(unchecked > 0 ? { status: `Checking ${unchecked} keys...` } : {}),
|
||||||
|
activeKeys: claudeInfo.active,
|
||||||
|
revokedKeys: claudeInfo.revoked,
|
||||||
|
...(config.checkKeys ? { pozzedKeys: claudeInfo.pozzed } : {}),
|
||||||
|
proomptersInQueue: claudeInfo.queued,
|
||||||
|
estimatedQueueTime: claudeInfo.queueTime,
|
||||||
|
},
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function getPalmInfo() {
|
||||||
|
const bisonInfo: Partial<ModelAggregates> = {
|
||||||
|
active: modelStats.get("bison__active") || 0,
|
||||||
|
revoked: modelStats.get("bison__revoked") || 0,
|
||||||
|
};
|
||||||
|
|
+  const queue = getQueueInformation("bison");
+  bisonInfo.queued = queue.proomptersInQueue;
+  bisonInfo.queueTime = queue.estimatedQueueTime;
+
+  const tokens = modelStats.get("bison__tokens") || 0;
+  const cost = getTokenCostUsd("bison", tokens);
+
+  return {
+    bison: {
+      usage: `${prettyTokens(tokens)} tokens${getCostString(cost)}`,
+      activeKeys: bisonInfo.active,
+      revokedKeys: bisonInfo.revoked,
+      proomptersInQueue: bisonInfo.queued,
+      estimatedQueueTime: bisonInfo.queueTime,
+    },
+  };
+}
+
+function getAwsInfo() {
+  const awsInfo: Partial<ModelAggregates> = {
+    active: modelStats.get("aws-claude__active") || 0,
+    revoked: modelStats.get("aws-claude__revoked") || 0,
+  };
+
+  const queue = getQueueInformation("aws-claude");
+  awsInfo.queued = queue.proomptersInQueue;
+  awsInfo.queueTime = queue.estimatedQueueTime;
+
+  const tokens = modelStats.get("aws-claude__tokens") || 0;
+  const cost = getTokenCostUsd("aws-claude", tokens);
+
+  const logged = modelStats.get("aws-claude__awsLogged") || 0;
+  const logMsg = config.allowAwsLogging
+    ? `${logged} active keys are potentially logged.`
+    : `${logged} active keys are potentially logged and can't be used. Set ALLOW_AWS_LOGGING=true to override.`;
+
+  return {
+    "aws-claude": {
+      usage: `${prettyTokens(tokens)} tokens${getCostString(cost)}`,
+      activeKeys: awsInfo.active,
+      revokedKeys: awsInfo.revoked,
+      proomptersInQueue: awsInfo.queued,
+      estimatedQueueTime: awsInfo.queueTime,
+      ...(logged > 0 ? { privacy: logMsg } : {}),
+    },
+  };
+}
+
+function getAzureInfo() {
+  const azureFamilies = [
+    "azure-turbo",
+    "azure-gpt4",
+    "azure-gpt4-turbo",
+    "azure-gpt4-32k",
+  ] as const;
+
+  const azureInfo: {
+    [modelFamily in AzureOpenAIModelFamily]?: {
+      usage?: string;
+      activeKeys: number;
+      revokedKeys?: number;
+      proomptersInQueue?: number;
+      estimatedQueueTime?: string;
+    };
+  } = {};
+  for (const family of azureFamilies) {
+    const familyAllowed = config.allowedModelFamilies.includes(family);
+    const activeKeys = modelStats.get(`${family}__active`) || 0;
+
+    if (!familyAllowed || activeKeys === 0) continue;
+
+    azureInfo[family] = {
+      activeKeys,
+      revokedKeys: modelStats.get(`${family}__revoked`) || 0,
+    };
+
+    const queue = getQueueInformation(family);
+    azureInfo[family]!.proomptersInQueue = queue.proomptersInQueue;
+    azureInfo[family]!.estimatedQueueTime = queue.estimatedQueueTime;
+
+    const tokens = modelStats.get(`${family}__tokens`) || 0;
+    const cost = getTokenCostUsd(family, tokens);
+    azureInfo[family]!.usage = `${prettyTokens(tokens)} tokens${getCostString(
+      cost
+    )}`;
+  }
+
+  return azureInfo;
+}
+
+const customGreeting = fs.existsSync("greeting.md")
+  ? `\n## Server Greeting\n${fs.readFileSync("greeting.md", "utf8")}`
+  : "";
+
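The `...(logged > 0 ? { privacy: logMsg } : {})` spread in `getAwsInfo` attaches the `privacy` field only when at least one key is affected, so consumers of the info page JSON never see an explicit `undefined`. A minimal standalone sketch of that conditional-spread idiom, with illustrative names not taken from the codebase:

    // Conditionally include a field via object spread: spreading an empty
    // object contributes nothing, so `privacy` is simply absent when not needed.
    function buildServiceInfo(loggedKeys: number) {
      const warning = `${loggedKeys} active keys are potentially logged.`;
      return {
        activeKeys: 10,
        ...(loggedKeys > 0 ? { privacy: warning } : {}),
      };
    }

    console.log(buildServiceInfo(0)); // { activeKeys: 10 }
    console.log(buildServiceInfo(3)); // { activeKeys: 10, privacy: "3 active keys are potentially logged." }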
 /**
  * If the server operator provides a `greeting.md` file, it will be included in
  * the rendered info page.
  **/
 function buildInfoPageHeader(converter: showdown.Converter, title: string) {
-  const customGreeting = fs.existsSync("greeting.md")
-    ? fs.readFileSync("greeting.md", "utf8")
-    : null;
-
   // TODO: use some templating engine instead of this mess

   let infoBody = `<!-- Header for Showdown's parser, don't remove this line -->
 # ${title}`;
   if (config.promptLogging) {
-    infoBody += `\n## Prompt logging is enabled!
-The server operator has enabled prompt logging. The prompts you send to this proxy and the AI responses you receive may be saved.
+    infoBody += `\n## Prompt Logging Enabled
+This proxy keeps full logs of all prompts and AI responses. Prompt logs are anonymous and do not contain IP addresses or timestamps.

-Logs are anonymous and do not contain IP addresses or timestamps. [You can see the type of data logged here, along with the rest of the code.](https://gitgud.io/khanon/oai-reverse-proxy/-/blob/main/src/prompt-logging/index.ts).
+[You can see the type of data logged here, along with the rest of the code.](https://gitgud.io/khanon/oai-reverse-proxy/-/blob/main/src/shared/prompt-logging/index.ts).

 **If you are uncomfortable with this, don't send prompts to this proxy!**`;
   }

-  if (config.queueMode !== "none") {
-    const waits = [];
-    infoBody += `\n## Estimated Wait Times\nIf the AI is busy, your prompt will processed when a slot frees up.`;
-
-    if (config.openaiKey) {
-      const turboWait = getQueueInformation("turbo").estimatedQueueTime;
-      const gpt4Wait = getQueueInformation("gpt-4").estimatedQueueTime;
-      waits.push(`**Turbo:** ${turboWait}`);
-      if (keyPool.list().some((k) => k.isGpt4)) {
-        waits.push(`**GPT-4:** ${gpt4Wait}`);
-      }
-    }
-
-    if (config.anthropicKey) {
-      const claudeWait = getQueueInformation("claude").estimatedQueueTime;
-      waits.push(`**Claude:** ${claudeWait}`);
-    }
-    infoBody += "\n\n" + waits.join(" / ");
+  if (config.staticServiceInfo) {
+    return converter.makeHtml(infoBody + customGreeting);
   }

-  if (customGreeting) {
-    infoBody += `\n## Server Greeting\n
-${customGreeting}`;
+  const waits: string[] = [];
+  infoBody += `\n## Estimated Wait Times`;
+
+  if (config.openaiKey) {
+    // TODO: un-fuck this
+    const keys = keyPool.list().filter((k) => k.service === "openai");
+
+    const turboWait = getQueueInformation("turbo").estimatedQueueTime;
+    waits.push(`**Turbo:** ${turboWait}`);
+
+    const gpt4Wait = getQueueInformation("gpt4").estimatedQueueTime;
+    const hasGpt4 = keys.some((k) => k.modelFamilies.includes("gpt4"));
+    const allowedGpt4 = config.allowedModelFamilies.includes("gpt4");
+    if (hasGpt4 && allowedGpt4) {
+      waits.push(`**GPT-4:** ${gpt4Wait}`);
+    }
+
+    const gpt432kWait = getQueueInformation("gpt4-32k").estimatedQueueTime;
+    const hasGpt432k = keys.some((k) => k.modelFamilies.includes("gpt4-32k"));
+    const allowedGpt432k = config.allowedModelFamilies.includes("gpt4-32k");
+    if (hasGpt432k && allowedGpt432k) {
+      waits.push(`**GPT-4-32k:** ${gpt432kWait}`);
+    }
+
+    const dalleWait = getQueueInformation("dall-e").estimatedQueueTime;
+    const hasDalle = keys.some((k) => k.modelFamilies.includes("dall-e"));
+    const allowedDalle = config.allowedModelFamilies.includes("dall-e");
+    if (hasDalle && allowedDalle) {
+      waits.push(`**DALL-E:** ${dalleWait}`);
+    }
   }

+  if (config.anthropicKey) {
+    const claudeWait = getQueueInformation("claude").estimatedQueueTime;
+    waits.push(`**Claude:** ${claudeWait}`);
+  }
+
+  if (config.awsCredentials) {
+    const awsClaudeWait = getQueueInformation("aws-claude").estimatedQueueTime;
+    waits.push(`**Claude (AWS):** ${awsClaudeWait}`);
+  }
+
+  infoBody += "\n\n" + waits.join(" / ");
+
+  infoBody += customGreeting;
+
+  infoBody += buildRecentImageSection();
+
   return converter.makeHtml(infoBody);
 }
+
+function getSelfServiceLinks() {
+  if (config.gatekeeper !== "user_token") return "";
+  return `<footer style="font-size: 0.8em;"><hr /><a target="_blank" href="/user/lookup">Check your user token info</a></footer>`;
+}

 /** Returns queue time in seconds, or minutes + seconds if over 60 seconds. */
-function getQueueInformation(partition: QueuePartition) {
-  if (config.queueMode === "none") {
-    return {};
-  }
+function getQueueInformation(partition: ModelFamily) {
   const waitMs = getEstimatedWaitTime(partition);
   const waitTime =
     waitMs < 60000
@@ -253,9 +599,44 @@ function getServerTitle() {
   return "OAI Reverse Proxy";
 }

+function buildRecentImageSection() {
+  if (
+    !config.allowedModelFamilies.includes("dall-e") ||
+    !config.showRecentImages
+  ) {
+    return "";
+  }
+
+  let html = `<h2>Recent DALL-E Generations</h2>`;
+  const recentImages = getLastNImages(12).reverse();
+  if (recentImages.length === 0) {
+    html += `<p>No images yet.</p>`;
+    return html;
+  }
+
+  html += `<div style="display: flex; flex-wrap: wrap;" id="recent-images">`;
+  for (const { url, prompt } of recentImages) {
+    const thumbUrl = url.replace(/\.png$/, "_t.jpg");
+    const escapedPrompt = escapeHtml(prompt);
+    html += `<div style="margin: 0.5em;" class="recent-image">
+<a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}" alt="${escapedPrompt}" style="max-width: 150px; max-height: 150px;" /></a>
+</div>`;
+  }
+  html += `</div>`;
+
+  return html;
+}
+
+function escapeHtml(unsafe: string) {
+  return unsafe
+    .replace(/&/g, "&amp;")
+    .replace(/</g, "&lt;")
+    .replace(/>/g, "&gt;")
+    .replace(/"/g, "&quot;")
+    .replace(/'/g, "&#039;");
+}
+
 function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
-  // Huggingface broke their amazon elb config and no longer sends the
-  // x-forwarded-host header. This is a workaround.
   try {
     const [username, spacename] = spaceId.split("/");
     return `https://${username}-${spacename.replace(/_/g, "-")}.hf.space`;
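Because `buildRecentImageSection` interpolates user prompts directly into `title` and `alt` attributes, `escapeHtml` is the only thing standing between prompt text and attribute injection. A quick sketch of the expected substitution, assuming the conventional five-entity mapping shown above (the decoded entities were reconstructed from context):

    // Mirrors the escapeHtml above: the five characters with special meaning
    // in HTML text and attribute contexts are replaced with entities.
    function escapeHtml(unsafe: string) {
      return unsafe
        .replace(/&/g, "&amp;")
        .replace(/</g, "&lt;")
        .replace(/>/g, "&gt;")
        .replace(/"/g, "&quot;")
        .replace(/'/g, "&#039;");
    }

    // `&` is handled first so later replacements can't be double-escaped.
    console.log(escapeHtml(`say "<hi>" & 'bye'`));
    // -> say &quot;&lt;hi&gt;&quot; &amp; &#039;bye&#039;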
@@ -1,106 +0,0 @@
-import type * as http from "http";
-import { AnthropicKeyProvider, AnthropicKeyUpdate } from "./anthropic/provider";
-import { Key, Model, KeyProvider, AIService } from "./index";
-import { OpenAIKeyProvider, OpenAIKeyUpdate } from "./openai/provider";
-
-type AllowedPartial = OpenAIKeyUpdate | AnthropicKeyUpdate;
-
-export class KeyPool {
-  private keyProviders: KeyProvider[] = [];
-
-  constructor() {
-    this.keyProviders.push(new OpenAIKeyProvider());
-    this.keyProviders.push(new AnthropicKeyProvider());
-  }
-
-  public init() {
-    this.keyProviders.forEach((provider) => provider.init());
-    const availableKeys = this.available("all");
-    if (availableKeys === 0) {
-      throw new Error(
-        "No keys loaded. Ensure either OPENAI_KEY or ANTHROPIC_KEY is set."
-      );
-    }
-  }
-
-  public get(model: Model): Key {
-    const service = this.getService(model);
-    return this.getKeyProvider(service).get(model);
-  }
-
-  public list(): Omit<Key, "key">[] {
-    return this.keyProviders.flatMap((provider) => provider.list());
-  }
-
-  public disable(key: Key): void {
-    const service = this.getKeyProvider(key.service);
-    service.disable(key);
-  }
-
-  public update(key: Key, props: AllowedPartial): void {
-    const service = this.getKeyProvider(key.service);
-    service.update(key.hash, props);
-  }
-
-  public available(service: AIService | "all" = "all"): number {
-    return this.keyProviders.reduce((sum, provider) => {
-      const includeProvider = service === "all" || service === provider.service;
-      return sum + (includeProvider ? provider.available() : 0);
-    }, 0);
-  }
-
-  public anyUnchecked(): boolean {
-    return this.keyProviders.some((provider) => provider.anyUnchecked());
-  }
-
-  public incrementPrompt(key: Key): void {
-    const provider = this.getKeyProvider(key.service);
-    provider.incrementPrompt(key.hash);
-  }
-
-  public getLockoutPeriod(model: Model): number {
-    const service = this.getService(model);
-    return this.getKeyProvider(service).getLockoutPeriod(model);
-  }
-
-  public markRateLimited(key: Key): void {
-    const provider = this.getKeyProvider(key.service);
-    provider.markRateLimited(key.hash);
-  }
-
-  public updateRateLimits(key: Key, headers: http.IncomingHttpHeaders): void {
-    const provider = this.getKeyProvider(key.service);
-    if (provider instanceof OpenAIKeyProvider) {
-      provider.updateRateLimits(key.hash, headers);
-    }
-  }
-
-  public remainingQuota(
-    service: AIService,
-    options?: Record<string, unknown>
-  ): number {
-    return this.getKeyProvider(service).remainingQuota(options);
-  }
-
-  public usageInUsd(
-    service: AIService,
-    options?: Record<string, unknown>
-  ): string {
-    return this.getKeyProvider(service).usageInUsd(options);
-  }
-
-  private getService(model: Model): AIService {
-    if (model.startsWith("gpt")) {
-      // https://platform.openai.com/docs/models/model-endpoint-compatibility
-      return "openai";
-    } else if (model.startsWith("claude-")) {
-      // https://console.anthropic.com/docs/api/reference#parameters
-      return "anthropic";
-    }
-    throw new Error(`Unknown service for model '${model}'`);
-  }
-
-  private getKeyProvider(service: AIService): KeyProvider {
-    return this.keyProviders.find((provider) => provider.service === service)!;
-  }
-}
@@ -1,278 +0,0 @@
-import axios, { AxiosError } from "axios";
-import { Configuration, OpenAIApi } from "openai";
-import { logger } from "../../logger";
-import type { OpenAIKey, OpenAIKeyProvider } from "./provider";
-
-const MIN_CHECK_INTERVAL = 3 * 1000; // 3 seconds
-const KEY_CHECK_PERIOD = 5 * 60 * 1000; // 5 minutes
-
-const GET_SUBSCRIPTION_URL =
-  "https://api.openai.com/dashboard/billing/subscription";
-const GET_USAGE_URL = "https://api.openai.com/dashboard/billing/usage";
-
-type GetSubscriptionResponse = {
-  plan: { title: string };
-  has_payment_method: boolean;
-  soft_limit_usd: number;
-  hard_limit_usd: number;
-  system_hard_limit_usd: number;
-};
-
-type GetUsageResponse = {
-  total_usage: number;
-};
-
-type OpenAIError = {
-  error: { type: string; code: string; param: unknown; message: string };
-};
-
-type UpdateFn = typeof OpenAIKeyProvider.prototype.update;
-
-export class OpenAIKeyChecker {
-  private readonly keys: OpenAIKey[];
-  private log = logger.child({ module: "key-checker", service: "openai" });
-  private timeout?: NodeJS.Timeout;
-  private updateKey: UpdateFn;
-  private lastCheck = 0;
-
-  constructor(keys: OpenAIKey[], updateKey: UpdateFn) {
-    this.keys = keys;
-    this.updateKey = updateKey;
-  }
-
-  public start() {
-    this.log.info("Starting key checker...");
-    this.scheduleNextCheck();
-  }
-
-  public stop() {
-    if (this.timeout) {
-      clearTimeout(this.timeout);
-    }
-  }
-
-  /**
-   * Schedules the next check. If there are still keys yet to be checked, it
-   * will schedule a check immediately for the next unchecked key. Otherwise,
-   * it will schedule a check in several minutes for the oldest key.
-   **/
-  private scheduleNextCheck() {
-    const enabledKeys = this.keys.filter((key) => !key.isDisabled);
-
-    if (enabledKeys.length === 0) {
-      this.log.warn("All keys are disabled. Key checker stopping.");
-      return;
-    }
-
-    // Perform startup checks for any keys that haven't been checked yet.
-    const uncheckedKeys = enabledKeys.filter((key) => !key.lastChecked);
-    if (uncheckedKeys.length > 0) {
-      // Check up to 12 keys at once to speed up startup.
-      const keysToCheck = uncheckedKeys.slice(0, 12);
-
-      this.log.info(
-        {
-          key: keysToCheck.map((key) => key.hash),
-          remaining: uncheckedKeys.length - keysToCheck.length,
-        },
-        "Scheduling initial checks for key batch."
-      );
-      this.timeout = setTimeout(async () => {
-        const promises = keysToCheck.map((key) => this.checkKey(key));
-        try {
-          await Promise.all(promises);
-        } catch (error) {
-          this.log.error({ error }, "Error checking one or more keys.");
-        }
-        this.scheduleNextCheck();
-      }, 250);
-      return;
-    }
-
-    // Schedule the next check for the oldest key.
-    const oldestKey = enabledKeys.reduce((oldest, key) =>
-      key.lastChecked < oldest.lastChecked ? key : oldest
-    );
-
-    // Don't check any individual key more than once every 5 minutes.
-    // Also, don't check anything more often than once every 3 seconds.
-    const nextCheck = Math.max(
-      oldestKey.lastChecked + KEY_CHECK_PERIOD,
-      this.lastCheck + MIN_CHECK_INTERVAL
-    );
-
-    this.log.debug(
-      { key: oldestKey.hash, nextCheck: new Date(nextCheck) },
-      "Scheduling next check."
-    );
-
-    const delay = nextCheck - Date.now();
-    this.timeout = setTimeout(() => this.checkKey(oldestKey), delay);
-  }
-
-  private async checkKey(key: OpenAIKey) {
-    // It's possible this key might have been disabled while we were waiting
-    // for the next check.
-    if (key.isDisabled) {
-      this.log.warn({ key: key.hash }, "Skipping check for disabled key.");
-      this.scheduleNextCheck();
-      return;
-    }
-
-    this.log.debug({ key: key.hash }, "Checking key...");
-    let isInitialCheck = !key.lastChecked;
-    try {
-      // During the initial check we need to get the subscription first because
-      // trials have different behavior.
-      if (isInitialCheck) {
-        const subscription = await this.getSubscription(key);
-        this.updateKey(key.hash, { isTrial: !subscription.has_payment_method });
-        if (key.isTrial) {
-          this.log.debug(
-            { key: key.hash },
-            "Attempting generation on trial key."
-          );
-          await this.assertCanGenerate(key);
-        }
-        const [provisionedModels, usage] = await Promise.all([
-          this.getProvisionedModels(key),
-          this.getUsage(key),
-        ]);
-        const updates = {
-          isGpt4: provisionedModels.gpt4,
-          softLimit: subscription.soft_limit_usd,
-          hardLimit: subscription.hard_limit_usd,
-          systemHardLimit: subscription.system_hard_limit_usd,
-          usage,
-        };
-        this.updateKey(key.hash, updates);
-      } else {
-        // Don't check provisioned models after the initial check because it's
-        // not likely to change.
-        const [subscription, usage] = await Promise.all([
-          this.getSubscription(key),
-          this.getUsage(key),
-        ]);
-        const updates = {
-          softLimit: subscription.soft_limit_usd,
-          hardLimit: subscription.hard_limit_usd,
-          systemHardLimit: subscription.system_hard_limit_usd,
-          usage,
-        };
-        this.updateKey(key.hash, updates);
-      }
-      this.log.info(
-        { key: key.hash, usage: key.usage, hardLimit: key.hardLimit },
-        "Key check complete."
-      );
-    } catch (error) {
-      // touch the key so we don't check it again for a while
-      this.updateKey(key.hash, {});
-      this.handleAxiosError(key, error as AxiosError);
-    }
-
-    this.lastCheck = Date.now();
-    // Only enqueue the next check if this wasn't a startup check, since those
-    // are batched together elsewhere.
-    if (!isInitialCheck) {
-      this.scheduleNextCheck();
-    }
-  }
-
-  private async getProvisionedModels(
-    key: OpenAIKey
-  ): Promise<{ turbo: boolean; gpt4: boolean }> {
-    const openai = new OpenAIApi(new Configuration({ apiKey: key.key }));
-    const models = (await openai.listModels()!).data.data;
-    const turbo = models.some(({ id }) => id.startsWith("gpt-3.5"));
-    const gpt4 = models.some(({ id }) => id.startsWith("gpt-4"));
-    return { turbo, gpt4 };
-  }
-
-  private async getSubscription(key: OpenAIKey) {
-    const { data } = await axios.get<GetSubscriptionResponse>(
-      GET_SUBSCRIPTION_URL,
-      { headers: { Authorization: `Bearer ${key.key}` } }
-    );
-    return data;
-  }
-
-  private async getUsage(key: OpenAIKey) {
-    const querystring = OpenAIKeyChecker.getUsageQuerystring(key.isTrial);
-    const url = `${GET_USAGE_URL}?${querystring}`;
-    const { data } = await axios.get<GetUsageResponse>(url, {
-      headers: { Authorization: `Bearer ${key.key}` },
-    });
-    return parseFloat((data.total_usage / 100).toFixed(2));
-  }
-
-  private handleAxiosError(key: OpenAIKey, error: AxiosError) {
-    if (error.response && OpenAIKeyChecker.errorIsOpenAiError(error)) {
-      const { status, data } = error.response;
-      if (status === 401) {
-        this.log.warn(
-          { key: key.hash, error: data },
-          "Key is invalid or revoked. Disabling key."
-        );
-        this.updateKey(key.hash, { isDisabled: true });
-      } else if (status === 429 && data.error.type === "insufficient_quota") {
-        this.log.warn(
-          { key: key.hash, isTrial: key.isTrial, error: data },
-          "Key is out of quota. Disabling key."
-        );
-        this.updateKey(key.hash, { isDisabled: true });
-      } else {
-        this.log.error(
-          { key: key.hash, status, error: data },
-          "Encountered API error while checking key."
-        );
-      }
-      return;
-    }
-    this.log.error(
-      { key: key.hash, error },
-      "Network error while checking key; trying again later."
-    );
-  }
-
-  /**
-   * Trial key usage reporting is inaccurate, so we need to run an actual
-   * completion to test them for liveness.
-   */
-  private async assertCanGenerate(key: OpenAIKey): Promise<void> {
-    const openai = new OpenAIApi(new Configuration({ apiKey: key.key }));
-    // This will throw an AxiosError if the key is invalid or out of quota.
-    await openai.createChatCompletion({
-      model: "gpt-3.5-turbo",
-      messages: [{ role: "user", content: "Hello" }],
-      max_tokens: 1,
-    });
-  }
-
-  static getUsageQuerystring(isTrial: boolean) {
-    // For paid keys, the limit resets every month, so we can use the first day
-    // of the current month.
-    // For trial keys, the limit does not reset and we don't know when the key
-    // was created, so we use 99 days ago because that's as far back as the API
-    // will let us go.
-
-    // End date needs to be set to the beginning of the next day so that we get
-    // usage for the current day.
-
-    const today = new Date();
-    const startDate = isTrial
-      ? new Date(today.getTime() - 99 * 24 * 60 * 60 * 1000)
-      : new Date(today.getFullYear(), today.getMonth(), 1);
-    const endDate = new Date(today.getTime() + 24 * 60 * 60 * 1000);
-    return `start_date=${startDate.toISOString().split("T")[0]}&end_date=${
-      endDate.toISOString().split("T")[0]
-    }`;
-  }
-
-  static errorIsOpenAiError(
-    error: AxiosError
-  ): error is AxiosError<OpenAIError> {
-    const data = error.response?.data as any;
-    return data?.error?.type;
-  }
-}
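The scheduling logic in the checker above reduces to taking the later of two floors: the per-key recheck period and the global minimum spacing between any two checks. A standalone sketch of just that computation (the clamp to zero is added here for safety and is not in the original):

    const MIN_CHECK_INTERVAL = 3 * 1000; // global floor between any two checks
    const KEY_CHECK_PERIOD = 5 * 60 * 1000; // per-key floor between rechecks

    // Milliseconds to wait before the oldest key may be checked again.
    function nextCheckDelay(
      oldestKeyLastChecked: number,
      lastCheckOfAnyKey: number,
      now = Date.now()
    ): number {
      const nextCheck = Math.max(
        oldestKeyLastChecked + KEY_CHECK_PERIOD,
        lastCheckOfAnyKey + MIN_CHECK_INTERVAL
      );
      return Math.max(0, nextCheck - now);
    }

    // Oldest key was checked 5 minutes ago, but some key was checked 1s ago:
    // the 3-second global floor wins, so we wait ~2 more seconds.
    console.log(nextCheckDelay(Date.now() - 300_000, Date.now() - 1_000));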
@@ -1,360 +0,0 @@
-/* Manages OpenAI API keys. Tracks usage, disables expired keys, and provides
-round-robin access to keys. Keys are stored in the OPENAI_KEY environment
-variable as a comma-separated list of keys. */
-import crypto from "crypto";
-import fs from "fs";
-import http from "http";
-import path from "path";
-import { KeyProvider, Key, Model } from "../index";
-import { config } from "../../config";
-import { logger } from "../../logger";
-import { OpenAIKeyChecker } from "./checker";
-
-export type OpenAIModel = "gpt-3.5-turbo" | "gpt-4";
-export const OPENAI_SUPPORTED_MODELS: readonly OpenAIModel[] = [
-  "gpt-3.5-turbo",
-  "gpt-4",
-] as const;
-
-export interface OpenAIKey extends Key {
-  readonly service: "openai";
-  /** The current usage of this key. */
-  usage: number;
-  /** Threshold at which a warning email will be sent by OpenAI. */
-  softLimit: number;
-  /** Threshold at which the key will be disabled because it has reached the user-defined limit. */
-  hardLimit: number;
-  /** The maximum quota allocated to this key by OpenAI. */
-  systemHardLimit: number;
-  /** The time at which this key was last rate limited. */
-  rateLimitedAt: number;
-  /**
-   * Last known X-RateLimit-Requests-Reset header from OpenAI, converted to a
-   * number.
-   * Formatted as a `\d+(m|s)` string denoting the time until the limit resets.
-   * Specifically, it seems to indicate the time until the key's quota will be
-   * fully restored; the key may be usable before this time as the limit is a
-   * rolling window.
-   *
-   * Requests which return a 429 do not count against the quota.
-   *
-   * Requests which fail for other reasons (e.g. 401) count against the quota.
-   */
-  rateLimitRequestsReset: number;
-  /**
-   * Last known X-RateLimit-Tokens-Reset header from OpenAI, converted to a
-   * number.
-   * Appears to follow the same format as `rateLimitRequestsReset`.
-   *
-   * Requests which fail do not count against the quota as they do not consume
-   * tokens.
-   */
-  rateLimitTokensReset: number;
-}
-
-export type OpenAIKeyUpdate = Omit<
-  Partial<OpenAIKey>,
-  "key" | "hash" | "lastUsed" | "lastChecked" | "promptCount"
->;
-
-export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> {
-  readonly service = "openai" as const;
-
-  private keys: OpenAIKey[] = [];
-  private checker?: OpenAIKeyChecker;
-  private log = logger.child({ module: "key-provider", service: this.service });
-
-  constructor() {
-    const keyString = config.openaiKey?.trim();
-    if (!keyString) {
-      this.log.warn("OPENAI_KEY is not set. OpenAI API will not be available.");
-      return;
-    }
-    let bareKeys: string[];
-    bareKeys = keyString.split(",").map((k) => k.trim());
-    bareKeys = [...new Set(bareKeys)];
-    for (const k of bareKeys) {
-      const newKey = {
-        key: k,
-        service: "openai" as const,
-        isGpt4: false,
-        isTrial: false,
-        isDisabled: false,
-        softLimit: 0,
-        hardLimit: 0,
-        systemHardLimit: 0,
-        usage: 0,
-        lastUsed: 0,
-        lastChecked: 0,
-        promptCount: 0,
-        hash: `oai-${crypto
-          .createHash("sha256")
-          .update(k)
-          .digest("hex")
-          .slice(0, 8)}`,
-        rateLimitedAt: 0,
-        rateLimitRequestsReset: 0,
-        rateLimitTokensReset: 0,
-      };
-      this.keys.push(newKey);
-    }
-    this.log.info({ keyCount: this.keys.length }, "Loaded OpenAI keys.");
-  }
-
-  public init() {
-    if (config.checkKeys) {
-      this.checker = new OpenAIKeyChecker(this.keys, this.update.bind(this));
-      this.checker.start();
-    }
-  }
-
-  /**
-   * Returns a list of all keys, with the key field removed.
-   * Don't mutate returned keys, use a KeyPool method instead.
-   **/
-  public list() {
-    return this.keys.map((key) => {
-      return Object.freeze({
-        ...key,
-        key: undefined,
-      });
-    });
-  }
-
-  public get(model: Model) {
-    const needGpt4 = model.startsWith("gpt-4");
-    const availableKeys = this.keys.filter(
-      (key) => !key.isDisabled && (!needGpt4 || key.isGpt4)
-    );
-    if (availableKeys.length === 0) {
-      let message = needGpt4
-        ? "No active OpenAI keys available."
-        : "No GPT-4 keys available. Try selecting a non-GPT-4 model.";
-      throw new Error(message);
-    }
-
-    // Select a key, from highest priority to lowest priority:
-    // 1. Keys which are not rate limited
-    //    a. We ignore rate limits from over a minute ago
-    //    b. If all keys were rate limited in the last minute, select the
-    //       least recently rate limited key
-    // 2. Keys which are trials
-    // 3. Keys which have not been used in the longest time
-
-    const now = Date.now();
-    const rateLimitThreshold = 60 * 1000;
-
-    const keysByPriority = availableKeys.sort((a, b) => {
-      const aRateLimited = now - a.rateLimitedAt < rateLimitThreshold;
-      const bRateLimited = now - b.rateLimitedAt < rateLimitThreshold;
-
-      if (aRateLimited && !bRateLimited) return 1;
-      if (!aRateLimited && bRateLimited) return -1;
-      if (aRateLimited && bRateLimited) {
-        return a.rateLimitedAt - b.rateLimitedAt;
-      }
-
-      if (a.isTrial && !b.isTrial) return -1;
-      if (!a.isTrial && b.isTrial) return 1;
-
-      return a.lastUsed - b.lastUsed;
-    });
-
-    const selectedKey = keysByPriority[0];
-    selectedKey.lastUsed = now;
-
-    // When a key is selected, we rate-limit it for a brief period of time to
-    // prevent the queue processor from immediately flooding it with requests
-    // while the initial request is still being processed (which is when we will
-    // get new rate limit headers).
-    // Instead, we will let a request through every second until the key
-    // becomes fully saturated and locked out again.
-    selectedKey.rateLimitedAt = now;
-    selectedKey.rateLimitRequestsReset = 1000;
-    return { ...selectedKey };
-  }
-
-  /** Called by the key checker to update key information. */
-  public update(keyHash: string, update: OpenAIKeyUpdate) {
-    const keyFromPool = this.keys.find((k) => k.hash === keyHash)!;
-    Object.assign(keyFromPool, { ...update, lastChecked: Date.now() });
-    // this.writeKeyStatus();
-  }
-
-  /** Disables a key, or does nothing if the key isn't in this pool. */
-  public disable(key: Key) {
-    const keyFromPool = this.keys.find((k) => k.key === key.key);
-    if (!keyFromPool || keyFromPool.isDisabled) return;
-    keyFromPool.isDisabled = true;
-    // If it's disabled just set the usage to the hard limit so it doesn't
-    // mess with the aggregate usage.
-    keyFromPool.usage = keyFromPool.hardLimit;
-    this.log.warn({ key: key.hash }, "Key disabled");
-  }
-
-  public available() {
-    return this.keys.filter((k) => !k.isDisabled).length;
-  }
-
-  public anyUnchecked() {
-    return !!config.checkKeys && this.keys.some((key) => !key.lastChecked);
-  }
-
-  /**
-   * Given a model, returns the period until a key will be available to service
-   * the request, or returns 0 if a key is ready immediately.
-   */
-  public getLockoutPeriod(model: Model = "gpt-4"): number {
-    const needGpt4 = model.startsWith("gpt-4");
-    const activeKeys = this.keys.filter(
-      (key) => !key.isDisabled && (!needGpt4 || key.isGpt4)
-    );
-
-    if (activeKeys.length === 0) {
-      // If there are no active keys for this model we can't fulfill requests.
-      // We'll return 0 to let the request through and return an error,
-      // otherwise the request will be stuck in the queue forever.
-      return 0;
-    }
-
-    // A key is rate-limited if its `rateLimitedAt` plus the greater of its
-    // `rateLimitRequestsReset` and `rateLimitTokensReset` is after the
-    // current time.
-
-    // If there are any keys that are not rate-limited, we can fulfill requests.
-    const now = Date.now();
-    const rateLimitedKeys = activeKeys.filter((key) => {
-      const resetTime = Math.max(
-        key.rateLimitRequestsReset,
-        key.rateLimitTokensReset
-      );
-      return now < key.rateLimitedAt + resetTime;
-    }).length;
-    const anyNotRateLimited = rateLimitedKeys < activeKeys.length;
-
-    if (anyNotRateLimited) {
-      return 0;
-    }
-
-    // If all keys are rate-limited, return the time until the first key is
-    // ready.
-    const timeUntilFirstReady = Math.min(
-      ...activeKeys.map((key) => {
-        const resetTime = Math.max(
-          key.rateLimitRequestsReset,
-          key.rateLimitTokensReset
-        );
-        return key.rateLimitedAt + resetTime - now;
-      })
-    );
-    return timeUntilFirstReady;
-  }
-
-  public markRateLimited(keyHash: string) {
-    this.log.warn({ key: keyHash }, "Key rate limited");
-    const key = this.keys.find((k) => k.hash === keyHash)!;
-    key.rateLimitedAt = Date.now();
-  }
-
-  public incrementPrompt(keyHash?: string) {
-    const key = this.keys.find((k) => k.hash === keyHash);
-    if (!key) return;
-    key.promptCount++;
-  }
-
-  public updateRateLimits(keyHash: string, headers: http.IncomingHttpHeaders) {
-    const key = this.keys.find((k) => k.hash === keyHash)!;
-    const requestsReset = headers["x-ratelimit-reset-requests"];
-    const tokensReset = headers["x-ratelimit-reset-tokens"];
-
-    // Sometimes OpenAI only sends one of the two rate limit headers, it's
-    // unclear why.
-
-    if (requestsReset && typeof requestsReset === "string") {
-      this.log.info(
-        { key: key.hash, requestsReset },
-        `Updating rate limit requests reset time`
-      );
-      key.rateLimitRequestsReset = getResetDurationMillis(requestsReset);
-    }
-
-    if (tokensReset && typeof tokensReset === "string") {
-      this.log.info(
-        { key: key.hash, tokensReset },
-        `Updating rate limit tokens reset time`
-      );
-      key.rateLimitTokensReset = getResetDurationMillis(tokensReset);
-    }
-
-    if (!requestsReset && !tokensReset) {
-      this.log.warn(
-        { key: key.hash },
-        `No rate limit headers in OpenAI response; skipping update`
-      );
-      return;
-    }
-  }
-
-  /** Returns the remaining aggregate quota for all keys as a percentage. */
-  public remainingQuota({ gpt4 }: { gpt4: boolean } = { gpt4: false }): number {
-    const keys = this.keys.filter((k) => k.isGpt4 === gpt4);
-    if (keys.length === 0) return 0;
-
-    const totalUsage = keys.reduce((acc, key) => {
-      // Keys can slightly exceed their quota
-      return acc + Math.min(key.usage, key.hardLimit);
-    }, 0);
-    const totalLimit = keys.reduce((acc, { hardLimit }) => acc + hardLimit, 0);
-
-    return 1 - totalUsage / totalLimit;
-  }
-
-  /** Returns used and available usage in USD. */
-  public usageInUsd({ gpt4 }: { gpt4: boolean } = { gpt4: false }): string {
-    const keys = this.keys.filter((k) => k.isGpt4 === gpt4);
-    if (keys.length === 0) return "???";
-
-    const totalHardLimit = keys.reduce(
-      (acc, { hardLimit }) => acc + hardLimit,
-      0
-    );
-    const totalUsage = keys.reduce((acc, key) => {
-      // Keys can slightly exceed their quota
-      return acc + Math.min(key.usage, key.hardLimit);
-    }, 0);
-
-    return `$${totalUsage.toFixed(2)} / $${totalHardLimit.toFixed(2)}`;
-  }
-
-  /** Writes key status to disk. */
-  // public writeKeyStatus() {
-  //   const keys = this.keys.map((key) => ({
-  //     key: key.key,
-  //     isGpt4: key.isGpt4,
-  //     usage: key.usage,
-  //     hardLimit: key.hardLimit,
-  //     isDisabled: key.isDisabled,
-  //   }));
-  //   fs.writeFileSync(
-  //     path.join(__dirname, "..", "keys.json"),
-  //     JSON.stringify(keys, null, 2)
-  //   );
-  // }
-}
-
-/**
- * Converts reset string ("21.0032s" or "21ms") to a number of milliseconds.
- * Result is clamped to 10s even though the API returns up to 60s, because the
- * API returns the time until the entire quota is reset, even if a key may be
- * able to fulfill requests before then due to partial resets.
- **/
-function getResetDurationMillis(resetDuration?: string): number {
-  const match = resetDuration?.match(/(\d+(\.\d+)?)(s|ms)/);
-  if (match) {
-    const [, time, , unit] = match;
-    const value = parseFloat(time);
-    const result = unit === "s" ? value * 1000 : value;
-    return Math.min(result, 10000);
-  }
-  return 0;
-}
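To make the clamp in `getResetDurationMillis` concrete, a few worked inputs through a condensed restatement of the same regex and `Math.min`:

    // Same regex and 10s clamp as the deleted function above, condensed.
    function getResetDurationMillis(resetDuration?: string): number {
      const match = resetDuration?.match(/(\d+(\.\d+)?)(s|ms)/);
      if (!match) return 0;
      const [, time, , unit] = match;
      const value = parseFloat(time);
      return Math.min(unit === "s" ? value * 1000 : value, 10000);
    }

    console.log(getResetDurationMillis("21.0032s")); // 10000 (clamped from 21003.2)
    console.log(getResetDurationMillis("450ms"));    // 450
    console.log(getResetDurationMillis(undefined));  // 0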
@@ -1,6 +1,20 @@
 import pino from "pino";
 import { config } from "./config";

+const transport =
+  process.env.NODE_ENV === "production"
+    ? undefined
+    : {
+        target: "pino-pretty",
+        options: {
+          singleLine: true,
+          messageFormat: "{if module}\x1b[90m[{module}] \x1b[39m{end}{msg}",
+          ignore: "module",
+        },
+      };
+
 export const logger = pino({
   level: config.logLevel,
+  base: { pid: process.pid, module: "server" },
+  transport,
 });
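The new `base` binding gives every log line a default `module: "server"`, and submodules override it with `logger.child`; in development, pino-pretty's `messageFormat` renders the binding as a dim `[module]` prefix while `ignore: "module"` keeps the raw field out of the pretty output. A short sketch of the intended usage, mirroring how the key checker tags its logs:

    import { logger } from "./logger";

    // Overrides the default `module: "server"` binding for this subsystem.
    // In dev, pino-pretty prints e.g. "[key-checker] Starting key checker..."
    const log = logger.child({ module: "key-checker" });
    log.info("Starting key checker...");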
@@ -1,5 +1,4 @@
 import { Request, RequestHandler, Router } from "express";
-import * as http from "http";
 import { createProxyMiddleware } from "http-proxy-middleware";
 import { config } from "../config";
 import { logger } from "../logger";
@@ -11,8 +10,7 @@ import {
   addAnthropicPreamble,
   createPreprocessorMiddleware,
   finalizeBody,
-  languageFilter,
-  limitOutputTokens,
+  createOnProxyReqHandler,
 } from "./middleware/request";
 import {
   ProxyResHandlerWithBody,
@@ -41,6 +39,9 @@ const getModelsResponse = () => {
     "claude-instant-v1.1",
     "claude-instant-v1.1-100k",
     "claude-instant-v1.0",
+    "claude-2",
+    "claude-2.0",
+    "claude-2.1",
   ];

   const models = claudeVariants.map((id) => ({
@@ -63,29 +64,6 @@ const handleModelRequest: RequestHandler = (_req, res) => {
   res.status(200).json(getModelsResponse());
 };

-const rewriteAnthropicRequest = (
-  proxyReq: http.ClientRequest,
-  req: Request,
-  res: http.ServerResponse
-) => {
-  const rewriterPipeline = [
-    addKey,
-    addAnthropicPreamble,
-    languageFilter,
-    limitOutputTokens,
-    finalizeBody,
-  ];
-
-  try {
-    for (const rewriter of rewriterPipeline) {
-      rewriter(proxyReq, req, res, {});
-    }
-  } catch (error) {
-    req.log.error(error, "Error while executing proxy rewriter");
-    proxyReq.destroy(error as Error);
-  }
-};
-
 /** Only used for non-streaming requests. */
 const anthropicResponseHandler: ProxyResHandlerWithBody = async (
   _proxyRes,
@@ -102,10 +80,15 @@ const anthropicResponseHandler: ProxyResHandlerWithBody = async (
     body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
   }

-  if (!req.originalUrl.includes("/v1/complete")) {
+  if (req.inboundApi === "openai") {
     req.log.info("Transforming Anthropic response to OpenAI format");
-    body = transformAnthropicResponse(body);
+    body = transformAnthropicResponse(body, req);
   }

+  if (req.tokenizerInfo) {
+    body.proxy_tokenizer = req.tokenizerInfo;
+  }
+
   res.status(200).json(body);
 };

@@ -116,17 +99,19 @@ const anthropicResponseHandler: ProxyResHandlerWithBody = async (
  * on-the-fly.
  */
 function transformAnthropicResponse(
-  anthropicBody: Record<string, any>
+  anthropicBody: Record<string, any>,
+  req: Request
 ): Record<string, any> {
+  const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
   return {
     id: "ant-" + anthropicBody.log_id,
     object: "chat.completion",
     created: Date.now(),
     model: anthropicBody.model,
     usage: {
-      prompt_tokens: 0,
-      completion_tokens: 0,
-      total_tokens: 0,
+      prompt_tokens: req.promptTokens,
+      completion_tokens: req.outputTokens,
+      total_tokens: totalTokens,
     },
     choices: [
       {
@@ -141,54 +126,63 @@ function transformAnthropicResponse(
   };
 }

-const anthropicProxy = createQueueMiddleware(
-  createProxyMiddleware({
+const anthropicProxy = createQueueMiddleware({
+  proxyMiddleware: createProxyMiddleware({
     target: "https://api.anthropic.com",
     changeOrigin: true,
+    selfHandleResponse: true,
+    logger,
     on: {
-      proxyReq: rewriteAnthropicRequest,
+      proxyReq: createOnProxyReqHandler({
+        pipeline: [addKey, addAnthropicPreamble, finalizeBody],
+      }),
       proxyRes: createOnProxyResHandler([anthropicResponseHandler]),
       error: handleProxyError,
     },
-    selfHandleResponse: true,
-    logger,
     pathRewrite: {
       // Send OpenAI-compat requests to the real Anthropic endpoint.
       "^/v1/chat/completions": "/v1/complete",
     },
-  })
-);
+  }),
+});

 const anthropicRouter = Router();
-// Fix paths because clients don't consistently use the /v1 prefix.
-anthropicRouter.use((req, _res, next) => {
-  if (!req.path.startsWith("/v1/")) {
-    req.url = `/v1${req.url}`;
-  }
-  next();
-});
 anthropicRouter.get("/v1/models", handleModelRequest);
+// Native Anthropic chat completion endpoint.
 anthropicRouter.post(
   "/v1/complete",
   ipLimiter,
-  createPreprocessorMiddleware({ inApi: "anthropic", outApi: "anthropic" }),
+  createPreprocessorMiddleware({
+    inApi: "anthropic",
+    outApi: "anthropic",
+    service: "anthropic",
+  }),
   anthropicProxy
 );
 // OpenAI-to-Anthropic compatibility endpoint.
 anthropicRouter.post(
   "/v1/chat/completions",
   ipLimiter,
-  createPreprocessorMiddleware({ inApi: "openai", outApi: "anthropic" }),
+  createPreprocessorMiddleware(
+    { inApi: "openai", outApi: "anthropic", service: "anthropic" },
+    { afterTransform: [maybeReassignModel] }
+  ),
   anthropicProxy
 );
-// Redirect browser requests to the homepage.
-anthropicRouter.get("*", (req, res, next) => {
-  const isBrowser = req.headers["user-agent"]?.includes("Mozilla");
-  if (isBrowser) {
-    res.redirect("/");
-  } else {
-    next();
-  }
-});
+
+function maybeReassignModel(req: Request) {
+  const model = req.body.model;
+  if (!model.startsWith("gpt-")) return;
+
+  const bigModel = process.env.CLAUDE_BIG_MODEL || "claude-v1-100k";
+  const contextSize = req.promptTokens! + req.outputTokens!;
+  if (contextSize > 8500) {
+    req.log.debug(
+      { model: bigModel, contextSize },
+      "Using Claude 100k model for OpenAI-to-Anthropic request"
+    );
+    req.body.model = bigModel;
+  }
+}

 export const anthropic = anthropicRouter;
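The practical upshot of this router for clients: one proxy origin serves both Anthropic-native `/v1/complete` bodies and OpenAI-style `/v1/chat/completions` bodies, with `maybeReassignModel` quietly promoting oversized OpenAI-format prompts to the 100k model. A hypothetical client call against the compatibility route; the base URL and mount path are placeholders, not taken from this diff:

    // Hypothetical client of the OpenAI-to-Anthropic compatibility endpoint.
    const PROXY_BASE = "http://localhost:7860/proxy/anthropic"; // placeholder

    async function main() {
      const res = await fetch(`${PROXY_BASE}/v1/chat/completions`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          model: "gpt-3.5-turbo", // rewritten to a Claude model by the proxy
          messages: [{ role: "user", content: "Hello" }],
        }),
      });
      // Response is OpenAI-shaped, built by transformAnthropicResponse.
      console.log(await res.json());
    }

    main().catch(console.error);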
@@ -1,211 +0,0 @@
-/**
- * Basic user management. Handles creation and tracking of proxy users, personal
- * access tokens, and quota management. Supports in-memory and Firebase Realtime
- * Database persistence stores.
- *
- * Users are identified solely by their personal access token. The token is
- * used to authenticate the user for all proxied requests.
- */
-
-import admin from "firebase-admin";
-import { v4 as uuid } from "uuid";
-import { config, getFirebaseApp } from "../../config";
-import { logger } from "../../logger";
-
-export interface User {
-  /** The user's personal access token. */
-  token: string;
-  /** The IP addresses the user has connected from. */
-  ip: string[];
-  /** The user's privilege level. */
-  type: UserType;
-  /** The number of prompts the user has made. */
-  promptCount: number;
-  /** The number of tokens the user has consumed. Not yet implemented. */
-  tokenCount: number;
-  /** The time at which the user was created. */
-  createdAt: number;
-  /** The time at which the user last connected. */
-  lastUsedAt?: number;
-  /** The time at which the user was disabled, if applicable. */
-  disabledAt?: number;
-  /** The reason for which the user was disabled, if applicable. */
-  disabledReason?: string;
-}
-
-/**
- * Possible privilege levels for a user.
- * - `normal`: Default role. Subject to usual rate limits and quotas.
- * - `special`: Special role. Higher quotas and exempt from auto-ban/lockout.
- *   TODO: implement auto-ban/lockout for normal users when they do naughty shit
- */
-export type UserType = "normal" | "special";
-
-type UserUpdate = Partial<User> & Pick<User, "token">;
-
-const MAX_IPS_PER_USER = config.maxIpsPerUser;
-
-const users: Map<string, User> = new Map();
-const usersToFlush = new Set<string>();
-
-export async function init() {
-  logger.info({ store: config.gatekeeperStore }, "Initializing user store...");
-  if (config.gatekeeperStore === "firebase_rtdb") {
-    await initFirebase();
-  }
-  logger.info("User store initialized.");
-}
-
-/** Creates a new user and returns their token. */
-export function createUser() {
-  const token = uuid();
-  users.set(token, {
-    token,
-    ip: [],
-    type: "normal",
-    promptCount: 0,
-    tokenCount: 0,
-    createdAt: Date.now(),
-  });
-  usersToFlush.add(token);
-  return token;
-}
-
-/** Returns the user with the given token if they exist. */
-export function getUser(token: string) {
-  return users.get(token);
-}
-
-/** Returns a list of all users. */
-export function getUsers() {
-  return Array.from(users.values()).map((user) => ({ ...user }));
-}
-
-/**
- * Upserts the given user. Intended for use with the /admin API for updating
- * user information via JSON. Use other functions for more specific operations.
- */
-export function upsertUser(user: UserUpdate) {
-  const existing: User = users.get(user.token) ?? {
-    token: user.token,
-    ip: [],
-    type: "normal",
-    promptCount: 0,
-    tokenCount: 0,
-    createdAt: Date.now(),
-  };
-
-  users.set(user.token, {
-    ...existing,
-    ...user,
-  });
-  usersToFlush.add(user.token);
-
-  // Immediately schedule a flush to the database if we're using Firebase.
-  if (config.gatekeeperStore === "firebase_rtdb") {
-    setImmediate(flushUsers);
-  }
-
-  return users.get(user.token);
-}
-
-/** Increments the prompt count for the given user. */
-export function incrementPromptCount(token: string) {
-  const user = users.get(token);
-  if (!user) return;
-  user.promptCount++;
-  usersToFlush.add(token);
-}
-
-/** Increments the token count for the given user by the given amount. */
-export function incrementTokenCount(token: string, amount = 1) {
-  const user = users.get(token);
-  if (!user) return;
-  user.tokenCount += amount;
-  usersToFlush.add(token);
-}
-
-/**
- * Given a user's token and IP address, authenticates the user and adds the IP
- * to the user's list of IPs. Returns the user if they exist and are not
- * disabled, otherwise returns undefined.
- */
-export function authenticate(token: string, ip: string) {
-  const user = users.get(token);
-  if (!user || user.disabledAt) return;
-  if (!user.ip.includes(ip)) user.ip.push(ip);
-
-  // If too many IPs are associated with the user, disable the account.
-  const ipLimit =
-    user.type === "special" || !MAX_IPS_PER_USER ? Infinity : MAX_IPS_PER_USER;
-  if (user.ip.length > ipLimit) {
-    disableUser(token, "Too many IP addresses associated with this token.");
-    return;
-  }
-
-  user.lastUsedAt = Date.now();
-  usersToFlush.add(token);
-  return user;
-}
-
-/** Disables the given user, optionally providing a reason. */
-export function disableUser(token: string, reason?: string) {
-  const user = users.get(token);
-  if (!user) return;
-  user.disabledAt = Date.now();
-  user.disabledReason = reason;
-  usersToFlush.add(token);
-}
-
-// TODO: Firebase persistence is pretend right now and just polls the in-memory
-// store to sync it with Firebase when it changes. Will refactor to abstract
-// persistence layer later so we can support multiple stores.
-let firebaseTimeout: NodeJS.Timeout | undefined;
-
-async function initFirebase() {
-  logger.info("Connecting to Firebase...");
-  const app = getFirebaseApp();
-  const db = admin.database(app);
-  const usersRef = db.ref("users");
-  const snapshot = await usersRef.once("value");
-  const users: Record<string, User> | null = snapshot.val();
-  firebaseTimeout = setInterval(flushUsers, 20 * 1000);
-  if (!users) {
-    logger.info("No users found in Firebase.");
-    return;
-  }
-  for (const token in users) {
-    upsertUser(users[token]);
-  }
-  usersToFlush.clear();
-  const numUsers = Object.keys(users).length;
-  logger.info({ users: numUsers }, "Loaded users from Firebase");
-}
-
-async function flushUsers() {
-  const app = getFirebaseApp();
-  const db = admin.database(app);
-  const usersRef = db.ref("users");
-  const updates: Record<string, User> = {};
-
-  for (const token of usersToFlush) {
-    const user = users.get(token);
-    if (!user) {
-      continue;
-    }
-    updates[token] = user;
-  }
-
-  usersToFlush.clear();
-
-  const numUpdates = Object.keys(updates).length;
-  if (numUpdates === 0) {
-    return;
-  }
-
-  await usersRef.update(updates);
-  logger.info(
-    { users: Object.keys(updates).length },
-    "Flushed users to Firebase"
-  );
-}
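The `authenticate` guard in the deleted store boils down to one predicate: special users are exempt, an unset cap means unlimited, and anyone else is disabled once their distinct-IP list exceeds the cap. That predicate in isolation, with an illustrative signature:

    type UserType = "normal" | "special";

    // True when the user's distinct IP count exceeds the configured cap.
    // A falsy cap (0/undefined) disables the limit entirely.
    function exceedsIpLimit(
      type: UserType,
      ipCount: number,
      maxIpsPerUser?: number
    ): boolean {
      const limit =
        type === "special" || !maxIpsPerUser ? Infinity : maxIpsPerUser;
      return ipCount > limit;
    }

    console.log(exceedsIpLimit("normal", 5, 3));   // true -> account disabled
    console.log(exceedsIpLimit("special", 50, 3)); // false, special is exempt
    console.log(exceedsIpLimit("normal", 50, 0));  // false, cap disabled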
@@ -0,0 +1,218 @@
import { Request, RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
  createPreprocessorMiddleware,
  signAwsRequest,
  finalizeSignedRequest,
  createOnProxyReqHandler,
} from "./middleware/request";
import {
  ProxyResHandlerWithBody,
  createOnProxyResHandler,
} from "./middleware/response";

const LATEST_AWS_V2_MINOR_VERSION = "1";

let modelsCache: any = null;
let modelsCacheTime = 0;

const getModelsResponse = () => {
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return modelsCache;
  }

  if (!config.awsCredentials) return { object: "list", data: [] };

  const variants = [
    "anthropic.claude-v1",
    "anthropic.claude-v2",
    "anthropic.claude-v2:1",
  ];

  const models = variants.map((id) => ({
    id,
    object: "model",
    created: new Date().getTime(),
    owned_by: "anthropic",
    permission: [],
    root: "claude",
    parent: null,
  }));

  modelsCache = { object: "list", data: models };
  modelsCacheTime = new Date().getTime();

  return modelsCache;
};

const handleModelRequest: RequestHandler = (_req, res) => {
  res.status(200).json(getModelsResponse());
};

/** Only used for non-streaming requests. */
const awsResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  if (config.promptLogging) {
    const host = req.get("host");
    body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
  }

  if (req.inboundApi === "openai") {
    req.log.info("Transforming AWS Claude response to OpenAI format");
    body = transformAwsResponse(body, req);
  }

  if (req.tokenizerInfo) {
    body.proxy_tokenizer = req.tokenizerInfo;
  }

  // AWS does not confirm the model in the response, so we have to add it
  body.model = req.body.model;

  res.status(200).json(body);
};

/**
 * Transforms a model response from the Anthropic API to match those from the
 * OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
 * is only used for non-streaming requests as streaming requests are handled
 * on-the-fly.
 */
function transformAwsResponse(
  awsBody: Record<string, any>,
  req: Request
): Record<string, any> {
  const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
  return {
    id: "aws-" + v4(),
    object: "chat.completion",
    created: Date.now(),
    model: req.body.model,
    usage: {
      prompt_tokens: req.promptTokens,
      completion_tokens: req.outputTokens,
      total_tokens: totalTokens,
    },
    choices: [
      {
        message: {
          role: "assistant",
          content: awsBody.completion?.trim(),
        },
        finish_reason: awsBody.stop_reason,
        index: 0,
      },
    ],
  };
}

const awsProxy = createQueueMiddleware({
  beforeProxy: signAwsRequest,
  proxyMiddleware: createProxyMiddleware({
    target: "bad-target-will-be-rewritten",
    router: ({ signedRequest }) => {
      if (!signedRequest) throw new Error("Must sign request before proxying");
      return `${signedRequest.protocol}//${signedRequest.hostname}`;
    },
    changeOrigin: true,
    selfHandleResponse: true,
    logger,
    on: {
      proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
      proxyRes: createOnProxyResHandler([awsResponseHandler]),
      error: handleProxyError,
    },
  }),
});

const awsRouter = Router();
awsRouter.get("/v1/models", handleModelRequest);
// Native(ish) Anthropic chat completion endpoint.
awsRouter.post(
  "/v1/complete",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "anthropic", outApi: "anthropic", service: "aws" },
    { afterTransform: [maybeReassignModel] }
  ),
  awsProxy
);
// OpenAI-to-AWS Anthropic compatibility endpoint.
awsRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "anthropic", service: "aws" },
    { afterTransform: [maybeReassignModel] }
  ),
  awsProxy
);

/**
 * Tries to deal with:
 * - frontends sending AWS model names even when they want to use the OpenAI-
 *   compatible endpoint
 * - frontends sending Anthropic model names that AWS doesn't recognize
 * - frontends sending OpenAI model names because they expect the proxy to
 *   translate them
 */
function maybeReassignModel(req: Request) {
  const model = req.body.model;

  // If client already specified an AWS Claude model ID, use it
  if (model.includes("anthropic.claude")) {
    return;
  }

  const pattern = /^(claude-)?(instant-)?(v)?(\d+)(\.(\d+))?(-\d+k)?$/i;
  const match = model.match(pattern);

  // If there's no match, return the latest v2 model
  if (!match) {
    req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
    return;
  }

  const [, , instant, , major, , minor] = match;

  if (instant) {
    req.body.model = "anthropic.claude-instant-v1";
    return;
  }

  // There's only one v1 model
  if (major === "1") {
    req.body.model = "anthropic.claude-v1";
    return;
  }

  // Try to map Anthropic API v2 models to AWS v2 models
  if (major === "2") {
    if (minor === "0") {
      req.body.model = "anthropic.claude-v2";
      return;
    }
    req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
    return;
  }

  // Fallback to latest v2 model
  req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
  return;
}

export const aws = awsRouter;
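For reference, a few inputs and the Bedrock IDs `maybeReassignModel` resolves them to, traced from the regex and branches above (the example inputs are illustrative, not an exhaustive list):

```ts
// Traced from the branches above:
const examples: Record<string, string> = {
  "anthropic.claude-v2:1": "anthropic.claude-v2:1", // already an AWS ID; unchanged
  "claude-instant-v1": "anthropic.claude-instant-v1",
  "claude-1.3": "anthropic.claude-v1", // only one v1 model
  "claude-2.0": "anthropic.claude-v2",
  "claude-2": "anthropic.claude-v2:1", // no minor given -> latest v2 minor
  "gpt-4": "anthropic.claude-v2:1", // no regex match -> latest v2 fallback
};
```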
@@ -0,0 +1,128 @@
import { RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { keyPool } from "../shared/key-management";
import {
  ModelFamily,
  AzureOpenAIModelFamily,
  getAzureOpenAIModelFamily,
} from "../shared/models";
import { logger } from "../logger";
import { KNOWN_OPENAI_MODELS } from "./openai";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
  addAzureKey,
  createOnProxyReqHandler,
  createPreprocessorMiddleware,
  finalizeSignedRequest,
} from "./middleware/request";
import {
  createOnProxyResHandler,
  ProxyResHandlerWithBody,
} from "./middleware/response";

let modelsCache: any = null;
let modelsCacheTime = 0;

function getModelsResponse() {
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return modelsCache;
  }

  let available = new Set<AzureOpenAIModelFamily>();
  for (const key of keyPool.list()) {
    if (key.isDisabled || key.service !== "azure") continue;
    key.modelFamilies.forEach((family) =>
      available.add(family as AzureOpenAIModelFamily)
    );
  }
  const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
  available = new Set([...available].filter((x) => allowed.has(x)));

  const models = KNOWN_OPENAI_MODELS.map((id) => ({
    id,
    object: "model",
    created: new Date().getTime(),
    owned_by: "azure",
    permission: [
      {
        id: "modelperm-" + id,
        object: "model_permission",
        created: new Date().getTime(),
        organization: "*",
        group: null,
        is_blocking: false,
      },
    ],
    root: id,
    parent: null,
  })).filter((model) => available.has(getAzureOpenAIModelFamily(model.id)));

  modelsCache = { object: "list", data: models };
  modelsCacheTime = new Date().getTime();

  return modelsCache;
}

const handleModelRequest: RequestHandler = (_req, res) => {
  res.status(200).json(getModelsResponse());
};

const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  if (config.promptLogging) {
    const host = req.get("host");
    body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
  }

  if (req.tokenizerInfo) {
    body.proxy_tokenizer = req.tokenizerInfo;
  }

  res.status(200).json(body);
};

const azureOpenAIProxy = createQueueMiddleware({
  beforeProxy: addAzureKey,
  proxyMiddleware: createProxyMiddleware({
    target: "will be set by router",
    router: (req) => {
      if (!req.signedRequest) throw new Error("signedRequest not set");
      const { hostname, path } = req.signedRequest;
      return `https://${hostname}${path}`;
    },
    changeOrigin: true,
    selfHandleResponse: true,
    logger,
    on: {
      proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
      proxyRes: createOnProxyResHandler([azureOpenaiResponseHandler]),
      error: handleProxyError,
    },
  }),
});

const azureOpenAIRouter = Router();
azureOpenAIRouter.get("/v1/models", handleModelRequest);
azureOpenAIRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware({
    inApi: "openai",
    outApi: "openai",
    service: "azure",
  }),
  azureOpenAIProxy
);

export const azure = azureOpenAIRouter;
@@ -0,0 +1,106 @@
/**
 * Authenticates RisuAI.xyz users using a special x-risu-tk header provided by
 * RisuAI.xyz. This lets us rate limit and limit queue concurrency properly,
 * since otherwise RisuAI.xyz users share the same IP address and can't be
 * distinguished.
 * Contributors: @kwaroran
 */
import crypto from "crypto";
import { Request, Response, NextFunction } from "express";
import { logger } from "../logger";

const log = logger.child({ module: "check-risu-token" });

const RISUAI_PUBLIC_KEY = `
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArEXBmHQfy/YdNIu9lfNC
xHbVwb2aYx07pBEmqQJtvVEOISj80fASxg+cMJH+/0a/Z4gQgzUJl0HszRpMXAfu
wmRoetedyC/6CLraHke0Qad/AEHAKwG9A+NwsHRv/cDfP8euAr20cnOyVa79bZsl
1wlHYQQGo+ve+P/FXtjLGJ/KZYr479F5jkIRKZxPE8mRmkhAVS/u+18QM94BzfoI
0LlbwvvCHe18QSX6viDK+HsqhhyYDh+0FgGNJw6xKYLdExbQt77FSukH7NaJmVAs
kYuIJbnAGw5Oq0L6dXFW2DFwlcLz51kPVOmDc159FsQjyuPnta7NiZAANS8KM1CJ
pwIDAQAB`;
let IMPORTED_RISU_KEY: CryptoKey | null = null;

type RisuToken = { id: string; expiresIn: number };
type SignedToken = { data: RisuToken; sig: string };

(async () => {
  try {
    log.debug("Importing Risu public key");
    IMPORTED_RISU_KEY = await crypto.subtle.importKey(
      "spki",
      Buffer.from(RISUAI_PUBLIC_KEY.replace(/\s/g, ""), "base64"),
      { name: "RSASSA-PKCS1-v1_5", hash: "SHA-256" },
      true,
      ["verify"]
    );
    log.debug("Imported Risu public key");
  } catch (err) {
    log.warn({ error: err.message }, "Error importing Risu public key");
    IMPORTED_RISU_KEY = null;
  }
})();

export async function checkRisuToken(
  req: Request,
  _res: Response,
  next: NextFunction
) {
  let header = req.header("x-risu-tk") || null;
  if (!header || !IMPORTED_RISU_KEY) {
    return next();
  }

  try {
    const { valid, data } = await validCheck(header);

    if (!valid || !data) {
      req.log.warn(
        { token: header, data },
        "Invalid RisuAI token; using IP instead"
      );
    } else {
      req.log.info("RisuAI token validated");
      req.risuToken = String(data.id);
    }
  } catch (err) {
    req.log.warn(
      { error: err.message },
      "Error validating RisuAI token; using IP instead"
    );
  }

  next();
}

async function validCheck(header: string) {
  let tk: SignedToken;
  try {
    tk = JSON.parse(
      Buffer.from(decodeURIComponent(header), "base64").toString("utf-8")
    );
  } catch (err) {
    log.warn({ error: err.message }, "Provided unparseable RisuAI token");
    return { valid: false };
  }
  const data: RisuToken = tk.data;
  const sig = Buffer.from(tk.sig, "base64");

  if (data.expiresIn < Math.floor(Date.now() / 1000)) {
    log.warn({ token: header }, "Provided expired RisuAI token");
    return { valid: false };
  }

  const valid = await crypto.subtle.verify(
    { name: "RSASSA-PKCS1-v1_5" },
    IMPORTED_RISU_KEY!,
    sig,
    Buffer.from(JSON.stringify(data))
  );

  if (!valid) {
    log.warn({ token: header }, "RisuAI token failed signature check");
  }

  return { valid, data };
}
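A minimal sketch of how this middleware might be mounted, assuming an Express `app` and that it runs before the rate limiter so `req.risuToken` is available there; the actual mounting point is not shown in this diff:

```ts
import express from "express";
import { checkRisuToken } from "./check-risu-token"; // path assumed for illustration

const app = express();
// Runs on every request; falls through to IP-based identification when the
// header is absent or fails validation.
app.use(checkRisuToken);
```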
@@ -1,6 +1,6 @@
 import type { Request, RequestHandler } from "express";
-import { config } from "../../config";
+import { config } from "../config";
-import { authenticate, getUser } from "./user-store";
+import { authenticate, getUser } from "../shared/users/user-store";

 const GATEKEEPER = config.gatekeeper;
 const PROXY_KEY = config.proxyKey;
@@ -33,7 +33,7 @@ export const gatekeeper: RequestHandler = (req, res, next) => {
 // TODO: Generate anonymous users based on IP address for public or proxy_key
 // modes so that all middleware can assume a user of some sort is present.

-  if (token === ADMIN_KEY) {
+  if (ADMIN_KEY && token === ADMIN_KEY) {
     return next();
   }

@@ -46,19 +46,22 @@ export const gatekeeper: RequestHandler = (req, res, next) => {
   }

   if (GATEKEEPER === "user_token" && token) {
-    const user = authenticate(token, req.ip);
-    if (user) {
-      req.user = user;
-      return next();
-    } else {
-      const maybeBannedUser = getUser(token);
-      if (maybeBannedUser?.disabledAt) {
-        return res.status(403).json({
-          error: `Forbidden: ${
-            maybeBannedUser.disabledReason || "Token disabled"
-          }`,
-        });
-      }
-    }
+    const { user, result } = authenticate(token, req.ip);
+    switch (result) {
+      case "success":
+        req.user = user;
+        return next();
+      case "limited":
+        return res.status(403).json({
+          error: `Forbidden: no more IPs can authenticate with this token`,
+        });
+      case "disabled":
+        const bannedUser = getUser(token);
+        if (bannedUser?.disabledAt) {
+          const reason = bannedUser.disabledReason || "Token disabled";
+          return res.status(403).json({ error: `Forbidden: ${reason}` });
+        }
+    }
   }
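The new `authenticate` contract implied by this switch, as a sketch inferred from the cases above; the actual declaration lives in the user store and isn't shown in this hunk:

```ts
// "limited" would mean the token is valid but has hit its per-token IP cap;
// "disabled" that the token exists but has been revoked or banned.
type AuthResult = "success" | "limited" | "disabled";

// User is the store's user record type; the signature is illustrative.
declare function authenticate(
  token: string,
  ip: string
): { user?: User; result: AuthResult };
```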
@@ -1,114 +0,0 @@
/* Pretends to be a KoboldAI API endpoint and translates incoming Kobold
requests to OpenAI API equivalents. */

import { Request, Response, Router } from "express";
import http from "http";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { logger } from "../logger";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
  addKey,
  createPreprocessorMiddleware,
  finalizeBody,
  languageFilter,
  limitOutputTokens,
  transformKoboldPayload,
} from "./middleware/request";
import {
  createOnProxyResHandler,
  ProxyResHandlerWithBody,
} from "./middleware/response";

export const handleModelRequest = (_req: Request, res: Response) => {
  res.status(200).json({ result: "Connected to OpenAI reverse proxy" });
};

export const handleSoftPromptsRequest = (_req: Request, res: Response) => {
  res.status(200).json({ soft_prompts_list: [] });
};

const rewriteRequest = (
  proxyReq: http.ClientRequest,
  req: Request,
  res: Response
) => {
  if (config.queueMode !== "none") {
    const msg = `Queueing is enabled on this proxy instance and is incompatible with the KoboldAI endpoint. Use the OpenAI endpoint instead.`;
    proxyReq.destroy(new Error(msg));
    return;
  }

  req.body.stream = false;
  const rewriterPipeline = [
    addKey,
    transformKoboldPayload,
    languageFilter,
    limitOutputTokens,
    finalizeBody,
  ];

  try {
    for (const rewriter of rewriterPipeline) {
      rewriter(proxyReq, req, res, {});
    }
  } catch (error) {
    logger.error(error, "Error while executing proxy rewriter");
    proxyReq.destroy(error as Error);
  }
};

const koboldResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  const koboldResponse = {
    results: [{ text: body.choices[0].message.content }],
    model: body.model,
    ...(config.promptLogging && {
      proxy_note: `Prompt logging is enabled on this proxy instance. See ${req.get(
        "host"
      )} for more information.`,
    }),
  };

  res.send(JSON.stringify(koboldResponse));
};

const koboldOaiProxy = createProxyMiddleware({
  target: "https://api.openai.com",
  changeOrigin: true,
  pathRewrite: {
    "^/api/v1/generate": "/v1/chat/completions",
  },
  on: {
    proxyReq: rewriteRequest,
    proxyRes: createOnProxyResHandler([koboldResponseHandler]),
    error: handleProxyError,
  },
  selfHandleResponse: true,
  logger,
});

const koboldRouter = Router();
koboldRouter.get("/api/v1/model", handleModelRequest);
koboldRouter.get("/api/v1/config/soft_prompts_list", handleSoftPromptsRequest);
koboldRouter.post(
  "/api/v1/generate",
  ipLimiter,
  createPreprocessorMiddleware({ inApi: "kobold", outApi: "openai" }),
  koboldOaiProxy
);
koboldRouter.use((req, res) => {
  logger.warn(`Unhandled kobold request: ${req.method} ${req.path}`);
  res.status(404).json({ error: "Not found" });
});

export const kobold = koboldRouter;
@@ -1,18 +1,38 @@
 import { Request, Response } from "express";
 import httpProxy from "http-proxy";
 import { ZodError } from "zod";
+import { generateErrorMessage } from "zod-error";
+import { buildFakeSse } from "../../shared/streaming";
+import { assertNever } from "../../shared/utils";
+import { QuotaExceededError } from "./request/preprocessors/apply-quota-limits";

 const OPENAI_CHAT_COMPLETION_ENDPOINT = "/v1/chat/completions";
+const OPENAI_TEXT_COMPLETION_ENDPOINT = "/v1/completions";
+const OPENAI_EMBEDDINGS_ENDPOINT = "/v1/embeddings";
+const OPENAI_IMAGE_COMPLETION_ENDPOINT = "/v1/images/generations";
 const ANTHROPIC_COMPLETION_ENDPOINT = "/v1/complete";

 /** Returns true if we're making a request to a completion endpoint. */
-export function isCompletionRequest(req: Request) {
+export function isTextGenerationRequest(req: Request) {
   return (
     req.method === "POST" &&
-    [OPENAI_CHAT_COMPLETION_ENDPOINT, ANTHROPIC_COMPLETION_ENDPOINT].some(
-      (endpoint) => req.path.startsWith(endpoint)
-    )
+    [
+      OPENAI_CHAT_COMPLETION_ENDPOINT,
+      OPENAI_TEXT_COMPLETION_ENDPOINT,
+      ANTHROPIC_COMPLETION_ENDPOINT,
+    ].some((endpoint) => req.path.startsWith(endpoint))
   );
 }

+export function isImageGenerationRequest(req: Request) {
+  return (
+    req.method === "POST" &&
+    req.path.startsWith(OPENAI_IMAGE_COMPLETION_ENDPOINT)
+  );
+}
+
+export function isEmbeddingsRequest(req: Request) {
+  return (
+    req.method === "POST" && req.path.startsWith(OPENAI_EMBEDDINGS_ENDPOINT)
+  );
+}

@@ -22,7 +42,7 @@ export function writeErrorResponse(
   statusCode: number,
   errorPayload: Record<string, any>
 ) {
-  const errorSource = errorPayload.error?.type.startsWith("proxy")
+  const errorSource = errorPayload.error?.type?.startsWith("proxy")
     ? "proxy"
     : "upstream";
@@ -30,91 +50,157 @@ export function writeErrorResponse(
   // the stream. Otherwise just send a normal error response.
   if (
     res.headersSent ||
-    res.getHeader("content-type") === "text/event-stream"
+    String(res.getHeader("content-type")).startsWith("text/event-stream")
   ) {
-    const msg = buildFakeSseMessage(
-      `${errorSource} error (${statusCode})`,
-      JSON.stringify(errorPayload, null, 2),
-      req
-    );
+    const errorTitle = `${errorSource} error (${statusCode})`;
+    const errorContent = JSON.stringify(errorPayload, null, 2);
+    const msg = buildFakeSse(errorTitle, errorContent, req);
     res.write(msg);
     res.write(`data: [DONE]\n\n`);
     res.end();
   } else {
+    if (req.tokenizerInfo && typeof errorPayload.error === "object") {
+      errorPayload.error.proxy_tokenizer = req.tokenizerInfo;
+    }
     res.status(statusCode).json(errorPayload);
   }
 }

 export const handleProxyError: httpProxy.ErrorCallback = (err, req, res) => {
-  req.log.error({ err }, `Error during proxy request middleware`);
-  handleInternalError(err, req as Request, res as Response);
+  req.log.error(err, `Error during http-proxy-middleware request`);
+  classifyErrorAndSend(err, req as Request, res as Response);
 };

-export const handleInternalError = (
+export const classifyErrorAndSend = (
   err: Error,
   req: Request,
   res: Response
 ) => {
   try {
-    const isZod = err instanceof ZodError;
-    if (isZod) {
-      writeErrorResponse(req, res, 400, {
-        error: {
-          type: "proxy_validation_error",
-          proxy_note: `Reverse proxy couldn't validate your request when trying to transform it. Your client may be sending invalid data.`,
-          issues: err.issues,
-          stack: err.stack,
-          message: err.message,
-        },
-      });
-    } else {
-      writeErrorResponse(req, res, 500, {
-        error: {
-          type: "proxy_rewriter_error",
-          proxy_note: `Reverse proxy encountered an error before it could reach the upstream API.`,
-          message: err.message,
-          stack: err.stack,
-        },
-      });
-    }
-  } catch (e) {
-    req.log.error(
-      { error: e },
-      `Error writing error response headers, giving up.`
-    );
-  }
+    const { status, userMessage, ...errorDetails } = classifyError(err);
+    writeErrorResponse(req, res, status, {
+      error: { message: userMessage, ...errorDetails },
+    });
+  } catch (error) {
+    req.log.error(error, `Error writing error response headers, giving up.`);
+  }
 };

-export function buildFakeSseMessage(
-  type: string,
-  string: string,
-  req: Request
-) {
-  let fakeEvent;
-  if (req.inboundApi === "anthropic") {
-    fakeEvent = {
-      completion: `\`\`\`\n[${type}: ${string}]\n\`\`\`\n`,
-      stop_reason: type,
-      truncated: false, // I've never seen this be true
-      stop: null,
-      model: req.body?.model,
-      log_id: "proxy-req-" + req.id,
-    };
-  } else {
-    fakeEvent = {
-      id: "chatcmpl-" + req.id,
-      object: "chat.completion.chunk",
-      created: Date.now(),
-      model: req.body?.model,
-      choices: [
-        {
-          delta: { content: `\`\`\`\n[${type}: ${string}]\n\`\`\`\n` },
-          index: 0,
-          finish_reason: type,
-        },
-      ],
-    };
-  }
-  return `data: ${JSON.stringify(fakeEvent)}\n\n`;
-}
+function classifyError(err: Error): {
+  /** HTTP status code returned to the client. */
+  status: number;
+  /** Message displayed to the user. */
+  userMessage: string;
+  /** Short error type, e.g. "proxy_validation_error". */
+  type: string;
+} & Record<string, any> {
+  const defaultError = {
+    status: 500,
+    userMessage: `Reverse proxy error: ${err.message}`,
+    type: "proxy_internal_error",
+    stack: err.stack,
+  };
+
+  switch (err.constructor.name) {
+    case "ZodError":
+      const userMessage = generateErrorMessage((err as ZodError).issues, {
+        prefix: "Request validation failed. ",
+        path: { enabled: true, label: null, type: "breadcrumbs" },
+        code: { enabled: false },
+        maxErrors: 3,
+        transform: ({ issue, ...rest }) => {
+          return `At '${rest.pathComponent}': ${issue.message}`;
+        },
+      });
+      return { status: 400, userMessage, type: "proxy_validation_error" };
+    case "ForbiddenError":
+      // Mimics a ban notice from OpenAI, thrown when blockZoomerOrigins blocks
+      // a request.
+      return {
+        status: 403,
+        userMessage: `Your account has been disabled for violating our terms of service.`,
+        type: "organization_account_disabled",
+        code: "policy_violation",
+      };
+    case "QuotaExceededError":
+      return {
+        status: 429,
+        userMessage: `You've exceeded your token quota for this model type.`,
+        type: "proxy_quota_exceeded",
+        info: (err as QuotaExceededError).quotaInfo,
+      };
+    case "Error":
+      if ("code" in err) {
+        switch (err.code) {
+          case "ENOTFOUND":
+            return {
+              status: 502,
+              userMessage: `Reverse proxy encountered a DNS error while trying to connect to the upstream service.`,
+              type: "proxy_network_error",
+              code: err.code,
+            };
+          case "ECONNREFUSED":
+            return {
+              status: 502,
+              userMessage: `Reverse proxy couldn't connect to the upstream service.`,
+              type: "proxy_network_error",
+              code: err.code,
+            };
+          case "ECONNRESET":
+            return {
+              status: 504,
+              userMessage: `Reverse proxy timed out while waiting for the upstream service to respond.`,
+              type: "proxy_network_error",
+              code: err.code,
+            };
+        }
+      }
+      return defaultError;
+    default:
+      return defaultError;
+  }
+}
+
+export function getCompletionFromBody(req: Request, body: Record<string, any>) {
+  const format = req.outboundApi;
+  switch (format) {
+    case "openai":
+      return body.choices[0].message.content;
+    case "openai-text":
+      return body.choices[0].text;
+    case "anthropic":
+      if (!body.completion) {
+        req.log.error(
+          { body: JSON.stringify(body) },
+          "Received empty Anthropic completion"
+        );
+        return "";
+      }
+      return body.completion.trim();
+    case "google-palm":
+      return body.candidates[0].output;
+    case "openai-image":
+      return body.data?.map((item: any) => item.url).join("\n");
+    default:
+      assertNever(format);
+  }
+}
+
+export function getModelFromBody(req: Request, body: Record<string, any>) {
+  const format = req.outboundApi;
+  switch (format) {
+    case "openai":
+    case "openai-text":
+      return body.model;
+    case "openai-image":
+      return req.body.model;
+    case "anthropic":
+      // Anthropic confirms the model in the response, but AWS Claude doesn't.
+      return body.model || req.body.model;
+    case "google-palm":
+      // Google doesn't confirm the model in the response.
+      return req.body.model;
+    default:
+      assertNever(format);
+  }
+}
@@ -1,67 +0,0 @@
import { Key, keyPool } from "../../../key-management";
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";

/** Add a key that can service this request to the request object. */
export const addKey: ProxyRequestMiddleware = (proxyReq, req) => {
  let assignedKey: Key;

  if (!isCompletionRequest(req)) {
    // Horrible, horrible hack to stop the proxy from complaining about clients
    // not sending a model when they are requesting the list of models (which
    // requires a key, but obviously not a model).
    // TODO: shouldn't even proxy /models to the upstream API, just fake it
    // using the models our key pool has available.
    req.body.model = "gpt-3.5-turbo";
  }

  if (!req.inboundApi || !req.outboundApi) {
    const err = new Error(
      "Request API format missing. Did you forget to add the request preprocessor to your router?"
    );
    req.log.error(
      { in: req.inboundApi, out: req.outboundApi, path: req.path },
      err.message
    );
    throw err;
  }

  if (!req.body?.model) {
    throw new Error("You must specify a model with your request.");
  }

  // This should happen somewhere else but addKey is guaranteed to run first.
  req.isStreaming = req.body.stream === true || req.body.stream === "true";
  req.body.stream = req.isStreaming;

  // Anthropic support has a special endpoint that accepts OpenAI-formatted
  // requests and translates them into Anthropic requests. On this endpoint,
  // the requested model is an OpenAI one even though we're actually sending
  // an Anthropic request.
  // For such cases, ignore the requested model entirely.
  if (req.inboundApi === "openai" && req.outboundApi === "anthropic") {
    req.log.debug("Using an Anthropic key for an OpenAI-compatible request");
    // We don't assign the model here, that will happen when transforming the
    // request body.
    assignedKey = keyPool.get("claude-v1");
  } else {
    assignedKey = keyPool.get(req.body.model);
  }

  req.key = assignedKey;
  req.log.info(
    {
      key: assignedKey.hash,
      model: req.body?.model,
      fromApi: req.inboundApi,
      toApi: req.outboundApi,
    },
    "Assigned key to request"
  );

  if (assignedKey.service === "anthropic") {
    proxyReq.setHeader("X-API-Key", assignedKey.key);
  } else {
    proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
  }
};
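To make the classification above concrete, roughly what `classifyErrorAndSend` would emit for two of the cases (a sketch; exact payloads depend on `writeErrorResponse` and the request state):

```ts
// Upstream connection refused (Node network error with code "ECONNREFUSED"):
// HTTP 502 with body:
// {
//   "error": {
//     "message": "Reverse proxy couldn't connect to the upstream service.",
//     "type": "proxy_network_error",
//     "code": "ECONNREFUSED"
//   }
// }
//
// Failed Zod validation: HTTP 400 with type "proxy_validation_error" and a
// userMessage assembled by generateErrorMessage, something like:
// "Request validation failed. At 'max_tokens': Expected number, received string"
```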
@@ -2,19 +2,30 @@ import type { Request } from "express";
 import type { ClientRequest } from "http";
 import type { ProxyReqCallback } from "http-proxy";

-// Express middleware (runs before http-proxy-middleware, can be async)
-export { createPreprocessorMiddleware } from "./preprocess";
-export { setApiFormat } from "./set-api-format";
-export { transformOutboundPayload } from "./transform-outbound-payload";
+export { createOnProxyReqHandler } from "./onproxyreq-factory";
+export {
+  createPreprocessorMiddleware,
+  createEmbeddingsPreprocessorMiddleware,
+} from "./preprocessor-factory";

-// HPM middleware (runs on onProxyReq, cannot be async)
-export { addKey } from "./add-key";
-export { addAnthropicPreamble } from "./add-anthropic-preamble";
-export { finalizeBody } from "./finalize-body";
-export { languageFilter } from "./language-filter";
-export { limitCompletions } from "./limit-completions";
-export { limitOutputTokens } from "./limit-output-tokens";
-export { transformKoboldPayload } from "./transform-kobold-payload";
+// Express middleware (runs before http-proxy-middleware, can be async)
+export { addAzureKey } from "./preprocessors/add-azure-key";
+export { applyQuotaLimits } from "./preprocessors/apply-quota-limits";
+export { validateContextSize } from "./preprocessors/validate-context-size";
+export { countPromptTokens } from "./preprocessors/count-prompt-tokens";
+export { languageFilter } from "./preprocessors/language-filter";
+export { setApiFormat } from "./preprocessors/set-api-format";
+export { signAwsRequest } from "./preprocessors/sign-aws-request";
+export { transformOutboundPayload } from "./preprocessors/transform-outbound-payload";
+
+// http-proxy-middleware callbacks (runs on onProxyReq, cannot be async)
+export { addKey, addKeyForEmbeddingsRequest } from "./onproxyreq/add-key";
+export { addAnthropicPreamble } from "./onproxyreq/add-anthropic-preamble";
+export { blockZoomerOrigins } from "./onproxyreq/block-zoomer-origins";
+export { checkModelFamily } from "./onproxyreq/check-model-family";
+export { finalizeBody } from "./onproxyreq/finalize-body";
+export { finalizeSignedRequest } from "./onproxyreq/finalize-signed-request";
+export { stripHeaders } from "./onproxyreq/strip-headers";

 /**
  * Middleware that runs prior to the request being handled by http-proxy-
@@ -33,7 +44,7 @@ export { transformKoboldPayload } from "./transform-kobold-payload";
 export type RequestPreprocessor = (req: Request) => void | Promise<void>;

 /**
- * Middleware that runs immediately before the request is sent to the API in
+ * Callbacks that run immediately before the request is sent to the API in
  * response to http-proxy-middleware's `proxyReq` event.
  *
  * Async functions cannot be used here as HPM's event emitter is not async and
@@ -43,4 +54,7 @@ export type RequestPreprocessor = (req: Request) => void | Promise<void>;
  * first attempt is rate limited and the request is automatically retried by the
  * request queue middleware.
  */
-export type ProxyRequestMiddleware = ProxyReqCallback<ClientRequest, Request>;
+export type HPMRequestCallback = ProxyReqCallback<ClientRequest, Request>;
+
+export const forceModel = (model: string) => (req: Request) =>
+  void (req.body.model = model);
@@ -1,51 +0,0 @@
import { Request } from "express";
import { config } from "../../../config";
import { logger } from "../../../logger";
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";

const DISALLOWED_REGEX =
  /[\u2E80-\u2E99\u2E9B-\u2EF3\u2F00-\u2FD5\u3005\u3007\u3021-\u3029\u3038-\u303B\u3400-\u4DB5\u4E00-\u9FD5\uF900-\uFA6D\uFA70-\uFAD9]/;

// Our shitty free-tier VMs will fall over if we test every single character in
// each 15k character request ten times a second. So we'll just sample 20% of
// the characters and hope that's enough.
const containsDisallowedCharacters = (text: string) => {
  const sampleSize = Math.ceil(text.length * 0.2);
  const sample = text
    .split("")
    .sort(() => 0.5 - Math.random())
    .slice(0, sampleSize)
    .join("");
  return DISALLOWED_REGEX.test(sample);
};

/** Block requests containing too many disallowed characters. */
export const languageFilter: ProxyRequestMiddleware = (_proxyReq, req) => {
  if (!config.rejectDisallowed) {
    return;
  }

  if (isCompletionRequest(req)) {
    const combinedText = getPromptFromRequest(req);
    if (containsDisallowedCharacters(combinedText)) {
      logger.warn(`Blocked request containing bad characters`);
      _proxyReq.destroy(new Error(config.rejectMessage));
    }
  }
};

function getPromptFromRequest(req: Request) {
  const service = req.outboundApi;
  const body = req.body;
  switch (service) {
    case "anthropic":
      return body.prompt;
    case "openai":
      return body.messages
        .map((m: { content: string }) => m.content)
        .join("\n");
    default:
      throw new Error(`Unknown service: ${service}`);
  }
}
@@ -1,16 +0,0 @@
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";

/**
 * Don't allow multiple completions to be requested to prevent abuse.
 * OpenAI-only, Anthropic provides no such parameter.
 **/
export const limitCompletions: ProxyRequestMiddleware = (_proxyReq, req) => {
  if (isCompletionRequest(req) && req.outboundApi === "openai") {
    const originalN = req.body?.n || 1;
    req.body.n = 1;
    if (originalN !== req.body.n) {
      req.log.warn(`Limiting completion choices from ${originalN} to 1`);
    }
  }
};
@@ -1,46 +0,0 @@
import { Request } from "express";
import { config } from "../../../config";
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";

/** Enforce a maximum number of tokens requested from the model. */
export const limitOutputTokens: ProxyRequestMiddleware = (_proxyReq, req) => {
  // TODO: do all of this shit in the zod validator
  if (isCompletionRequest(req)) {
    const requestedMax = Number.parseInt(getMaxTokensFromRequest(req));
    const apiMax =
      req.outboundApi === "openai"
        ? config.maxOutputTokensOpenAI
        : config.maxOutputTokensAnthropic;
    let maxTokens = requestedMax;

    if (typeof requestedMax !== "number") {
      maxTokens = apiMax;
    }

    maxTokens = Math.min(maxTokens, apiMax);
    if (req.outboundApi === "openai") {
      req.body.max_tokens = maxTokens;
    } else if (req.outboundApi === "anthropic") {
      req.body.max_tokens_to_sample = maxTokens;
    }

    if (requestedMax !== maxTokens) {
      req.log.info(
        { requestedMax, configMax: apiMax, final: maxTokens },
        "Limiting user's requested max output tokens"
      );
    }
  }
};

function getMaxTokensFromRequest(req: Request) {
  switch (req.outboundApi) {
    case "anthropic":
      return req.body?.max_tokens_to_sample;
    case "openai":
      return req.body?.max_tokens;
    default:
      throw new Error(`Unknown service: ${req.outboundApi}`);
  }
}
@@ -0,0 +1,43 @@
import {
  applyQuotaLimits,
  blockZoomerOrigins,
  checkModelFamily,
  HPMRequestCallback,
  stripHeaders,
} from "./index";

type ProxyReqHandlerFactoryOptions = { pipeline: HPMRequestCallback[] };

/**
 * Returns an http-proxy-middleware request handler that runs the given set of
 * onProxyReq callback functions in sequence.
 *
 * These will run each time a request is proxied, including on automatic retries
 * by the queue after encountering a rate limit.
 */
export const createOnProxyReqHandler = ({
  pipeline,
}: ProxyReqHandlerFactoryOptions): HPMRequestCallback => {
  const callbackPipeline = [
    checkModelFamily,
    applyQuotaLimits,
    blockZoomerOrigins,
    stripHeaders,
    ...pipeline,
  ];
  return (proxyReq, req, res, options) => {
    // The streaming flag must be set before any other onProxyReq handler runs,
    // as it may influence the behavior of subsequent handlers.
    // Image generation requests can't be streamed.
    req.isStreaming = req.body.stream === true || req.body.stream === "true";
    req.body.stream = req.isStreaming;

    try {
      for (const fn of callbackPipeline) {
        fn(proxyReq, req, res, options);
      }
    } catch (error) {
      proxyReq.destroy(error);
    }
  };
};
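For a sense of how the factory composes with the callbacks exported above, a sketch of a typical pipeline (each service's router defines its own; the AWS and Azure routers earlier in this diff pass only `finalizeSignedRequest`):

```ts
import { createProxyMiddleware } from "http-proxy-middleware";
import {
  addKey,
  createOnProxyReqHandler,
  finalizeBody,
} from "./middleware/request"; // path assumed for illustration

// checkModelFamily, applyQuotaLimits, blockZoomerOrigins, and stripHeaders are
// always prepended by the factory; the pipeline adds service-specific steps.
const onProxyReq = createOnProxyReqHandler({ pipeline: [addKey, finalizeBody] });

const proxy = createProxyMiddleware({
  target: "https://api.openai.com",
  changeOrigin: true,
  on: { proxyReq: onProxyReq },
});
```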
@@ -1,17 +1,17 @@
-import { AnthropicKey, Key } from "../../../key-management";
+import { AnthropicKey, Key } from "../../../../shared/key-management";
-import { isCompletionRequest } from "../common";
+import { isTextGenerationRequest } from "../../common";
-import { ProxyRequestMiddleware } from ".";
+import { HPMRequestCallback } from "../index";

 /**
  * Some keys require the prompt to start with `\n\nHuman:`. There is no way to
  * know this without trying to send the request and seeing if it fails. If a
  * key is marked as requiring a preamble, it will be added here.
  */
-export const addAnthropicPreamble: ProxyRequestMiddleware = (
+export const addAnthropicPreamble: HPMRequestCallback = (
   _proxyReq,
   req
 ) => {
-  if (!isCompletionRequest(req) || req.key?.service !== "anthropic") {
+  if (!isTextGenerationRequest(req) || req.key?.service !== "anthropic") {
     return;
   }
@@ -0,0 +1,128 @@
import { Key, OpenAIKey, keyPool } from "../../../../shared/key-management";
import { isEmbeddingsRequest } from "../../common";
import { HPMRequestCallback } from "../index";
import { assertNever } from "../../../../shared/utils";

/** Add a key that can service this request to the request object. */
export const addKey: HPMRequestCallback = (proxyReq, req) => {
  let assignedKey: Key;

  if (!req.inboundApi || !req.outboundApi) {
    const err = new Error(
      "Request API format missing. Did you forget to add the request preprocessor to your router?"
    );
    req.log.error(
      { in: req.inboundApi, out: req.outboundApi, path: req.path },
      err.message
    );
    throw err;
  }

  if (!req.body?.model) {
    throw new Error("You must specify a model with your request.");
  }

  if (req.inboundApi === req.outboundApi) {
    assignedKey = keyPool.get(req.body.model);
  } else {
    // If we are translating between API formats we may need to select a model
    // for the user, because the provided model is for the inbound API.
    switch (req.outboundApi) {
      case "anthropic":
        assignedKey = keyPool.get("claude-v1");
        break;
      case "google-palm":
        assignedKey = keyPool.get("text-bison-001");
        delete req.body.stream;
        break;
      case "openai-text":
        assignedKey = keyPool.get("gpt-3.5-turbo-instruct");
        break;
      case "openai":
        throw new Error(
          "OpenAI Chat as an API translation target is not supported"
        );
      case "openai-image":
        assignedKey = keyPool.get("dall-e-3");
        break;
      default:
        assertNever(req.outboundApi);
    }
  }

  req.key = assignedKey;
  req.log.info(
    {
      key: assignedKey.hash,
      model: req.body?.model,
      fromApi: req.inboundApi,
      toApi: req.outboundApi,
    },
    "Assigned key to request"
  );

  // TODO: KeyProvider should assemble all necessary headers
  switch (assignedKey.service) {
    case "anthropic":
      proxyReq.setHeader("X-API-Key", assignedKey.key);
      break;
    case "openai":
      const key: OpenAIKey = assignedKey as OpenAIKey;
      if (key.organizationId) {
        proxyReq.setHeader("OpenAI-Organization", key.organizationId);
      }
      proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
      break;
    case "google-palm":
      const originalPath = proxyReq.path;
      proxyReq.path = originalPath.replace(
        /(\?.*)?$/,
        `?key=${assignedKey.key}`
      );
      break;
    case "azure":
      const azureKey = assignedKey.key;
      proxyReq.setHeader("api-key", azureKey);
      break;
    case "aws":
      throw new Error(
        "add-key should not be used for AWS security credentials. Use sign-aws-request instead."
      );
    default:
      assertNever(assignedKey.service);
  }
};

/**
 * Special case for embeddings requests which don't go through the normal
 * request pipeline.
 */
export const addKeyForEmbeddingsRequest: HPMRequestCallback = (
  proxyReq,
  req
) => {
  if (!isEmbeddingsRequest(req)) {
    throw new Error(
      "addKeyForEmbeddingsRequest called on non-embeddings request"
    );
  }

  if (req.inboundApi !== "openai") {
    throw new Error("Embeddings requests must be from OpenAI");
  }

  req.body = { input: req.body.input, model: "text-embedding-ada-002" };

  const key = keyPool.get("text-embedding-ada-002") as OpenAIKey;

  req.key = key;
  req.log.info(
    { key: key.hash, toApi: req.outboundApi },
    "Assigned Turbo key to embeddings request"
  );

  proxyReq.setHeader("Authorization", `Bearer ${key.key}`);
  if (key.organizationId) {
    proxyReq.setHeader("OpenAI-Organization", key.organizationId);
  }
};
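The per-service credential placement above, summarized straight from the switch (AWS is excluded because its requests are signed elsewhere):

| Service | Credential placement |
|---|---|
| anthropic | `X-API-Key` header |
| openai | `Authorization: Bearer` header, plus `OpenAI-Organization` when set |
| google-palm | `?key=` query parameter appended to the request path |
| azure | `api-key` header |
| aws | not handled here; throws, deferring to `sign-aws-request` |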
@@ -0,0 +1,29 @@
import { HPMRequestCallback } from "../index";

const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai".split(",");

class ForbiddenError extends Error {
  constructor(message: string) {
    super(message);
    this.name = "ForbiddenError";
  }
}

/**
 * Blocks requests from Janitor AI users with a fake, scary error message so I
 * stop getting emails asking for tech support.
 */
export const blockZoomerOrigins: HPMRequestCallback = (_proxyReq, req) => {
  const origin = req.headers.origin || req.headers.referer;
  if (origin && DISALLOWED_ORIGIN_SUBSTRINGS.some((s) => origin.includes(s))) {
    // Venus-derivatives send a test prompt to check if the proxy is working.
    // We don't want to block that just yet.
    if (req.body.messages[0]?.content === "Just say TEST") {
      return;
    }

    throw new ForbiddenError(
      `Your access was terminated due to violation of our policies, please check your email for more information. If you believe this is in error and would like to appeal, please contact us through our help center at help.openai.com.`
    );
  }
};
@@ -0,0 +1,13 @@
import { HPMRequestCallback } from "../index";
import { config } from "../../../../config";
import { getModelFamilyForRequest } from "../../../../shared/models";

/**
 * Ensures the selected model family is enabled by the proxy configuration.
 **/
export const checkModelFamily: HPMRequestCallback = (proxyReq, req) => {
  const family = getModelFamilyForRequest(req);
  if (!config.allowedModelFamilies.includes(family)) {
    throw new Error(`Model family ${family} is not permitted on this proxy`);
  }
};
@@ -1,9 +1,14 @@
 import { fixRequestBody } from "http-proxy-middleware";
-import type { ProxyRequestMiddleware } from ".";
+import type { HPMRequestCallback } from "../index";

 /** Finalize the rewritten request body. Must be the last rewriter. */
-export const finalizeBody: ProxyRequestMiddleware = (proxyReq, req) => {
+export const finalizeBody: HPMRequestCallback = (proxyReq, req) => {
   if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
+    // For image generation requests, remove stream flag.
+    if (req.outboundApi === "openai-image") {
+      delete req.body.stream;
+    }
+
     const updatedBody = JSON.stringify(req.body);
     proxyReq.setHeader("Content-Length", Buffer.byteLength(updatedBody));
     (req as any).rawBody = Buffer.from(updatedBody);
@@ -0,0 +1,26 @@
import type { HPMRequestCallback } from "../index";

/**
 * For AWS/Azure requests, the body is signed earlier in the request pipeline,
 * before the proxy middleware. This function just assigns the path and headers
 * to the proxy request.
 */
export const finalizeSignedRequest: HPMRequestCallback = (proxyReq, req) => {
  if (!req.signedRequest) {
    throw new Error("Expected req.signedRequest to be set");
  }

  // The path depends on the selected model and the assigned key's region.
  proxyReq.path = req.signedRequest.path;

  // Amazon doesn't want extra headers, so we need to remove all of them and
  // reassign only the ones specified in the signed request.
  proxyReq.getRawHeaderNames().forEach(proxyReq.removeHeader.bind(proxyReq));
  Object.entries(req.signedRequest.headers).forEach(([key, value]) => {
    proxyReq.setHeader(key, value);
  });

  // Don't use fixRequestBody here because it adds a content-length header.
  // Amazon doesn't want that and it breaks the signature.
  proxyReq.write(req.signedRequest.body);
};
@@ -0,0 +1,16 @@
import { HPMRequestCallback } from "../index";

/**
 * Removes origin and referer headers before sending the request to the API for
 * privacy reasons.
 **/
export const stripHeaders: HPMRequestCallback = (proxyReq) => {
  proxyReq.setHeader("origin", "");
  proxyReq.setHeader("referer", "");

  proxyReq.removeHeader("cf-connecting-ip");
  proxyReq.removeHeader("forwarded");
  proxyReq.removeHeader("true-client-ip");
  proxyReq.removeHeader("x-forwarded-for");
  proxyReq.removeHeader("x-real-ip");
};
@@ -1,30 +0,0 @@
import { RequestHandler } from "express";
import { handleInternalError } from "../common";
import { RequestPreprocessor, setApiFormat, transformOutboundPayload } from ".";

/**
 * Returns a middleware function that processes the request body into the given
 * API format, and then sequentially runs the given additional preprocessors.
 */
export const createPreprocessorMiddleware = (
  apiFormat: Parameters<typeof setApiFormat>[0],
  additionalPreprocessors?: RequestPreprocessor[]
): RequestHandler => {
  const preprocessors: RequestPreprocessor[] = [
    setApiFormat(apiFormat),
    transformOutboundPayload,
    ...(additionalPreprocessors ?? []),
  ];

  return async function executePreprocessors(req, res, next) {
    try {
      for (const preprocessor of preprocessors) {
        await preprocessor(req);
      }
      next();
    } catch (error) {
      req.log.error(error, "Error while executing request preprocessor");
      handleInternalError(error as Error, req, res);
    }
  };
};
@@ -0,0 +1,101 @@
+import { RequestHandler } from "express";
+import { initializeSseStream } from "../../../shared/streaming";
+import { classifyErrorAndSend } from "../common";
+import {
+  RequestPreprocessor,
+  validateContextSize,
+  countPromptTokens,
+  setApiFormat,
+  transformOutboundPayload,
+  languageFilter,
+} from ".";
+import { ZodIssue } from "zod";
+
+type RequestPreprocessorOptions = {
+  /**
+   * Functions to run before the request body is transformed between API
+   * formats. Use this to change the behavior of the transformation, such as for
+   * endpoints which can accept multiple API formats.
+   */
+  beforeTransform?: RequestPreprocessor[];
+  /**
+   * Functions to run after the request body is transformed and token counts are
+   * assigned. Use this to perform validation or other actions that depend on
+   * the request body being in the final API format.
+   */
+  afterTransform?: RequestPreprocessor[];
+};
+
+/**
+ * Returns a middleware function that processes the request body into the given
+ * API format, and then sequentially runs the given additional preprocessors.
+ *
+ * These run first in the request lifecycle, a single time per request before it
+ * is added to the request queue. They aren't run again if the request is
+ * re-attempted after a rate limit.
+ *
+ * To run a preprocessor on every re-attempt, pass it to createQueueMiddleware.
+ * It will run after these preprocessors, but before the request is sent to
+ * http-proxy-middleware.
+ */
+export const createPreprocessorMiddleware = (
+  apiFormat: Parameters<typeof setApiFormat>[0],
+  { beforeTransform, afterTransform }: RequestPreprocessorOptions = {}
+): RequestHandler => {
+  const preprocessors: RequestPreprocessor[] = [
+    setApiFormat(apiFormat),
+    ...(beforeTransform ?? []),
+    transformOutboundPayload,
+    countPromptTokens,
+    languageFilter,
+    ...(afterTransform ?? []),
+    validateContextSize,
+  ];
+  return async (...args) => executePreprocessors(preprocessors, args);
+};
+
+/**
+ * Returns a middleware function that specifically prepares requests for
+ * OpenAI's embeddings API. Tokens are not counted because embeddings requests
+ * are basically free.
+ */
+export const createEmbeddingsPreprocessorMiddleware = (): RequestHandler => {
+  const preprocessors: RequestPreprocessor[] = [
+    setApiFormat({ inApi: "openai", outApi: "openai", service: "openai" }),
+    (req) => void (req.promptTokens = req.outputTokens = 0),
+  ];
+  return async (...args) => executePreprocessors(preprocessors, args);
+};
+
+async function executePreprocessors(
+  preprocessors: RequestPreprocessor[],
+  [req, res, next]: Parameters<RequestHandler>
+) {
+  try {
+    for (const preprocessor of preprocessors) {
+      await preprocessor(req);
+    }
+    next();
+  } catch (error) {
+    if (error.constructor.name === "ZodError") {
+      const msg = error?.issues
+        ?.map((issue: ZodIssue) => issue.message)
+        .join("; ");
+      req.log.info(msg, "Prompt validation failed.");
+    } else {
+      req.log.error(error, "Error while executing request preprocessor");
+    }
+
+    // If the requester has opted into streaming, the client probably won't
+    // handle a non-eventstream response, but we haven't initialized the SSE
+    // stream yet as that is typically done later by the request queue. We'll
+    // do that here and then call classifyErrorAndSend to use the streaming
+    // error handler.
+    const { stream } = req.body;
+    const isStreaming = stream === "true" || stream === true;
+    if (isStreaming && !res.headersSent) {
+      initializeSseStream(res);
+    }
+    classifyErrorAndSend(error as Error, req, res);
+  }
+}
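A hedged usage sketch for the factory above. The route path, app setup, and downstream proxy handler are placeholders and not taken from this diff; it only shows where the middleware sits in an Express chain.

```ts
import express from "express";
import { createPreprocessorMiddleware } from "./middleware/request"; // path assumed

const app = express();
app.use(express.json());

// Placeholder for the http-proxy-middleware handler that actually forwards
// the request; the real one lives elsewhere in this repo.
const proxyHandler: express.RequestHandler = (_req, res) => res.sendStatus(502);

app.post(
  "/proxy/openai/v1/chat/completions", // route is an assumption
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "openai" },
    { afterTransform: [async () => {/* extra validation would go here */}] }
  ),
  proxyHandler
);
```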
@@ -0,0 +1,50 @@
+import { AzureOpenAIKey, keyPool } from "../../../../shared/key-management";
+import { RequestPreprocessor } from "../index";
+
+export const addAzureKey: RequestPreprocessor = (req) => {
+  const apisValid = req.inboundApi === "openai" && req.outboundApi === "openai";
+  const serviceValid = req.service === "azure";
+  if (!apisValid || !serviceValid) {
+    throw new Error("addAzureKey called on invalid request");
+  }
+
+  if (!req.body?.model) {
+    throw new Error("You must specify a model with your request.");
+  }
+
+  const model = req.body.model.startsWith("azure-")
+    ? req.body.model
+    : `azure-${req.body.model}`;
+
+  req.key = keyPool.get(model);
+  req.body.model = model;
+
+  req.log.info(
+    { key: req.key.hash, model },
+    "Assigned Azure OpenAI key to request"
+  );
+
+  const cred = req.key as AzureOpenAIKey;
+  const { resourceName, deploymentId, apiKey } = getCredentialsFromKey(cred);
+
+  req.signedRequest = {
+    method: "POST",
+    protocol: "https:",
+    hostname: `${resourceName}.openai.azure.com`,
+    path: `/openai/deployments/${deploymentId}/chat/completions?api-version=2023-09-01-preview`,
+    headers: {
+      ["host"]: `${resourceName}.openai.azure.com`,
+      ["content-type"]: "application/json",
+      ["api-key"]: apiKey,
+    },
+    body: JSON.stringify(req.body),
+  };
+};
+
+function getCredentialsFromKey(key: AzureOpenAIKey) {
+  const [resourceName, deploymentId, apiKey] = key.key.split(":");
+  if (!resourceName || !deploymentId || !apiKey) {
+    throw new Error("Assigned Azure OpenAI key is not in the correct format.");
+  }
+  return { resourceName, deploymentId, apiKey };
+}
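Inferred from the `split(":")` in `getCredentialsFromKey` above: each Azure credential is stored as one colon-delimited string. A sketch with invented values:

```ts
// "<resourceName>:<deploymentId>:<apiKey>" (values below are made up)
const azureKey = "my-resource:gpt-4-deployment:0123456789abcdef0123456789abcdef";
const [resourceName, deploymentId] = azureKey.split(":");
// Requests then go to:
// https://my-resource.openai.azure.com/openai/deployments/gpt-4-deployment/chat/completions?api-version=2023-09-01-preview
```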
@@ -0,0 +1,37 @@
+import { hasAvailableQuota } from "../../../../shared/users/user-store";
+import { isImageGenerationRequest, isTextGenerationRequest } from "../../common";
+import { HPMRequestCallback } from "../index";
+
+export class QuotaExceededError extends Error {
+  public quotaInfo: any;
+  constructor(message: string, quotaInfo: any) {
+    super(message);
+    this.name = "QuotaExceededError";
+    this.quotaInfo = quotaInfo;
+  }
+}
+
+export const applyQuotaLimits: HPMRequestCallback = (_proxyReq, req) => {
+  const subjectToQuota =
+    isTextGenerationRequest(req) || isImageGenerationRequest(req);
+  if (!subjectToQuota || !req.user) return;
+
+  const requestedTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
+  if (
+    !hasAvailableQuota({
+      userToken: req.user.token,
+      model: req.body.model,
+      api: req.outboundApi,
+      requested: requestedTokens,
+    })
+  ) {
+    throw new QuotaExceededError(
+      "You have exceeded your proxy token quota for this model.",
+      {
+        quota: req.user.tokenLimits,
+        used: req.user.tokenCounts,
+        requested: requestedTokens,
+      }
+    );
+  }
+};
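A hedged sketch of how a downstream error handler might surface the `quotaInfo` payload attached above. The handler, status code, and response shape are our assumptions, not part of this diff:

```ts
import type { Request, Response } from "express";
import { QuotaExceededError } from "./apply-quota-limits"; // path assumed

function sendQuotaError(err: Error, _req: Request, res: Response): boolean {
  if (!(err instanceof QuotaExceededError)) return false;
  // 429 with the quota breakdown so the client can display usage.
  res.status(429).json({ error: { message: err.message, ...err.quotaInfo } });
  return true;
}
```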
@@ -0,0 +1,53 @@
+import { RequestPreprocessor } from "../index";
+import { countTokens } from "../../../../shared/tokenization";
+import { assertNever } from "../../../../shared/utils";
+import type { OpenAIChatMessage } from "./transform-outbound-payload";
+
+/**
+ * Given a request with an already-transformed body, counts the number of
+ * tokens and assigns the count to the request.
+ */
+export const countPromptTokens: RequestPreprocessor = async (req) => {
+  const service = req.outboundApi;
+  let result;
+
+  switch (service) {
+    case "openai": {
+      req.outputTokens = req.body.max_tokens;
+      const prompt: OpenAIChatMessage[] = req.body.messages;
+      result = await countTokens({ req, prompt, service });
+      break;
+    }
+    case "openai-text": {
+      req.outputTokens = req.body.max_tokens;
+      const prompt: string = req.body.prompt;
+      result = await countTokens({ req, prompt, service });
+      break;
+    }
+    case "anthropic": {
+      req.outputTokens = req.body.max_tokens_to_sample;
+      const prompt: string = req.body.prompt;
+      result = await countTokens({ req, prompt, service });
+      break;
+    }
+    case "google-palm": {
+      req.outputTokens = req.body.maxOutputTokens;
+      const prompt: string = req.body.prompt.text;
+      result = await countTokens({ req, prompt, service });
+      break;
+    }
+    case "openai-image": {
+      req.outputTokens = 1;
+      result = await countTokens({ req, service });
+      break;
+    }
+    default:
+      assertNever(service);
+  }
+
+  req.promptTokens = result.token_count;
+
+  req.log.debug({ result: result }, "Counted prompt tokens.");
+  req.tokenizerInfo = req.tokenizerInfo ?? {};
+  req.tokenizerInfo = { ...req.tokenizerInfo, ...result };
+};
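A one-glance restatement of the switch above, showing which request-body field supplies the output token budget for each outbound API format:

```ts
const OUTPUT_BUDGET_FIELD: Record<string, string> = {
  openai: "max_tokens",
  "openai-text": "max_tokens",
  anthropic: "max_tokens_to_sample",
  "google-palm": "maxOutputTokens",
  "openai-image": "n/a (fixed at 1)",
};
```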
@@ -0,0 +1,76 @@
+import { Request } from "express";
+import { config } from "../../../../config";
+import { assertNever } from "../../../../shared/utils";
+import { RequestPreprocessor } from "../index";
+import { UserInputError } from "../../../../shared/errors";
+import { OpenAIChatMessage } from "./transform-outbound-payload";
+
+const rejectedClients = new Map<string, number>();
+
+setInterval(() => {
+  rejectedClients.forEach((count, ip) => {
+    if (count > 0) {
+      rejectedClients.set(ip, Math.floor(count / 2));
+    } else {
+      rejectedClients.delete(ip);
+    }
+  });
+}, 30000);
+
+/**
+ * Block requests containing blacklisted phrases. Repeated rejections from the
+ * same IP address will be throttled.
+ */
+export const languageFilter: RequestPreprocessor = async (req) => {
+  if (!config.rejectPhrases.length) return;
+
+  const prompt = getPromptFromRequest(req);
+  const match = config.rejectPhrases.find((phrase) =>
+    prompt.match(new RegExp(phrase, "i"))
+  );
+
+  if (match) {
+    const ip = req.ip;
+    const rejections = (rejectedClients.get(req.ip) || 0) + 1;
+    const delay = Math.min(60000, Math.pow(2, rejections - 1) * 1000);
+    rejectedClients.set(ip, rejections);
+    req.log.warn(
+      { match, ip, rejections, delay },
+      "Prompt contains rejected phrase"
+    );
+    await new Promise((resolve) => {
+      req.res!.once("close", resolve);
+      setTimeout(resolve, delay);
+    });
+    throw new UserInputError(config.rejectMessage);
+  }
+};
+
+function getPromptFromRequest(req: Request) {
+  const service = req.outboundApi;
+  const body = req.body;
+  switch (service) {
+    case "anthropic":
+      return body.prompt;
+    case "openai":
+      return body.messages
+        .map((msg: OpenAIChatMessage) => {
+          const text = Array.isArray(msg.content)
+            ? msg.content
+                .map((c) => {
+                  if ("text" in c) return c.text;
+                })
+                .join()
+            : msg.content;
+          return `${msg.role}: ${text}`;
+        })
+        .join("\n\n");
+    case "openai-text":
+    case "openai-image":
+      return body.prompt;
+    case "google-palm":
+      return body.prompt.text;
+    default:
+      assertNever(service);
+  }
+}
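The throttle above doubles the stall per rejection and caps it at one minute, while the `setInterval` halves each counter every 30 seconds. Evaluating the delay formula directly:

```ts
for (let rejections = 1; rejections <= 8; rejections++) {
  const delay = Math.min(60000, Math.pow(2, rejections - 1) * 1000);
  console.log(`${rejections} rejection(s) -> ${delay} ms`);
}
// 1 -> 1000, 2 -> 2000, 3 -> 4000, 4 -> 8000, 5 -> 16000,
// 6 -> 32000, 7 -> 60000 (capped), 8 -> 60000 (capped)
```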
@@ -0,0 +1,15 @@
+import { Request } from "express";
+import { APIFormat, LLMService } from "../../../../shared/key-management";
+import { RequestPreprocessor } from "../index";
+
+export const setApiFormat = (api: {
+  inApi: Request["inboundApi"];
+  outApi: APIFormat;
+  service: LLMService;
+}): RequestPreprocessor => {
+  return function configureRequestApiFormat(req) {
+    req.inboundApi = api.inApi;
+    req.outboundApi = api.outApi;
+    req.service = api.service;
+  };
+};
@@ -0,0 +1,96 @@
+import express from "express";
+import { Sha256 } from "@aws-crypto/sha256-js";
+import { SignatureV4 } from "@smithy/signature-v4";
+import { HttpRequest } from "@smithy/protocol-http";
+import { keyPool } from "../../../../shared/key-management";
+import { RequestPreprocessor } from "../index";
+import { AnthropicV1CompleteSchema } from "./transform-outbound-payload";
+
+const AMZ_HOST =
+  process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";
+
+/**
+ * Signs an outgoing AWS request with the appropriate headers and modifies the
+ * request object in place to fix the path.
+ */
+export const signAwsRequest: RequestPreprocessor = async (req) => {
+  req.key = keyPool.get("anthropic.claude-v2");
+
+  const { model, stream } = req.body;
+  req.isStreaming = stream === true || stream === "true";
+
+  let preamble = req.body.prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
+  req.body.prompt = preamble + req.body.prompt;
+
+  // AWS supports only a subset of Anthropic's parameters and is more strict
+  // about unknown parameters.
+  // TODO: This should happen in transform-outbound-payload.ts
+  const strippedParams = AnthropicV1CompleteSchema.pick({
+    prompt: true,
+    max_tokens_to_sample: true,
+    stop_sequences: true,
+    temperature: true,
+    top_k: true,
+    top_p: true,
+  }).parse(req.body);
+
+  const credential = getCredentialParts(req);
+  const host = AMZ_HOST.replace("%REGION%", credential.region);
+  // AWS only uses 2023-06-01 and does not actually check this header, but we
+  // set it so that the stream adapter always selects the correct transformer.
+  req.headers["anthropic-version"] = "2023-06-01";
+
+  // Uses the AWS SDK to sign a request, then modifies our HPM proxy request
+  // with the headers generated by the SDK.
+  const newRequest = new HttpRequest({
+    method: "POST",
+    protocol: "https:",
+    hostname: host,
+    path: `/model/${model}/invoke${stream ? "-with-response-stream" : ""}`,
+    headers: {
+      ["Host"]: host,
+      ["content-type"]: "application/json",
+    },
+    body: JSON.stringify(strippedParams),
+  });
+
+  if (stream) {
+    newRequest.headers["x-amzn-bedrock-accept"] = "application/json";
+  } else {
+    newRequest.headers["accept"] = "*/*";
+  }
+
+  req.signedRequest = await sign(newRequest, getCredentialParts(req));
+};
+
+type Credential = {
+  accessKeyId: string;
+  secretAccessKey: string;
+  region: string;
+};
+
+function getCredentialParts(req: express.Request): Credential {
+  const [accessKeyId, secretAccessKey, region] = req.key!.key.split(":");
+
+  if (!accessKeyId || !secretAccessKey || !region) {
+    req.log.error(
+      { key: req.key!.hash },
+      "AWS_CREDENTIALS isn't correctly formatted; refer to the docs"
+    );
+    throw new Error("The key assigned to this request is invalid.");
+  }
+
+  return { accessKeyId, secretAccessKey, region };
+}
+
+async function sign(request: HttpRequest, credential: Credential) {
+  const { accessKeyId, secretAccessKey, region } = credential;
+
+  const signer = new SignatureV4({
+    sha256: Sha256,
+    credentials: { accessKeyId, secretAccessKey },
+    region,
+    service: "bedrock",
+  });
+
+  return signer.sign(request);
+}
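Inferred from the `split(":")` in `getCredentialParts` above, each `AWS_CREDENTIALS` entry is a single colon-delimited string; the values below are invented:

```ts
// "<accessKeyId>:<secretAccessKey>:<region>"
const awsCredential =
  "AKIAIOSFODNN7EXAMPLE:wJalrXUtnFEMIK7MDENGbPxRfiCYEXAMPLEKEY:us-east-1";
const [accessKeyId, secretAccessKey, region] = awsCredential.split(":");
// The host becomes bedrock-runtime.us-east-1.amazonaws.com per AMZ_HOST above.
```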
@@ -0,0 +1,430 @@
+import { Request } from "express";
+import { z } from "zod";
+import { config } from "../../../../config";
+import { isTextGenerationRequest, isImageGenerationRequest } from "../../common";
+import { RequestPreprocessor } from "../index";
+import { APIFormat } from "../../../../shared/key-management";
+
+const CLAUDE_OUTPUT_MAX = config.maxOutputTokensAnthropic;
+const OPENAI_OUTPUT_MAX = config.maxOutputTokensOpenAI;
+
+// TODO: move schemas to shared
+
+// https://console.anthropic.com/docs/api/reference#-v1-complete
+export const AnthropicV1CompleteSchema = z.object({
+  model: z.string(),
+  prompt: z.string({
+    required_error:
+      "No prompt found. Are you sending an OpenAI-formatted request to the Claude endpoint?",
+  }),
+  max_tokens_to_sample: z.coerce
+    .number()
+    .int()
+    .transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
+  stop_sequences: z.array(z.string()).optional(),
+  stream: z.boolean().optional().default(false),
+  temperature: z.coerce.number().optional().default(1),
+  top_k: z.coerce.number().optional(),
+  top_p: z.coerce.number().optional(),
+  metadata: z.any().optional(),
+});
+
+// https://platform.openai.com/docs/api-reference/chat/create
+const OpenAIV1ChatContentArraySchema = z.array(
+  z.union([
+    z.object({ type: z.literal("text"), text: z.string() }),
+    z.object({
+      type: z.literal("image_url"),
+      image_url: z.object({
+        url: z.string().url(),
+        detail: z.enum(["low", "auto", "high"]).optional().default("auto"),
+      }),
+    }),
+  ])
+);
+
+export const OpenAIV1ChatCompletionSchema = z.object({
+  model: z.string(),
+  messages: z.array(
+    z.object({
+      role: z.enum(["system", "user", "assistant"]),
+      content: z.union([z.string(), OpenAIV1ChatContentArraySchema]),
+      name: z.string().optional(),
+    }),
+    {
+      required_error:
+        "No `messages` found. Ensure you've set the correct completion endpoint.",
+      invalid_type_error:
+        "Messages were not formatted correctly. Refer to the OpenAI Chat API documentation for more information.",
+    }
+  ),
+  temperature: z.number().optional().default(1),
+  top_p: z.number().optional().default(1),
+  n: z
+    .literal(1, {
+      errorMap: () => ({
+        message: "You may only request a single completion at a time.",
+      }),
+    })
+    .optional(),
+  stream: z.boolean().optional().default(false),
+  stop: z.union([z.string(), z.array(z.string())]).optional(),
+  max_tokens: z.coerce
+    .number()
+    .int()
+    .nullish()
+    .default(16)
+    .transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
+  frequency_penalty: z.number().optional().default(0),
+  presence_penalty: z.number().optional().default(0),
+  logit_bias: z.any().optional(),
+  user: z.string().optional(),
+  seed: z.number().int().optional(),
+});
+
+export type OpenAIChatMessage = z.infer<
+  typeof OpenAIV1ChatCompletionSchema
+>["messages"][0];
+
+const OpenAIV1TextCompletionSchema = z
+  .object({
+    model: z
+      .string()
+      .regex(
+        /^gpt-3.5-turbo-instruct/,
+        "Model must start with 'gpt-3.5-turbo-instruct'"
+      ),
+    prompt: z.string({
+      required_error:
+        "No `prompt` found. Ensure you've set the correct completion endpoint.",
+    }),
+    logprobs: z.number().int().nullish().default(null),
+    echo: z.boolean().optional().default(false),
+    best_of: z.literal(1).optional(),
+    stop: z.union([z.string(), z.array(z.string()).max(4)]).optional(),
+    suffix: z.string().optional(),
+  })
+  .merge(OpenAIV1ChatCompletionSchema.omit({ messages: true }));
+
+// https://platform.openai.com/docs/api-reference/images/create
+const OpenAIV1ImagesGenerationSchema = z.object({
+  prompt: z.string().max(4000),
+  model: z.string().optional(),
+  quality: z.enum(["standard", "hd"]).optional().default("standard"),
+  n: z.number().int().min(1).max(4).optional().default(1),
+  response_format: z.enum(["url", "b64_json"]).optional(),
+  size: z
+    .enum(["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"])
+    .optional()
+    .default("1024x1024"),
+  style: z.enum(["vivid", "natural"]).optional().default("vivid"),
+  user: z.string().optional(),
+});
+
+// https://developers.generativeai.google/api/rest/generativelanguage/models/generateText
+const PalmV1GenerateTextSchema = z.object({
+  model: z.string(),
+  prompt: z.object({ text: z.string() }),
+  temperature: z.number().optional(),
+  maxOutputTokens: z.coerce
+    .number()
+    .int()
+    .optional()
+    .default(16)
+    .transform((v) => Math.min(v, 1024)), // TODO: Add config
+  candidateCount: z.literal(1).optional(),
+  topP: z.number().optional(),
+  topK: z.number().optional(),
+  safetySettings: z.array(z.object({})).max(0).optional(),
+  stopSequences: z.array(z.string()).max(5).optional(),
+});
+
+const VALIDATORS: Record<APIFormat, z.ZodSchema<any>> = {
+  anthropic: AnthropicV1CompleteSchema,
+  openai: OpenAIV1ChatCompletionSchema,
+  "openai-text": OpenAIV1TextCompletionSchema,
+  "openai-image": OpenAIV1ImagesGenerationSchema,
+  "google-palm": PalmV1GenerateTextSchema,
+};
+
+/** Transforms an incoming request body to one that matches the target API. */
+export const transformOutboundPayload: RequestPreprocessor = async (req) => {
+  const sameService = req.inboundApi === req.outboundApi;
+  const alreadyTransformed = req.retryCount > 0;
+  const notTransformable =
+    !isTextGenerationRequest(req) && !isImageGenerationRequest(req);
+
+  if (alreadyTransformed || notTransformable) return;
+
+  if (sameService) {
+    const result = VALIDATORS[req.inboundApi].safeParse(req.body);
+    if (!result.success) {
+      req.log.error(
+        { issues: result.error.issues, body: req.body },
+        "Request validation failed"
+      );
+      throw result.error;
+    }
+    req.body = result.data;
+    return;
+  }
+
+  if (req.inboundApi === "openai" && req.outboundApi === "anthropic") {
+    req.body = openaiToAnthropic(req);
+    return;
+  }
+
+  if (req.inboundApi === "openai" && req.outboundApi === "google-palm") {
+    req.body = openaiToPalm(req);
+    return;
+  }
+
+  if (req.inboundApi === "openai" && req.outboundApi === "openai-text") {
+    req.body = openaiToOpenaiText(req);
+    return;
+  }
+
+  if (req.inboundApi === "openai" && req.outboundApi === "openai-image") {
+    req.body = openaiToOpenaiImage(req);
+    return;
+  }
+
+  throw new Error(
+    `'${req.inboundApi}' -> '${req.outboundApi}' request proxying is not supported. Make sure your client is configured to use the correct API.`
+  );
+};
+
+function openaiToAnthropic(req: Request) {
+  const { body } = req;
+  const result = OpenAIV1ChatCompletionSchema.safeParse(body);
+  if (!result.success) {
+    req.log.warn(
+      { issues: result.error.issues, body },
+      "Invalid OpenAI-to-Anthropic request"
+    );
+    throw result.error;
+  }
+
+  req.headers["anthropic-version"] = "2023-06-01";
+
+  const { messages, ...rest } = result.data;
+  const prompt = openAIMessagesToClaudePrompt(messages);
+
+  let stops = rest.stop
+    ? Array.isArray(rest.stop)
+      ? rest.stop
+      : [rest.stop]
+    : [];
+  // Recommended by Anthropic
+  stops.push("\n\nHuman:");
+  // Helps with jailbreak prompts that send fake system messages and multi-bot
+  // chats that prefix bot messages with "System: Respond as <bot name>".
+  stops.push("\n\nSystem:");
+  // Remove duplicates
+  stops = [...new Set(stops)];
+
+  return {
+    // Model may be overridden in `calculate-context-size.ts` to avoid having
+    // a circular dependency (`calculate-context-size.ts` needs an already-
+    // transformed request body to count tokens, but this function would like
+    // to know the count to select a model).
+    model: process.env.CLAUDE_SMALL_MODEL || "claude-v1",
+    prompt: prompt,
+    max_tokens_to_sample: rest.max_tokens,
+    stop_sequences: stops,
+    stream: rest.stream,
+    temperature: rest.temperature,
+    top_p: rest.top_p,
+  };
+}
+
+function openaiToOpenaiText(req: Request) {
+  const { body } = req;
+  const result = OpenAIV1ChatCompletionSchema.safeParse(body);
+  if (!result.success) {
+    req.log.warn(
+      { issues: result.error.issues, body },
+      "Invalid OpenAI-to-OpenAI-text request"
+    );
+    throw result.error;
+  }
+
+  const { messages, ...rest } = result.data;
+  const prompt = flattenOpenAIChatMessages(messages);
+
+  let stops = rest.stop
+    ? Array.isArray(rest.stop)
+      ? rest.stop
+      : [rest.stop]
+    : [];
+  stops.push("\n\nUser:");
+  stops = [...new Set(stops)];
+
+  const transformed = { ...rest, prompt: prompt, stop: stops };
+  return OpenAIV1TextCompletionSchema.parse(transformed);
+}
+
+// Takes the last chat message and uses it verbatim as the image prompt.
+function openaiToOpenaiImage(req: Request) {
+  const { body } = req;
+  const result = OpenAIV1ChatCompletionSchema.safeParse(body);
+  if (!result.success) {
+    req.log.warn(
+      { issues: result.error.issues, body },
+      "Invalid OpenAI-to-OpenAI-image request"
+    );
+    throw result.error;
+  }
+
+  const { messages } = result.data;
+  const prompt = messages.filter((m) => m.role === "user").pop()?.content;
+  if (Array.isArray(prompt)) {
+    throw new Error("Image generation prompt must be a text message.");
+  }
+
+  if (body.stream) {
+    throw new Error(
+      "Streaming is not supported for image generation requests."
+    );
+  }
+
+  // Some frontends do weird things with the prompt, like prefixing it with a
+  // character name or wrapping the entire thing in quotes. We will look for
+  // the index of "Image:" and use everything after that as the prompt.
+  const index = prompt?.toLowerCase().indexOf("image:");
+  if (index === -1 || !prompt) {
+    throw new Error(
+      `Start your prompt with 'Image:' followed by a description of the image you want to generate (received: ${prompt}).`
+    );
+  }
+
+  // TODO: Add some way to specify parameters via chat message
+  const transformed = {
+    model: body.model.includes("dall-e") ? body.model : "dall-e-3",
+    quality: "standard",
+    size: "1024x1024",
+    response_format: "url",
+    prompt: prompt.slice(index! + 6).trim(),
+  };
+  return OpenAIV1ImagesGenerationSchema.parse(transformed);
+}
+
+function openaiToPalm(req: Request): z.infer<typeof PalmV1GenerateTextSchema> {
+  const { body } = req;
+  const result = OpenAIV1ChatCompletionSchema.safeParse({
+    ...body,
+    model: "gpt-3.5-turbo",
+  });
+  if (!result.success) {
+    req.log.warn(
+      { issues: result.error.issues, body },
+      "Invalid OpenAI-to-Palm request"
+    );
+    throw result.error;
+  }
+
+  const { messages, ...rest } = result.data;
+  const prompt = flattenOpenAIChatMessages(messages);
+
+  let stops = rest.stop
+    ? Array.isArray(rest.stop)
+      ? rest.stop
+      : [rest.stop]
+    : [];
+
+  stops.push("\n\nUser:");
+  stops = [...new Set(stops)];
+
+  z.array(z.string()).max(5).parse(stops);
+
+  return {
+    prompt: { text: prompt },
+    maxOutputTokens: rest.max_tokens,
+    stopSequences: stops,
+    model: "text-bison-001",
+    topP: rest.top_p,
+    temperature: rest.temperature,
+    safetySettings: [
+      { category: "HARM_CATEGORY_UNSPECIFIED", threshold: "BLOCK_NONE" },
+      { category: "HARM_CATEGORY_DEROGATORY", threshold: "BLOCK_NONE" },
+      { category: "HARM_CATEGORY_TOXICITY", threshold: "BLOCK_NONE" },
+      { category: "HARM_CATEGORY_VIOLENCE", threshold: "BLOCK_NONE" },
+      { category: "HARM_CATEGORY_SEXUAL", threshold: "BLOCK_NONE" },
+      { category: "HARM_CATEGORY_MEDICAL", threshold: "BLOCK_NONE" },
+      { category: "HARM_CATEGORY_DANGEROUS", threshold: "BLOCK_NONE" },
+    ],
+  };
+}
+
+export function openAIMessagesToClaudePrompt(messages: OpenAIChatMessage[]) {
+  return (
+    messages
+      .map((m) => {
+        let role: string = m.role;
+        if (role === "assistant") {
+          role = "Assistant";
+        } else if (role === "system") {
+          role = "System";
+        } else if (role === "user") {
+          role = "Human";
+        }
+        const name = m.name?.trim();
+        const content = flattenOpenAIMessageContent(m.content);
+        // https://console.anthropic.com/docs/prompt-design
+        // `name` isn't supported by Anthropic but we can still try to use it.
+        return `\n\n${role}: ${name ? `(as ${name}) ` : ""}${content}`;
+      })
+      .join("") + "\n\nAssistant:"
+  );
+}
+
+function flattenOpenAIChatMessages(messages: OpenAIChatMessage[]) {
+  // Temporary to allow experimenting with prompt strategies
+  const PROMPT_VERSION: number = 1;
+  switch (PROMPT_VERSION) {
+    case 1:
+      return (
+        messages
+          .map((m) => {
+            // Claude-style human/assistant turns
+            let role: string = m.role;
+            if (role === "assistant") {
+              role = "Assistant";
+            } else if (role === "system") {
+              role = "System";
+            } else if (role === "user") {
+              role = "User";
+            }
+            return `\n\n${role}: ${flattenOpenAIMessageContent(m.content)}`;
+          })
+          .join("") + "\n\nAssistant:"
+      );
+    case 2:
+      return messages
+        .map((m) => {
+          // Claude without prefixes (except system) and no Assistant priming
+          let role: string = "";
+          if (m.role === "system") {
+            role = "System: ";
+          }
+          return `\n\n${role}${flattenOpenAIMessageContent(m.content)}`;
+        })
+        .join("");
+    default:
+      throw new Error(`Unknown prompt version: ${PROMPT_VERSION}`);
+  }
+}
+
+function flattenOpenAIMessageContent(
+  content: OpenAIChatMessage["content"]
+): string {
+  return Array.isArray(content)
+    ? content
+        .map((contentItem) => {
+          if ("text" in contentItem) return contentItem.text;
+          if ("image_url" in contentItem) return "[ Uploaded Image Omitted ]";
+        })
+        .join("\n")
+    : content;
+}
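A worked example of what `openAIMessagesToClaudePrompt` above produces; the message values are invented:

```ts
import { openAIMessagesToClaudePrompt } from "./transform-outbound-payload"; // path assumed

const prompt = openAIMessagesToClaudePrompt([
  { role: "system", content: "You are a pirate." },
  { role: "user", content: "Hello!", name: "Bob" },
]);
// => "\n\nSystem: You are a pirate.\n\nHuman: (as Bob) Hello!\n\nAssistant:"
```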
@@ -0,0 +1,105 @@
+import { Request } from "express";
+import { z } from "zod";
+import { config } from "../../../../config";
+import { assertNever } from "../../../../shared/utils";
+import { RequestPreprocessor } from "../index";
+
+const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
+const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
+const BISON_MAX_CONTEXT = 8100;
+
+/**
+ * Validates that the sum of `req.promptTokens` and `req.outputTokens` fits
+ * within the context size limit for the model and outbound API format.
+ * If the context is too large, an error is thrown.
+ * This preprocessor should run after any preprocessor that transforms the
+ * request body.
+ */
+export const validateContextSize: RequestPreprocessor = async (req) => {
+  assertRequestHasTokenCounts(req);
+  const promptTokens = req.promptTokens;
+  const outputTokens = req.outputTokens;
+  const contextTokens = promptTokens + outputTokens;
+  const model = req.body.model;
+
+  let proxyMax: number;
+  switch (req.outboundApi) {
+    case "openai":
+    case "openai-text":
+      proxyMax = OPENAI_MAX_CONTEXT;
+      break;
+    case "anthropic":
+      proxyMax = CLAUDE_MAX_CONTEXT;
+      break;
+    case "google-palm":
+      proxyMax = BISON_MAX_CONTEXT;
+      break;
+    case "openai-image":
+      return;
+    default:
+      assertNever(req.outboundApi);
+  }
+  proxyMax ||= Number.MAX_SAFE_INTEGER;
+
+  let modelMax: number;
+  if (model.match(/gpt-3.5-turbo-16k/)) {
+    modelMax = 16384;
+  } else if (model.match(/gpt-4-1106(-preview)?/)) {
+    modelMax = 131072;
+  } else if (model.match(/^gpt-4(-\d{4})?-vision(-preview)?$/)) {
+    modelMax = 131072;
+  } else if (model.match(/gpt-3.5-turbo/)) {
+    modelMax = 4096;
+  } else if (model.match(/gpt-4-32k/)) {
+    modelMax = 32768;
+  } else if (model.match(/gpt-4/)) {
+    modelMax = 8192;
+  } else if (model.match(/^claude-(?:instant-)?v1(?:\.\d)?-100k/)) {
+    modelMax = 100000;
+  } else if (model.match(/^claude-(?:instant-)?v1(?:\.\d)?$/)) {
+    modelMax = 9000;
+  } else if (model.match(/^claude-2\.0/)) {
+    modelMax = 100000;
+  } else if (model.match(/^claude-2/)) {
+    modelMax = 200000;
+  } else if (model.match(/^text-bison-\d{3}$/)) {
+    modelMax = BISON_MAX_CONTEXT;
+  } else if (model.match(/^anthropic\.claude/)) {
+    // Not sure if AWS Claude has the same context limit as Anthropic Claude.
+    modelMax = 100000;
+  } else {
+    req.log.warn({ model }, "Unknown model, using 200k token limit.");
+    modelMax = 200000;
+  }
+
+  const finalMax = Math.min(proxyMax, modelMax);
+  z.object({
+    tokens: z
+      .number()
+      .int()
+      .max(finalMax, {
+        message: `Your request exceeds the context size limit. (max: ${finalMax} tokens, requested: ${promptTokens} prompt + ${outputTokens} output = ${contextTokens} context tokens)`,
+      }),
+  }).parse({ tokens: contextTokens });
+
+  req.log.debug(
+    { promptTokens, outputTokens, contextTokens, modelMax, proxyMax },
+    "Prompt size validated"
+  );
+
+  req.tokenizerInfo.prompt_tokens = promptTokens;
+  req.tokenizerInfo.completion_tokens = outputTokens;
+  req.tokenizerInfo.max_model_tokens = modelMax;
+  req.tokenizerInfo.max_proxy_tokens = proxyMax;
+};
+
+function assertRequestHasTokenCounts(
+  req: Request
+): asserts req is Request & { promptTokens: number; outputTokens: number } {
+  z.object({
+    promptTokens: z.number().int().min(1),
+    outputTokens: z.number().int().min(1),
+  })
+    .nonstrict()
+    .parse({ promptTokens: req.promptTokens, outputTokens: req.outputTokens });
+}
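A minimal reproduction of the limit check above, with invented numbers: for "gpt-4" the table yields `modelMax = 8192`, so an 8300-token context fails the Zod `max()` and the error message is surfaced to the client.

```ts
import { z } from "zod";

const finalMax = 8192;                // gpt-4 per the table above
const contextTokens = 8000 + 300;     // prompt + output
z.object({ tokens: z.number().int().max(finalMax) }).parse({
  tokens: contextTokens,
}); // throws ZodError: Number must be less than or equal to 8192
```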
@@ -1,13 +0,0 @@
-import { Request } from "express";
-import { AIService } from "../../../key-management";
-import { RequestPreprocessor } from ".";
-
-export const setApiFormat = (api: {
-  inApi: Request["inboundApi"];
-  outApi: AIService;
-}): RequestPreprocessor => {
-  return (req) => {
-    req.inboundApi = api.inApi;
-    req.outboundApi = api.outApi;
-  };
-};
@@ -1,112 +0,0 @@
-/**
- * Transforms a KoboldAI payload into an OpenAI payload.
- * @deprecated Kobold input format isn't supported anymore as all popular
- * frontends support reverse proxies or changing their base URL. It adds too
- * many edge cases to be worth maintaining and doesn't work with newer features.
- */
-import { logger } from "../../../logger";
-import type { ProxyRequestMiddleware } from ".";
-
-// Kobold requests look like this:
-// body:
-// {
-//   prompt: "Aqua is character from Konosuba anime. Aqua is a goddess, before life in the Fantasy World, she was a goddess of water who guided humans to the afterlife. Aqua looks like young woman with beauty no human could match. Aqua has light blue hair, blue eyes, slim figure, long legs, wide hips, blue waist-long hair that is partially tied into a loop with a spherical clip. Aqua's measurements are 83-56-83 cm. Aqua's height 157cm. Aqua wears sleeveless dark-blue dress with white trimmings, extremely short dark blue miniskirt, green bow around her chest with a blue gem in the middle, detached white sleeves with blue and golden trimmings, thigh-high blue heeled boots over white stockings with blue trimmings. Aqua is very strong in water magic, but a little stupid, so she does not always use it to the place. Aqua is high-spirited, cheerful, carefree. Aqua rarely thinks about the consequences of her actions and always acts or speaks on her whims. Because very easy to taunt Aqua with jeers or lure her with praises.\n" +
-//     "Aqua's personality: high-spirited, likes to party, carefree, cheerful.\n" +
-//     'Circumstances and context of the dialogue: Aqua is standing in the city square and is looking for new followers\n' +
-//     'This is how Aqua should talk\n' +
-//     'You: Hi Aqua, I heard you like to spend time in the pub.\n' +
-//     "Aqua: *excitedly* Oh my goodness, yes! I just love spending time at the pub! It's so much fun to talk to all the adventurers and hear about their exciting adventures! And you are?\n" +
-//     "You: I'm a new here and I wanted to ask for your advice.\n" +
-//     'Aqua: *giggles* Oh, advice! I love giving advice! And in gratitude for that, treat me to a drink! *gives signals to the bartender*\n' +
-//     'This is how Aqua should talk\n' +
-//     'You: Hello\n' +
-//     "Aqua: *excitedly* Hello there, dear! Are you new to Axel? Don't worry, I, Aqua the goddess of water, am here to help you! Do you need any assistance? And may I say, I look simply radiant today! *strikes a pose and looks at you with puppy eyes*\n" +
-//     '\n' +
-//     'Then the roleplay chat between You and Aqua begins.\n' +
-//     "Aqua: *She is in the town square of a city named Axel. It's morning on a Saturday and she suddenly notices a person who looks like they don't know what they're doing. She approaches him and speaks* \n" +
-//     '\n' +
-//     `"Are you new here? Do you need help? Don't worry! I, Aqua the Goddess of Water, shall help you! Do I look beautiful?" \n` +
-//     '\n' +
-//     '*She strikes a pose and looks at him with puppy eyes.*\n' +
-//     'You: test\n' +
-//     'You: test\n' +
-//     'You: t\n' +
-//     'You: test\n',
-//   use_story: false,
-//   use_memory: false,
-//   use_authors_note: false,
-//   use_world_info: false,
-//   max_context_length: 2048,
-//   max_length: 180,
-//   rep_pen: 1.1,
-//   rep_pen_range: 1024,
-//   rep_pen_slope: 0.9,
-//   temperature: 0.65,
-//   tfs: 0.9,
-//   top_a: 0,
-//   top_k: 0,
-//   top_p: 0.9,
-//   typical: 1,
-//   sampler_order: [
-//     6, 0, 1, 2,
-//     3, 4, 5
-//   ],
-//   singleline: false
-// }
-
-// OpenAI expects this body:
-// { model: 'gpt-3.5-turbo', temperature: 0.65, top_p: 0.9, max_tokens: 180, messages }
-// there's also a frequency_penalty but it's not clear how that maps to kobold's
-// rep_pen.
-
-// messages is an array of { role: "system" | "assistant" | "user", content: ""}
-// kobold only sends us the entire prompt. we can try to split the last two
-// lines into user and assistant messages, but that's not always correct. For
-// now it will have to do.
-
-/**
- * Transforms a KoboldAI payload into an OpenAI payload.
- * @deprecated Probably doesn't work anymore, idk.
- **/
-export const transformKoboldPayload: ProxyRequestMiddleware = (
-  _proxyReq,
-  req
-) => {
-  if (req.inboundApi !== "kobold") {
-    throw new Error("transformKoboldPayload called for non-kobold request.");
-  }
-
-  const { body } = req;
-  const { prompt, max_length, rep_pen, top_p, temperature } = body;
-
-  if (!max_length) {
-    logger.error("KoboldAI request missing max_length.");
-    throw new Error("You must specify a max_length parameter.");
-  }
-
-  const promptLines = prompt.split("\n");
-  // The very last line is the contentless "Assistant: " hint to the AI.
-  // Tavern just leaves an empty line, Agnai includes the AI's name.
-  const assistantHint = promptLines.pop();
-  // The second-to-last line is the user's prompt, generally.
-  const userPrompt = promptLines.pop();
-  const messages = [
-    { role: "system", content: promptLines.join("\n") },
-    { role: "user", content: userPrompt },
-    { role: "assistant", content: assistantHint },
-  ];
-
-  // Kobold doesn't select a model. If the addKey rewriter assigned us a GPT-4
-  // key, use that. Otherwise, use GPT-3.5-turbo.
-
-  const model = req.key!.isGpt4 ? "gpt-4" : "gpt-3.5-turbo";
-  const newBody = {
-    model,
-    temperature,
-    top_p,
-    frequency_penalty: rep_pen, // remove this if model turns schizo
-    max_tokens: max_length,
-    messages,
-  };
-  req.body = newBody;
-};
@@ -1,163 +0,0 @@
-import { Request } from "express";
-import { z } from "zod";
-import { isCompletionRequest } from "../common";
-import { RequestPreprocessor } from ".";
-// import { countTokens } from "../../../tokenization";
-
-// https://console.anthropic.com/docs/api/reference#-v1-complete
-const AnthropicV1CompleteSchema = z.object({
-  model: z.string().regex(/^claude-/, "Model must start with 'claude-'"),
-  prompt: z.string({
-    required_error:
-      "No prompt found. Are you sending an OpenAI-formatted request to the Claude endpoint?",
-  }),
-  max_tokens_to_sample: z.coerce.number(),
-  stop_sequences: z.array(z.string()).optional(),
-  stream: z.boolean().optional().default(false),
-  temperature: z.coerce.number().optional().default(1),
-  top_k: z.coerce.number().optional().default(-1),
-  top_p: z.coerce.number().optional().default(-1),
-  metadata: z.any().optional(),
-});
-
-// https://platform.openai.com/docs/api-reference/chat/create
-const OpenAIV1ChatCompletionSchema = z.object({
-  model: z.string().regex(/^gpt/, "Model must start with 'gpt-'"),
-  messages: z.array(
-    z.object({
-      role: z.enum(["system", "user", "assistant"]),
-      content: z.string(),
-      name: z.string().optional(),
-    }),
-    {
-      required_error:
-        "No prompt found. Are you sending an Anthropic-formatted request to the OpenAI endpoint?",
-    }
-  ),
-  temperature: z.number().optional().default(1),
-  top_p: z.number().optional().default(1),
-  n: z
-    .literal(1, {
-      errorMap: () => ({
-        message: "You may only request a single completion at a time.",
-      }),
-    })
-    .optional(),
-  stream: z.boolean().optional().default(false),
-  stop: z.union([z.string(), z.array(z.string())]).optional(),
-  max_tokens: z.coerce.number().optional(),
-  frequency_penalty: z.number().optional().default(0),
-  presence_penalty: z.number().optional().default(0),
-  logit_bias: z.any().optional(),
-  user: z.string().optional(),
-});
-
-/** Transforms an incoming request body to one that matches the target API. */
-export const transformOutboundPayload: RequestPreprocessor = async (req) => {
-  const sameService = req.inboundApi === req.outboundApi;
-  const alreadyTransformed = req.retryCount > 0;
-  const notTransformable = !isCompletionRequest(req);
-
-  if (alreadyTransformed || notTransformable) {
-    return;
-  }
-
-  if (sameService) {
-    // Just validate, don't transform.
-    const validator =
-      req.outboundApi === "openai"
-        ? OpenAIV1ChatCompletionSchema
-        : AnthropicV1CompleteSchema;
-    const result = validator.safeParse(req.body);
-    if (!result.success) {
-      req.log.error(
-        { issues: result.error.issues, body: req.body },
-        "Request validation failed"
-      );
-      throw result.error;
-    }
-    return;
-  }
-
-  if (req.inboundApi === "openai" && req.outboundApi === "anthropic") {
-    req.body = openaiToAnthropic(req.body, req);
-    return;
-  }
-
-  throw new Error(
-    `'${req.inboundApi}' -> '${req.outboundApi}' request proxying is not supported. Make sure your client is configured to use the correct API.`
-  );
-};
-
-function openaiToAnthropic(body: any, req: Request) {
-  const result = OpenAIV1ChatCompletionSchema.safeParse(body);
-  if (!result.success) {
-    req.log.error(
-      { issues: result.error.issues, body: req.body },
-      "Invalid OpenAI-to-Anthropic request"
-    );
-    throw result.error;
-  }
-
-  const { messages, ...rest } = result.data;
-  const prompt =
-    result.data.messages
-      .map((m) => {
-        let role: string = m.role;
-        if (role === "assistant") {
-          role = "Assistant";
-        } else if (role === "system") {
-          role = "System";
-        } else if (role === "user") {
-          role = "Human";
-        }
-        // https://console.anthropic.com/docs/prompt-design
-        // `name` isn't supported by Anthropic but we can still try to use it.
-        return `\n\n${role}: ${m.name?.trim() ? `(as ${m.name}) ` : ""}${
-          m.content
-        }`;
-      })
-      .join("") + "\n\nAssistant: ";
-
-  // Claude 1.2 has been selected as the default for smaller prompts because it
-  // is said to be less pozzed than the newer 1.3 model. But this is not based
-  // on any empirical testing, just speculation based on Anthropic stating that
-  // 1.3 is "safer and less susceptible to adversarial attacks" than 1.2.
-  // From my own interactions, both are pretty easy to jailbreak so I don't
-  // think there's much of a difference, honestly.
-
-  // If you want to override the model selection, you can set the
-  // CLAUDE_BIG_MODEL and CLAUDE_SMALL_MODEL environment variables in your
-  // .env file.
-
-  // Using "v1" of a model will automatically select the latest version of that
-  // model on the Anthropic side.
-
-  const CLAUDE_BIG = process.env.CLAUDE_BIG_MODEL || "claude-v1-100k";
-  const CLAUDE_SMALL = process.env.CLAUDE_SMALL_MODEL || "claude-v1.2";
-
-  // TODO: Finish implementing tokenizer for more accurate model selection.
-  // This currently uses _character count_, not token count.
-  const model = prompt.length > 25000 ? CLAUDE_BIG : CLAUDE_SMALL;
-
-  let stops = rest.stop
-    ? Array.isArray(rest.stop)
-      ? rest.stop
-      : [rest.stop]
-    : [];
-  // Recommended by Anthropic
-  stops.push("\n\nHuman:");
-  // Helps with jailbreak prompts that send fake system messages and multi-bot
-  // chats that prefix bot messages with "System: Respond as <bot name>".
-  stops.push("\n\nSystem:");
-  // Remove duplicates
-  stops = [...new Set(stops)];
-
-  return {
-    ...rest,
-    model,
-    prompt: prompt,
-    max_tokens_to_sample: rest.max_tokens,
-    stop_sequences: stops,
-  };
-}
@@ -1,29 +1,19 @@
-import { Request, Response } from "express";
-import * as http from "http";
-import { buildFakeSseMessage } from "../common";
-import { RawResponseBodyHandler, decodeResponseBody } from ".";
+import express from "express";
+import { pipeline } from "stream";
+import { promisify } from "util";
+import {
+  buildFakeSse,
+  copySseResponseHeaders,
+  initializeSseStream,
+} from "../../../shared/streaming";
+import { enqueue } from "../../queue";
+import { decodeResponseBody, RawResponseBodyHandler, RetryableError } from ".";
+import { SSEStreamAdapter } from "./streaming/sse-stream-adapter";
+import { SSEMessageTransformer } from "./streaming/sse-message-transformer";
+import { EventAggregator } from "./streaming/event-aggregator";
+import { keyPool } from "../../../shared/key-management";
 
-type OpenAiChatCompletionResponse = {
-  id: string;
-  object: string;
-  created: number;
-  model: string;
-  choices: {
-    message: { role: string; content: string };
-    finish_reason: string | null;
-    index: number;
-  }[];
-};
-
-type AnthropicCompletionResponse = {
-  completion: string;
-  stop_reason: string;
-  truncated: boolean;
-  stop: any;
-  model: string;
-  log_id: string;
-  exception: null;
-};
+const pipelineAsync = promisify(pipeline);
 
 /**
  * Consume the SSE stream and forward events to the client. Once the stream is
@@ -34,260 +24,77 @@ type AnthropicCompletionResponse = {
  * in the event a streamed request results in a non-200 response, we need to
  * fall back to the non-streaming response handler so that the error handler
  * can inspect the error response.
- *
- * Currently most frontends don't support Anthropic streaming, so users can opt
- * to send requests for Claude models via an endpoint that accepts OpenAI-
- * compatible requests and translates the received Anthropic SSE events into
- * OpenAI ones, essentially pretending to be an OpenAI streaming API.
  */
 export const handleStreamedResponse: RawResponseBodyHandler = async (
   proxyRes,
   req,
   res
 ) => {
-  // If these differ, the user is using the OpenAI-compatibile endpoint, so
-  // we need to translate the SSE events into OpenAI completion events for their
-  // frontend.
+  const { hash } = req.key!;
   if (!req.isStreaming) {
-    const err = new Error(
-      "handleStreamedResponse called for non-streaming request."
-    );
-    req.log.error({ stack: err.stack, api: req.inboundApi }, err.message);
-    throw err;
+    throw new Error("handleStreamedResponse called for non-streaming request.");
   }
 
-  const key = req.key!;
-  if (proxyRes.statusCode !== 200) {
-    // Ensure we use the non-streaming middleware stack since we won't be
-    // getting any events.
+  if (proxyRes.statusCode! > 201) {
     req.isStreaming = false;
     req.log.warn(
-      { statusCode: proxyRes.statusCode, key: key.hash },
+      { statusCode: proxyRes.statusCode, key: hash },
       `Streaming request returned error status code. Falling back to non-streaming response handler.`
     );
     return decodeResponseBody(proxyRes, req, res);
   }
 
-  return new Promise((resolve, reject) => {
-    req.log.info({ key: key.hash }, `Starting to proxy SSE stream.`);
+  req.log.debug(
+    { headers: proxyRes.headers, key: hash },
+    `Starting to proxy SSE stream.`
+  );
 
-    // Queued streaming requests will already have a connection open and headers
-    // sent due to the heartbeat handler. In that case we can just start
-    // streaming the response without sending headers.
-    if (!res.headersSent) {
-      res.setHeader("Content-Type", "text/event-stream");
-      res.setHeader("Cache-Control", "no-cache");
-      res.setHeader("Connection", "keep-alive");
-      res.setHeader("X-Accel-Buffering", "no");
-      copyHeaders(proxyRes, res);
-      res.flushHeaders();
-    }
+  // Users waiting in the queue already have a SSE connection open for the
+  // heartbeat, so we can't always send the stream headers.
+  if (!res.headersSent) {
+    copySseResponseHeaders(proxyRes, res);
+    initializeSseStream(res);
+  }
 
-    const originalEvents: string[] = [];
-    let partialMessage = "";
-    let lastPosition = 0;
+  const prefersNativeEvents = req.inboundApi === req.outboundApi;
+  const contentType = proxyRes.headers["content-type"];
 
-    type ProxyResHandler<T extends unknown> = (...args: T[]) => void;
-    function withErrorHandling<T extends unknown>(fn: ProxyResHandler<T>) {
-      return (...args: T[]) => {
-        try {
-          fn(...args);
-        } catch (error) {
-          proxyRes.emit("error", error);
-        }
-      };
-    }
-
-    proxyRes.on(
-      "data",
-      withErrorHandling((chunk: Buffer) => {
-        // We may receive multiple (or partial) SSE messages in a single chunk,
-        // so we need to buffer and emit seperate stream events for full
-        // messages so we can parse/transform them properly.
-        const str = chunk.toString();
-
-        // Anthropic uses CRLF line endings (out-of-spec btw)
-        const fullMessages = (partialMessage + str).split(/\r?\n\r?\n/);
-        partialMessage = fullMessages.pop() || "";
-
-        for (const message of fullMessages) {
-          proxyRes.emit("full-sse-event", message);
-        }
-      })
-    );
-
-    proxyRes.on(
-      "full-sse-event",
-      withErrorHandling((data) => {
-        originalEvents.push(data);
-        const { event, position } = transformEvent({
-          data,
-          requestApi: req.inboundApi,
-          responseApi: req.outboundApi,
-          lastPosition,
-        });
-        lastPosition = position;
-        res.write(event + "\n\n");
-      })
-    );
-
-    proxyRes.on(
-      "end",
-      withErrorHandling(() => {
-        let finalBody = convertEventsToFinalResponse(originalEvents, req);
-        req.log.info({ key: key.hash }, `Finished proxying SSE stream.`);
-        res.end();
-        resolve(finalBody);
-      })
-    );
-
-    proxyRes.on("error", (err) => {
-      req.log.error({ error: err, key: key.hash }, `Mid-stream error.`);
-      const fakeErrorEvent = buildFakeSseMessage(
-        "mid-stream-error",
-        err.message,
-        req
-      );
-      res.write(`data: ${JSON.stringify(fakeErrorEvent)}\n\n`);
-      res.write("data: [DONE]\n\n");
-      res.end();
-      reject(err);
+  const adapter = new SSEStreamAdapter({ contentType });
+  const aggregator = new EventAggregator({ format: req.outboundApi });
+  const transformer = new SSEMessageTransformer({
+    inputFormat: req.outboundApi,
+    inputApiVersion: String(req.headers["anthropic-version"]),
+    logger: req.log,
+    requestId: String(req.id),
+    requestedModel: req.body.model,
+  })
+    .on("originalMessage", (msg: string) => {
+      if (prefersNativeEvents) res.write(msg);
+    })
+    .on("data", (msg) => {
+      if (!prefersNativeEvents) res.write(`data: ${JSON.stringify(msg)}\n\n`);
+      aggregator.addEvent(msg);
     });
-  });
-};
 
-/**
- * Transforms SSE events from the given response API into events compatible with
- * the API requested by the client.
- */
-function transformEvent({
-  data,
-  requestApi,
-  responseApi,
-  lastPosition,
-}: {
-  data: string;
-  requestApi: string;
-  responseApi: string;
-  lastPosition: number;
-}) {
-  if (requestApi === responseApi) {
-    return { position: -1, event: data };
-  }
+  try {
+    await pipelineAsync(proxyRes, adapter, transformer);
+    req.log.debug({ key: hash }, `Finished proxying SSE stream.`);
+    res.end();
+    return aggregator.getFinalResponse();
+  } catch (err) {
+    if (err instanceof RetryableError) {
+      keyPool.markRateLimited(req.key!);
+      req.log.warn(
+        { key: req.key!.hash, retryCount: req.retryCount },
+        `Re-enqueueing request due to retryable error during streaming response.`
+      );
+      req.retryCount++;
+      enqueue(req);
+    } else {
+      const errorEvent = buildFakeSse("stream-error", err.message, req);
+      res.write(`${errorEvent}data: [DONE]\n\n`);
+      res.end();
+    }
+    throw err;
+  }
+};
 
-  if (requestApi === "anthropic" && responseApi === "openai") {
-    throw new Error(`Anthropic -> OpenAI streaming not implemented.`);
-  }
-
-  // Anthropic sends the full completion so far with each event whereas OpenAI
-  // only sends the delta. To make the SSE events compatible, we remove
-  // everything before `lastPosition` from the completion.
-  if (!data.startsWith("data:")) {
-    return { position: lastPosition, event: data };
-  }
-
-  if (data.startsWith("data: [DONE]")) {
-    return { position: lastPosition, event: data };
-  }
-
-  const event = JSON.parse(data.slice("data: ".length));
-  const newEvent = {
-    id: "ant-" + event.log_id,
-    object: "chat.completion.chunk",
-    created: Date.now(),
-    model: event.model,
-    choices: [
-      {
-        index: 0,
-        delta: { content: event.completion?.slice(lastPosition) },
-        finish_reason: event.stop_reason,
-      },
-    ],
-  };
-  return {
-    position: event.completion.length,
-    event: `data: ${JSON.stringify(newEvent)}`,
-  };
-}
-
-/** Copy headers, excluding ones we're already setting for the SSE response. */
-function copyHeaders(proxyRes: http.IncomingMessage, res: Response) {
-  const toOmit = [
-    "content-length",
-    "content-encoding",
-    "transfer-encoding",
-    "content-type",
-    "connection",
-    "cache-control",
-  ];
-  for (const [key, value] of Object.entries(proxyRes.headers)) {
-    if (!toOmit.includes(key) && value) {
-      res.setHeader(key, value);
-    }
-  }
-}
-
-/**
- * Converts the list of incremental SSE events into an object that resembles a
- * full, non-streamed response from the API so that subsequent middleware can
- * operate on it as if it were a normal response.
- * Events are expected to be in the format they were received from the API.
- */
-function convertEventsToFinalResponse(events: string[], req: Request) {
-  if (req.outboundApi === "openai") {
-    let response: OpenAiChatCompletionResponse = {
-      id: "",
-      object: "",
-      created: 0,
-      model: "",
-      choices: [],
-    };
-    response = events.reduce((acc, event, i) => {
-      if (!event.startsWith("data: ")) {
-        return acc;
-      }
-
-      if (event === "data: [DONE]") {
-        return acc;
-      }
-
-      const data = JSON.parse(event.slice("data: ".length));
-      if (i === 0) {
-        return {
-          id: data.id,
-          object: data.object,
-          created: data.created,
-          model: data.model,
-          choices: [
-            {
-              message: { role: data.choices[0].delta.role, content: "" },
-              index: 0,
-              finish_reason: null,
-            },
-          ],
-        };
-      }
-
-      if (data.choices[0].delta.content) {
-        acc.choices[0].message.content += data.choices[0].delta.content;
-      }
-      acc.choices[0].finish_reason = data.choices[0].finish_reason;
-      return acc;
-    }, response);
-    return response;
-  }
-  if (req.outboundApi === "anthropic") {
-    /*
-     * Full complete responses from Anthropic are conveniently just the same as
-     * the final SSE event before the "DONE" event, so we can reuse that
-     */
-    const lastEvent = events[events.length - 2].toString();
-    const data = JSON.parse(lastEvent.slice("data: ".length));
-    const response: AnthropicCompletionResponse = {
-      ...data,
-      log_id: req.id,
-    };
-    return response;
-  }
-  throw new Error("If you get this, something is fucked");
-}
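Taken together, this refactor replaces the hand-rolled event-buffering callbacks with Node stream primitives. A minimal sketch of the new flow, using only names that appear in the diff above (constructor options elided; not the verbatim implementation):

    const adapter = new SSEStreamAdapter({ contentType });        // splits raw bytes into whole SSE messages
    const aggregator = new EventAggregator({ format: req.outboundApi });
    const transformer = new SSEMessageTransformer({ /* options as above */ })
      .on("data", (msg) => aggregator.addEvent(msg));             // collect transformed events
    await promisify(pipeline)(proxyRes, adapter, transformer);    // backpressure-aware piping
    const finalBody = aggregator.getFinalResponse();              // blocking-style body for later middleware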
@@ -3,14 +3,26 @@ import { Request, Response } from "express";
 import * as http from "http";
 import util from "util";
 import zlib from "zlib";
-import { config } from "../../../config";
-import { logger } from "../../../logger";
-import { keyPool } from "../../../key-management";
 import { enqueue, trackWaitTime } from "../../queue";
-import { incrementPromptCount } from "../../auth/user-store";
-import { isCompletionRequest, writeErrorResponse } from "../common";
+import { HttpError } from "../../../shared/errors";
+import { keyPool } from "../../../shared/key-management";
+import { getOpenAIModelFamily } from "../../../shared/models";
+import { countTokens } from "../../../shared/tokenization";
+import {
+  incrementPromptCount,
+  incrementTokenCount,
+} from "../../../shared/users/user-store";
+import { assertNever } from "../../../shared/utils";
+import { refundLastAttempt } from "../../rate-limit";
+import {
+  getCompletionFromBody,
+  isImageGenerationRequest,
+  isTextGenerationRequest,
+  writeErrorResponse,
+} from "../common";
 import { handleStreamedResponse } from "./handle-streamed-response";
 import { logPrompt } from "./log-prompt";
+import { saveImage } from "./save-image";
 
 const DECODER_MAP = {
   gzip: util.promisify(zlib.gunzip),
@@ -24,7 +36,7 @@ const isSupportedContentEncoding = (
   return contentEncoding in DECODER_MAP;
 };
 
-class RetryableError extends Error {
+export class RetryableError extends Error {
   constructor(message: string) {
     super(message);
     this.name = "RetryableError";
@@ -74,7 +86,7 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
       ? handleStreamedResponse
       : decodeResponseBody;
 
-    let lastMiddlewareName = initialHandler.name;
+    let lastMiddleware = initialHandler.name;
 
     try {
      const body = await initialHandler(proxyRes, req, res);
@@ -84,50 +96,58 @@
       if (req.isStreaming) {
         // `handleStreamedResponse` writes to the response and ends it, so
         // we can only execute middleware that doesn't write to the response.
-        middlewareStack.push(trackRateLimit, incrementKeyUsage, logPrompt);
+        middlewareStack.push(
+          trackRateLimit,
+          countResponseTokens,
+          incrementUsage,
+          logPrompt
+        );
       } else {
         middlewareStack.push(
           trackRateLimit,
           handleUpstreamErrors,
-          incrementKeyUsage,
+          countResponseTokens,
+          incrementUsage,
           copyHttpHeaders,
+          saveImage,
           logPrompt,
           ...apiMiddleware
         );
       }
 
       for (const middleware of middlewareStack) {
-        lastMiddlewareName = middleware.name;
+        lastMiddleware = middleware.name;
         await middleware(proxyRes, req, res, body);
       }
 
       trackWaitTime(req);
-    } catch (error: any) {
+    } catch (error) {
       // Hack: if the error is a retryable rate-limit error, the request has
       // been re-enqueued and we can just return without doing anything else.
       if (error instanceof RetryableError) {
         return;
       }
 
-      const errorData = {
-        error: error.stack,
-        thrownBy: lastMiddlewareName,
-        key: req.key?.hash,
-      };
-      const message = `Error while executing proxy response middleware: ${lastMiddlewareName} (${error.message})`;
-      if (res.headersSent) {
-        req.log.error(errorData, message);
-        // This should have already been handled by the error handler, but
-        // just in case...
-        if (!res.writableEnded) {
-          res.end();
-        }
+      // Already logged and responded to the client by handleUpstreamErrors
+      if (error instanceof HttpError) {
+        if (!res.writableEnded) res.end();
         return;
       }
-      logger.error(errorData, message);
-      res
-        .status(500)
-        .json({ error: "Internal server error", proxy_note: message });
+
+      const { stack, message } = error;
+      const info = { stack, lastMiddleware, key: req.key?.hash };
+      const description = `Error while executing proxy response middleware: ${lastMiddleware} (${message})`;
+
+      if (res.headersSent) {
+        req.log.error(info, description);
+        if (!res.writableEnded) res.end();
+        return;
+      } else {
+        req.log.error(info, description);
+        res
+          .status(500)
+          .json({ error: "Internal server error", proxy_note: description });
+      }
     }
   };
 };
@@ -158,7 +178,7 @@ export const decodeResponseBody: RawResponseBodyHandler = async (
     throw err;
   }
 
-  const promise = new Promise<string>((resolve, reject) => {
+  return new Promise<string>((resolve, reject) => {
     let chunks: Buffer[] = [];
     proxyRes.on("data", (chunk) => chunks.push(chunk));
     proxyRes.on("end", async () => {
@@ -171,7 +191,7 @@ export const decodeResponseBody: RawResponseBodyHandler = async (
         body = await decoder(body);
       } else {
         const errorMessage = `Proxy received response with unsupported content-encoding: ${contentEncoding}`;
-        logger.warn({ contentEncoding, key: req.key?.hash }, errorMessage);
+        req.log.warn({ contentEncoding, key: req.key?.hash }, errorMessage);
         writeErrorResponse(req, res, 500, {
           error: errorMessage,
           contentEncoding,
@@ -188,23 +208,27 @@ export const decodeResponseBody: RawResponseBodyHandler = async (
         return resolve(body.toString());
       } catch (error: any) {
         const errorMessage = `Proxy received response with invalid JSON: ${error.message}`;
-        logger.warn({ error, key: req.key?.hash }, errorMessage);
+        req.log.warn({ error: error.stack, key: req.key?.hash }, errorMessage);
         writeErrorResponse(req, res, 500, { error: errorMessage });
         return reject(errorMessage);
       }
     });
   });
-  return promise;
 };
 
-// TODO: This is too specific to OpenAI's error responses.
+type ProxiedErrorPayload = {
+  error?: Record<string, any>;
+  message?: string;
+  proxy_note?: string;
+};
+
 /**
  * Handles non-2xx responses from the upstream service. If the proxied response
  * is an error, this will respond to the client with an error payload and throw
  * an error to stop the middleware stack.
  * On 429 errors, if request queueing is enabled, the request will be silently
  * re-enqueued. Otherwise, the request will be rejected with an error payload.
- * @throws {Error} On HTTP error status code from upstream service
+ * @throws {HttpError} On HTTP error status code from upstream service
 */
 const handleUpstreamErrors: ProxyResHandlerWithBody = async (
   proxyRes,
@@ -218,27 +242,19 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
     return;
   }
 
-  let errorPayload: Record<string, any>;
-  // Subtract 1 from available keys because if this message is being shown,
-  // it's because the key is about to be disabled.
-  const availableKeys = keyPool.available(req.outboundApi) - 1;
-  const tryAgainMessage = Boolean(availableKeys)
-    ? `There are ${availableKeys} more keys available; try your request again.`
-    : "There are no more keys available.";
+  let errorPayload: ProxiedErrorPayload;
+  const tryAgainMessage = keyPool.available(req.body?.model)
+    ? `There may be more keys available for this model; try again in a few seconds.`
+    : "There are no more keys available for this model.";
 
   try {
-    if (typeof body === "object") {
+    assertJsonResponse(body);
     errorPayload = body;
-    } else {
-      throw new Error("Received unparsable error response from upstream.");
-    }
-  } catch (parseError: any) {
+  } catch (parseError) {
+    // Likely Bad Gateway or Gateway Timeout from upstream's reverse proxy
+    const hash = req.key?.hash;
     const statusMessage = proxyRes.statusMessage || "Unknown error";
-    // Likely Bad Gateway or Gateway Timeout from reverse proxy/load balancer
-    logger.warn(
-      { statusCode, statusMessage, key: req.key?.hash },
-      parseError.message
-    );
+    req.log.warn({ statusCode, statusMessage, key: hash }, parseError.message);
 
     const errorObject = {
       statusCode,
@@ -247,51 +263,123 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
       proxy_note: `This is likely a temporary error with the upstream service.`,
     };
     writeErrorResponse(req, res, statusCode, errorObject);
-    throw new Error(parseError.message);
+    throw new HttpError(statusCode, parseError.message);
   }
 
-  logger.warn(
-    {
-      statusCode,
-      type: errorPayload.error?.code,
-      errorPayload,
-      key: req.key?.hash,
-    },
+  const errorType =
+    errorPayload.error?.code ||
+    errorPayload.error?.type ||
+    getAwsErrorType(proxyRes.headers["x-amzn-errortype"]);
+
+  req.log.warn(
+    { statusCode, type: errorType, errorPayload, key: req.key?.hash },
     `Received error response from upstream. (${proxyRes.statusMessage})`
   );
 
+  const service = req.key!.service;
+  if (service === "aws") {
+    // Try to standardize the error format for AWS
+    errorPayload.error = { message: errorPayload.message, type: errorType };
+    delete errorPayload.message;
+  }
+
   if (statusCode === 400) {
-    // Bad request (likely prompt is too long)
-    if (req.outboundApi === "openai") {
-      errorPayload.proxy_note = `Upstream service rejected the request as invalid. Your prompt may be too long for ${req.body?.model}.`;
-    } else if (req.outboundApi === "anthropic") {
-      maybeHandleMissingPreambleError(req, errorPayload);
+    // Bad request. For OpenAI, this is usually due to prompt length.
+    // For Anthropic, this is usually due to missing preamble.
+    switch (service) {
+      case "openai":
+      case "google-palm":
+      case "azure":
+        const filteredCodes = ["content_policy_violation", "content_filter"];
+        if (filteredCodes.includes(errorPayload.error?.code)) {
+          errorPayload.proxy_note = `Request was filtered by the upstream API's content moderation system. Modify your prompt and try again.`;
+          refundLastAttempt(req);
+        } else if (errorPayload.error?.code === "billing_hard_limit_reached") {
+          // For some reason, some models return this 400 error instead of the
+          // same 429 billing error that other models return.
+          handleOpenAIRateLimitError(req, tryAgainMessage, errorPayload);
+        } else {
+          errorPayload.proxy_note = `The upstream API rejected the request. Your prompt may be too long for ${req.body?.model}.`;
+        }
+        break;
+      case "anthropic":
+      case "aws":
+        maybeHandleMissingPreambleError(req, errorPayload);
+        break;
+      default:
+        assertNever(service);
     }
   } else if (statusCode === 401) {
     // Key is invalid or was revoked
-    keyPool.disable(req.key!);
+    keyPool.disable(req.key!, "revoked");
     errorPayload.proxy_note = `API key is invalid or revoked. ${tryAgainMessage}`;
+  } else if (statusCode === 403) {
+    // Amazon is the only service that returns 403.
+    switch (errorType) {
+      case "UnrecognizedClientException":
+        // Key is invalid.
+        keyPool.disable(req.key!, "revoked");
+        errorPayload.proxy_note = `API key is invalid or revoked. ${tryAgainMessage}`;
+        break;
+      case "AccessDeniedException":
+        req.log.error(
+          { key: req.key?.hash, model: req.body?.model },
+          "Disabling key due to AccessDeniedException when invoking model. If credentials are valid, check IAM permissions."
+        );
+        keyPool.disable(req.key!, "revoked");
+        errorPayload.proxy_note = `API key doesn't have access to the requested resource.`;
+        break;
+      default:
+        errorPayload.proxy_note = `Received 403 error. Key may be invalid.`;
+    }
   } else if (statusCode === 429) {
-    // OpenAI uses this for a bunch of different rate-limiting scenarios.
-    if (req.outboundApi === "openai") {
-      handleOpenAIRateLimitError(req, tryAgainMessage, errorPayload);
-    } else if (req.outboundApi === "anthropic") {
-      handleAnthropicRateLimitError(req, errorPayload);
+    switch (service) {
+      case "openai":
+        handleOpenAIRateLimitError(req, tryAgainMessage, errorPayload);
+        break;
+      case "anthropic":
+        handleAnthropicRateLimitError(req, errorPayload);
+        break;
+      case "aws":
+        handleAwsRateLimitError(req, errorPayload);
+        break;
+      case "azure":
+        handleAzureRateLimitError(req, errorPayload);
+        break;
+      case "google-palm":
+        errorPayload.proxy_note = `Automatic rate limit retries are not supported for this service. Try again in a few seconds.`;
+        break;
+      default:
+        assertNever(service);
     }
   } else if (statusCode === 404) {
     // Most likely model not found
-    if (req.outboundApi === "openai") {
-      // TODO: this probably doesn't handle GPT-4-32k variants properly if the
-      // proxy has keys for both the 8k and 32k context models at the same time.
-      if (errorPayload.error?.code === "model_not_found") {
-        if (req.key!.isGpt4) {
-          errorPayload.proxy_note = `Assigned key isn't provisioned for the GPT-4 snapshot you requested. Try again to get a different key, or use Turbo.`;
-        } else {
-          errorPayload.proxy_note = `No model was found for this key.`;
+    switch (service) {
+      case "openai":
+        if (errorPayload.error?.code === "model_not_found") {
+          const requestedModel = req.body.model;
+          const modelFamily = getOpenAIModelFamily(requestedModel);
+          errorPayload.proxy_note = `The key assigned to your prompt does not support the requested model (${requestedModel}, family: ${modelFamily}).`;
+          req.log.error(
+            { key: req.key?.hash, model: requestedModel, modelFamily },
+            "Prompt was routed to a key that does not support the requested model."
+          );
         }
-      }
-    } else if (req.outboundApi === "anthropic") {
-      errorPayload.proxy_note = `The requested Claude model might not exist, or the key might not be provisioned for it.`;
+        break;
+      case "anthropic":
+        errorPayload.proxy_note = `The requested Claude model might not exist, or the key might not be provisioned for it.`;
+        break;
+      case "google-palm":
+        errorPayload.proxy_note = `The requested Google PaLM model might not exist, or the key might not be provisioned for it.`;
+        break;
+      case "aws":
+        errorPayload.proxy_note = `The requested AWS resource might not exist, or the key might not have access to it.`;
+        break;
+      case "azure":
+        errorPayload.proxy_note = `The assigned Azure deployment does not support the requested model.`;
+        break;
+      default:
+        assertNever(service);
     }
   } else {
     errorPayload.proxy_note = `Unrecognized error from upstream service.`;
@@ -306,7 +394,7 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
   }
 
   writeErrorResponse(req, res, statusCode, errorPayload);
-  throw new Error(errorPayload.error?.message);
+  throw new HttpError(statusCode, errorPayload.error?.message);
 };
 
 /**
@@ -330,7 +418,7 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
 */
 function maybeHandleMissingPreambleError(
   req: Request,
-  errorPayload: Record<string, any>
+  errorPayload: ProxiedErrorPayload
 ) {
   if (
     errorPayload.error?.type === "invalid_request_error" &&
@@ -341,11 +429,8 @@ function maybeHandleMissingPreambleError(
       "Request failed due to missing preamble. Key will be marked as such for subsequent requests."
     );
     keyPool.update(req.key!, { requiresPreamble: true });
-    if (config.queueMode !== "none") {
-      reenqueueRequest(req);
-      throw new RetryableError("Claude request re-enqueued to add preamble.");
-    }
-    errorPayload.proxy_note = `This Claude key requires special prompt formatting. Try again; the proxy will reformat your prompt next time.`;
+    reenqueueRequest(req);
+    throw new RetryableError("Claude request re-enqueued to add preamble.");
   } else {
     errorPayload.proxy_note = `Proxy received unrecognized error from Anthropic. Check the specific error for more information.`;
   }
@@ -353,60 +438,155 @@ function maybeHandleMissingPreambleError(
 
 function handleAnthropicRateLimitError(
   req: Request,
-  errorPayload: Record<string, any>
+  errorPayload: ProxiedErrorPayload
 ) {
   if (errorPayload.error?.type === "rate_limit_error") {
     keyPool.markRateLimited(req.key!);
-    if (config.queueMode !== "none") {
-      reenqueueRequest(req);
-      throw new RetryableError("Claude rate-limited request re-enqueued.");
-    }
-    errorPayload.proxy_note = `There are too many in-flight requests for this key. Try again later.`;
+    reenqueueRequest(req);
+    throw new RetryableError("Claude rate-limited request re-enqueued.");
   } else {
     errorPayload.proxy_note = `Unrecognized rate limit error from Anthropic. Key may be over quota.`;
   }
 }
 
+function handleAwsRateLimitError(
+  req: Request,
+  errorPayload: ProxiedErrorPayload
+) {
+  const errorType = errorPayload.error?.type;
+  switch (errorType) {
+    case "ThrottlingException":
+      keyPool.markRateLimited(req.key!);
+      reenqueueRequest(req);
+      throw new RetryableError("AWS rate-limited request re-enqueued.");
+    case "ModelNotReadyException":
+      errorPayload.proxy_note = `The requested model is overloaded. Try again in a few seconds.`;
+      break;
+    default:
+      errorPayload.proxy_note = `Unrecognized rate limit error from AWS. (${errorType})`;
+  }
+}
+
 function handleOpenAIRateLimitError(
   req: Request,
   tryAgainMessage: string,
-  errorPayload: Record<string, any>
+  errorPayload: ProxiedErrorPayload
 ): Record<string, any> {
   const type = errorPayload.error?.type;
-  if (type === "insufficient_quota") {
-    // Billing quota exceeded (key is dead, disable it)
-    keyPool.disable(req.key!);
-    errorPayload.proxy_note = `Assigned key's quota has been exceeded. ${tryAgainMessage}`;
-  } else if (type === "billing_not_active") {
-    // Billing is not active (key is dead, disable it)
-    keyPool.disable(req.key!);
-    errorPayload.proxy_note = `Assigned key was deactivated by OpenAI. ${tryAgainMessage}`;
-  } else if (type === "requests" || type === "tokens") {
-    // Per-minute request or token rate limit is exceeded, which we can retry
-    keyPool.markRateLimited(req.key!);
-    if (config.queueMode !== "none") {
+  switch (type) {
+    case "insufficient_quota":
+    case "invalid_request_error": // this is the billing_hard_limit_reached error seen in some cases
+      // Billing quota exceeded (key is dead, disable it)
+      keyPool.disable(req.key!, "quota");
+      errorPayload.proxy_note = `Assigned key's quota has been exceeded. ${tryAgainMessage}`;
+      break;
+    case "access_terminated":
+      // Account banned (key is dead, disable it)
+      keyPool.disable(req.key!, "revoked");
+      errorPayload.proxy_note = `Assigned key has been banned by OpenAI for policy violations. ${tryAgainMessage}`;
+      break;
+    case "billing_not_active":
+      // Key valid but account billing is delinquent
+      keyPool.disable(req.key!, "quota");
+      errorPayload.proxy_note = `Assigned key has been disabled due to delinquent billing. ${tryAgainMessage}`;
+      break;
+    case "requests":
+    case "tokens":
+      keyPool.markRateLimited(req.key!);
+      if (errorPayload.error?.message?.match(/on requests per day/)) {
+        // This key has a very low rate limit, so we can't re-enqueue it.
+        errorPayload.proxy_note = `Assigned key has reached its per-day request limit for this model. Try another model.`;
+        break;
+      }
+
+      // Per-minute request or token rate limit is exceeded, which we can retry
       reenqueueRequest(req);
-      // This is confusing, but it will bubble up to the top-level response
-      // handler and cause the request to go back into the request queue.
       throw new RetryableError("Rate-limited request re-enqueued.");
-    }
-    errorPayload.proxy_note = `Assigned key's '${type}' rate limit has been exceeded. Try again later.`;
-  } else {
-    // OpenAI probably overloaded
-    errorPayload.proxy_note = `This is likely a temporary error with OpenAI. Try again in a few seconds.`;
+    default:
+      errorPayload.proxy_note = `This is likely a temporary error with OpenAI. Try again in a few seconds.`;
+      break;
   }
   return errorPayload;
 }
 
-const incrementKeyUsage: ProxyResHandlerWithBody = async (_proxyRes, req) => {
-  if (isCompletionRequest(req)) {
-    keyPool.incrementPrompt(req.key!);
+function handleAzureRateLimitError(
+  req: Request,
+  errorPayload: ProxiedErrorPayload
+) {
+  const code = errorPayload.error?.code;
+  switch (code) {
+    case "429":
+      keyPool.markRateLimited(req.key!);
+      reenqueueRequest(req);
+      throw new RetryableError("Rate-limited request re-enqueued.");
+    default:
+      errorPayload.proxy_note = `Unrecognized rate limit error from Azure (${code}). Please report this.`;
+      break;
+  }
+}
+
+const incrementUsage: ProxyResHandlerWithBody = async (_proxyRes, req) => {
+  if (isTextGenerationRequest(req) || isImageGenerationRequest(req)) {
+    const model = req.body.model;
+    const tokensUsed = req.promptTokens! + req.outputTokens!;
+    req.log.debug(
+      {
+        model,
+        tokensUsed,
+        promptTokens: req.promptTokens,
+        outputTokens: req.outputTokens,
+      },
+      `Incrementing usage for model`
+    );
+    keyPool.incrementUsage(req.key!, model, tokensUsed);
     if (req.user) {
       incrementPromptCount(req.user.token);
+      incrementTokenCount(req.user.token, model, req.outboundApi, tokensUsed);
     }
   }
 };
 
+const countResponseTokens: ProxyResHandlerWithBody = async (
+  _proxyRes,
+  req,
+  _res,
+  body
+) => {
+  if (req.outboundApi === "openai-image") {
+    req.outputTokens = req.promptTokens;
+    req.promptTokens = 0;
+    return;
+  }
+
+  // This function is prone to breaking if the upstream API makes even minor
+  // changes to the response format, especially for SSE responses. If you're
+  // seeing errors in this function, check the reassembled response body from
+  // handleStreamedResponse to see if the upstream API has changed.
+  try {
+    assertJsonResponse(body);
+    const service = req.outboundApi;
+    const completion = getCompletionFromBody(req, body);
+    const tokens = await countTokens({ req, completion, service });
+
+    req.log.debug(
+      { service, tokens, prevOutputTokens: req.outputTokens },
+      `Counted tokens for completion`
+    );
+    if (req.tokenizerInfo) {
+      req.tokenizerInfo.completion_tokens = tokens;
+    }
+
+    req.outputTokens = tokens.token_count;
+  } catch (error) {
+    req.log.warn(
+      error,
+      "Error while counting completion tokens; assuming `max_output_tokens`"
+    );
+    // req.outputTokens will already be set to `max_output_tokens` from the
+    // prompt counting middleware, so we don't need to do anything here.
+  }
+};
+
 const trackRateLimit: ProxyResHandlerWithBody = async (proxyRes, req) => {
   keyPool.updateRateLimits(req.key!, proxyRes.headers);
 };
@@ -430,3 +610,14 @@ const copyHttpHeaders: ProxyResHandlerWithBody = async (
     res.setHeader(key, proxyRes.headers[key] as string);
   });
 };
+
+function getAwsErrorType(header: string | string[] | undefined) {
+  const val = String(header).match(/^(\w+):?/)?.[1];
+  return val || String(header);
+}
+
+function assertJsonResponse(body: any): asserts body is Record<string, any> {
+  if (typeof body !== "object") {
+    throw new Error("Expected response to be an object");
+  }
+}
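One detail worth calling out from the hunk above: AWS reports its error class in the `x-amzn-errortype` header, often with a trailing URI, which `getAwsErrorType` strips. An illustrative call (hypothetical header value):

    getAwsErrorType("ThrottlingException:http://internal/")  // => "ThrottlingException"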
@@ -1,10 +1,15 @@
 import { Request } from "express";
 import { config } from "../../../config";
-import { AIService } from "../../../key-management";
-import { logQueue } from "../../../prompt-logging";
-import { isCompletionRequest } from "../common";
+import { logQueue } from "../../../shared/prompt-logging";
+import {
+  getCompletionFromBody,
+  getModelFromBody,
+  isImageGenerationRequest,
+  isTextGenerationRequest,
+} from "../common";
 import { ProxyResHandlerWithBody } from ".";
-import { logger } from "../../../logger";
+import { assertNever } from "../../../shared/utils";
+import { OpenAIChatMessage } from "../request/preprocessors/transform-outbound-payload";
 
 /** If prompt logging is enabled, enqueues the prompt for logging. */
 export const logPrompt: ProxyResHandlerWithBody = async (
@@ -20,59 +25,81 @@ export const logPrompt: ProxyResHandlerWithBody = async (
     throw new Error("Expected body to be an object");
   }
 
-  if (!isCompletionRequest(req)) {
-    return;
-  }
+  const loggable =
+    isTextGenerationRequest(req) || isImageGenerationRequest(req);
+  if (!loggable) return;
 
-  const promptPayload = getPromptForRequest(req);
+  const promptPayload = getPromptForRequest(req, responseBody);
   const promptFlattened = flattenMessages(promptPayload);
-  const response = getResponseForService({
-    service: req.outboundApi,
-    body: responseBody,
-  });
+  const response = getCompletionFromBody(req, responseBody);
+  const model = getModelFromBody(req, responseBody);
 
   logQueue.enqueue({
     endpoint: req.inboundApi,
     promptRaw: JSON.stringify(promptPayload),
     promptFlattened,
-    model: response.model, // may differ from the requested model
-    response: response.completion,
+    model,
+    response,
   });
 };
 
-type OaiMessage = {
-  role: "user" | "assistant" | "system";
-  content: string;
+type OaiImageResult = {
+  prompt: string;
+  size: string;
+  style: string;
+  quality: string;
+  revisedPrompt?: string;
 };
 
-const getPromptForRequest = (req: Request): string | OaiMessage[] => {
+const getPromptForRequest = (
+  req: Request,
+  responseBody: Record<string, any>
+): string | OpenAIChatMessage[] | OaiImageResult => {
   // Since the prompt logger only runs after the request has been proxied, we
   // can assume the body has already been transformed to the target API's
   // format.
-  if (req.outboundApi === "anthropic") {
-    return req.body.prompt;
-  } else {
-    return req.body.messages;
+  switch (req.outboundApi) {
+    case "openai":
+      return req.body.messages;
+    case "openai-text":
+      return req.body.prompt;
+    case "openai-image":
+      return {
+        prompt: req.body.prompt,
+        size: req.body.size,
+        style: req.body.style,
+        quality: req.body.quality,
+        revisedPrompt: responseBody.data[0].revised_prompt,
+      };
+    case "anthropic":
+      return req.body.prompt;
+    case "google-palm":
+      return req.body.prompt.text;
+    default:
+      assertNever(req.outboundApi);
   }
 };
 
-const flattenMessages = (messages: string | OaiMessage[]): string => {
-  if (typeof messages === "string") {
-    return messages.trim();
+const flattenMessages = (
+  val: string | OpenAIChatMessage[] | OaiImageResult
+): string => {
+  if (typeof val === "string") {
+    return val.trim();
   }
-  return messages.map((m) => `${m.role}: ${m.content}`).join("\n");
-};
-
-const getResponseForService = ({
-  service,
-  body,
-}: {
-  service: AIService;
-  body: Record<string, any>;
-}): { completion: string; model: string } => {
-  if (service === "anthropic") {
-    return { completion: body.completion.trim(), model: body.model };
-  } else {
-    return { completion: body.choices[0].message.content, model: body.model };
-  }
+  if (Array.isArray(val)) {
+    return val
+      .map(({ content, role }) => {
+        const text = Array.isArray(content)
+          ? content
+              .map((c) => {
+                if ("text" in c) return c.text;
+                if ("image_url" in c) return "(( Attached Image ))";
+              })
+              .join("\n")
+          : content;
+        return `${role}: ${text}`;
+      })
+      .join("\n");
+  }
+  return val.prompt.trim();
 };
@@ -0,0 +1,27 @@
+import { ProxyResHandlerWithBody } from "./index";
+import { mirrorGeneratedImage, OpenAIImageGenerationResult } from "../../../shared/file-storage/mirror-generated-image";
+
+export const saveImage: ProxyResHandlerWithBody = async (
+  _proxyRes,
+  req,
+  _res,
+  body,
+) => {
+  if (req.outboundApi !== "openai-image") {
+    return;
+  }
+
+  if (typeof body !== "object") {
+    throw new Error("Expected body to be an object");
+  }
+
+  if (body.data) {
+    const baseUrl = req.protocol + "://" + req.get("host");
+    const prompt = body.data[0].revised_prompt ?? req.body.prompt;
+    await mirrorGeneratedImage(
+      baseUrl,
+      prompt,
+      body as OpenAIImageGenerationResult
+    );
+  }
+};
@@ -0,0 +1,48 @@
+import { OpenAIChatCompletionStreamEvent } from "../index";
+
+export type AnthropicCompletionResponse = {
+  completion: string;
+  stop_reason: string;
+  truncated: boolean;
+  stop: any;
+  model: string;
+  log_id: string;
+  exception: null;
+};
+
+/**
+ * Given a list of OpenAI chat completion events, compiles them into a single
+ * finalized Anthropic completion response so that non-streaming middleware
+ * can operate on it as if it were a blocking response.
+ */
+export function mergeEventsForAnthropic(
+  events: OpenAIChatCompletionStreamEvent[]
+): AnthropicCompletionResponse {
+  let merged: AnthropicCompletionResponse = {
+    log_id: "",
+    exception: null,
+    model: "",
+    completion: "",
+    stop_reason: "",
+    truncated: false,
+    stop: null,
+  };
+  merged = events.reduce((acc, event, i) => {
+    // The first event will only contain role assignment and response metadata
+    if (i === 0) {
+      acc.log_id = event.id;
+      acc.model = event.model;
+      acc.completion = "";
+      acc.stop_reason = "";
+      return acc;
+    }
+
+    acc.stop_reason = event.choices[0].finish_reason ?? "";
+    if (event.choices[0].delta.content) {
+      acc.completion += event.choices[0].delta.content;
+    }
+
+    return acc;
+  }, merged);
+  return merged;
+}
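A quick sketch of how the aggregator above behaves (hypothetical event values; the first chunk carries only role and metadata, later chunks carry deltas):

    const merged = mergeEventsForAnthropic([
      { id: "evt-1", object: "chat.completion.chunk", created: 0, model: "claude-v2",
        choices: [{ index: 0, delta: { role: "assistant" }, finish_reason: null }] },
      { id: "evt-1", object: "chat.completion.chunk", created: 0, model: "claude-v2",
        choices: [{ index: 0, delta: { content: "Hello" }, finish_reason: "stop_sequence" }] },
    ]);
    // merged.completion === "Hello"; merged.stop_reason === "stop_sequence"; merged.log_id === "evt-1"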
@@ -0,0 +1,58 @@
+import { OpenAIChatCompletionStreamEvent } from "../index";
+
+export type OpenAiChatCompletionResponse = {
+  id: string;
+  object: string;
+  created: number;
+  model: string;
+  choices: {
+    message: { role: string; content: string };
+    finish_reason: string | null;
+    index: number;
+  }[];
+};
+
+/**
+ * Given a list of OpenAI chat completion events, compiles them into a single
+ * finalized OpenAI chat completion response so that non-streaming middleware
+ * can operate on it as if it were a blocking response.
+ */
+export function mergeEventsForOpenAIChat(
+  events: OpenAIChatCompletionStreamEvent[]
+): OpenAiChatCompletionResponse {
+  let merged: OpenAiChatCompletionResponse = {
+    id: "",
+    object: "",
+    created: 0,
+    model: "",
+    choices: [],
+  };
+  merged = events.reduce((acc, event, i) => {
+    // The first event will only contain role assignment and response metadata
+    if (i === 0) {
+      acc.id = event.id;
+      acc.object = event.object;
+      acc.created = event.created;
+      acc.model = event.model;
+      acc.choices = [
+        {
+          index: 0,
+          message: {
+            role: event.choices[0].delta.role ?? "assistant",
+            content: "",
+          },
+          finish_reason: null,
+        },
+      ];
+      return acc;
+    }
+
+    acc.choices[0].finish_reason = event.choices[0].finish_reason;
+    if (event.choices[0].delta.content) {
+      acc.choices[0].message.content += event.choices[0].delta.content;
+    }
+
+    return acc;
+  }, merged);
+  return merged;
+}
@@ -0,0 +1,57 @@
+import { OpenAIChatCompletionStreamEvent } from "../index";
+
+export type OpenAiTextCompletionResponse = {
+  id: string;
+  object: string;
+  created: number;
+  model: string;
+  choices: {
+    text: string;
+    finish_reason: string | null;
+    index: number;
+    logprobs: null;
+  }[];
+};
+
+/**
+ * Given a list of OpenAI chat completion events, compiles them into a single
+ * finalized OpenAI text completion response so that non-streaming middleware
+ * can operate on it as if it were a blocking response.
+ */
+export function mergeEventsForOpenAIText(
+  events: OpenAIChatCompletionStreamEvent[]
+): OpenAiTextCompletionResponse {
+  let merged: OpenAiTextCompletionResponse = {
+    id: "",
+    object: "",
+    created: 0,
+    model: "",
+    choices: [],
+  };
+  merged = events.reduce((acc, event, i) => {
+    // The first event will only contain role assignment and response metadata
+    if (i === 0) {
+      acc.id = event.id;
+      acc.object = event.object;
+      acc.created = event.created;
+      acc.model = event.model;
+      acc.choices = [
+        {
+          text: "",
+          index: 0,
+          finish_reason: null,
+          logprobs: null,
+        },
+      ];
+      return acc;
+    }
+
+    acc.choices[0].finish_reason = event.choices[0].finish_reason;
+    if (event.choices[0].delta.content) {
+      acc.choices[0].text += event.choices[0].delta.content;
+    }
+
+    return acc;
+  }, merged);
+  return merged;
+}
@@ -0,0 +1,42 @@
+import { APIFormat } from "../../../../shared/key-management";
+import { assertNever } from "../../../../shared/utils";
+import {
+  mergeEventsForAnthropic,
+  mergeEventsForOpenAIChat,
+  mergeEventsForOpenAIText,
+  OpenAIChatCompletionStreamEvent
+} from "./index";
+
+/**
+ * Collects SSE events containing incremental chat completion responses and
+ * compiles them into a single finalized response for downstream middleware.
+ */
+export class EventAggregator {
+  private readonly format: APIFormat;
+  private readonly events: OpenAIChatCompletionStreamEvent[];
+
+  constructor({ format }: { format: APIFormat }) {
+    this.events = [];
+    this.format = format;
+  }
+
+  addEvent(event: OpenAIChatCompletionStreamEvent) {
+    this.events.push(event);
+  }
+
+  getFinalResponse() {
+    switch (this.format) {
+      case "openai":
+        return mergeEventsForOpenAIChat(this.events);
+      case "openai-text":
+        return mergeEventsForOpenAIText(this.events);
+      case "anthropic":
+        return mergeEventsForAnthropic(this.events);
+      case "google-palm":
+      case "openai-image":
+        throw new Error(`SSE aggregation not supported for ${this.format}`);
+      default:
+        assertNever(this.format);
+    }
+  }
+}
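Usage follows directly from the class surface above; a minimal sketch (the `chunks` array is hypothetical):

    const agg = new EventAggregator({ format: "openai" });
    for (const event of chunks) agg.addEvent(event);  // chunks: OpenAIChatCompletionStreamEvent[]
    const body = agg.getFinalResponse();              // full chat.completion-shaped response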
@@ -0,0 +1,30 @@
+export type SSEResponseTransformArgs = {
+  data: string;
+  lastPosition: number;
+  index: number;
+  fallbackId: string;
+  fallbackModel: string;
+};
+
+export type OpenAIChatCompletionStreamEvent = {
+  id: string;
+  object: "chat.completion.chunk";
+  created: number;
+  model: string;
+  choices: {
+    index: number;
+    delta: { role?: string; content?: string };
+    finish_reason: string | null;
+  }[];
+}
+
+export type StreamingCompletionTransformer = (
+  params: SSEResponseTransformArgs
+) => { position: number; event?: OpenAIChatCompletionStreamEvent };
+
+export { openAITextToOpenAIChat } from "./transformers/openai-text-to-openai";
+export { anthropicV1ToOpenAI } from "./transformers/anthropic-v1-to-openai";
+export { anthropicV2ToOpenAI } from "./transformers/anthropic-v2-to-openai";
+export { mergeEventsForOpenAIChat } from "./aggregators/openai-chat";
+export { mergeEventsForOpenAIText } from "./aggregators/openai-text";
+export { mergeEventsForAnthropic } from "./aggregators/anthropic";
@@ -0,0 +1,29 @@
export type ServerSentEvent = { id?: string; type?: string; data: string };

/** Given a string of SSE data, parse it into a `ServerSentEvent` object. */
export function parseEvent(event: string) {
  const buffer: ServerSentEvent = { data: "" };
  return event.split(/\r?\n/).reduce(parseLine, buffer);
}

function parseLine(event: ServerSentEvent, line: string) {
  const separator = line.indexOf(":");
  const field = separator === -1 ? line : line.slice(0, separator);
  const value = separator === -1 ? "" : line.slice(separator + 1);

  switch (field) {
    case "id":
      event.id = value.trim();
      break;
    case "event":
      event.type = value.trim();
      break;
    case "data":
      event.data += value.trimStart();
      break;
    default:
      break;
  }

  return event;
}
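A quick usage sketch for the parser (the raw message is a made-up example): the `event:` field maps to `type`, and consecutive `data:` lines are concatenated.

const evt = parseEvent('event: completion\ndata: {"completion":"Hi"}');
// evt => { type: "completion", data: '{"completion":"Hi"}' }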
@@ -0,0 +1,138 @@
import { Transform, TransformOptions } from "stream";
import { logger } from "../../../../logger";
import { APIFormat } from "../../../../shared/key-management";
import { assertNever } from "../../../../shared/utils";
import {
  anthropicV1ToOpenAI,
  anthropicV2ToOpenAI,
  OpenAIChatCompletionStreamEvent,
  openAITextToOpenAIChat,
  StreamingCompletionTransformer,
} from "./index";
import { passthroughToOpenAI } from "./transformers/passthrough-to-openai";

const genlog = logger.child({ module: "sse-transformer" });

type SSEMessageTransformerOptions = TransformOptions & {
  requestedModel: string;
  requestId: string;
  inputFormat: APIFormat;
  inputApiVersion?: string;
  logger?: typeof logger;
};

/**
 * Transforms SSE messages from one API format to OpenAI chat.completion.chunks.
 * Emits the original string SSE message as an "originalMessage" event.
 */
export class SSEMessageTransformer extends Transform {
  private lastPosition: number;
  private msgCount: number;
  private readonly inputFormat: APIFormat;
  private readonly transformFn: StreamingCompletionTransformer;
  private readonly log;
  private readonly fallbackId: string;
  private readonly fallbackModel: string;

  constructor(options: SSEMessageTransformerOptions) {
    super({ ...options, readableObjectMode: true });
    this.log = options.logger?.child({ module: "sse-transformer" }) ?? genlog;
    this.lastPosition = 0;
    this.msgCount = 0;
    this.transformFn = getTransformer(
      options.inputFormat,
      options.inputApiVersion
    );
    this.inputFormat = options.inputFormat;
    this.fallbackId = options.requestId;
    this.fallbackModel = options.requestedModel;
    this.log.debug(
      {
        fn: this.transformFn.name,
        format: options.inputFormat,
        version: options.inputApiVersion,
      },
      "Selected SSE transformer"
    );
  }

  _transform(chunk: Buffer, _encoding: BufferEncoding, callback: Function) {
    try {
      const originalMessage = chunk.toString();
      const { event: transformedMessage, position: newPosition } =
        this.transformFn({
          data: originalMessage,
          lastPosition: this.lastPosition,
          index: this.msgCount++,
          fallbackId: this.fallbackId,
          fallbackModel: this.fallbackModel,
        });
      this.lastPosition = newPosition;

      // Special case for Azure OpenAI, which is 99% the same as OpenAI but
      // sometimes emits an extra event at the beginning of the stream with the
      // content moderation system's response to the prompt. A lot of frontends
      // don't expect this and neither does our event aggregator, so we drop it.
      if (this.inputFormat === "openai" && this.msgCount <= 1) {
        if (originalMessage.includes("prompt_filter_results")) {
          this.log.debug("Dropping Azure OpenAI content moderation SSE event");
          return callback();
        }
      }

      this.emit("originalMessage", originalMessage);

      // Some events may not be transformed, e.g. ping events
      if (!transformedMessage) return callback();

      if (this.msgCount === 1) {
        // TODO: does this need to be skipped for passthroughToOpenAI?
        this.push(createInitialMessage(transformedMessage));
      }
      this.push(transformedMessage);
      callback();
    } catch (err) {
      this.log.error(err, "Error transforming SSE message");
      callback(err);
    }
  }
}

function getTransformer(
  responseApi: APIFormat,
  version?: string
): StreamingCompletionTransformer {
  switch (responseApi) {
    case "openai":
      return passthroughToOpenAI;
    case "openai-text":
      return openAITextToOpenAIChat;
    case "anthropic":
      return version === "2023-01-01"
        ? anthropicV1ToOpenAI
        : anthropicV2ToOpenAI;
    case "google-palm":
    case "openai-image":
      throw new Error(`SSE transformation not supported for ${responseApi}`);
    default:
      assertNever(responseApi);
  }
}

/**
 * OpenAI streaming chat completions start with an event that contains only the
 * metadata and role (always 'assistant') for the response. To simulate this
 * for APIs where the first event contains actual content, we create a fake
 * initial event with no content but correct metadata.
 */
function createInitialMessage(
  event: OpenAIChatCompletionStreamEvent
): OpenAIChatCompletionStreamEvent {
  return {
    ...event,
    choices: event.choices.map((choice) => ({
      ...choice,
      delta: { role: "assistant", content: "" },
    })),
  };
}
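A sketch of how these pieces are meant to compose (the `upstream` response stream and the surrounding plumbing are assumed; `SSEStreamAdapter` and `EventAggregator` are the classes defined elsewhere in this changeset):

const adapter = new SSEStreamAdapter({ contentType: "text/event-stream" });
const transformer = new SSEMessageTransformer({
  inputFormat: "anthropic",
  inputApiVersion: "2023-06-01",
  requestId: "req-123",
  requestedModel: "claude-v2",
});
const aggregator = new EventAggregator({ format: "openai" });
// The transformer is in object mode; each emitted chunk is a chat.completion.chunk.
transformer.on("data", (event) => aggregator.addEvent(event));
upstream.pipe(adapter).pipe(transformer);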
@@ -0,0 +1,114 @@
import { Transform, TransformOptions } from "stream";
// @ts-ignore
import { Parser } from "lifion-aws-event-stream";
import { logger } from "../../../../logger";
import { RetryableError } from "../index";

const log = logger.child({ module: "sse-stream-adapter" });

type SSEStreamAdapterOptions = TransformOptions & { contentType?: string };
type AwsEventStreamMessage = {
  headers: {
    ":message-type": "event" | "exception";
    ":exception-type"?: string;
  };
  payload: { message?: string /** base64 encoded */; bytes?: string };
};

/**
 * Receives either text chunks or AWS binary event stream chunks and emits
 * full SSE events.
 */
export class SSEStreamAdapter extends Transform {
  private readonly isAwsStream;
  private parser = new Parser();
  private partialMessage = "";

  constructor(options?: SSEStreamAdapterOptions) {
    super(options);
    this.isAwsStream =
      options?.contentType === "application/vnd.amazon.eventstream";

    this.parser.on("data", (data: AwsEventStreamMessage) => {
      const message = this.processAwsEvent(data);
      if (message) {
        this.push(Buffer.from(message + "\n\n"), "utf8");
      }
    });
  }

  protected processAwsEvent(event: AwsEventStreamMessage): string | null {
    const { payload, headers } = event;
    if (headers[":message-type"] === "exception" || !payload.bytes) {
      const eventStr = JSON.stringify(event);
      // Under high load, AWS can rugpull us by returning a 200 and starting
      // the stream but then immediately sending a rate limit error as the
      // first event. My guess is some race condition in their rate limiting
      // check that occurs if two requests arrive at the same time when only
      // one concurrency slot is available.
      if (headers[":exception-type"] === "throttlingException") {
        log.warn(
          { event: eventStr },
          "AWS request throttled after streaming has already started; retrying"
        );
        throw new RetryableError("AWS request throttled mid-stream");
      } else {
        log.error(
          { event: eventStr },
          "Received unexpected AWS event stream message"
        );
        return getFakeErrorCompletion("proxy AWS error", eventStr);
      }
    } else {
      const { bytes } = payload;
      // Technically this is a transformation, but we don't really distinguish
      // between AWS Claude and Anthropic Claude at the APIFormat level, so
      // these will short-circuit the message transformer.
      return [
        "event: completion",
        `data: ${Buffer.from(bytes, "base64").toString("utf8")}`,
      ].join("\n");
    }
  }

  _transform(chunk: Buffer, _encoding: BufferEncoding, callback: Function) {
    try {
      if (this.isAwsStream) {
        this.parser.write(chunk);
      } else {
        // We may receive multiple (or partial) SSE messages in a single chunk,
        // so we need to buffer and emit separate stream events for full
        // messages so we can parse/transform them properly.
        const str = chunk.toString("utf8");

        const fullMessages = (this.partialMessage + str).split(/\r?\n\r?\n/);
        this.partialMessage = fullMessages.pop() || "";

        for (const message of fullMessages) {
          // Mixing line endings will break some clients and our request queue
          // will have already sent \n for heartbeats, so we need to normalize
          // to \n.
          this.push(message.replace(/\r\n/g, "\n") + "\n\n");
        }
      }
      callback();
    } catch (error) {
      this.emit("error", error);
      callback(error);
    }
  }
}

function getFakeErrorCompletion(type: string, message: string) {
  const content = `\`\`\`\n[${type}: ${message}]\n\`\`\`\n`;
  const fakeEvent = JSON.stringify({
    log_id: "aws-proxy-sse-message",
    stop_reason: type,
    completion:
      "\nProxy encountered an error during streaming response.\n" + content,
    truncated: false,
    stop: null,
    model: "",
  });
  return ["event: completion", `data: ${fakeEvent}\n\n`].join("\n");
}
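To illustrate the text-mode buffering, a hypothetical example: a message split across two chunks is only emitted once its terminating blank line arrives.

const adapter = new SSEStreamAdapter();
adapter.on("data", (msg) => console.log("complete message:", msg.toString()));
adapter.write('data: {"completion":'); // partial; nothing emitted yet
adapter.write('"Hi"}\n\ndata: [DONE]\n\n'); // emits two complete messages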
@@ -0,0 +1,67 @@
import { StreamingCompletionTransformer } from "../index";
import { parseEvent, ServerSentEvent } from "../parse-sse";
import { logger } from "../../../../../logger";

const log = logger.child({
  module: "sse-transformer",
  transformer: "anthropic-v1-to-openai",
});

type AnthropicV1StreamEvent = {
  log_id?: string;
  model?: string;
  completion: string;
  stop_reason: string;
};

/**
 * Transforms an incoming Anthropic SSE (2023-01-01 API) to an equivalent
 * OpenAI chat.completion.chunk SSE.
 */
export const anthropicV1ToOpenAI: StreamingCompletionTransformer = (params) => {
  const { data, lastPosition } = params;

  const rawEvent = parseEvent(data);
  if (!rawEvent.data || rawEvent.data === "[DONE]") {
    return { position: lastPosition };
  }

  const completionEvent = asCompletion(rawEvent);
  if (!completionEvent) {
    return { position: lastPosition };
  }

  // Anthropic sends the full completion so far with each event, whereas OpenAI
  // only sends the delta. To make the SSE events compatible, we remove
  // everything before `lastPosition` from the completion.
  const newEvent = {
    id: "ant-" + (completionEvent.log_id ?? params.fallbackId),
    object: "chat.completion.chunk" as const,
    created: Date.now(),
    model: completionEvent.model ?? params.fallbackModel,
    choices: [
      {
        index: 0,
        delta: { content: completionEvent.completion?.slice(lastPosition) },
        finish_reason: completionEvent.stop_reason,
      },
    ],
  };

  return { position: completionEvent.completion.length, event: newEvent };
};

function asCompletion(event: ServerSentEvent): AnthropicV1StreamEvent | null {
  try {
    const parsed = JSON.parse(event.data);
    if (parsed.completion !== undefined && parsed.stop_reason !== undefined) {
      return parsed;
    } else {
      // noinspection ExceptionCaughtLocallyJS
      throw new Error("Missing required fields");
    }
  } catch (error) {
    log.warn({ error: error.stack, event }, "Received invalid event");
  }
  return null;
}
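A worked example of the position bookkeeping (values hypothetical): if the first event carries completion "Hello" and the second "Hello world", the transformer emits deltas "Hello" and then " world".

const first = anthropicV1ToOpenAI({
  data: 'data: {"completion":"Hello","stop_reason":null,"model":"claude-v1"}',
  lastPosition: 0, index: 0, fallbackId: "req-1", fallbackModel: "claude-v1",
});
// first.event.choices[0].delta.content === "Hello"; first.position === 5
const second = anthropicV1ToOpenAI({
  data: 'data: {"completion":"Hello world","stop_reason":"stop_sequence","model":"claude-v1"}',
  lastPosition: first.position, index: 1, fallbackId: "req-1", fallbackModel: "claude-v1",
});
// second.event.choices[0].delta.content === " world"; second.position === 11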
@@ -0,0 +1,66 @@
import { StreamingCompletionTransformer } from "../index";
import { parseEvent, ServerSentEvent } from "../parse-sse";
import { logger } from "../../../../../logger";

const log = logger.child({
  module: "sse-transformer",
  transformer: "anthropic-v2-to-openai",
});

type AnthropicV2StreamEvent = {
  log_id?: string;
  model?: string;
  completion: string;
  stop_reason: string;
};

/**
 * Transforms an incoming Anthropic SSE (2023-06-01 API) to an equivalent
 * OpenAI chat.completion.chunk SSE.
 */
export const anthropicV2ToOpenAI: StreamingCompletionTransformer = (params) => {
  const { data } = params;

  const rawEvent = parseEvent(data);
  if (!rawEvent.data || rawEvent.data === "[DONE]") {
    return { position: -1 };
  }

  const completionEvent = asCompletion(rawEvent);
  if (!completionEvent) {
    return { position: -1 };
  }

  const newEvent = {
    id: "ant-" + (completionEvent.log_id ?? params.fallbackId),
    object: "chat.completion.chunk" as const,
    created: Date.now(),
    model: completionEvent.model ?? params.fallbackModel,
    choices: [
      {
        index: 0,
        delta: { content: completionEvent.completion },
        finish_reason: completionEvent.stop_reason,
      },
    ],
  };

  return { position: completionEvent.completion.length, event: newEvent };
};

function asCompletion(event: ServerSentEvent): AnthropicV2StreamEvent | null {
  if (event.type === "ping") return null;

  try {
    const parsed = JSON.parse(event.data);
    if (parsed.completion !== undefined && parsed.stop_reason !== undefined) {
      return parsed;
    } else {
      // noinspection ExceptionCaughtLocallyJS
      throw new Error("Missing required fields");
    }
  } catch (error) {
    log.warn({ error: error.stack, event }, "Received invalid event");
  }
  return null;
}
@@ -0,0 +1,68 @@
import { SSEResponseTransformArgs } from "../index";
import { parseEvent, ServerSentEvent } from "../parse-sse";
import { logger } from "../../../../../logger";

const log = logger.child({
  module: "sse-transformer",
  transformer: "openai-text-to-openai",
});

type OpenAITextCompletionStreamEvent = {
  id: string;
  object: "text_completion";
  created: number;
  choices: {
    text: string;
    index: number;
    logprobs: null;
    finish_reason: string | null;
  }[];
  model: string;
};

export const openAITextToOpenAIChat = (params: SSEResponseTransformArgs) => {
  const { data } = params;

  const rawEvent = parseEvent(data);
  if (!rawEvent.data || rawEvent.data === "[DONE]") {
    return { position: -1 };
  }

  const completionEvent = asCompletion(rawEvent);
  if (!completionEvent) {
    return { position: -1 };
  }

  const newEvent = {
    id: completionEvent.id,
    object: "chat.completion.chunk" as const,
    created: completionEvent.created,
    model: completionEvent.model,
    choices: [
      {
        index: completionEvent.choices[0].index,
        delta: { content: completionEvent.choices[0].text },
        finish_reason: completionEvent.choices[0].finish_reason,
      },
    ],
  };

  return { position: -1, event: newEvent };
};

function asCompletion(
  event: ServerSentEvent
): OpenAITextCompletionStreamEvent | null {
  try {
    const parsed = JSON.parse(event.data);
    if (Array.isArray(parsed.choices) && parsed.choices[0].text !== undefined) {
      return parsed;
    } else {
      // noinspection ExceptionCaughtLocallyJS
      throw new Error("Missing required fields");
    }
  } catch (error) {
    log.warn({ error: error.stack, event }, "Received invalid data event");
  }
  return null;
}
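For reference, a hypothetical text_completion chunk and what the transformer makes of it:

const { event } = openAITextToOpenAIChat({
  data: 'data: {"id":"cmpl-1","object":"text_completion","created":1,"model":"gpt-3.5-turbo-instruct","choices":[{"text":"Hi","index":0,"logprobs":null,"finish_reason":null}]}',
  lastPosition: 0, index: 0, fallbackId: "req-1", fallbackModel: "gpt-3.5-turbo-instruct",
});
// event.object === "chat.completion.chunk"
// event.choices[0].delta.content === "Hi"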
@@ -0,0 +1,38 @@
import {
  OpenAIChatCompletionStreamEvent,
  SSEResponseTransformArgs,
} from "../index";
import { parseEvent, ServerSentEvent } from "../parse-sse";
import { logger } from "../../../../../logger";

const log = logger.child({
  module: "sse-transformer",
  transformer: "openai-to-openai",
});

export const passthroughToOpenAI = (params: SSEResponseTransformArgs) => {
  const { data } = params;

  const rawEvent = parseEvent(data);
  if (!rawEvent.data || rawEvent.data === "[DONE]") {
    return { position: -1 };
  }

  const completionEvent = asCompletion(rawEvent);
  if (!completionEvent) {
    return { position: -1 };
  }

  return { position: -1, event: completionEvent };
};

function asCompletion(
  event: ServerSentEvent
): OpenAIChatCompletionStreamEvent | null {
  try {
    return JSON.parse(event.data);
  } catch (error) {
    log.warn({ error: error.stack, event }, "Received invalid event");
  }
  return null;
}
@@ -0,0 +1,142 @@
import { RequestHandler, Router, Request } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
  addKey,
  createPreprocessorMiddleware,
  finalizeBody,
  createOnProxyReqHandler,
} from "./middleware/request";
import {
  createOnProxyResHandler,
  ProxyResHandlerWithBody,
} from "./middleware/response";
import { generateModelList } from "./openai";
import {
  mirrorGeneratedImage,
  OpenAIImageGenerationResult,
} from "../shared/file-storage/mirror-generated-image";

const KNOWN_MODELS = ["dall-e-2", "dall-e-3"];

let modelListCache: any = null;
let modelListValid = 0;

const handleModelRequest: RequestHandler = (_req, res) => {
  if (new Date().getTime() - modelListValid < 1000 * 60) {
    return res.status(200).json(modelListCache);
  }
  const result = generateModelList(KNOWN_MODELS);
  modelListCache = { object: "list", data: result };
  modelListValid = new Date().getTime();
  res.status(200).json(modelListCache);
};

const openaiImagesResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  if (config.promptLogging) {
    const host = req.get("host");
    body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
  }

  if (req.inboundApi === "openai") {
    req.log.info("Transforming OpenAI image response to OpenAI chat format");
    body = transformResponseForChat(body as OpenAIImageGenerationResult, req);
  }

  if (req.tokenizerInfo) {
    body.proxy_tokenizer = req.tokenizerInfo;
  }

  res.status(200).json(body);
};

/**
 * Transforms a DALL-E image generation response into a chat response, simply
 * embedding the image URL into the chat message as a Markdown image.
 */
function transformResponseForChat(
  imageBody: OpenAIImageGenerationResult,
  req: Request
): Record<string, any> {
  const prompt = imageBody.data[0].revised_prompt ?? req.body.prompt;
  const content = imageBody.data
    .map((item) => {
      const { url, b64_json } = item;
      // The original Markdown strings were lost in extraction; data-URI and
      // URL embeds are assumed here based on the docstring above.
      if (b64_json) {
        return `![${prompt}](data:image/png;base64,${b64_json})`;
      } else {
        return `![${prompt}](${url})`;
      }
    })
    .join("\n\n");

  return {
    id: "dalle-" + req.id,
    object: "chat.completion",
    created: Date.now(),
    model: req.body.model,
    usage: {
      prompt_tokens: 0,
      completion_tokens: req.outputTokens,
      total_tokens: req.outputTokens,
    },
    choices: [
      {
        message: { role: "assistant", content },
        finish_reason: "stop",
        index: 0,
      },
    ],
  };
}

const openaiImagesProxy = createQueueMiddleware({
  proxyMiddleware: createProxyMiddleware({
    target: "https://api.openai.com",
    changeOrigin: true,
    selfHandleResponse: true,
    logger,
    pathRewrite: {
      "^/v1/chat/completions": "/v1/images/generations",
    },
    on: {
      proxyReq: createOnProxyReqHandler({ pipeline: [addKey, finalizeBody] }),
      proxyRes: createOnProxyResHandler([openaiImagesResponseHandler]),
      error: handleProxyError,
    },
  }),
});

const openaiImagesRouter = Router();
openaiImagesRouter.get("/v1/models", handleModelRequest);
openaiImagesRouter.post(
  "/v1/images/generations",
  ipLimiter,
  createPreprocessorMiddleware({
    inApi: "openai-image",
    outApi: "openai-image",
    service: "openai",
  }),
  openaiImagesProxy
);
openaiImagesRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware({
    inApi: "openai",
    outApi: "openai-image",
    service: "openai",
  }),
  openaiImagesProxy
);
export const openaiImage = openaiImagesRouter;
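Client-side, the compatibility route means an OpenAI chat client can request images without knowing about the image API. A sketch (proxy base URL and auth scheme assumed):

await fetch("https://proxy.example.com/v1/chat/completions", {
  method: "POST",
  headers: { "Content-Type": "application/json", Authorization: "Bearer <user-token>" },
  body: JSON.stringify({
    model: "dall-e-3",
    messages: [{ role: "user", content: "a watercolor fox" }],
  }),
});
// The proxy rewrites the path to /v1/images/generations and responds with a
// chat.completion whose message content is a Markdown image link.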
@@ -1,47 +1,66 @@
-import { RequestHandler, Request, Router } from "express";
+import { RequestHandler, Router } from "express";
-import * as http from "http";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
-import { keyPool } from "../key-management";
+import { keyPool } from "../shared/key-management";
+import {
+  getOpenAIModelFamily,
+  ModelFamily,
+  OpenAIModelFamily,
+} from "../shared/models";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
  addKey,
+  addKeyForEmbeddingsRequest,
+  createEmbeddingsPreprocessorMiddleware,
+  createOnProxyReqHandler,
  createPreprocessorMiddleware,
  finalizeBody,
-  languageFilter,
-  limitCompletions,
-  limitOutputTokens,
+  forceModel,
+  RequestPreprocessor,
} from "./middleware/request";
import {
  createOnProxyResHandler,
  ProxyResHandlerWithBody,
} from "./middleware/response";

+// https://platform.openai.com/docs/models/overview
+export const KNOWN_OPENAI_MODELS = [
+  "gpt-4-1106-preview",
+  "gpt-4-vision-preview",
+  "gpt-4",
+  "gpt-4-0613",
+  "gpt-4-0314", // EOL 2024-06-13
+  "gpt-4-32k",
+  "gpt-4-32k-0613",
+  "gpt-4-32k-0314", // EOL 2024-06-13
+  "gpt-3.5-turbo",
+  "gpt-3.5-turbo-0301", // EOL 2024-06-13
+  "gpt-3.5-turbo-0613",
+  "gpt-3.5-turbo-16k",
+  "gpt-3.5-turbo-16k-0613",
+  "gpt-3.5-turbo-instruct",
+  "gpt-3.5-turbo-instruct-0914",
+  "text-embedding-ada-002",
+];

let modelsCache: any = null;
let modelsCacheTime = 0;

-function getModelsResponse() {
-  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
-    return modelsCache;
-  }
-
-  const gptVariants = [
-    "gpt-4",
-    "gpt-4-0314",
-    "gpt-4-32k",
-    "gpt-4-32k-0314",
-    "gpt-3.5-turbo",
-    "gpt-3.5-turbo-0301",
-  ];
-
-  const gpt4Available = keyPool.list().filter((key) => {
-    return key.service === "openai" && !key.isDisabled && key.isGpt4;
-  }).length;
-
-  const models = gptVariants
+export function generateModelList(models = KNOWN_OPENAI_MODELS) {
+  let available = new Set<OpenAIModelFamily>();
+  for (const key of keyPool.list()) {
+    if (key.isDisabled || key.service !== "openai") continue;
+    key.modelFamilies.forEach((family) =>
+      available.add(family as OpenAIModelFamily)
+    );
+  }
+  const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
+  available = new Set([...available].filter((x) => allowed.has(x)));
+
+  return models
    .map((id) => ({
      id,
      object: "model",
@@ -60,44 +79,34 @@ function getModelsResponse() {
      root: id,
      parent: null,
    }))
-    .filter((model) => {
-      if (model.id.startsWith("gpt-4")) {
-        return gpt4Available > 0;
-      }
-      return true;
-    });
-
-  modelsCache = { object: "list", data: models };
-  modelsCacheTime = new Date().getTime();
-
-  return modelsCache;
+    .filter((model) => available.has(getOpenAIModelFamily(model.id)));
}

const handleModelRequest: RequestHandler = (_req, res) => {
-  res.status(200).json(getModelsResponse());
+  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
+    return res.status(200).json(modelsCache);
+  }
+  const result = generateModelList();
+  modelsCache = { object: "list", data: result };
+  modelsCacheTime = new Date().getTime();
+  res.status(200).json(modelsCache);
};

-const rewriteRequest = (
-  proxyReq: http.ClientRequest,
-  req: Request,
-  res: http.ServerResponse
-) => {
-  const rewriterPipeline = [
-    addKey,
-    languageFilter,
-    limitOutputTokens,
-    limitCompletions,
-    finalizeBody,
-  ];
-
-  try {
-    for (const rewriter of rewriterPipeline) {
-      rewriter(proxyReq, req, res, {});
-    }
-  } catch (error) {
-    req.log.error(error, "Error while executing proxy rewriter");
-    proxyReq.destroy(error as Error);
-  }
-};
+/** Handles some turbo-instruct special cases. */
+const rewriteForTurboInstruct: RequestPreprocessor = (req) => {
+  // /v1/turbo-instruct/v1/chat/completions accepts either prompt or messages.
+  // Depending on whichever is provided, we need to set the inbound format so
+  // it is transformed correctly later.
+  if (req.body.prompt && !req.body.messages) {
+    req.inboundApi = "openai-text";
+  } else if (req.body.messages && !req.body.prompt) {
+    req.inboundApi = "openai";
+    // Set model for user since they're using a client which is not aware of
+    // turbo-instruct.
+    req.body.model = "gpt-3.5-turbo-instruct";
+  } else {
+    throw new Error("`prompt` OR `messages` must be provided");
+  }
+
+  req.url = "/v1/completions";
+};

const openaiResponseHandler: ProxyResHandlerWithBody = async (
@@ -115,50 +124,108 @@ const openaiResponseHandler: ProxyResHandlerWithBody = async (
    body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
  }

+  if (req.outboundApi === "openai-text" && req.inboundApi === "openai") {
+    req.log.info("Transforming Turbo-Instruct response to Chat format");
+    body = transformTurboInstructResponse(body);
+  }
+
+  if (req.tokenizerInfo) {
+    body.proxy_tokenizer = req.tokenizerInfo;
+  }
+
  res.status(200).json(body);
};

-const openaiProxy = createQueueMiddleware(
-  createProxyMiddleware({
+/** Only used for non-streaming responses. */
+function transformTurboInstructResponse(
+  turboInstructBody: Record<string, any>
+): Record<string, any> {
+  const transformed = { ...turboInstructBody };
+  transformed.choices = [
+    {
+      ...turboInstructBody.choices[0],
+      message: {
+        role: "assistant",
+        content: turboInstructBody.choices[0].text.trim(),
+      },
+    },
+  ];
+  delete transformed.choices[0].text;
+  return transformed;
+}
+
+const openaiProxy = createQueueMiddleware({
+  proxyMiddleware: createProxyMiddleware({
    target: "https://api.openai.com",
    changeOrigin: true,
+    selfHandleResponse: true,
+    logger,
    on: {
-      proxyReq: rewriteRequest,
+      proxyReq: createOnProxyReqHandler({
+        pipeline: [addKey, finalizeBody],
+      }),
      proxyRes: createOnProxyResHandler([openaiResponseHandler]),
      error: handleProxyError,
    },
-    selfHandleResponse: true,
-    logger,
-  })
-);
+  }),
+});

+const openaiEmbeddingsProxy = createProxyMiddleware({
+  target: "https://api.openai.com",
+  changeOrigin: true,
+  selfHandleResponse: false,
+  logger,
+  on: {
+    proxyReq: createOnProxyReqHandler({
+      pipeline: [addKeyForEmbeddingsRequest, finalizeBody],
+    }),
+    error: handleProxyError,
+  },
+});

const openaiRouter = Router();
-// Fix paths because clients don't consistently use the /v1 prefix.
-openaiRouter.use((req, _res, next) => {
-  if (!req.path.startsWith("/v1/")) {
-    req.url = `/v1${req.url}`;
-  }
-  next();
-});
openaiRouter.get("/v1/models", handleModelRequest);
+// Native text completion endpoint, only for turbo-instruct.
+openaiRouter.post(
+  "/v1/completions",
+  ipLimiter,
+  createPreprocessorMiddleware({
+    inApi: "openai-text",
+    outApi: "openai-text",
+    service: "openai",
+  }),
+  openaiProxy
+);
+// turbo-instruct compatibility endpoint, accepts either prompt or messages
+openaiRouter.post(
+  /\/v1\/turbo-instruct\/(v1\/)?chat\/completions/,
+  ipLimiter,
+  createPreprocessorMiddleware(
+    { inApi: "openai", outApi: "openai-text", service: "openai" },
+    {
+      beforeTransform: [rewriteForTurboInstruct],
+      afterTransform: [forceModel("gpt-3.5-turbo-instruct")],
+    }
+  ),
+  openaiProxy
+);
+// General chat completion endpoint. Turbo-instruct is not supported here.
openaiRouter.post(
  "/v1/chat/completions",
  ipLimiter,
-  createPreprocessorMiddleware({ inApi: "openai", outApi: "openai" }),
+  createPreprocessorMiddleware({
+    inApi: "openai",
+    outApi: "openai",
+    service: "openai",
+  }),
  openaiProxy
);
-// Redirect browser requests to the homepage.
-openaiRouter.get("*", (req, res, next) => {
-  const isBrowser = req.headers["user-agent"]?.includes("Mozilla");
-  if (isBrowser) {
-    res.redirect("/");
-  } else {
-    next();
-  }
-});
-openaiRouter.use((req, res) => {
-  req.log.warn(`Blocked openai proxy request: ${req.method} ${req.path}`);
-  res.status(404).json({ error: "Not found" });
-});
+// Embeddings endpoint.
+openaiRouter.post(
+  "/v1/embeddings",
+  ipLimiter,
+  createEmbeddingsPreprocessorMiddleware(),
+  openaiEmbeddingsProxy
+);

export const openai = openaiRouter;
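The turbo-instruct compatibility route accepts either request shape; a sketch of both forms (proxy base URL assumed):

// Chat-style body: inboundApi is set to "openai" and the model is forced to
// gpt-3.5-turbo-instruct by the afterTransform hook.
await fetch(`${proxyBase}/v1/turbo-instruct/v1/chat/completions`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ messages: [{ role: "user", content: "Hi" }] }),
});
// Text-style body: inboundApi is set to "openai-text" instead.
await fetch(`${proxyBase}/v1/turbo-instruct/v1/chat/completions`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ prompt: "Hi" }),
});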
@@ -0,0 +1,170 @@
import { Request, RequestHandler, Router } from "express";
import * as http from "http";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
  addKey,
  createOnProxyReqHandler,
  createPreprocessorMiddleware,
  finalizeBody,
  forceModel,
} from "./middleware/request";
import {
  createOnProxyResHandler,
  ProxyResHandlerWithBody,
} from "./middleware/response";

let modelsCache: any = null;
let modelsCacheTime = 0;

const getModelsResponse = () => {
  if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
    return modelsCache;
  }

  if (!config.googlePalmKey) return { object: "list", data: [] };

  const bisonVariants = ["text-bison-001"];

  const models = bisonVariants.map((id) => ({
    id,
    object: "model",
    created: new Date().getTime(),
    owned_by: "google",
    permission: [],
    root: "palm",
    parent: null,
  }));

  modelsCache = { object: "list", data: models };
  modelsCacheTime = new Date().getTime();

  return modelsCache;
};

const handleModelRequest: RequestHandler = (_req, res) => {
  res.status(200).json(getModelsResponse());
};

/** Only used for non-streaming requests. */
const palmResponseHandler: ProxyResHandlerWithBody = async (
  _proxyRes,
  req,
  res,
  body
) => {
  if (typeof body !== "object") {
    throw new Error("Expected body to be an object");
  }

  if (config.promptLogging) {
    const host = req.get("host");
    body.proxy_note = `Prompts are logged on this proxy instance. See ${host} for more information.`;
  }

  if (req.inboundApi === "openai") {
    req.log.info("Transforming Google PaLM response to OpenAI format");
    body = transformPalmResponse(body, req);
  }

  if (req.tokenizerInfo) {
    body.proxy_tokenizer = req.tokenizerInfo;
  }

  // TODO: PaLM has no streaming capability, which will pose a problem here if
  // requests wait in the queue for too long. Probably need to fake streaming
  // and return the entire completion in one stream event using the other
  // response handler.
  res.status(200).json(body);
};

/**
 * Transforms a model response from the Google PaLM API to match those from the
 * OpenAI API, for users accessing PaLM via the OpenAI-compatible endpoint.
 * This is only used for non-streaming requests as streaming requests are
 * handled on-the-fly.
 */
function transformPalmResponse(
  palmRespBody: Record<string, any>,
  req: Request
): Record<string, any> {
  const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
  return {
    id: "plm-" + v4(),
    object: "chat.completion",
    created: Date.now(),
    model: req.body.model,
    usage: {
      prompt_tokens: req.promptTokens,
      completion_tokens: req.outputTokens,
      total_tokens: totalTokens,
    },
    choices: [
      {
        message: {
          role: "assistant",
          content: palmRespBody.candidates[0].output,
        },
        finish_reason: null, // palm doesn't return this
        index: 0,
      },
    ],
  };
}

function reassignPathForPalmModel(proxyReq: http.ClientRequest, req: Request) {
  if (req.body.stream) {
    throw new Error("Google PaLM API doesn't support streaming requests");
  }

  // The PaLM API specifies the model in the URL path, not the request body.
  // This doesn't work well with our rewriter architecture, so we need to
  // manually fix it here.

  // POST https://generativelanguage.googleapis.com/v1beta2/{model=models/*}:generateText
  // POST https://generativelanguage.googleapis.com/v1beta2/{model=models/*}:generateMessage

  // The chat API (generateMessage) is not very useful at this time as it has
  // few params and no adjustable safety settings.

  proxyReq.path = proxyReq.path.replace(
    /^\/v1\/chat\/completions/,
    `/v1beta2/models/${req.body.model}:generateText`
  );
}

const googlePalmProxy = createQueueMiddleware({
  proxyMiddleware: createProxyMiddleware({
    target: "https://generativelanguage.googleapis.com",
    changeOrigin: true,
    selfHandleResponse: true,
    logger,
    on: {
      proxyReq: createOnProxyReqHandler({
        pipeline: [reassignPathForPalmModel, addKey, finalizeBody],
      }),
      proxyRes: createOnProxyResHandler([palmResponseHandler]),
      error: handleProxyError,
    },
  }),
});

const palmRouter = Router();
palmRouter.get("/v1/models", handleModelRequest);
// OpenAI-to-Google PaLM compatibility endpoint.
palmRouter.post(
  "/v1/chat/completions",
  ipLimiter,
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "google-palm", service: "google-palm" },
    { afterTransform: [forceModel("text-bison-001")] }
  ),
  googlePalmProxy
);

export const googlePalm = palmRouter;
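The effect of reassignPathForPalmModel on a hypothetical request (proxyReq and req assumed):

proxyReq.path = "/v1/chat/completions";
reassignPathForPalmModel(proxyReq, req); // assuming req.body.model === "text-bison-001"
// proxyReq.path === "/v1beta2/models/text-bison-001:generateText"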
@@ -4,10 +4,6 @@
 * a given key has generated, so our queue will simply retry requests that fail
 * with a non-billing related 429 over and over again until they succeed.
 *
- * Dequeueing can operate in one of two modes:
- * - 'fair': requests are dequeued in the order they were enqueued.
- * - 'random': requests are dequeued randomly, not really a queue at all.
- *
 * When a request to a proxied endpoint is received, we create a closure around
 * the call to http-proxy-middleware and attach it to the request. This allows
 * us to pause the request until we have a key available. Further, if the
@@ -15,53 +11,67 @@
 * back in the queue and it will be retried later using the same closure.
 */

+import crypto from "crypto";
import type { Handler, Request } from "express";
-import { config, DequeueMode } from "../config";
-import { keyPool, SupportedModel } from "../key-management";
+import { keyPool } from "../shared/key-management";
+import { getModelFamilyForRequest, MODEL_FAMILIES, ModelFamily } from "../shared/models";
+import { buildFakeSse, initializeSseStream } from "../shared/streaming";
import { logger } from "../logger";
-import { AGNAI_DOT_CHAT_IP } from "./rate-limit";
-import { buildFakeSseMessage } from "./middleware/common";
+import { getUniqueIps, SHARED_IP_ADDRESSES } from "./rate-limit";
+import { RequestPreprocessor } from "./middleware/request";
+import { handleProxyError } from "./middleware/common";

-export type QueuePartition = "claude" | "turbo" | "gpt-4";

const queue: Request[] = [];
const log = logger.child({ module: "request-queue" });

-let dequeueMode: DequeueMode = "fair";

/** Maximum number of queue slots for Agnai.chat requests. */
-const AGNAI_CONCURRENCY_LIMIT = 15;
+const AGNAI_CONCURRENCY_LIMIT = 5;
/** Maximum number of queue slots for individual users. */
const USER_CONCURRENCY_LIMIT = 1;
+const MIN_HEARTBEAT_SIZE = parseInt(process.env.MIN_HEARTBEAT_SIZE_B ?? "512");
+const MAX_HEARTBEAT_SIZE =
+  1024 * parseInt(process.env.MAX_HEARTBEAT_SIZE_KB ?? "1024");
+const HEARTBEAT_INTERVAL =
+  1000 * parseInt(process.env.HEARTBEAT_INTERVAL_SEC ?? "5");
+const LOAD_THRESHOLD = parseFloat(process.env.LOAD_THRESHOLD ?? "50");
+const PAYLOAD_SCALE_FACTOR = parseFloat(
+  process.env.PAYLOAD_SCALE_FACTOR ?? "6"
+);

-const sameIpPredicate = (incoming: Request) => (queued: Request) =>
-  queued.ip === incoming.ip;
-const sameUserPredicate = (incoming: Request) => (queued: Request) => {
-  const incomingUser = incoming.user ?? { token: incoming.ip };
-  const queuedUser = queued.user ?? { token: queued.ip };
-  return queuedUser.token === incomingUser.token;
-};
+/**
+ * Returns an identifier for a request. This is used to determine if a
+ * request is already in the queue.
+ *
+ * This can be (in order of preference):
+ * - user token assigned by the proxy operator
+ * - x-risu-tk header, if the request is from RisuAI.xyz
+ * - 'shared-ip' if the request is from a shared IP address like Agnai.chat
+ * - IP address
+ */
+function getIdentifier(req: Request) {
+  if (req.user) return req.user.token;
+  if (req.risuToken) return req.risuToken;
+  if (isFromSharedIp(req)) return "shared-ip";
+  return req.ip;
+}
+
+const sharesIdentifierWith = (incoming: Request) => (queued: Request) =>
+  getIdentifier(queued) === getIdentifier(incoming);
+
+const isFromSharedIp = (req: Request) => SHARED_IP_ADDRESSES.has(req.ip);

export function enqueue(req: Request) {
-  let enqueuedRequestCount = 0;
+  const enqueuedRequestCount = queue.filter(sharesIdentifierWith(req)).length;
  let isGuest = req.user?.token === undefined;

-  if (isGuest) {
-    enqueuedRequestCount = queue.filter(sameIpPredicate(req)).length;
-  } else {
-    enqueuedRequestCount = queue.filter(sameUserPredicate(req)).length;
-  }
-
-  // All Agnai.chat requests come from the same IP, so we allow them to have
-  // more spots in the queue. Can't make it unlimited because people will
-  // intentionally abuse it.
-  // Authenticated users always get a single spot in the queue.
+  // Requests from shared IP addresses such as Agnai.chat are exempt from IP-
+  // based rate limiting but can only occupy a certain number of slots in the
+  // queue. Authenticated users always get a single spot in the queue.
+  const isSharedIp = isFromSharedIp(req);
  const maxConcurrentQueuedRequests =
-    isGuest && req.ip === AGNAI_DOT_CHAT_IP
-      ? AGNAI_CONCURRENCY_LIMIT
-      : USER_CONCURRENCY_LIMIT;
+    isGuest && isSharedIp ? AGNAI_CONCURRENCY_LIMIT : USER_CONCURRENCY_LIMIT;
  if (enqueuedRequestCount >= maxConcurrentQueuedRequests) {
-    if (req.ip === AGNAI_DOT_CHAT_IP) {
+    if (isSharedIp) {
      // Re-enqueued requests are not counted towards the limit since they
      // already made it through the queue once.
      if (req.retryCount === 0) {
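How the identifier precedence plays out in practice (illustrative values):

// authenticated user      -> getIdentifier(req) === req.user.token  (1 slot)
// RisuAI.xyz request      -> req.risuToken (from the x-risu-tk header)
// Agnai.chat / shared IP  -> "shared-ip"  (up to AGNAI_CONCURRENCY_LIMIT slots)
// anonymous direct user   -> req.ip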
@@ -72,45 +82,36 @@ export function enqueue(req: Request) {
      }
    }
  }

-  queue.push(req);
-  req.queueOutTime = 0;
-
  // shitty hack to remove hpm's event listeners on retried requests
  removeProxyMiddlewareEventListeners(req);

  // If the request opted into streaming, we need to register a heartbeat
  // handler to keep the connection alive while it waits in the queue. We
  // deregister the handler when the request is dequeued.
-  if (req.body.stream === "true" || req.body.stream === true) {
+  const { stream } = req.body;
+  if (stream === "true" || stream === true || req.isStreaming) {
    const res = req.res!;
    if (!res.headersSent) {
      initStreaming(req);
    }
-    req.heartbeatInterval = setInterval(() => {
-      if (process.env.NODE_ENV === "production") {
-        req.res!.write(": queue heartbeat\n\n");
-      } else {
-        req.log.info(`Sending heartbeat to request in queue.`);
-        const partition = getPartitionForRequest(req);
-        const avgWait = Math.round(getEstimatedWaitTime(partition) / 1000);
-        const currentDuration = Math.round((Date.now() - req.startTime) / 1000);
-        const debugMsg = `queue length: ${queue.length}; elapsed time: ${currentDuration}s; avg wait: ${avgWait}s`;
-        req.res!.write(buildFakeSseMessage("heartbeat", debugMsg, req));
-      }
-    }, 10000);
+    registerHeartbeat(req);
+  } else if (getProxyLoad() > LOAD_THRESHOLD) {
+    throw new Error(
+      "Due to heavy traffic on this proxy, you must enable streaming for your request."
+    );
  }

-  // Register a handler to remove the request from the queue if the connection
-  // is aborted or closed before it is dequeued.
+  queue.push(req);
+  req.queueOutTime = 0;
+
  const removeFromQueue = () => {
    req.log.info(`Removing aborted request from queue.`);
    const index = queue.indexOf(req);
    if (index !== -1) {
      queue.splice(index, 1);
    }
-    if (req.heartbeatInterval) {
-      clearInterval(req.heartbeatInterval);
-    }
+    if (req.heartbeatInterval) clearInterval(req.heartbeatInterval);
+    if (req.monitorInterval) clearInterval(req.monitorInterval);
  };
  req.onAborted = removeFromQueue;
  req.res!.once("close", removeFromQueue);
@@ -122,46 +123,32 @@ export function enqueue(req: Request) {
    }
  }
}

-function getPartitionForRequest(req: Request): QueuePartition {
-  // There is a single request queue, but it is partitioned by model and API
-  // provider.
-  // - claude: requests for the Anthropic API, regardless of model
-  // - gpt-4: requests for the OpenAI API, specifically for GPT-4 models
-  // - turbo: effectively, all other requests
-  const provider = req.outboundApi;
-  const model = (req.body.model as SupportedModel) ?? "gpt-3.5-turbo";
-  if (provider === "anthropic") {
-    return "claude";
-  }
-  if (provider === "openai" && model.startsWith("gpt-4")) {
-    return "gpt-4";
-  }
-  return "turbo";
-}
-
-function getQueueForPartition(partition: QueuePartition): Request[] {
-  return queue.filter((req) => getPartitionForRequest(req) === partition);
-}
+function getQueueForPartition(partition: ModelFamily): Request[] {
+  return queue
+    .filter((req) => getModelFamilyForRequest(req) === partition)
+    .sort((a, b) => {
+      // Certain requests are exempted from IP-based rate limiting because they
+      // come from a shared IP address. To prevent these requests from starving
+      // out other requests during periods of high traffic, we sort them to the
+      // end of the queue.
+      const aIsExempted = isFromSharedIp(a);
+      const bIsExempted = isFromSharedIp(b);
+      if (aIsExempted && !bIsExempted) return 1;
+      if (!aIsExempted && bIsExempted) return -1;
+      return 0;
+    });
+}

-export function dequeue(partition: QueuePartition): Request | undefined {
+export function dequeue(partition: ModelFamily): Request | undefined {
  const modelQueue = getQueueForPartition(partition);

  if (modelQueue.length === 0) {
    return undefined;
  }

-  let req: Request;
-
-  if (dequeueMode === "fair") {
-    // Dequeue the request that has been waiting the longest
-    req = modelQueue.reduce((prev, curr) =>
-      prev.startTime < curr.startTime ? prev : curr
-    );
-  } else {
-    // Dequeue a random request
-    const index = Math.floor(Math.random() * modelQueue.length);
-    req = modelQueue[index];
-  }
+  const req = modelQueue.reduce((prev, curr) =>
+    prev.startTime < curr.startTime ? prev : curr
+  );

  queue.splice(queue.indexOf(req), 1);

  if (req.onAborted) {
@@ -169,9 +156,8 @@ export function dequeue(partition: QueuePartition): Request | undefined {
    req.onAborted = undefined;
  }

-  if (req.heartbeatInterval) {
-    clearInterval(req.heartbeatInterval);
-  }
+  if (req.heartbeatInterval) clearInterval(req.heartbeatInterval);
+  if (req.monitorInterval) clearInterval(req.monitorInterval);

  // Track the time leaving the queue now, but don't add it to the wait times
  // yet because we don't know if the request will succeed or fail. We track
@@ -190,25 +176,23 @@ export function dequeue(partition: QueuePartition): Request | undefined {
|
|||||||
 function processQueue() {
   // This isn't completely correct, because a key can service multiple models.
   // Currently if a key is locked out on one model it will also stop servicing
-  // the others, because we only track one rate limit per key.
-  const gpt4Lockout = keyPool.getLockoutPeriod("gpt-4");
-  const turboLockout = keyPool.getLockoutPeriod("gpt-3.5-turbo");
-  const claudeLockout = keyPool.getLockoutPeriod("claude-v1");
+  // the others, because we only track rate limits for the key as a whole.
 
   const reqs: (Request | undefined)[] = [];
-  if (gpt4Lockout === 0) {
-    reqs.push(dequeue("gpt-4"));
-  }
-  if (turboLockout === 0) {
-    reqs.push(dequeue("turbo"));
-  }
-  if (claudeLockout === 0) {
-    reqs.push(dequeue("claude"));
-  }
+  MODEL_FAMILIES.forEach((modelFamily) => {
+    const lockout = keyPool.getLockoutPeriod(modelFamily);
+    if (lockout === 0) {
+      reqs.push(dequeue(modelFamily));
+    }
+  });
 
   reqs.filter(Boolean).forEach((req) => {
     if (req?.proceed) {
-      req.log.info({ retries: req.retryCount }, `Dequeuing request.`);
+      const modelFamily = getModelFamilyForRequest(req!);
+      req.log.info({
+        retries: req.retryCount,
+        partition: modelFamily,
+      }, `Dequeuing request.`);
       req.proceed();
     }
   });
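The loop above replaces three hard-coded lockout checks with one pass over `MODEL_FAMILIES`, so adding a new family no longer requires touching `processQueue`. A self-contained sketch of the same pattern, with invented family names and lockout values:

```ts
// Illustrative only: the real MODEL_FAMILIES and keyPool live elsewhere.
const FAMILIES = ["turbo", "gpt4", "claude"] as const;
type Family = (typeof FAMILIES)[number];

// Pretend lockout table: milliseconds until a key for the family frees up.
const lockouts: Record<Family, number> = { turbo: 0, gpt4: 12_000, claude: 0 };
const dequeueStub = (family: Family) => `request-for-${family}`;

const ready: string[] = [];
for (const family of FAMILIES) {
  // A lockout of 0 means some key can serve this family right now.
  if (lockouts[family] === 0) ready.push(dequeueStub(family));
}
console.log(ready); // ["request-for-turbo", "request-for-claude"]
```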
@@ -241,39 +225,94 @@ function cleanQueue() {
 }
 
 export function start() {
+  MODEL_FAMILIES.forEach((modelFamily) => {
+    historicalEmas.set(modelFamily, 0);
+    currentEmas.set(modelFamily, 0);
+    estimates.set(modelFamily, 0);
+  });
   processQueue();
   cleanQueue();
   log.info(`Started request queue.`);
 }
 
-let waitTimes: { partition: QueuePartition; start: number; end: number }[] = [];
+let waitTimes: {
+  partition: ModelFamily;
+  start: number;
+  end: number;
+  isDeprioritized: boolean;
+}[] = [];
 
 /** Adds a successful request to the list of wait times. */
 export function trackWaitTime(req: Request) {
   waitTimes.push({
-    partition: getPartitionForRequest(req),
+    partition: getModelFamilyForRequest(req),
     start: req.startTime!,
     end: req.queueOutTime ?? Date.now(),
+    isDeprioritized: isFromSharedIp(req),
   });
 }
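For concreteness, a request that entered the queue at `t = 1000` and left at `t = 6000` from a shared IP would produce a record like the following (values invented; the `gpt4` family name is an assumption):

```ts
const exampleEntry = {
  partition: "gpt4",     // getModelFamilyForRequest(req)
  start: 1_000,          // req.startTime
  end: 6_000,            // req.queueOutTime
  isDeprioritized: true, // isFromSharedIp(req)
};
// Observed wait: end - start = 5000 ms. Because isDeprioritized is true,
// calculateWaitTime (below) excludes it from the published estimate.
```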
 
-/** Returns average wait time in milliseconds. */
-export function getEstimatedWaitTime(partition: QueuePartition) {
-  const now = Date.now();
-  const recentWaits = waitTimes.filter(
-    (wt) => wt.partition === partition && now - wt.end < 300 * 1000
-  );
-  if (recentWaits.length === 0) {
-    return 0;
-  }
-
-  return (
-    recentWaits.reduce((sum, wt) => sum + wt.end - wt.start, 0) /
-    recentWaits.length
-  );
+const WAIT_TIME_INTERVAL = 3000;
+const ALPHA_HISTORICAL = 0.2;
+const ALPHA_CURRENT = 0.3;
+const historicalEmas: Map<ModelFamily, number> = new Map();
+const currentEmas: Map<ModelFamily, number> = new Map();
+const estimates: Map<ModelFamily, number> = new Map();
+
+export function getEstimatedWaitTime(partition: ModelFamily) {
+  return estimates.get(partition) ?? 0;
 }
 
+/**
+ * Returns estimated wait time for the given queue partition in milliseconds.
+ * Requests which are deprioritized are not included in the calculation as they
+ * would skew the results due to their longer wait times.
+ */
+function calculateWaitTime(partition: ModelFamily) {
+  const now = Date.now();
+  const recentWaits = waitTimes
+    .filter((wait) => {
+      const isSamePartition = wait.partition === partition;
+      const isRecent = now - wait.end < 300 * 1000;
+      const isNormalPriority = !wait.isDeprioritized;
+      return isSamePartition && isRecent && isNormalPriority;
+    })
+    .map((wait) => wait.end - wait.start);
+  const recentAverage = recentWaits.length
+    ? recentWaits.reduce((sum, wait) => sum + wait, 0) / recentWaits.length
+    : 0;
+
+  const historicalEma = historicalEmas.get(partition) ?? 0;
+  historicalEmas.set(
+    partition,
+    ALPHA_HISTORICAL * recentAverage + (1 - ALPHA_HISTORICAL) * historicalEma
+  );
+
+  const currentWaits = queue
+    .filter((req) => {
+      const isSamePartition = getModelFamilyForRequest(req) === partition;
+      const isNormalPriority = !isFromSharedIp(req);
+      return isSamePartition && isNormalPriority;
+    })
+    .map((req) => now - req.startTime!);
+  const longestCurrentWait = Math.max(...currentWaits, 0);
+
+  const currentEma = currentEmas.get(partition) ?? 0;
+  currentEmas.set(
+    partition,
+    ALPHA_CURRENT * longestCurrentWait + (1 - ALPHA_CURRENT) * currentEma
+  );
+
+  return (historicalEma + currentEma) / 2;
+}
+
+setInterval(() => {
+  MODEL_FAMILIES.forEach((modelFamily) => {
+    estimates.set(modelFamily, calculateWaitTime(modelFamily));
+  });
+}, WAIT_TIME_INTERVAL);
 
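The estimator above blends two exponential moving averages every `WAIT_TIME_INTERVAL` (3 s): a slow EMA (α = 0.2) over waits completed in the last five minutes, and a faster EMA (α = 0.3) over the longest wait still sitting in the queue, then publishes their mean. One worked update with invented inputs:

```ts
const ALPHA_HISTORICAL = 0.2;
const ALPHA_CURRENT = 0.3;

let historicalEma = 10_000; // previous smoothed average of completed waits (ms)
let currentEma = 4_000;     // previous smoothed longest in-queue wait (ms)

const recentAverage = 20_000;     // this tick: average completed wait
const longestCurrentWait = 9_000; // this tick: oldest request still queued

historicalEma =
  ALPHA_HISTORICAL * recentAverage + (1 - ALPHA_HISTORICAL) * historicalEma; // 12000
currentEma =
  ALPHA_CURRENT * longestCurrentWait + (1 - ALPHA_CURRENT) * currentEma; // 5500

console.log((historicalEma + currentEma) / 2); // 8750 ms published estimate
```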
-export function getQueueLength(partition: QueuePartition | "all" = "all") {
+export function getQueueLength(partition: ModelFamily | "all" = "all") {
   if (partition === "all") {
     return queue.length;
   }
@@ -281,13 +320,27 @@ export function getQueueLength(partition: QueuePartition | "all" = "all") {
   return modelQueue.length;
 }
 
-export function createQueueMiddleware(proxyMiddleware: Handler): Handler {
+export function createQueueMiddleware({
+  beforeProxy,
+  proxyMiddleware,
+}: {
+  beforeProxy?: RequestPreprocessor;
+  proxyMiddleware: Handler;
+}): Handler {
   return (req, res, next) => {
-    if (config.queueMode === "none") {
-      return proxyMiddleware(req, res, next);
-    }
-
-    req.proceed = () => {
+    req.proceed = async () => {
+      if (beforeProxy) {
+        try {
+          // Hack to let us run asynchronous middleware before the
+          // http-proxy-middleware handler. This is used to sign AWS requests
+          // before they are proxied, as the signing is asynchronous.
+          // Unlike RequestPreprocessors, this runs every time the request is
+          // dequeued, not just the first time.
+          await beforeProxy(req);
+        } catch (err) {
+          return handleProxyError(err, req, res);
+        }
+      }
       proxyMiddleware(req, res, next);
     };
 
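With the new object signature, callers can attach an async step that runs on every dequeue. A hypothetical wiring sketch; `signAwsRequest`, `proxyHandler`, and `app` are invented placeholders, not names from this codebase:

```ts
const queuedProxy = createQueueMiddleware({
  // Runs each time the request is dequeued, e.g. to re-sign AWS requests.
  beforeProxy: async (req) => {
    await signAwsRequest(req);
  },
  proxyMiddleware: proxyHandler, // the http-proxy-middleware handler
});
app.use("/proxy", queuedProxy);
```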
@@ -307,17 +360,14 @@ export function createQueueMiddleware(proxyMiddleware: Handler): Handler {
 function killQueuedRequest(req: Request) {
   if (!req.res || req.res.writableEnded) {
     req.log.warn(`Attempted to terminate request that has already ended.`);
+    queue.splice(queue.indexOf(req), 1);
     return;
   }
   const res = req.res;
   try {
-    const message = `Your request has been terminated by the proxy because it has been in the queue for more than 5 minutes. The queue is currently ${queue.length} requests long.`;
+    const message = `Your request has been terminated by the proxy because it has been in the queue for more than 5 minutes.`;
     if (res.headersSent) {
-      const fakeErrorEvent = buildFakeSseMessage(
-        "proxy queue error",
-        message,
-        req
-      );
+      const fakeErrorEvent = buildFakeSse("proxy queue error", message, req);
       res.write(fakeErrorEvent);
       res.end();
     } else {
@@ -329,16 +379,19 @@ function killQueuedRequest(req: Request) {
 }
 
 function initStreaming(req: Request) {
-  req.log.info(`Initiating streaming for new queued request.`);
   const res = req.res!;
-  res.statusCode = 200;
-  res.setHeader("Content-Type", "text/event-stream");
-  res.setHeader("Cache-Control", "no-cache");
-  res.setHeader("Connection", "keep-alive");
-  res.setHeader("X-Accel-Buffering", "no"); // nginx-specific fix
-  res.flushHeaders();
-  res.write("\n");
-  res.write(": joining queue\n\n");
+  initializeSseStream(res);
+
+  if (req.query.badSseParser) {
+    // Some clients have a broken SSE parser that doesn't handle comments
+    // correctly. These clients can pass ?badSseParser=true to
+    // disable comments in the SSE stream.
+    res.write(getHeartbeatPayload());
+    return;
+  }
+
+  res.write(`: joining queue at position ${queue.length}\n\n`);
+  res.write(getHeartbeatPayload());
 }
 
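The `:`-prefixed writes work because the SSE spec treats any line starting with a colon as a comment that clients must discard, so the queue can emit keep-alive chatter without corrupting the event stream. A tiny sketch of the parsing rule that the `?badSseParser` flag works around:

```ts
// Per the SSE spec, lines beginning with ":" are comments to be ignored.
function classifySseLine(line: string): "comment" | "data" | "other" {
  if (line.startsWith(":")) return "comment";
  if (line.startsWith("data:")) return "data";
  return "other";
}

console.log(classifySseLine(": queue heartbeat abc123")); // "comment"
console.log(classifySseLine('data: {"done":true}'));      // "data"
```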
 /**
@@ -394,3 +447,93 @@ function removeProxyMiddlewareEventListeners(req: Request) {
     req.removeListener("error", reqOnError as any);
   }
 }
+
+export function registerHeartbeat(req: Request) {
+  const res = req.res!;
+
+  const currentSize = getHeartbeatSize();
+  req.log.debug({
+    currentSize,
+    HEARTBEAT_INTERVAL,
+    PAYLOAD_SCALE_FACTOR,
+    MAX_HEARTBEAT_SIZE,
+  }, "Joining queue with heartbeat.");
+
+  let isBufferFull = false;
+  let bufferFullCount = 0;
+  req.heartbeatInterval = setInterval(() => {
+    if (isBufferFull) {
+      bufferFullCount++;
+      if (bufferFullCount >= 3) {
+        req.log.error("Heartbeat skipped too many times; killing connection.");
+        res.destroy();
+      } else {
+        req.log.warn({ bufferFullCount }, "Heartbeat skipped; buffer is full.");
+      }
+      return;
+    }
+
+    const data = getHeartbeatPayload();
+    if (!res.write(data)) {
+      isBufferFull = true;
+      res.once("drain", () => (isBufferFull = false));
+    }
+  }, HEARTBEAT_INTERVAL);
+  monitorHeartbeat(req);
+}
+
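The `write()`/`drain` dance in `registerHeartbeat` is standard Node stream backpressure handling: `write()` returning false means the stream's buffer is past its high-water mark, and writing should pause until `drain` fires. A generic standalone sketch of the same pattern:

```ts
import { Writable } from "node:stream";

// Periodically writes a payload, skipping ticks while the buffer is full.
function startPeriodicWrites(dest: Writable, payload: string, ms: number) {
  let bufferFull = false;
  const timer = setInterval(() => {
    if (bufferFull) return; // drop this tick instead of piling up writes
    if (!dest.write(payload)) {
      bufferFull = true; // past highWaterMark; wait for the buffer to empty
      dest.once("drain", () => (bufferFull = false));
    }
  }, ms);
  return () => clearInterval(timer);
}
```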
+function monitorHeartbeat(req: Request) {
+  const res = req.res!;
+
+  let lastBytesSent = 0;
+  req.monitorInterval = setInterval(() => {
+    const bytesSent = res.socket?.bytesWritten ?? 0;
+    const bytesSinceLast = bytesSent - lastBytesSent;
+    req.log.debug(
+      {
+        previousBytesSent: lastBytesSent,
+        currentBytesSent: bytesSent,
+      },
+      "Heartbeat monitor check."
+    );
+    lastBytesSent = bytesSent;
+
+    const minBytes = Math.floor(getHeartbeatSize() / 2);
+    if (bytesSinceLast < minBytes) {
+      req.log.warn(
+        { minBytes, bytesSinceLast },
+        "Queued request is not processing heartbeat data quickly enough, or the server is overloaded; killing connection."
+      );
+      res.destroy();
+    }
+  }, HEARTBEAT_INTERVAL * 2);
+}
+
+/** Sends larger heartbeats when the queue is overloaded */
+function getHeartbeatSize() {
+  const load = getProxyLoad();
+
+  if (load <= LOAD_THRESHOLD) {
+    return MIN_HEARTBEAT_SIZE;
+  } else {
+    const excessLoad = load - LOAD_THRESHOLD;
+    const size =
+      MIN_HEARTBEAT_SIZE + Math.pow(excessLoad * PAYLOAD_SCALE_FACTOR, 2);
+    if (size > MAX_HEARTBEAT_SIZE) return MAX_HEARTBEAT_SIZE;
+    return size;
+  }
+}
+
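Because the excess load is squared, the payload stays at the minimum until load crosses the threshold and then grows rapidly. A worked example with assumed constants (the real `LOAD_THRESHOLD`, `PAYLOAD_SCALE_FACTOR`, `MIN_HEARTBEAT_SIZE`, and `MAX_HEARTBEAT_SIZE` are defined earlier in the file and may differ):

```ts
// Assumed values for illustration only.
const LOAD_THRESHOLD = 50;
const PAYLOAD_SCALE_FACTOR = 4;
const MIN_HEARTBEAT_SIZE = 512;
const MAX_HEARTBEAT_SIZE = 1024 * 16;

const sizeFor = (load: number) =>
  load <= LOAD_THRESHOLD
    ? MIN_HEARTBEAT_SIZE
    : Math.min(
        MAX_HEARTBEAT_SIZE,
        MIN_HEARTBEAT_SIZE +
          Math.pow((load - LOAD_THRESHOLD) * PAYLOAD_SCALE_FACTOR, 2)
      );

console.log(sizeFor(50));  // 512   (at or below threshold)
console.log(sizeFor(55));  // 912   (512 + (5*4)^2)
console.log(sizeFor(75));  // 10512 (512 + (25*4)^2)
console.log(sizeFor(100)); // 16384 (clamped to MAX_HEARTBEAT_SIZE)
```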
+function getHeartbeatPayload() {
+  const size = getHeartbeatSize();
+  const data =
+    process.env.NODE_ENV === "production"
+      ? crypto.randomBytes(size).toString("base64")
+      : `payload size: ${size}`;
+
+  return `: queue heartbeat ${data}\n\n`;
+}
+
+function getProxyLoad() {
+  return Math.max(getUniqueIps(), queue.length);
+}