33 Commits

Author SHA1 Message Date
nai-degen bf13a8b524 tweaks type 2023-08-28 03:52:04 -05:00
nai-degen 6453dae433 adds horrible wip git persistence layer 2023-08-24 14:17:09 -05:00
nai-degen 80ecbd78df adds HF datasets configs 2023-08-24 12:36:57 -05:00
nai-degen c05bfefba4 fixes incorrectly applied doubleCsrf to REST routes 2023-08-10 15:54:01 -05:00
nai-degen 9b184ab245 removes QUOTA_DISPLAY_MODE config as OpenAI no longer supports it 2023-08-09 18:29:38 -05:00
nai-degen 6bb67281d9 removes QUEUE_MODE config (now always enabled) 2023-08-09 18:29:34 -05:00
nai-degen 5d3fb6af3a removes IP redaction from pino 2023-08-09 18:29:29 -05:00
khanon 268165e2be Add CSRF protection to server-rendered views (khanon/oai-reverse-proxy!34) 2023-08-09 23:11:26 +00:00
nai-degen 6f4e581bf2 fixes forgotten http=true on admin cookie 2023-08-09 11:01:30 -05:00
nai-degen 358339d48b fixes issue with Claude <EOT> token disallowed 2023-08-08 17:43:12 -05:00
nai-degen c8d8e2e58f avoids instantiating new Claude tiktoken on every call 2023-08-08 17:38:03 -05:00
nai-degen d1d83b41fa uses accurate Claude tokenization 2023-08-08 17:29:36 -05:00
nai-degen 81ceee7897 adds pagination info below user table 2023-08-05 23:49:14 -05:00
nai-degen dc32e41ab5 updates user management docs 2023-08-05 23:48:33 -05:00
nai-degen 21ee00f057 fixes issue with IP count sorting 2023-08-05 23:26:15 -05:00
nai-degen 97a2b6b479 fixes build issue with missing EJS templates 2023-08-05 23:01:58 -05:00
nai-degen 61d90f3f3a fixes admin ui sort during pagination 2023-08-05 21:16:44 -05:00
khanon bb230469b2 Admin user management UI (khanon/oai-reverse-proxy!32) 2023-08-06 00:58:33 +00:00
nai-degen 125bbe6441 fixes issue with writeErrorResponse 2023-08-04 13:49:11 -05:00
nai-degen d29c304d5a increases tokenizer failsafe to 500000 characters 2023-07-27 15:21:06 -05:00
nai-degen addfa7c57b restores trial key detection via workaround 2023-07-24 14:07:02 -05:00
nai-degen e5b4c7bc9e removes key limit/trial status from infopage 2023-07-24 13:14:44 -05:00
nai-degen 51503dec14 disables key checker, mostly 2023-07-24 13:11:45 -05:00
nai-degen 00346360af fixes turbo-16k incompatibility 2023-07-23 20:13:38 -05:00
nai-degen e2bd8a6b86 extracts Risu auth into new middleware so queue can use it too 2023-07-22 13:48:02 -05:00
nai-degen b8534dafae reduces default MAX_OUTPUT_TOKENS_ANTHROPIC 2023-07-21 19:18:21 -05:00
khanon 56a4902599 Add tokenizers and configurable context size limits (khanon/oai-reverse-proxy!28) 2023-07-22 00:11:32 +00:00
khanon 7634afeea4 Implement rate limit for risuai.xyz (khanon/oai-reverse-proxy!31) 2023-07-21 21:48:07 +00:00
nai-degen 77c2309b52 correctly flags trial keys during startup even if over quota 2023-07-20 23:06:37 -05:00
khanon aa5380d2ef Rework OpenAIKeyChecker to remove usage tracking and test all keys for liveness (khanon/oai-reverse-proxy!29) 2023-07-21 04:00:12 +00:00
nai-degen cbf9f16108 removes clamp on quota display to better show glitched keys 2023-07-19 23:34:09 -05:00
breathingmanually 576423d1f8 Fix JSON parse exception when Claude finishes streaming (khanon/oai-reverse-proxy!25) 2023-07-20 01:57:50 +00:00
nai-degen c31540e54e bumps deps to address npm audit advisories 2023-07-19 11:52:58 -05:00
62 changed files with 2013 additions and 1882 deletions
+11 -14
View File
@@ -10,8 +10,7 @@
# REJECT_DISALLOWED=false # REJECT_DISALLOWED=false
# REJECT_MESSAGE="This content violates /aicg/'s acceptable use policy." # REJECT_MESSAGE="This content violates /aicg/'s acceptable use policy."
# CHECK_KEYS=true # CHECK_KEYS=true
# QUOTA_DISPLAY_MODE=full # TURBO_ONLY=false
# QUEUE_MODE=fair
# BLOCKED_ORIGINS=reddit.com,9gag.com # BLOCKED_ORIGINS=reddit.com,9gag.com
# BLOCK_MESSAGE="You must be over the age of majority in your country to use this service." # BLOCK_MESSAGE="You must be over the age of majority in your country to use this service."
# BLOCK_REDIRECT="https://roblox.com/" # BLOCK_REDIRECT="https://roblox.com/"
@@ -19,7 +18,8 @@
# Note: CHECK_KEYS is disabled by default in local development mode, but enabled # Note: CHECK_KEYS is disabled by default in local development mode, but enabled
# by default in production mode. # by default in production mode.
# Optional settings for user management. See docs/user-management.md. # Optional settings for user management and access control. See
# `docs/user-management.md` to learn how to use these.
# GATEKEEPER=none # GATEKEEPER=none
# GATEKEEPER_STORE=memory # GATEKEEPER_STORE=memory
# MAX_IPS_PER_USER=20 # MAX_IPS_PER_USER=20
@@ -28,7 +28,8 @@
# PROMPT_LOGGING=false # PROMPT_LOGGING=false
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# The values below are secret -- make sure they are set securely. # The values below are secret -- make sure they are set securely. Do NOT set
# them in the .env file of a public repository.
# For Huggingface, set them via the Secrets section in your Space's config UI. # For Huggingface, set them via the Secrets section in your Space's config UI.
# For Render, create a "secret file" called .env using the Environment tab. # For Render, create a "secret file" called .env using the Environment tab.
@@ -36,24 +37,20 @@
OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
ANTHROPIC_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ANTHROPIC_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# TEMPORARY: This will eventually be replaced by a more robust system.
# You can adjust the models used when sending OpenAI prompts to /anthropic.
# Refer to Anthropic's docs for more info (note that they don't list older
# versions of the models, but they still work).
# CLAUDE_SMALL_MODEL=claude-v1.2
# CLAUDE_BIG_MODEL=claude-v1-100k
# You can require a Bearer token for requests when using proxy_token gatekeeper. # You can require a Bearer token for requests when using proxy_token gatekeeper.
# PROXY_KEY=your-secret-key # PROXY_KEY=your-secret-key
# You can set an admin key for user management when using user_token gatekeeper. # You can set an admin key for user management when using user_token gatekeeper.
# ADMIN_KEY=your-very-secret-key # ADMIN_KEY=your-very-secret-key
# These are used for various persistence features. Refer to the docs for more # These are used to push data to a Huggingface Dataset repository.
# info. # HF_DATASET_REPO_URL=https://huggingface.co/datasets/your-username/your-dataset-name
# HF_PRIVATE_SSH_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# These are used to persist user data to Firebase across restarts.
# FIREBASE_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # FIREBASE_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# FIREBASE_RTDB_URL=https://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.firebaseio.com # FIREBASE_RTDB_URL=https://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.firebaseio.com
# This is only relevant if you want to use the prompt logging feature. # These are used to log prompts to Google Sheets.
# GOOGLE_SHEETS_SPREADSHEET_ID=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # GOOGLE_SHEETS_SPREADSHEET_ID=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# GOOGLE_SHEETS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # GOOGLE_SHEETS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-1
View File
@@ -1,7 +1,6 @@
.env .env
.venv .venv
.vscode .vscode
.venv
build build
greeting.md greeting.md
node_modules node_modules
-2
View File
@@ -40,5 +40,3 @@ To run the proxy locally for development or testing, install Node.js >= 18.0.0 a
4. Start the server in development mode with `npm run start:dev`. 4. Start the server in development mode with `npm run start:dev`.
You can also use `npm run start:dev:tsc` to enable project-wide type checking at the cost of slower startup times. `npm run type-check` can be used to run type checking without starting the server. You can also use `npm run start:dev:tsc` to enable project-wide type checking at the cost of slower startup times. `npm run type-check` can be used to run type checking without starting the server.
See the [Optional Dependencies](./docs/optional-dependencies.md) page for information on how to install the optional Claude tokenizer locally.
View File
-45
View File
@@ -1,45 +0,0 @@
# Switched to alpine both for smaller image size and because zeromq.js provides
# a working prebuilt binary for alpine. On Debian, the prebuild was not working
# and a bug in libzmq's makefile was causing the build from source to fail.
# https://github.com/zeromq/zeromq.js/issues/529#issuecomment-1370721089
FROM node:18-alpine as builder
# Install general build dependencies
RUN apk add --no-cache autoconf automake g++ libtool zeromq-dev python3 \
py3-pip git curl cmake gcc musl-dev pkgconfig openssl-dev
# Install Rust (required to build huggingface/tokenizers)
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
RUN git clone -b tokenize https://gitgud.io/khanon/oai-reverse-proxy.git /app
WORKDIR /app
RUN npm ci
RUN npm run build && \
npm prune --production
FROM node:18-alpine as runner
RUN apk add --no-cache \
zeromq-dev \
python3
COPY --from=builder /app/build /app/build
COPY --from=builder /app/node_modules /app/node_modules
COPY --from=builder /app/.venv /app/.venv
COPY --from=builder /app/package.json /app/package.json
WORKDIR /app
RUN . .venv/bin/activate
EXPOSE 7860
ENV NODE_ENV=production
# TODO: stamp with tag and git commit
ENV RENDER=true
ENV RENDER_GIT_COMMIT=ci-test
CMD [ "npm", "start" ]
+3 -4
View File
@@ -1,10 +1,9 @@
FROM node:18-bullseye FROM node:18-bullseye-slim
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y git python3 python3-pip libzmq3-dev curl cmake g++ libsodium-dev pkg-config apt-get install -y git
RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
WORKDIR /app WORKDIR /app
RUN pip3 install --no-cache-dir -r requirements.txt RUN npm install
RUN npm ci --loglevel=verbose
COPY Dockerfile greeting.md* .env* ./ COPY Dockerfile greeting.md* .env* ./
RUN npm run build RUN npm run build
EXPOSE 7860 EXPOSE 7860
-35
View File
@@ -1,35 +0,0 @@
# Optional Dependencies
## Claude tokenizer
As Anthropic does not ship a NodeJS tokenizer, the server includes a small Python script that runs alongside the proxy to tokenize Claude requests. It is automatically started when the server is launched, but requires additional dependencies to be installed. If these dependencies are not installed, the server will not be able to accurately count the number of tokens in Claude requests but will still function normally otherwise.
Note: On Windows, a Windows Firewall prompt may appear when the Claude tokenizer is started. This is normal and is caused by the Python process attempting to open a socket to communicate with the NodeJS server. You can safely allow the connection.
### Automatic installation (local development)
This will create a venv and install the required dependencies. You still need to activate the venv when running the server, and you must have Python >= 3.8.0 installed.
1. Install Python >= 3.8.0
2. Run `npm install`, which should automatically create a venv and install the required dependencies.
3. Activate the virtual environment with `source .venv/bin/activate` (Linux/Mac) or `.\.venv\Scripts\activate` (PowerShell/Windows)
- **This step is required every time you start the server from a new terminal.**
### Manual installation (local development)
1. Install Python >= 3.8.0
2. Create a virtual environment using `python -m .venv venv`
3. Activate the virtual environment with `source .venv/bin/activate` (Linux/Mac) or `.\.venv\Scripts\activate` (PowerShell/Windows)
- **This step is required every time you start the server from a new terminal.**
4. Install dependencies with `pip install -r requirements.txt`
5. Provided you have the virtual environment activated, the server will automatically start the tokenizer when it is launched.
### Docker (production deployment)
Refer to the reference Dockerfiles for examples on how to install the tokenizer. The Huggingface and Render Dockerfiles both include the tokenizer.
Generally, you will need libzmq3-dev, cmake, g++, and Python >= 3.8.0 installed. The postinstall script will automatically install the required Python dependencies.
### Troubleshooting
Ensure that:
- Python >= 3.8 is installed and in your PATH
- Python dependencies are installed (re-run `npm install`)
- Python venv is activated (see above)
- zeromq optional dependency installed successfully
- This should generally be installed automatically.
- On Windows, you may need to install MS C++ Build Tools or set msvs_version (eg `npm config set msvs_version 2019`), then re-run npm install.
- On Linux, ensure you have the appropriate build tools and headers installed for your distribution; refer to the reference Dockerfiles for examples.
+5 -9
View File
@@ -24,19 +24,19 @@ To set the password, create a `PROXY_KEY` secret in your environment.
## Per-user authentication (`GATEKEEPER=user_token`) ## Per-user authentication (`GATEKEEPER=user_token`)
This mode allows you to provision separate Bearer tokens for each user. You can manage users via the /admin/users REST API, which itself requires an admin Bearer token. This mode allows you to provision separate Bearer tokens for each user. You can manage users via the /admin/users via REST or through the admin interface at `/admin`.
To begin, set `ADMIN_KEY` to a secret value. This will be used to authenticate requests to the /admin/users REST API. To begin, set `ADMIN_KEY` to a secret value. This will be used to authenticate requests to the REST API or to log in to the UI.
[You can find an OpenAPI specification for the /admin/users REST API here.](openapi-admin-users.yaml) [You can find an OpenAPI specification for the /admin/users REST API here.](openapi-admin-users.yaml)
By default, the proxy will store user data in memory. Naturally, this means that user data will be lost when the proxy is restarted, though you can use the bulk user import/export feature to save and restore user data manually or via a script. However, the proxy also supports persisting user data to an external data store with some additional configuration. By default, the proxy will store user data in memory. Naturally, this means that user data will be lost when the proxy is restarted, though you can use the user import/export feature to save and restore user data manually or via a script. However, the proxy also supports persisting user data to an external data store with some additional configuration.
Below are the supported data stores and their configuration options. Below are the supported data stores and their configuration options.
### Memory ### Memory
This is the default data store (`GATEKEEPER_STORE=memory`) User data will be stored in memory and will be lost when the proxy is restarted. You are responsible for downloading and re-uploading user data via the REST API if you want to persist it. This is the default data store (`GATEKEEPER_STORE=memory`) User data will be stored in memory and will be lost when the server is restarted. You are responsible for exporting and re-importing user data after a restart.
### Firebase Realtime Database ### Firebase Realtime Database
@@ -58,8 +58,4 @@ To use Firebase Realtime Database to persist user data, set the following enviro
7. Set `FIREBASE_RTDB_URL` to the reference URL of your Firebase Realtime Database, e.g. `https://my-project-default-rtdb.firebaseio.com`. 7. Set `FIREBASE_RTDB_URL` to the reference URL of your Firebase Realtime Database, e.g. `https://my-project-default-rtdb.firebaseio.com`.
8. Set `GATEKEEPER_STORE` to `firebase_rtdb` in your environment if you haven't already. 8. Set `GATEKEEPER_STORE` to `firebase_rtdb` in your environment if you haven't already.
The proxy will attempt to connect to your Firebase Realtime Database at startup and will throw an error if it cannot connect. If you see this error, check that your `FIREBASE_RTDB_URL` and `FIREBASE_KEY` secrets are set correctly. The proxy server will attempt to connect to your Firebase Realtime Database at startup and will throw an error if it cannot connect. If you see this error, check that your `FIREBASE_RTDB_URL` and `FIREBASE_KEY` secrets are set correctly.
---
Users are loaded from the database and changes are flushed periodically. You can use the PUT /admin/users API to bulk import users and force a flush to the database.
-47
View File
@@ -1,47 +0,0 @@
const esbuild = require("esbuild");
const fs = require("fs");
const { copy } = require("esbuild-plugin-copy");
const buildDir = "build";
const config = {
entryPoints: ["src/server.ts"],
bundle: true,
outfile: `${buildDir}/server.js`,
platform: "node",
target: "es2020",
format: "cjs",
sourcemap: true,
external: ["fs", "path", "zeromq", "tiktoken"],
plugins: [
copy({
resolveFrom: "cwd",
assets: {
from: ["src/tokenization/*.py"],
to: [`${buildDir}/tokenization`],
},
}),
],
};
function createBundler() {
return {
build: async () => esbuild.build(config),
watch: async () => {
const watchConfig = { ...config, logLevel: "info" };
const ctx = await esbuild.context(watchConfig);
ctx.watch();
},
};
}
(async () => {
fs.rmSync(buildDir, { recursive: true, force: true });
const isDev = process.argv.includes("--dev");
const bundler = createBundler();
if (isDev) {
await bundler.watch();
} else {
await bundler.build();
}
})();
+480 -756
View File
File diff suppressed because it is too large Load Diff
+16 -19
View File
@@ -3,13 +3,10 @@
"version": "1.0.0", "version": "1.0.0",
"description": "Reverse proxy for the OpenAI API", "description": "Reverse proxy for the OpenAI API",
"scripts": { "scripts": {
"build:dev": "node esbuild.js --dev", "build": "tsc && copyfiles -u 1 src/**/*.ejs build",
"build": "node esbuild.js", "start:dev": "nodemon --watch src --exec ts-node --transpile-only src/server.ts",
"postinstall": "node scripts/install-python-deps.js",
"start:dev:tsc": "nodemon --watch src --exec ts-node src/server.ts",
"start:dev": "concurrently \"npm run build:dev\" \"npm run start:watch\"",
"start:replit": "tsc && node build/server.js",
"start:watch": "nodemon --require source-map-support/register build/server.js", "start:watch": "nodemon --require source-map-support/register build/server.js",
"start:replit": "tsc && node build/server.js",
"start": "node build/server.js", "start": "node build/server.js",
"type-check": "tsc --noEmit" "type-check": "tsc --noEmit"
}, },
@@ -19,43 +16,43 @@
"author": "", "author": "",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@anthropic-ai/tokenizer": "^0.0.4",
"axios": "^1.3.5", "axios": "^1.3.5",
"cookie-parser": "^1.4.6",
"copyfiles": "^2.4.1",
"cors": "^2.8.5", "cors": "^2.8.5",
"csrf-csrf": "^2.3.0",
"dotenv": "^16.0.3", "dotenv": "^16.0.3",
"ejs": "^3.1.9",
"express": "^4.18.2", "express": "^4.18.2",
"firebase-admin": "^11.9.0", "firebase-admin": "^11.10.1",
"googleapis": "^117.0.0", "googleapis": "^122.0.0",
"http-proxy-middleware": "^3.0.0-beta.1", "http-proxy-middleware": "^3.0.0-beta.1",
"openai": "^3.2.1", "multer": "^1.4.5-lts.1",
"pino": "^8.11.0", "pino": "^8.11.0",
"pino-http": "^8.3.3", "pino-http": "^8.3.3",
"showdown": "^2.1.0", "showdown": "^2.1.0",
"tiktoken": "^1.0.7", "tiktoken": "^1.0.10",
"uuid": "^9.0.0", "uuid": "^9.0.0",
"zlib": "^1.0.5", "zlib": "^1.0.5",
"zod": "^3.21.4" "zod": "^3.21.4"
}, },
"devDependencies": { "devDependencies": {
"@types/cookie-parser": "^1.4.3",
"@types/cors": "^2.8.13", "@types/cors": "^2.8.13",
"@types/express": "^4.17.17", "@types/express": "^4.17.17",
"@types/multer": "^1.4.7",
"@types/showdown": "^2.0.0", "@types/showdown": "^2.0.0",
"@types/uuid": "^9.0.1", "@types/uuid": "^9.0.1",
"@types/zeromq": "^5.2.2",
"concurrently": "^8.0.1", "concurrently": "^8.0.1",
"esbuild": "^0.17.16", "esbuild": "^0.17.16",
"esbuild-node-externals": "^1.7.0",
"esbuild-plugin-copy": "^2.1.1",
"esbuild-register": "^3.4.2", "esbuild-register": "^3.4.2",
"nodemon": "^2.0.22", "nodemon": "^3.0.1",
"source-map-support": "^0.5.21", "source-map-support": "^0.5.21",
"ts-node": "^10.9.1", "ts-node": "^10.9.1",
"typescript": "^5.0.4" "typescript": "^5.0.4"
}, },
"overrides": { "overrides": {
"optionator": "^0.9.3", "google-gax": "^3.6.1"
"semver": "^7.5.3"
},
"optionalDependencies": {
"zeromq": "^6.0.0-beta.16"
} }
} }
-2
View File
@@ -1,2 +0,0 @@
pyzmq==25.1.0
anthropic==0.2.9
-68
View File
@@ -1,68 +0,0 @@
const fs = require("fs");
const spawn = require("child_process").spawn;
const IS_WINDOWS = process.platform === "win32";
const IS_DEV = process.env.NODE_ENV !== "production";
const installDeps = async () => {
try {
console.log("Installing additional optional dependencies...");
console.log("Creating venv...");
await maybeCreateVenv();
console.log("Installing python dependencies...");
await installPythonDependencies();
} catch (error) {
console.error("Error installing additional optional dependencies", error);
process.exit(0); // don't fail the build
}
};
installDeps();
async function maybeCreateVenv() {
if (!IS_DEV) {
console.log("Skipping venv creation in production");
return true;
}
if (fs.existsSync(".venv")) {
console.log("Skipping venv creation, already exists");
return true;
}
const python = IS_WINDOWS ? "python" : "python3";
await runCommand(`${python} -m venv .venv`);
return true;
}
async function installPythonDependencies() {
const commands = [];
if (IS_DEV) {
commands.push(
IS_WINDOWS ? ".venv\\Scripts\\activate.bat" : "source .venv/bin/activate"
);
}
const pip = IS_WINDOWS ? "pip" : "pip3";
commands.push(`${pip} install -r requirements.txt`);
const command = commands.join(" && ");
await runCommand(command);
return true;
}
async function runCommand(command) {
return new Promise((resolve, reject) => {
const child = spawn(command, [], { shell: true });
child.stdout.on("data", (data) => {
console.log(data.toString());
});
child.stderr.on("data", (data) => {
console.error(data.toString());
});
child.on("close", (code) => {
if (code === 0) {
resolve();
} else {
reject();
}
});
});
}
+14 -33
View File
@@ -1,37 +1,17 @@
import { Router } from "express"; import { Router } from "express";
import { z } from "zod"; import { z } from "zod";
import * as userStore from "../proxy/auth/user-store"; import * as userStore from "../../proxy/auth/user-store";
import { UserSchema, UserSchemaWithToken, parseSort, sortBy } from "../common";
const usersRouter = Router(); const router = Router();
const UserSchema = z
.object({
ip: z.array(z.string()).optional(),
type: z.enum(["normal", "special"]).optional(),
promptCount: z.number().optional(),
tokenCount: z.number().optional(),
createdAt: z.number().optional(),
lastUsedAt: z.number().optional(),
disabledAt: z.number().optional(),
disabledReason: z.string().optional(),
})
.strict();
const UserSchemaWithToken = UserSchema.extend({
token: z.string(),
}).strict();
/** /**
* Returns a list of all users, sorted by prompt count and then last used time. * Returns a list of all users, sorted by prompt count and then last used time.
* GET /admin/users * GET /admin/users
*/ */
usersRouter.get("/", (_req, res) => { router.get("/", (req, res) => {
const users = userStore.getUsers().sort((a, b) => { const sort = parseSort(req.query.sort) || ["promptCount", "lastUsedAt"];
if (a.promptCount !== b.promptCount) { const users = userStore.getUsers().sort(sortBy(sort, false));
return b.promptCount - a.promptCount;
}
return (b.lastUsedAt ?? 0) - (a.lastUsedAt ?? 0);
});
res.json({ users, count: users.length }); res.json({ users, count: users.length });
}); });
@@ -39,7 +19,7 @@ usersRouter.get("/", (_req, res) => {
* Returns the user with the given token. * Returns the user with the given token.
* GET /admin/users/:token * GET /admin/users/:token
*/ */
usersRouter.get("/:token", (req, res) => { router.get("/:token", (req, res) => {
const user = userStore.getUser(req.params.token); const user = userStore.getUser(req.params.token);
if (!user) { if (!user) {
return res.status(404).json({ error: "Not found" }); return res.status(404).json({ error: "Not found" });
@@ -52,8 +32,9 @@ usersRouter.get("/:token", (req, res) => {
* Returns the created user's token. * Returns the created user's token.
* POST /admin/users * POST /admin/users
*/ */
usersRouter.post("/", (_req, res) => { router.post("/", (req, res) => {
res.json({ token: userStore.createUser() }); const token = userStore.createUser();
res.json({ token });
}); });
/** /**
@@ -62,7 +43,7 @@ usersRouter.post("/", (_req, res) => {
* Returns the upserted user. * Returns the upserted user.
* PUT /admin/users/:token * PUT /admin/users/:token
*/ */
usersRouter.put("/:token", (req, res) => { router.put("/:token", (req, res) => {
const result = UserSchema.safeParse(req.body); const result = UserSchema.safeParse(req.body);
if (!result.success) { if (!result.success) {
return res.status(400).json({ error: result.error }); return res.status(400).json({ error: result.error });
@@ -77,7 +58,7 @@ usersRouter.put("/:token", (req, res) => {
* Returns an object containing the upserted users and the number of upserts. * Returns an object containing the upserted users and the number of upserts.
* PUT /admin/users * PUT /admin/users
*/ */
usersRouter.put("/", (req, res) => { router.put("/", (req, res) => {
const result = z.array(UserSchemaWithToken).safeParse(req.body.users); const result = z.array(UserSchemaWithToken).safeParse(req.body.users);
if (!result.success) { if (!result.success) {
return res.status(400).json({ error: result.error }); return res.status(400).json({ error: result.error });
@@ -95,7 +76,7 @@ usersRouter.put("/", (req, res) => {
* Returns the disabled user. * Returns the disabled user.
* DELETE /admin/users/:token * DELETE /admin/users/:token
*/ */
usersRouter.delete("/:token", (req, res) => { router.delete("/:token", (req, res) => {
const user = userStore.getUser(req.params.token); const user = userStore.getUser(req.params.token);
const disabledReason = z const disabledReason = z
.string() .string()
@@ -111,4 +92,4 @@ usersRouter.delete("/:token", (req, res) => {
res.json(userStore.getUser(req.params.token)); res.json(userStore.getUser(req.params.token));
}); });
export { usersRouter }; export { router as usersApiRouter };
+58
View File
@@ -0,0 +1,58 @@
import { Request, Response, RequestHandler } from "express";
import { config } from "../config";
const ADMIN_KEY = config.adminKey;
const failedAttempts = new Map<string, number>();
type AuthorizeParams = { via: "cookie" | "header" };
export const authorize: ({ via }: AuthorizeParams) => RequestHandler =
({ via }) =>
(req, res, next) => {
const bearerToken = req.headers.authorization?.slice("Bearer ".length);
const cookieToken = req.cookies["admin-token"];
const token = via === "cookie" ? cookieToken : bearerToken;
const attempts = failedAttempts.get(req.ip) ?? 0;
if (!token) {
return res.status(401).json({ error: "Unauthorized" });
}
if (!ADMIN_KEY) {
req.log.warn(
{ ip: req.ip },
`Blocked admin request because no admin key is configured`
);
return res.status(401).json({ error: "Unauthorized" });
}
if (attempts > 5) {
req.log.warn(
{ ip: req.ip, token: bearerToken },
`Blocked admin request due to too many failed attempts`
);
return res.status(401).json({ error: "Too many attempts" });
}
if (token !== ADMIN_KEY) {
req.log.warn(
{ ip: req.ip, attempts, token },
`Attempted admin request with invalid token`
);
return handleFailedLogin(req, res);
}
req.log.info({ ip: req.ip }, `Admin request authorized`);
next();
};
function handleFailedLogin(req: Request, res: Response) {
const attempts = failedAttempts.get(req.ip) ?? 0;
const newAttempts = attempts + 1;
failedAttempts.set(req.ip, newAttempts);
if (req.accepts("json", "html") === "json") {
return res.status(401).json({ error: "Unauthorized" });
}
res.clearCookie("admin-token");
return res.redirect("/admin/login?failed=true");
}
+58
View File
@@ -0,0 +1,58 @@
import { z } from "zod";
import { Query } from "express-serve-static-core";
export function parseSort(sort: Query["sort"]) {
if (!sort) return null;
if (typeof sort === "string") return sort.split(",");
if (Array.isArray(sort)) return sort.splice(3) as string[];
return null;
}
export function sortBy(fields: string[], asc = true) {
return (a: any, b: any) => {
for (const field of fields) {
if (a[field] !== b[field]) {
// always sort nulls to the end
if (a[field] == null) return 1;
if (b[field] == null) return -1;
const valA = Array.isArray(a[field]) ? a[field].length : a[field];
const valB = Array.isArray(b[field]) ? b[field].length : b[field];
const result = valA < valB ? -1 : 1;
return asc ? result : -result;
}
}
return 0;
};
}
export function paginate(set: unknown[], page: number, pageSize: number = 20) {
const p = Math.max(1, Math.min(page, Math.ceil(set.length / pageSize)));
return {
page: p,
items: set.slice((p - 1) * pageSize, p * pageSize),
pageSize,
pageCount: Math.ceil(set.length / pageSize),
totalCount: set.length,
nextPage: p * pageSize < set.length ? p + 1 : null,
prevPage: p > 1 ? p - 1 : null,
};
}
export const UserSchema = z
.object({
ip: z.array(z.string()).optional(),
type: z.enum(["normal", "special"]).optional(),
promptCount: z.number().optional(),
tokenCount: z.number().optional(),
createdAt: z.number().optional(),
lastUsedAt: z.number().optional(),
disabledAt: z.number().optional(),
disabledReason: z.string().optional(),
})
.strict();
export const UserSchemaWithToken = UserSchema.extend({
token: z.string(),
}).strict();
+24
View File
@@ -0,0 +1,24 @@
import { doubleCsrf } from "csrf-csrf";
import { v4 as uuid } from "uuid";
import express from "express";
const CSRF_SECRET = uuid();
const { generateToken, doubleCsrfProtection } = doubleCsrf({
getSecret: () => CSRF_SECRET,
cookieName: "csrf",
cookieOptions: { sameSite: "strict", path: "/" },
getTokenFromRequest: (req) => req.body["_csrf"] || req.query["_csrf"],
});
const injectCsrfToken: express.RequestHandler = (req, res, next) => {
res.locals.csrfToken = generateToken(res, req);
// force generation of new token on back button
// TODO: implement session-based CSRF tokens
res.setHeader("Cache-Control", "no-cache, no-store, must-revalidate");
res.setHeader("Pragma", "no-cache");
res.setHeader("Expires", "0");
next();
};
export { injectCsrfToken, doubleCsrfProtection as checkCsrfToken };
+29
View File
@@ -0,0 +1,29 @@
import { Router } from "express";
const loginRouter = Router();
loginRouter.get("/login", (req, res) => {
res.render("admin/login", { failed: req.query.failed });
});
loginRouter.post("/login", (req, res) => {
res.cookie("admin-token", req.body.token, {
maxAge: 1000 * 60 * 60 * 24 * 14,
httpOnly: true,
});
res.redirect("/admin");
});
loginRouter.get("/logout", (req, res) => {
res.clearCookie("admin-token");
res.redirect("/admin/login");
});
loginRouter.get("/", (req, res) => {
if (req.cookies["admin-token"]) {
return res.redirect("/admin/manage");
}
res.redirect("/admin/login");
});
export { loginRouter };
+17 -29
View File
@@ -1,36 +1,24 @@
import { RequestHandler, Router } from "express"; import express, { Router } from "express";
import { config } from "../config"; import cookieParser from "cookie-parser";
import { usersRouter } from "./users"; import { authorize } from "./auth";
import { injectCsrfToken, checkCsrfToken } from "./csrf";
const ADMIN_KEY = config.adminKey; import { usersApiRouter as apiRouter } from "./api/users";
const failedAttempts = new Map<string, number>(); import { usersUiRouter as uiRouter } from "./ui/users";
import { loginRouter } from "./login";
const adminRouter = Router(); const adminRouter = Router();
const auth: RequestHandler = (req, res, next) => { adminRouter.use(
const token = req.headers.authorization?.slice("Bearer ".length); express.json({ limit: "20mb" }),
const attempts = failedAttempts.get(req.ip) ?? 0; express.urlencoded({ extended: true, limit: "20mb" })
if (attempts > 5) { );
req.log.warn( adminRouter.use(cookieParser());
{ ip: req.ip, token }, adminRouter.use(injectCsrfToken);
`Blocked request to admin API due to too many failed attempts`
);
return res.status(401).json({ error: "Too many attempts" });
}
if (token !== ADMIN_KEY) { adminRouter.use("/users", authorize({ via: "header" }), apiRouter);
const newAttempts = attempts + 1;
failedAttempts.set(req.ip, newAttempts);
req.log.warn(
{ ip: req.ip, attempts: newAttempts, token },
`Attempted admin API request with invalid token`
);
return res.status(401).json({ error: "Unauthorized" });
}
next(); adminRouter.use(checkCsrfToken); // All UI routes require CSRF token
}; adminRouter.use("/", loginRouter);
adminRouter.use("/manage", authorize({ via: "cookie" }), uiRouter);
adminRouter.use(auth);
adminRouter.use("/users", usersRouter);
export { adminRouter }; export { adminRouter };
+135
View File
@@ -0,0 +1,135 @@
import { Router } from "express";
import multer from "multer";
import { z } from "zod";
import { config } from "../../config";
import * as userStore from "../../proxy/auth/user-store";
import {
UserSchemaWithToken,
parseSort,
sortBy,
paginate,
UserSchema,
} from "../common";
const router = Router();

// Hold uploaded user lists in memory; only files declared as JSON are
// accepted by the filter below.
const upload = multer({
  storage: multer.memoryStorage(),
  fileFilter: (_req, file, cb) => {
    if (file.mimetype === "application/json") {
      cb(null, true);
    } else {
      cb(new Error("Invalid file type"));
    }
  },
});
// Render the user-creation form along with the five most recently created
// users for reference.
router.get("/create-user", (req, res) => {
  const allUsers = userStore.getUsers();
  allUsers.sort(sortBy(["createdAt"], false));
  res.render("admin/create-user", {
    recentUsers: allUsers.slice(0, 5),
    newToken: Boolean(req.query.created),
  });
});

// Create a new user token, then bounce back to the form with a success flag.
router.post("/create-user", (_req, res) => {
  userStore.createUser();
  return res.redirect(`/admin/manage/create-user?created=true`);
});
// Show details for a single user, looked up by their token.
router.get("/view-user/:token", (req, res) => {
  const user = userStore.getUser(req.params.token);
  if (user) {
    res.render("admin/view-user", { user });
  } else {
    res.status(404).send("User not found");
  }
});
// Paginated, sortable user list. Sort order and page size come from the
// query string; page size falls back to a cookie, then to 20.
router.get("/list-users", (req, res) => {
  const sort = parseSort(req.query.sort) || ["promptCount", "lastUsedAt"];
  const requestedPageSize =
    Number(req.query.perPage) || Number(req.cookies.perPage) || 20;
  // Clamp so a bad query can't request a zero-sized or enormous page.
  const perPage = Math.max(1, Math.min(1000, requestedPageSize));
  const users = userStore.getUsers().sort(sortBy(sort, false));
  // Clamp the page number too; a negative or zero `page` from the query
  // string would otherwise be passed straight into `paginate`.
  const page = Math.max(1, Number(req.query.page) || 1);
  const { items, ...pagination } = paginate(users, page, perPage);
  return res.render("admin/list-users", {
    sort: sort.join(","),
    users: items,
    ...pagination,
  });
});
// Render the import form; `imported` is the count from a completed upload.
router.get("/import-users", (req, res) => {
  const imported = Number(req.query.imported) || 0;
  res.render("admin/import-users", { imported });
});

// Import users from an uploaded JSON file of shape `{ users: [...] }`.
router.post("/import-users", upload.single("users"), (req, res) => {
  if (!req.file) {
    return res.status(400).json({ error: "No file uploaded" });
  }
  // Guard the parse: a malformed upload would otherwise throw and surface
  // as an unhandled 500 instead of a client error.
  let data: any;
  try {
    data = JSON.parse(req.file.buffer.toString());
  } catch (e) {
    return res.status(400).json({ error: "Uploaded file is not valid JSON" });
  }
  // Optional chaining: `JSON.parse("null")` yields null, and `null.users`
  // would throw. `safeParse(undefined)` fails cleanly instead.
  const result = z.array(UserSchemaWithToken).safeParse(data?.users);
  if (!result.success) {
    return res.status(400).json({ error: result.error });
  }
  const upserts = result.data.map((user) => userStore.upsertUser(user));
  res.redirect(`/admin/manage/import-users?imported=${upserts.length}`);
});
// Landing page for the export feature.
router.get("/export-users", (_req, res) => {
  res.render("admin/export-users");
});

// Download the full user list as a pretty-printed JSON attachment.
router.get("/export-users.json", (_req, res) => {
  const payload = JSON.stringify({ users: userStore.getUsers() }, null, 2);
  res.setHeader("Content-Disposition", "attachment; filename=users.json");
  res.setHeader("Content-Type", "application/json");
  res.send(payload);
});

// Admin dashboard index; flags whether the user store survives a restart.
router.get("/", (_req, res) => {
  res.render("admin/index", {
    isPersistenceEnabled: config.gatekeeperStore !== "memory",
  });
});
// Apply edits to an existing user after validating the submitted fields.
// The token always comes from the URL, never from the body.
router.post("/edit-user/:token", (req, res) => {
  const parsed = UserSchema.safeParse(req.body);
  if (!parsed.success) {
    return res.status(400).send(parsed.error);
  }
  userStore.upsertUser({ ...parsed.data, token: req.params.token });
  return res.sendStatus(204);
});
// Re-enable a previously disabled user by clearing their disabled state.
router.post("/reactivate-user/:token", (req, res) => {
  const token = req.params.token;
  const user = userStore.getUser(token);
  if (!user) {
    return res.status(404).send("User not found");
  }
  userStore.upsertUser({
    token: user.token,
    disabledAt: 0,
    disabledReason: "",
  });
  return res.sendStatus(204);
});

// Disable a user, recording the reason supplied in the request body.
router.post("/disable-user/:token", (req, res) => {
  const token = req.params.token;
  if (!userStore.getUser(token)) {
    return res.status(404).send("User not found");
  }
  userStore.disableUser(token, req.body.reason);
  return res.sendStatus(204);
});

export { router as usersUiRouter };
+35 -28
View File
@@ -9,9 +9,8 @@ const startupLogger = pino({ level: "debug" }).child({ module: "startup" });
const isDev = process.env.NODE_ENV !== "production"; const isDev = process.env.NODE_ENV !== "production";
type PromptLoggingBackend = "google_sheets"; type PromptLoggingBackend = "google_sheets";
export type DequeueMode = "fair" | "random" | "none";
type Config = { export type Config = {
/** The port the proxy server will listen on. */ /** The port the proxy server will listen on. */
port: number; port: number;
/** Comma-delimited list of OpenAI API keys. */ /** Comma-delimited list of OpenAI API keys. */
@@ -48,13 +47,21 @@ type Config = {
* `memory`: Users are stored in memory and are lost on restart (default) * `memory`: Users are stored in memory and are lost on restart (default)
* *
* `firebase_rtdb`: Users are stored in a Firebase Realtime Database; requires * `firebase_rtdb`: Users are stored in a Firebase Realtime Database; requires
* `firebaseKey` and `firebaseRtdbUrl` to be set. * `firebaseKey` and `firebaseRtdbUrl` to be set. (deprecated)
*
* `huggingface_datasets`: Users are stored in a Huggingface Datasets git
* repository; requires `hfDatasetRepoUrl` and `hfPrivateSshKey` to be set.
**/ **/
gatekeeperStore: "memory" | "firebase_rtdb"; gatekeeperStore: "memory" | "firebase_rtdb" | "huggingface_datasets";
/** URL of the Firebase Realtime Database if using the Firebase RTDB store. */ /** URL of the Firebase Realtime Database if using the Firebase RTDB store. */
firebaseRtdbUrl?: string; firebaseRtdbUrl?: string;
/** Base64-encoded Firebase service account key if using the Firebase RTDB store. */ /** Base64-encoded Firebase service account key if using the Firebase RTDB store. */
firebaseKey?: string; firebaseKey?: string;
/** URL of the Huggingface Datasets git repository if using the Huggingface
* Datasets store. */
hfDatasetRepoUrl?: string;
/** Private SSH key used to push to the Huggingface Dataset repository. */
hfPrivateSshKey?: string;
/** /**
* Maximum number of IPs per user, after which their token is disabled. * Maximum number of IPs per user, after which their token is disabled.
* Users with the manually-assigned `special` role are exempt from this limit. * Users with the manually-assigned `special` role are exempt from this limit.
@@ -63,6 +70,20 @@ type Config = {
maxIpsPerUser: number; maxIpsPerUser: number;
/** Per-IP limit for requests per minute to OpenAI's completions endpoint. */ /** Per-IP limit for requests per minute to OpenAI's completions endpoint. */
modelRateLimit: number; modelRateLimit: number;
/**
* For OpenAI, the maximum number of context tokens (prompt + max output) a
* user can request before their request is rejected.
* Context limits can help prevent excessive spend.
* Defaults to 0, which means no limit beyond OpenAI's stated maximums.
*/
maxContextTokensOpenAI: number;
/**
* For Anthropic, the maximum number of context tokens a user can request.
* Claude context limits can prevent requests from tying up concurrency slots
* for too long, which can lengthen queue times for other users.
* Defaults to 0, which means no limit beyond Anthropic's stated maximums.
*/
maxContextTokensAnthropic: number;
/** For OpenAI, the maximum number of sampled tokens a user can request. */ /** For OpenAI, the maximum number of sampled tokens a user can request. */
maxOutputTokensOpenAI: number; maxOutputTokensOpenAI: number;
/** For Anthropic, the maximum number of sampled tokens a user can request. */ /** For Anthropic, the maximum number of sampled tokens a user can request. */
@@ -83,26 +104,6 @@ type Config = {
googleSheetsSpreadsheetId?: string; googleSheetsSpreadsheetId?: string;
/** Whether to periodically check keys for usage and validity. */ /** Whether to periodically check keys for usage and validity. */
checkKeys?: boolean; checkKeys?: boolean;
/**
* How to display quota information on the info page.
*
* `none`: Hide quota information
*
* `partial`: Display quota information only as a percentage
*
* `full`: Display quota information as usage against total capacity
*/
quotaDisplayMode: "none" | "partial" | "full";
/**
* Which request queueing strategy to use when keys are over their rate limit.
*
* `fair`: Requests are serviced in the order they were received (default)
*
* `random`: Requests are serviced randomly
*
* `none`: Requests are not queued and users have to retry manually
*/
queueMode: DequeueMode;
/** /**
* Comma-separated list of origins to block. Requests matching any of these * Comma-separated list of origins to block. Requests matching any of these
* origins or referers will be rejected. * origins or referers will be rejected.
@@ -139,11 +140,18 @@ export const config: Config = {
maxIpsPerUser: getEnvWithDefault("MAX_IPS_PER_USER", 0), maxIpsPerUser: getEnvWithDefault("MAX_IPS_PER_USER", 0),
firebaseRtdbUrl: getEnvWithDefault("FIREBASE_RTDB_URL", undefined), firebaseRtdbUrl: getEnvWithDefault("FIREBASE_RTDB_URL", undefined),
firebaseKey: getEnvWithDefault("FIREBASE_KEY", undefined), firebaseKey: getEnvWithDefault("FIREBASE_KEY", undefined),
hfDatasetRepoUrl: getEnvWithDefault("HF_DATASET_REPO_URL", undefined),
hfPrivateSshKey: getEnvWithDefault("HF_PRIVATE_SSH_KEY", undefined),
modelRateLimit: getEnvWithDefault("MODEL_RATE_LIMIT", 4), modelRateLimit: getEnvWithDefault("MODEL_RATE_LIMIT", 4),
maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 0),
maxContextTokensAnthropic: getEnvWithDefault(
"MAX_CONTEXT_TOKENS_ANTHROPIC",
0
),
maxOutputTokensOpenAI: getEnvWithDefault("MAX_OUTPUT_TOKENS_OPENAI", 300), maxOutputTokensOpenAI: getEnvWithDefault("MAX_OUTPUT_TOKENS_OPENAI", 300),
maxOutputTokensAnthropic: getEnvWithDefault( maxOutputTokensAnthropic: getEnvWithDefault(
"MAX_OUTPUT_TOKENS_ANTHROPIC", "MAX_OUTPUT_TOKENS_ANTHROPIC",
600 400
), ),
rejectDisallowed: getEnvWithDefault("REJECT_DISALLOWED", false), rejectDisallowed: getEnvWithDefault("REJECT_DISALLOWED", false),
rejectMessage: getEnvWithDefault( rejectMessage: getEnvWithDefault(
@@ -152,7 +160,6 @@ export const config: Config = {
), ),
logLevel: getEnvWithDefault("LOG_LEVEL", "info"), logLevel: getEnvWithDefault("LOG_LEVEL", "info"),
checkKeys: getEnvWithDefault("CHECK_KEYS", !isDev), checkKeys: getEnvWithDefault("CHECK_KEYS", !isDev),
quotaDisplayMode: getEnvWithDefault("QUOTA_DISPLAY_MODE", "partial"),
promptLogging: getEnvWithDefault("PROMPT_LOGGING", false), promptLogging: getEnvWithDefault("PROMPT_LOGGING", false),
promptLoggingBackend: getEnvWithDefault("PROMPT_LOGGING_BACKEND", undefined), promptLoggingBackend: getEnvWithDefault("PROMPT_LOGGING_BACKEND", undefined),
googleSheetsKey: getEnvWithDefault("GOOGLE_SHEETS_KEY", undefined), googleSheetsKey: getEnvWithDefault("GOOGLE_SHEETS_KEY", undefined),
@@ -160,7 +167,6 @@ export const config: Config = {
"GOOGLE_SHEETS_SPREADSHEET_ID", "GOOGLE_SHEETS_SPREADSHEET_ID",
undefined undefined
), ),
queueMode: getEnvWithDefault("QUEUE_MODE", "fair"),
blockedOrigins: getEnvWithDefault("BLOCKED_ORIGINS", undefined), blockedOrigins: getEnvWithDefault("BLOCKED_ORIGINS", undefined),
blockMessage: getEnvWithDefault( blockMessage: getEnvWithDefault(
"BLOCK_MESSAGE", "BLOCK_MESSAGE",
@@ -271,10 +277,11 @@ export const OMITTED_KEYS: (keyof Config)[] = [
"proxyKey", "proxyKey",
"adminKey", "adminKey",
"checkKeys", "checkKeys",
"quotaDisplayMode",
"googleSheetsKey", "googleSheetsKey",
"firebaseKey", "firebaseKey",
"firebaseRtdbUrl", "firebaseRtdbUrl",
"hfDatasetRepoUrl",
"hfPrivateSshKey",
"gatekeeperStore", "gatekeeperStore",
"maxIpsPerUser", "maxIpsPerUser",
"blockedOrigins", "blockedOrigins",
+40 -71
View File
@@ -2,7 +2,7 @@ import fs from "fs";
import { Request, Response } from "express"; import { Request, Response } from "express";
import showdown from "showdown"; import showdown from "showdown";
import { config, listConfig } from "./config"; import { config, listConfig } from "./config";
import { keyPool } from "./key-management"; import { OpenAIKey, keyPool } from "./key-management";
import { getUniqueIps } from "./proxy/rate-limit"; import { getUniqueIps } from "./proxy/rate-limit";
import { import {
QueuePartition, QueuePartition,
@@ -52,17 +52,7 @@ function cacheInfoPageHtml(baseUrl: string) {
}; };
const title = getServerTitle(); const title = getServerTitle();
let headerHtml = buildInfoPageHeader(new showdown.Converter(), title); const headerHtml = buildInfoPageHeader(new showdown.Converter(), title);
if (process.env.MISSING_PYTHON_WARNING) {
headerHtml +=
`<p style="color: red;">Python is not installed; the Claude tokenizer ` +
`cannot start. Your Dockerfile may be out of date; see <a ` +
`href="https://gitgud.io/khanon/oai-reverse-proxy">the docs</a> for an ` +
`updated Huggingface Dockerfile.</p><p>You can disable this warning by ` +
`setting <code>DISABLE_MISSING_PYTHON_WARNING=true</code> in your ` +
`environment.</p>`;
}
const pageBody = `<!DOCTYPE html> const pageBody = `<!DOCTYPE html>
<html lang="en"> <html lang="en">
@@ -88,7 +78,9 @@ function cacheInfoPageHtml(baseUrl: string) {
type ServiceInfo = { type ServiceInfo = {
activeKeys: number; activeKeys: number;
trialKeys?: number; trialKeys?: number;
quota: string; // activeLimit: string;
revokedKeys?: number;
overQuotaKeys?: number;
proomptersInQueue: number; proomptersInQueue: number;
estimatedQueueTime: string; estimatedQueueTime: string;
}; };
@@ -98,52 +90,38 @@ type ServiceInfo = {
function getOpenAIInfo() { function getOpenAIInfo() {
const info: { [model: string]: Partial<ServiceInfo> } = {}; const info: { [model: string]: Partial<ServiceInfo> } = {};
const keys = keyPool.list().filter((k) => k.service === "openai"); const keys = keyPool
.list()
.filter((k) => k.service === "openai") as OpenAIKey[];
const hasGpt4 = keys.some((k) => k.isGpt4) && !config.turboOnly; const hasGpt4 = keys.some((k) => k.isGpt4) && !config.turboOnly;
if (keyPool.anyUnchecked()) { if (keyPool.anyUnchecked()) {
const uncheckedKeys = keys.filter((k) => !k.lastChecked); const uncheckedKeys = keys.filter((k) => !k.lastChecked);
info.status = `Still checking ${uncheckedKeys.length} keys...` as any; info.status =
`Performing startup key checks (${uncheckedKeys.length} left).` as any;
} else { } else {
delete info.status; delete info.status;
} }
if (config.checkKeys) { if (config.checkKeys) {
const turboKeys = keys.filter((k) => !k.isGpt4 && !k.isDisabled); const turboKeys = keys.filter((k) => !k.isGpt4);
const gpt4Keys = keys.filter((k) => k.isGpt4 && !k.isDisabled); const gpt4Keys = keys.filter((k) => k.isGpt4);
const quota: Record<string, string> = { turbo: "", gpt4: "" };
const turboQuota = keyPool.remainingQuota("openai") * 100;
const gpt4Quota = keyPool.remainingQuota("openai", { gpt4: true }) * 100;
if (config.quotaDisplayMode === "full") {
const turboUsage = keyPool.usageInUsd("openai");
const gpt4Usage = keyPool.usageInUsd("openai", { gpt4: true });
quota.turbo = `${turboUsage} (${Math.round(turboQuota)}% remaining)`;
quota.gpt4 = `${gpt4Usage} (${Math.round(gpt4Quota)}% remaining)`;
} else {
quota.turbo = `${Math.round(turboQuota)}%`;
quota.gpt4 = `${Math.round(gpt4Quota * 100)}%`;
}
info.turbo = { info.turbo = {
activeKeys: turboKeys.filter((k) => !k.isDisabled).length, activeKeys: turboKeys.filter((k) => !k.isDisabled).length,
trialKeys: turboKeys.filter((k) => k.isTrial).length, trialKeys: turboKeys.filter((k) => k.isTrial).length,
quota: quota.turbo, revokedKeys: turboKeys.filter((k) => k.isRevoked).length,
overQuotaKeys: turboKeys.filter((k) => k.isOverQuota).length,
}; };
if (hasGpt4) { if (hasGpt4) {
info.gpt4 = { info.gpt4 = {
activeKeys: gpt4Keys.filter((k) => !k.isDisabled).length, activeKeys: gpt4Keys.filter((k) => !k.isDisabled).length,
trialKeys: gpt4Keys.filter((k) => k.isTrial).length, trialKeys: gpt4Keys.filter((k) => k.isTrial).length,
quota: quota.gpt4, revokedKeys: gpt4Keys.filter((k) => k.isRevoked).length,
overQuotaKeys: gpt4Keys.filter((k) => k.isOverQuota).length,
}; };
} }
if (config.quotaDisplayMode === "none") {
delete info.turbo?.quota;
delete info.gpt4?.quota;
}
} else { } else {
info.status = "Key checking is disabled." as any; info.status = "Key checking is disabled." as any;
info.turbo = { activeKeys: keys.filter((k) => !k.isDisabled).length }; info.turbo = { activeKeys: keys.filter((k) => !k.isDisabled).length };
@@ -152,17 +130,15 @@ function getOpenAIInfo() {
}; };
} }
if (config.queueMode !== "none") { const turboQueue = getQueueInformation("turbo");
const turboQueue = getQueueInformation("turbo");
info.turbo.proomptersInQueue = turboQueue.proomptersInQueue; info.turbo.proomptersInQueue = turboQueue.proomptersInQueue;
info.turbo.estimatedQueueTime = turboQueue.estimatedQueueTime; info.turbo.estimatedQueueTime = turboQueue.estimatedQueueTime;
if (hasGpt4) { if (hasGpt4) {
const gpt4Queue = getQueueInformation("gpt-4"); const gpt4Queue = getQueueInformation("gpt-4");
info.gpt4.proomptersInQueue = gpt4Queue.proomptersInQueue; info.gpt4.proomptersInQueue = gpt4Queue.proomptersInQueue;
info.gpt4.estimatedQueueTime = gpt4Queue.estimatedQueueTime; info.gpt4.estimatedQueueTime = gpt4Queue.estimatedQueueTime;
}
} }
return info; return info;
@@ -172,11 +148,9 @@ function getAnthropicInfo() {
const claudeInfo: Partial<ServiceInfo> = {}; const claudeInfo: Partial<ServiceInfo> = {};
const keys = keyPool.list().filter((k) => k.service === "anthropic"); const keys = keyPool.list().filter((k) => k.service === "anthropic");
claudeInfo.activeKeys = keys.filter((k) => !k.isDisabled).length; claudeInfo.activeKeys = keys.filter((k) => !k.isDisabled).length;
if (config.queueMode !== "none") { const queue = getQueueInformation("claude");
const queue = getQueueInformation("claude"); claudeInfo.proomptersInQueue = queue.proomptersInQueue;
claudeInfo.proomptersInQueue = queue.proomptersInQueue; claudeInfo.estimatedQueueTime = queue.estimatedQueueTime;
claudeInfo.estimatedQueueTime = queue.estimatedQueueTime;
}
return { claude: claudeInfo }; return { claude: claudeInfo };
} }
@@ -202,26 +176,24 @@ Logs are anonymous and do not contain IP addresses or timestamps. [You can see t
**If you are uncomfortable with this, don't send prompts to this proxy!**`; **If you are uncomfortable with this, don't send prompts to this proxy!**`;
} }
if (config.queueMode !== "none") { const waits: string[] = [];
const waits: string[] = []; infoBody += `\n## Estimated Wait Times\nIf the AI is busy, your prompt will processed when a slot frees up.`;
infoBody += `\n## Estimated Wait Times\nIf the AI is busy, your prompt will processed when a slot frees up.`;
if (config.openaiKey) { if (config.openaiKey) {
const turboWait = getQueueInformation("turbo").estimatedQueueTime; const turboWait = getQueueInformation("turbo").estimatedQueueTime;
const gpt4Wait = getQueueInformation("gpt-4").estimatedQueueTime; const gpt4Wait = getQueueInformation("gpt-4").estimatedQueueTime;
waits.push(`**Turbo:** ${turboWait}`); waits.push(`**Turbo:** ${turboWait}`);
if (keyPool.list().some((k) => k.isGpt4) && !config.turboOnly) { if (keyPool.list().some((k) => k.isGpt4) && !config.turboOnly) {
waits.push(`**GPT-4:** ${gpt4Wait}`); waits.push(`**GPT-4:** ${gpt4Wait}`);
}
} }
if (config.anthropicKey) {
const claudeWait = getQueueInformation("claude").estimatedQueueTime;
waits.push(`**Claude:** ${claudeWait}`);
}
infoBody += "\n\n" + waits.join(" / ");
} }
if (config.anthropicKey) {
const claudeWait = getQueueInformation("claude").estimatedQueueTime;
waits.push(`**Claude:** ${claudeWait}`);
}
infoBody += "\n\n" + waits.join(" / ");
if (customGreeting) { if (customGreeting) {
infoBody += `\n## Server Greeting\n infoBody += `\n## Server Greeting\n
${customGreeting}`; ${customGreeting}`;
@@ -231,9 +203,6 @@ ${customGreeting}`;
/** Returns queue time in seconds, or minutes + seconds if over 60 seconds. */ /** Returns queue time in seconds, or minutes + seconds if over 60 seconds. */
function getQueueInformation(partition: QueuePartition) { function getQueueInformation(partition: QueuePartition) {
if (config.queueMode === "none") {
return {};
}
const waitMs = getEstimatedWaitTime(partition); const waitMs = getEstimatedWaitTime(partition);
const waitTime = const waitTime =
waitMs < 60000 waitMs < 60000
+2 -9
View File
@@ -201,14 +201,7 @@ export class AnthropicKeyProvider implements KeyProvider<AnthropicKey> {
key.rateLimitedUntil = now + RATE_LIMIT_LOCKOUT; key.rateLimitedUntil = now + RATE_LIMIT_LOCKOUT;
} }
public remainingQuota() { public activeLimitInUsd() {
const activeKeys = this.keys.filter((k) => !k.isDisabled).length; return "∞";
const allKeys = this.keys.length;
if (activeKeys === 0) return 0;
return Math.round((activeKeys / allKeys) * 100) / 100;
}
public usageInUsd() {
return "$0.00 / ∞";
} }
} }
+1 -2
View File
@@ -52,8 +52,7 @@ export interface KeyProvider<T extends Key = Key> {
anyUnchecked(): boolean; anyUnchecked(): boolean;
incrementPrompt(hash: string): void; incrementPrompt(hash: string): void;
getLockoutPeriod(model: Model): number; getLockoutPeriod(model: Model): number;
remainingQuota(options?: Record<string, unknown>): number; activeLimitInUsd(options?: Record<string, unknown>): string;
usageInUsd(options?: Record<string, unknown>): string;
markRateLimited(hash: string): void; markRateLimited(hash: string): void;
} }
+9 -10
View File
@@ -32,9 +32,15 @@ export class KeyPool {
return this.keyProviders.flatMap((provider) => provider.list()); return this.keyProviders.flatMap((provider) => provider.list());
} }
public disable(key: Key): void { public disable(key: Key, reason: "quota" | "revoked"): void {
const service = this.getKeyProvider(key.service); const service = this.getKeyProvider(key.service);
service.disable(key); service.disable(key);
if (service instanceof OpenAIKeyProvider) {
service.update(key.hash, {
isRevoked: reason === "revoked",
isOverQuota: reason === "quota",
});
}
} }
public update(key: Key, props: AllowedPartial): void { public update(key: Key, props: AllowedPartial): void {
@@ -75,18 +81,11 @@ export class KeyPool {
} }
} }
public remainingQuota( public activeLimitInUsd(
service: AIService,
options?: Record<string, unknown>
): number {
return this.getKeyProvider(service).remainingQuota(options);
}
public usageInUsd(
service: AIService, service: AIService,
options?: Record<string, unknown> options?: Record<string, unknown>
): string { ): string {
return this.getKeyProvider(service).usageInUsd(options); return this.getKeyProvider(service).activeLimitInUsd(options);
} }
private getService(model: Model): AIService { private getService(model: Model): AIService {
+161 -99
View File
@@ -1,14 +1,24 @@
import axios, { AxiosError } from "axios"; import axios, { AxiosError } from "axios";
import { Configuration, OpenAIApi } from "openai";
import { logger } from "../../logger"; import { logger } from "../../logger";
import type { OpenAIKey, OpenAIKeyProvider } from "./provider"; import type { OpenAIKey, OpenAIKeyProvider } from "./provider";
/** Minimum time in between any two key checks. */
const MIN_CHECK_INTERVAL = 3 * 1000; // 3 seconds const MIN_CHECK_INTERVAL = 3 * 1000; // 3 seconds
const KEY_CHECK_PERIOD = 5 * 60 * 1000; // 5 minutes /**
* Minimum time in between checks for a given key. Because we can no longer
* read quota usage, there is little reason to check a single key more often
* than this.
**/
const KEY_CHECK_PERIOD = 60 * 60 * 1000; // 1 hour
const POST_CHAT_COMPLETIONS_URL = "https://api.openai.com/v1/chat/completions";
const GET_MODELS_URL = "https://api.openai.com/v1/models";
const GET_SUBSCRIPTION_URL = const GET_SUBSCRIPTION_URL =
"https://api.openai.com/dashboard/billing/subscription"; "https://api.openai.com/dashboard/billing/subscription";
const GET_USAGE_URL = "https://api.openai.com/dashboard/billing/usage";
type GetModelsResponse = {
data: [{ id: string }];
};
type GetSubscriptionResponse = { type GetSubscriptionResponse = {
plan: { title: string }; plan: { title: string };
@@ -18,10 +28,6 @@ type GetSubscriptionResponse = {
system_hard_limit_usd: number; system_hard_limit_usd: number;
}; };
type GetUsageResponse = {
total_usage: number;
};
type OpenAIError = { type OpenAIError = {
error: { type: string; code: string; param: unknown; message: string }; error: { type: string; code: string; param: unknown; message: string };
}; };
@@ -54,7 +60,8 @@ export class OpenAIKeyChecker {
/** /**
* Schedules the next check. If there are still keys yet to be checked, it * Schedules the next check. If there are still keys yet to be checked, it
* will schedule a check immediately for the next unchecked key. Otherwise, * will schedule a check immediately for the next unchecked key. Otherwise,
* it will schedule a check in several minutes for the oldest key. * it will schedule a check for the least recently checked key, respecting
* the minimum check interval.
**/ **/
private scheduleNextCheck() { private scheduleNextCheck() {
const enabledKeys = this.keys.filter((key) => !key.isDisabled); const enabledKeys = this.keys.filter((key) => !key.isDisabled);
@@ -94,8 +101,8 @@ export class OpenAIKeyChecker {
key.lastChecked < oldest.lastChecked ? key : oldest key.lastChecked < oldest.lastChecked ? key : oldest
); );
// Don't check any individual key more than once every 5 minutes. // Don't check any individual key too often.
// Also, don't check anything more often than once every 3 seconds. // Don't check anything at all at a rate faster than once per 3 seconds.
const nextCheck = Math.max( const nextCheck = Math.max(
oldestKey.lastChecked + KEY_CHECK_PERIOD, oldestKey.lastChecked + KEY_CHECK_PERIOD,
this.lastCheck + MIN_CHECK_INTERVAL this.lastCheck + MIN_CHECK_INTERVAL
@@ -122,47 +129,43 @@ export class OpenAIKeyChecker {
this.log.debug({ key: key.hash }, "Checking key..."); this.log.debug({ key: key.hash }, "Checking key...");
let isInitialCheck = !key.lastChecked; let isInitialCheck = !key.lastChecked;
try { try {
// During the initial check we need to get the subscription first because // We only need to check for provisioned models on the initial check.
// trials have different behavior.
if (isInitialCheck) { if (isInitialCheck) {
const subscription = await this.getSubscription(key); const [/* subscription,*/ provisionedModels, livenessTest] =
this.updateKey(key.hash, { isTrial: !subscription.has_payment_method }); await Promise.all([
if (key.isTrial) { // this.getSubscription(key),
this.log.debug( this.getProvisionedModels(key),
{ key: key.hash }, this.testLiveness(key),
"Attempting generation on trial key." ]);
);
await this.assertCanGenerate(key);
}
const [provisionedModels, usage] = await Promise.all([
this.getProvisionedModels(key),
this.getUsage(key),
]);
const updates = { const updates = {
isGpt4: provisionedModels.gpt4, isGpt4: provisionedModels.gpt4,
softLimit: subscription.soft_limit_usd, // softLimit: subscription.soft_limit_usd,
hardLimit: subscription.hard_limit_usd, // hardLimit: subscription.hard_limit_usd,
systemHardLimit: subscription.system_hard_limit_usd, // systemHardLimit: subscription.system_hard_limit_usd,
usage, isTrial: livenessTest.rateLimit <= 250,
softLimit: 0,
hardLimit: 0,
systemHardLimit: 0,
}; };
this.updateKey(key.hash, updates); this.updateKey(key.hash, updates);
} else { } else {
// Don't check provisioned models after the initial check because it's // Provisioned models don't change, so we don't need to check them again
// not likely to change. const [/* subscription, */ _livenessTest] = await Promise.all([
const [subscription, usage] = await Promise.all([ // this.getSubscription(key),
this.getSubscription(key), this.testLiveness(key),
this.getUsage(key),
]); ]);
const updates = { const updates = {
softLimit: subscription.soft_limit_usd, // softLimit: subscription.soft_limit_usd,
hardLimit: subscription.hard_limit_usd, // hardLimit: subscription.hard_limit_usd,
systemHardLimit: subscription.system_hard_limit_usd, // systemHardLimit: subscription.system_hard_limit_usd,
usage, softLimit: 0,
hardLimit: 0,
systemHardLimit: 0,
}; };
this.updateKey(key.hash, updates); this.updateKey(key.hash, updates);
} }
this.log.info( this.log.info(
{ key: key.hash, usage: key.usage, hardLimit: key.hardLimit }, { key: key.hash, hardLimit: key.hardLimit },
"Key check complete." "Key check complete."
); );
} catch (error) { } catch (error) {
@@ -175,17 +178,28 @@ export class OpenAIKeyChecker {
// Only enqueue the next check if this wasn't a startup check, since those // Only enqueue the next check if this wasn't a startup check, since those
// are batched together elsewhere. // are batched together elsewhere.
if (!isInitialCheck) { if (!isInitialCheck) {
this.scheduleNextCheck(); // this.scheduleNextCheck();
} }
} }
private async getProvisionedModels( private async getProvisionedModels(
key: OpenAIKey key: OpenAIKey
): Promise<{ turbo: boolean; gpt4: boolean }> { ): Promise<{ turbo: boolean; gpt4: boolean }> {
const openai = new OpenAIApi(new Configuration({ apiKey: key.key })); const opts = { headers: { Authorization: `Bearer ${key.key}` } };
const models = (await openai.listModels()!).data.data; const { data } = await axios.get<GetModelsResponse>(GET_MODELS_URL, opts);
const models = data.data;
const turbo = models.some(({ id }) => id.startsWith("gpt-3.5")); const turbo = models.some(({ id }) => id.startsWith("gpt-3.5"));
const gpt4 = models.some(({ id }) => id.startsWith("gpt-4")); const gpt4 = models.some(({ id }) => id.startsWith("gpt-4"));
// We want to update the key's `isGpt4` flag here, but we don't want to
// update its `lastChecked` timestamp because we need to let the liveness
// check run before we can consider the key checked.
// Need to use `find` here because keys are cloned from the pool.
const keyFromPool = this.keys.find((k) => k.hash === key.hash)!;
this.updateKey(key.hash, {
isGpt4: gpt4,
lastChecked: keyFromPool.lastChecked,
});
return { turbo, gpt4 }; return { turbo, gpt4 };
} }
@@ -194,89 +208,137 @@ export class OpenAIKeyChecker {
GET_SUBSCRIPTION_URL, GET_SUBSCRIPTION_URL,
{ headers: { Authorization: `Bearer ${key.key}` } } { headers: { Authorization: `Bearer ${key.key}` } }
); );
// See note above about updating the key's `lastChecked` timestamp.
const keyFromPool = this.keys.find((k) => k.hash === key.hash)!;
this.updateKey(key.hash, {
isTrial: !data.has_payment_method,
lastChecked: keyFromPool.lastChecked,
});
return data; return data;
} }
private async getUsage(key: OpenAIKey) {
const querystring = OpenAIKeyChecker.getUsageQuerystring(key.isTrial);
const url = `${GET_USAGE_URL}?${querystring}`;
const { data } = await axios.get<GetUsageResponse>(url, {
headers: { Authorization: `Bearer ${key.key}` },
});
return parseFloat((data.total_usage / 100).toFixed(2));
}
private handleAxiosError(key: OpenAIKey, error: AxiosError) { private handleAxiosError(key: OpenAIKey, error: AxiosError) {
if (error.response && OpenAIKeyChecker.errorIsOpenAiError(error)) { if (error.response && OpenAIKeyChecker.errorIsOpenAIError(error)) {
const { status, data } = error.response; const { status, data } = error.response;
if (status === 401) { if (status === 401) {
this.log.warn( this.log.warn(
{ key: key.hash, error: data }, { key: key.hash, error: data },
"Key is invalid or revoked. Disabling key." "Key is invalid or revoked. Disabling key."
); );
this.updateKey(key.hash, { isDisabled: true }); this.updateKey(key.hash, {
} else if (status === 429 && data.error.type === "insufficient_quota") { isDisabled: true,
this.log.warn( isRevoked: true,
{ key: key.hash, isTrial: key.isTrial, error: data }, isGpt4: false,
"Key is out of quota. Disabling key." });
); } else if (status === 429) {
this.updateKey(key.hash, { isDisabled: true }); switch (data.error.type) {
} case "insufficient_quota":
else if (status === 429 && data.error.type === "access_terminated") { case "access_terminated":
this.log.warn( case "billing_not_active":
{ key: key.hash, isTrial: key.isTrial, error: data }, const isOverQuota = data.error.type === "insufficient_quota";
"Key has been terminated due to policy violations. Disabling key." const isRevoked = !isOverQuota;
); const isGpt4 = isRevoked ? false : key.isGpt4;
this.updateKey(key.hash, { isDisabled: true }); this.log.warn(
{ key: key.hash, rateLimitType: data.error.type, error: data },
"Key returned a non-transient 429 error. Disabling key."
);
this.updateKey(key.hash, {
isDisabled: true,
isRevoked,
isOverQuota,
isGpt4,
});
break;
case "requests":
// Trial keys have extremely low requests-per-minute limits and we
// can often hit them just while checking the key, so we need to
// retry the check later to know if the key has quota remaining.
this.log.warn(
{ key: key.hash, error: data },
"Key is currently rate limited, so its liveness cannot be checked. Retrying in fifteen seconds."
);
// To trigger a shorter than usual delay before the next check, we
// will set its `lastChecked` to (NOW - (KEY_CHECK_PERIOD - 15s)).
// This will cause the usual key check scheduling logic to schedule
// the next check in 15 seconds. This also prevents the key from
// holding up startup checks for other keys.
const fifteenSeconds = 15 * 1000;
const next = Date.now() - (KEY_CHECK_PERIOD - fifteenSeconds);
this.updateKey(key.hash, { lastChecked: next });
break;
case "tokens":
// Hitting a token rate limit, even on a trial key, actually implies
// that the key is valid and can generate completions, so we will
// treat this as effectively a successful `testLiveness` call.
this.log.info(
{ key: key.hash },
"Key is currently `tokens` rate limited; assuming it is operational."
);
this.updateKey(key.hash, { lastChecked: Date.now() });
break;
default:
this.log.error(
{ key: key.hash, rateLimitType: data.error.type, error: data },
"Encountered unexpected rate limit error class while checking key. This may indicate a change in the API; please report this."
);
// We don't know what this error means, so we just let the key
// through and maybe it will fail when someone tries to use it.
this.updateKey(key.hash, { lastChecked: Date.now() });
}
} else { } else {
this.log.error( this.log.error(
{ key: key.hash, status, error: data }, { key: key.hash, status, error: data },
"Encountered API error while checking key." "Encountered unexpected error status while checking key. This may indicate a change in the API; please report this."
); );
this.updateKey(key.hash, { lastChecked: Date.now() });
} }
return; return;
} }
this.log.error( this.log.error(
{ key: key.hash, error }, { key: key.hash, error: error.message },
"Network error while checking key; trying again later." "Network error while checking key; trying this key again in a minute."
); );
const oneMinute = 60 * 1000;
const next = Date.now() - (KEY_CHECK_PERIOD - oneMinute);
this.updateKey(key.hash, { lastChecked: next });
} }
/** /**
* Trial key usage reporting is inaccurate, so we need to run an actual * Tests whether the key is valid and has quota remaining. The request we send
* completion to test them for liveness. * is actually not valid, but keys which are revoked or out of quota will fail
* with a 401 or 429 error instead of the expected 400 Bad Request error.
* This lets us avoid test keys without spending any quota.
*
* We use the rate limit header to determine whether it's a trial key.
*/ */
private async assertCanGenerate(key: OpenAIKey): Promise<void> { private async testLiveness(key: OpenAIKey): Promise<{ rateLimit: number }> {
const openai = new OpenAIApi(new Configuration({ apiKey: key.key })); const payload = {
// This will throw an AxiosError if the key is invalid or out of quota.
await openai.createChatCompletion({
model: "gpt-3.5-turbo", model: "gpt-3.5-turbo",
messages: [{ role: "user", content: "Hello" }], max_tokens: -1,
max_tokens: 1, messages: [{ role: "user", content: "" }],
}); };
const { headers, data } = await axios.post<OpenAIError>(
POST_CHAT_COMPLETIONS_URL,
payload,
{
headers: { Authorization: `Bearer ${key.key}` },
validateStatus: (status) => status === 400,
}
);
const rateLimitHeader = headers["x-ratelimit-limit-requests"];
const rateLimit = parseInt(rateLimitHeader) || 3500; // trials have 200
// invalid_request_error is the expected error
if (data.error.type !== "invalid_request_error") {
this.log.warn(
{ key: key.hash, error: data },
"Unexpected 400 error class while checking key; assuming key is valid, but this may indicate a change in the API."
);
}
return { rateLimit };
} }
static getUsageQuerystring(isTrial: boolean) { static errorIsOpenAIError(
// For paid keys, the limit resets every month, so we can use the first day
// of the current month.
// For trial keys, the limit does not reset and we don't know when the key
// was created, so we use 99 days ago because that's as far back as the API
// will let us go.
// End date needs to be set to the beginning of the next day so that we get
// usage for the current day.
const today = new Date();
const startDate = isTrial
? new Date(today.getTime() - 99 * 24 * 60 * 60 * 1000)
: new Date(today.getFullYear(), today.getMonth(), 1);
const endDate = new Date(today.getTime() + 24 * 60 * 60 * 1000);
return `start_date=${startDate.toISOString().split("T")[0]}&end_date=${
endDate.toISOString().split("T")[0]
}`;
}
static errorIsOpenAiError(
error: AxiosError error: AxiosError
): error is AxiosError<OpenAIError> { ): error is AxiosError<OpenAIError> {
const data = error.response?.data as any; const data = error.response?.data as any;
+17 -35
View File
@@ -18,8 +18,10 @@ export const OPENAI_SUPPORTED_MODELS: readonly OpenAIModel[] = [
export interface OpenAIKey extends Key { export interface OpenAIKey extends Key {
readonly service: "openai"; readonly service: "openai";
/** The current usage of this key. */ /** Set when key check returns a 401. */
usage: number; isRevoked: boolean;
/** Set when key check returns a non-transient 429. */
isOverQuota: boolean;
/** Threshold at which a warning email will be sent by OpenAI. */ /** Threshold at which a warning email will be sent by OpenAI. */
softLimit: number; softLimit: number;
/** Threshold at which the key will be disabled because it has reached the user-defined limit. */ /** Threshold at which the key will be disabled because it has reached the user-defined limit. */
@@ -54,7 +56,7 @@ export interface OpenAIKey extends Key {
export type OpenAIKeyUpdate = Omit< export type OpenAIKeyUpdate = Omit<
Partial<OpenAIKey>, Partial<OpenAIKey>,
"key" | "hash" | "lastUsed" | "lastChecked" | "promptCount" "key" | "hash" | "promptCount"
>; >;
export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> { export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> {
@@ -80,6 +82,8 @@ export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> {
isGpt4: true, isGpt4: true,
isTrial: false, isTrial: false,
isDisabled: false, isDisabled: false,
isRevoked: false,
isOverQuota: false,
softLimit: 0, softLimit: 0,
hardLimit: 0, hardLimit: 0,
systemHardLimit: 0, systemHardLimit: 0,
@@ -183,7 +187,7 @@ export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> {
/** Called by the key checker to update key information. */ /** Called by the key checker to update key information. */
public update(keyHash: string, update: OpenAIKeyUpdate) { public update(keyHash: string, update: OpenAIKeyUpdate) {
const keyFromPool = this.keys.find((k) => k.hash === keyHash)!; const keyFromPool = this.keys.find((k) => k.hash === keyHash)!;
Object.assign(keyFromPool, { ...update, lastChecked: Date.now() }); Object.assign(keyFromPool, { lastChecked: Date.now(), ...update });
// this.writeKeyStatus(); // this.writeKeyStatus();
} }
@@ -192,9 +196,6 @@ export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> {
const keyFromPool = this.keys.find((k) => k.key === key.key); const keyFromPool = this.keys.find((k) => k.key === key.key);
if (!keyFromPool || keyFromPool.isDisabled) return; if (!keyFromPool || keyFromPool.isDisabled) return;
keyFromPool.isDisabled = true; keyFromPool.isDisabled = true;
// If it's disabled just set the usage to the hard limit so it doesn't
// mess with the aggregate usage.
keyFromPool.usage = keyFromPool.hardLimit;
this.log.warn({ key: key.hash }, "Key disabled"); this.log.warn({ key: key.hash }, "Key disabled");
} }
@@ -301,35 +302,16 @@ export class OpenAIKeyProvider implements KeyProvider<OpenAIKey> {
} }
} }
/** Returns the remaining aggregate quota for all keys as a percentage. */ /**
public remainingQuota({ gpt4 }: { gpt4: boolean } = { gpt4: false }): number { * Returns the total quota limit of all keys in USD. Keys which are disabled
const keys = this.keys.filter((k) => k.isGpt4 === gpt4); * are not included in the total.
if (keys.length === 0) return 0; */
public activeLimitInUsd(
const totalUsage = keys.reduce((acc, key) => { { gpt4 }: { gpt4: boolean } = { gpt4: false }
// Keys can slightly exceed their quota ): string {
return acc + Math.min(key.usage, key.hardLimit); const keys = this.keys.filter((k) => !k.isDisabled && k.isGpt4 === gpt4);
}, 0);
const totalLimit = keys.reduce((acc, { hardLimit }) => acc + hardLimit, 0); const totalLimit = keys.reduce((acc, { hardLimit }) => acc + hardLimit, 0);
return `$${totalLimit.toFixed(2)}`;
return 1 - totalUsage / totalLimit;
}
/** Returns used and available usage in USD. */
public usageInUsd({ gpt4 }: { gpt4: boolean } = { gpt4: false }): string {
const keys = this.keys.filter((k) => k.isGpt4 === gpt4);
if (keys.length === 0) return "???";
const totalHardLimit = keys.reduce(
(acc, { hardLimit }) => acc + hardLimit,
0
);
const totalUsage = keys.reduce((acc, key) => {
// Keys can slightly exceed their quota
return acc + Math.min(key.usage, key.hardLimit);
}, 0);
return `$${totalUsage.toFixed(2)} / $${totalHardLimit.toFixed(2)}`;
} }
/** Writes key status to disk. */ /** Writes key status to disk. */
+167
View File
@@ -0,0 +1,167 @@
/**
* Very scuffed persistence system using a Huggingface's Datasets git repo as a
* file system. We use this because it's free and everyone is already deploying
* to Huggingface's Spaces feature anyway, so they can easily create a Dataset
* repository too rather than having to find some other place to host files.
*
* We periodically commit to the repo, and then pull from it when we need to
* read data. This is a bit slow, but it's fine for our purposes.
*/
import fs from "fs";
import os from "os";
import path from "path";
import { spawn } from "child_process";
import { config, Config } from "./config";
import { logger } from "./logger";
const log = logger.child({ module: "dataset-persistence" });
let singleton: DatasetPersistence | null = null;
class DatasetPersistence {
  private initialized: boolean = false;
  // Deploy key is written here so git can authenticate against Huggingface.
  private keyPath = `${os.tmpdir()}/id_rsa`;
  // Local working clone of the Dataset repository.
  private repoPath = `${os.tmpdir()}/oai-proxy-dataset`;
  private repoUrl!: string;
  private sshKey!: string;

  constructor() {
    // Enforce a single shared instance; repeated `new` returns the original.
    if (singleton) return singleton;
    if (config.gatekeeperStore !== "huggingface_datasets") return;
    DatasetPersistence.assertConfigured(config);
    this.repoUrl = config.hfDatasetRepoUrl;
    this.sshKey = config.hfPrivateSshKey.trim();
    singleton = this;
  }

  /**
   * Writes the SSH key, clones the Dataset repo, and verifies write access.
   * Idempotent; safe to call before every read/write. Throws if the repo
   * cannot be cloned or pushed to.
   */
  async init() {
    if (this.initialized) return;
    log.info(
      { repoUrl: this.repoUrl, keyPath: this.keyPath, repoPath: this.repoPath },
      "Initializing Huggingface Datasets persistence."
    );
    try {
      await this.setupSshKey();

      // The clone must run from *outside* the repo directory, which doesn't
      // exist yet; auth comes from GIT_SSH_COMMAND set in `runGit`.
      log.info("Cloning repo...");
      const cloneOutput = await this.runGit(
        `clone --depth 1 ${this.repoUrl} ${this.repoPath}`,
        os.tmpdir()
      );
      log.info({ output: cloneOutput }, "Cloned repo.");

      // Repo-local config can only be set once the clone exists.
      await this.runGit(
        "config user.email 'oai-proxy-persistence@example.com'"
      );
      await this.runGit("config user.name 'Proxy Persistence'");
      await this.runGit(`config core.sshCommand 'ssh -i ${this.keyPath}'`);

      // Test write access. Git prints "Everything up-to-date" on stderr, so
      // `runGit` returns combined stdout+stderr for us to inspect.
      const pushOutput = await this.runGit("push");
      if (!pushOutput.includes("Everything up-to-date")) {
        log.error({ output: pushOutput }, "Unexpected output from git push.");
        throw new Error("Unable to push to repo.");
      }
      log.info("Datasets configuration looks good.");
    } catch (e) {
      log.error(
        { error: e },
        "Failed to initialize Huggingface Datasets persistence."
      );
      throw e;
    }
    this.initialized = true;
  }

  /**
   * Reads the committed contents of `key` from the repo, or null if the file
   * is missing or any git operation fails.
   */
  async get(key: string): Promise<Buffer | null> {
    try {
      await this.init();
      // Restore the committed version of the file before reading it, in case
      // a previous local write was never pushed.
      await this.runGit(`checkout HEAD -- ${key}`);
      const filePath = path.join(this.repoPath, key);
      return await fs.promises.readFile(filePath);
    } catch (e) {
      log.error({ error: e }, "Failed to get key from Dataset repo.");
      return null;
    }
  }

  /**
   * Writes `value` to `key` and commits+pushes it. Errors are logged and
   * swallowed (best-effort persistence).
   */
  async set(key: string, value: Buffer) {
    try {
      await this.init();
      // TODO: Need to set up LFS for >10MB files. Check before writing so we
      // don't leave an oversized file sitting in the working tree.
      if (value.byteLength > 10 * 1024 * 1024) {
        throw new Error("File too large for non-LFS storage.");
      }
      await fs.promises.writeFile(`${this.repoPath}/${key}`, value);
      await this.runGit(`add ${key}`);
      await this.runGit(`commit -m "Update ${key}"`);
      await this.runGit("push");
    } catch (e) {
      log.error({ error: e }, "Failed to set key in Dataset repo.");
    }
  }

  /** Discards local state and resets the clone to the remote head. */
  protected async cleanup() {
    try {
      await this.init();
      await this.runGit("fetch --depth 1");
      await this.runGit("reset --hard FETCH_HEAD");
    } catch (e) {
      log.error({ error: e }, "Failed to cleanup Dataset repo.");
    }
  }

  /** Writes the configured private key to disk with owner-only permissions. */
  protected async setupSshKey() {
    fs.writeFileSync(this.keyPath, this.sshKey);
    fs.chmodSync(this.keyPath, 0o600);
  }

  /**
   * Runs a git command and resolves with its combined stdout+stderr.
   * @param command - Arguments passed after `git -C <cwd>`.
   * @param cwd - Directory to run in; defaults to the repo clone.
   */
  protected async runGit(command: string, cwd = this.repoPath) {
    const cmd = `git -C ${cwd} ${command}`;
    log.debug({ command: cmd }, "Running git command.");
    return new Promise<string>((resolve, reject) => {
      const proc = spawn(cmd, {
        shell: true,
        env: {
          ...process.env,
          // Non-interactive auth with the deploy key; works even before the
          // clone exists (repo-local core.sshCommand cannot).
          GIT_SSH_COMMAND: `ssh -i ${this.keyPath} -o StrictHostKeyChecking=no`,
        },
      });
      // stdout and stderr are interleaved into one buffer because git writes
      // status messages (e.g. push results) to stderr.
      const output: string[] = [];
      proc.stdout.on("data", (data) => output.push(data.toString()));
      proc.stderr.on("data", (data) => output.push(data.toString()));
      proc.on("close", (code) => {
        if (code !== 0) {
          const errorOutput = output.join("");
          log.error({ code, errorOutput }, "Git command failed.");
          reject(
            new Error(
              `Git command failed with exit code ${code}: ${errorOutput}`
            )
          );
        } else {
          resolve(output.join(""));
        }
      });
    });
  }

  static assertConfigured(input: Config): asserts input is ConfigWithDatasets {
    if (!input.hfDatasetRepoUrl) {
      throw new Error("HF_DATASET_REPO_URL is required when using Datasets.");
    }
    if (!input.hfPrivateSshKey) {
      throw new Error("HF_PRIVATE_SSH_KEY is required when using Datasets.");
    }
  }
}
type ConfigWithDatasets = Config & {
hfDatasetRepoUrl: string;
hfPrivateSshKey: string;
};
export { DatasetPersistence };
-2
View File
@@ -13,7 +13,6 @@ import {
createPreprocessorMiddleware, createPreprocessorMiddleware,
finalizeBody, finalizeBody,
languageFilter, languageFilter,
limitOutputTokens,
removeOriginHeaders, removeOriginHeaders,
} from "./middleware/request"; } from "./middleware/request";
import { import {
@@ -76,7 +75,6 @@ const rewriteAnthropicRequest = (
addKey, addKey,
addAnthropicPreamble, addAnthropicPreamble,
languageFilter, languageFilter,
limitOutputTokens,
blockZoomerOrigins, blockZoomerOrigins,
removeOriginHeaders, removeOriginHeaders,
finalizeBody, finalizeBody,
+64
View File
@@ -0,0 +1,64 @@
/**
* Authenticates RisuAI.xyz users using a special x-risu-tk header provided by
* RisuAI.xyz. This lets us rate limit and limit queue concurrency properly,
* since otherwise RisuAI.xyz users share the same IP address and can't be
* distinguished.
* Contributors: @kwaroran
*/
import axios from "axios";
import { Request, Response, NextFunction } from "express";
const RISUAI_TOKEN_CHECKER_URL = "https://sv.risuai.xyz/public/api/checktoken";
const validRisuTokens = new Set<string>();
let lastFailedRisuTokenCheck = 0;
export async function checkRisuToken(
  req: Request,
  _res: Response,
  next: NextFunction
) {
  const header = req.header("x-risu-tk") || null;
  if (!header) {
    return next();
  }

  // Back off for a minute after a failed check so a RisuAI outage doesn't
  // add latency or errors to every incoming request.
  const timeSinceLastFailedCheck = Date.now() - lastFailedRisuTokenCheck;
  if (timeSinceLastFailedCheck < 60 * 1000) {
    req.log.warn(
      { timeSinceLastFailedCheck },
      "Skipping RisuAI token check due to recent failed check"
    );
    return next();
  }

  try {
    if (validRisuTokens.has(header)) {
      req.log.debug("RisuAI token already known");
      req.risuToken = header;
    } else {
      req.log.info("Authenticating new RisuAI token");
      // NB: `vaild` (sic) is the actual field name returned by the RisuAI
      // API; do not "fix" the spelling.
      const validCheck = await axios.post<{ vaild: boolean }>(
        RISUAI_TOKEN_CHECKER_URL,
        { token: header },
        // Bounded timeout so a hung upstream can't stall the request.
        { headers: { "Content-Type": "application/json" }, timeout: 10000 }
      );
      if (!validCheck.data.vaild) {
        req.log.warn("Invalid RisuAI token; using IP instead");
      } else {
        req.log.info("RisuAI token authenticated");
        validRisuTokens.add(header);
        req.risuToken = header;
      }
    }
  } catch (err) {
    lastFailedRisuTokenCheck = Date.now();
    // Catch variables are untyped; non-Error throws have no `.message`.
    const message = err instanceof Error ? err.message : String(err);
    req.log.warn(
      { error: message },
      "Error authenticating RisuAI token; using IP instead"
    );
  }
  next();
}
-8
View File
@@ -13,7 +13,6 @@ import {
createPreprocessorMiddleware, createPreprocessorMiddleware,
finalizeBody, finalizeBody,
languageFilter, languageFilter,
limitOutputTokens,
transformKoboldPayload, transformKoboldPayload,
} from "./middleware/request"; } from "./middleware/request";
import { import {
@@ -34,18 +33,11 @@ const rewriteRequest = (
req: Request, req: Request,
res: Response res: Response
) => { ) => {
if (config.queueMode !== "none") {
const msg = `Queueing is enabled on this proxy instance and is incompatible with the KoboldAI endpoint. Use the OpenAI endpoint instead.`;
proxyReq.destroy(new Error(msg));
return;
}
req.body.stream = false; req.body.stream = false;
const rewriterPipeline = [ const rewriterPipeline = [
addKey, addKey,
transformKoboldPayload, transformKoboldPayload,
languageFilter, languageFilter,
limitOutputTokens,
finalizeBody, finalizeBody,
]; ];
+3 -4
View File
@@ -21,7 +21,7 @@ export function writeErrorResponse(
statusCode: number, statusCode: number,
errorPayload: Record<string, any> errorPayload: Record<string, any>
) { ) {
const errorSource = errorPayload.error?.type.startsWith("proxy") const errorSource = errorPayload.error?.type?.startsWith("proxy")
? "proxy" ? "proxy"
: "upstream"; : "upstream";
@@ -60,8 +60,7 @@ export const handleProxyError: httpProxy.ErrorCallback = (err, req, res) => {
export const handleInternalError = ( export const handleInternalError = (
err: Error, err: Error,
req: Request, req: Request,
res: Response, res: Response
errorType: string = "proxy_internal_error"
) => { ) => {
try { try {
const isZod = err instanceof ZodError; const isZod = err instanceof ZodError;
@@ -90,7 +89,7 @@ export const handleInternalError = (
} else { } else {
writeErrorResponse(req, res, 500, { writeErrorResponse(req, res, 500, {
error: { error: {
type: errorType, type: "proxy_internal_error",
proxy_note: `Reverse proxy encountered an error before it could reach the upstream API.`, proxy_note: `Reverse proxy encountered an error before it could reach the upstream API.`,
message: err.message, message: err.message,
stack: err.stack, stack: err.stack,
@@ -0,0 +1,129 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../config";
import { countTokens } from "../../../tokenization";
import { RequestPreprocessor } from ".";
const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
/**
 * Assigns `req.promptTokens` and `req.outputTokens` based on the request body
 * and outbound API format, which combined determine the size of the context.
 * If the context is too large, an error is thrown.
 * This preprocessor should run after any preprocessor that transforms the
 * request body.
 */
export const checkContextSize: RequestPreprocessor = async (req) => {
  let prompt;
  if (req.outboundApi === "openai") {
    req.outputTokens = req.body.max_tokens;
    prompt = req.body.messages;
  } else if (req.outboundApi === "anthropic") {
    req.outputTokens = req.body.max_tokens_to_sample;
    prompt = req.body.prompt;
  } else {
    throw new Error(`Unknown outbound API: ${req.outboundApi}`);
  }

  const tokenResult = await countTokens({
    req,
    prompt,
    service: req.outboundApi,
  });
  req.promptTokens = tokenResult.token_count;

  // TODO: Remove once token counting is stable
  req.log.debug({ result: tokenResult }, "Counted prompt tokens.");
  req.debug = { ...(req.debug ?? {}), ...tokenResult };

  maybeReassignModel(req);
  validateContextSize(req);
};
/**
 * Validates that prompt + requested output tokens fit within both the model's
 * context window and the operator-configured proxy limit. Throws a ZodError
 * with a user-facing message if the context is too large.
 */
function validateContextSize(req: Request) {
  assertRequestHasTokenCounts(req);
  const promptTokens = req.promptTokens;
  const outputTokens = req.outputTokens;
  const contextTokens = promptTokens + outputTokens;
  const model = req.body.model;

  // Operator-configured cap; unset/zero means no proxy-level limit.
  const proxyMax =
    (req.outboundApi === "openai" ? OPENAI_MAX_CONTEXT : CLAUDE_MAX_CONTEXT) ||
    Number.MAX_SAFE_INTEGER;

  // Dots are escaped so e.g. "gpt-3x5-turbo" can't accidentally match.
  let modelMax = 0;
  if (model.match(/gpt-3\.5-turbo-16k/)) {
    modelMax = 16384;
  } else if (model.match(/gpt-3\.5-turbo/)) {
    modelMax = 4096;
  } else if (model.match(/gpt-4-32k/)) {
    modelMax = 32768;
  } else if (model.match(/gpt-4/)) {
    modelMax = 8192;
  } else if (model.match(/claude-(?:instant-)?v1(?:\.\d)?-100k/)) {
    modelMax = 100000;
  } else if (model.match(/claude-(?:instant-)?v1(?:\.\d)?$/)) {
    modelMax = 9000;
  } else if (model.match(/claude-2/)) {
    modelMax = 100000;
  } else {
    // Don't really want to throw here because I don't want to have to update
    // this ASAP every time a new model is released.
    req.log.warn({ model }, "Unknown model, using 100k token limit.");
    modelMax = 100000;
  }

  const finalMax = Math.min(proxyMax, modelMax);
  z.number()
    .int()
    .max(finalMax, {
      message: `Your request exceeds the context size limit for this model or proxy. (max: ${finalMax} tokens, requested: ${promptTokens} prompt + ${outputTokens} output = ${contextTokens} context tokens)`,
    })
    .parse(contextTokens);

  req.log.debug(
    { promptTokens, outputTokens, contextTokens, modelMax, proxyMax },
    "Prompt size validated"
  );
  // NOTE(review): assumes `req.debug` was initialized by `checkContextSize`
  // before this runs — confirm if this is ever called standalone.
  req.debug.prompt_tokens = promptTokens;
  req.debug.max_model_tokens = modelMax;
  req.debug.max_proxy_tokens = proxyMax;
}
/** Narrows `req` to a request whose token counts have been populated. */
function assertRequestHasTokenCounts(
  req: Request
): asserts req is Request & { promptTokens: number; outputTokens: number } {
  const tokenCountsSchema = z
    .object({
      promptTokens: z.number().int().min(1),
      outputTokens: z.number().int().min(1),
    })
    .nonstrict();
  tokenCountsSchema.parse(req);
}
/**
 * For OpenAI-to-Anthropic requests, users can't specify the model, so we need
 * to pick one based on the final context size. Ideally this would happen in
 * the `transformOutboundPayload` preprocessor, but we don't have the context
 * size at that point (and need a transformed body to calculate it).
 */
function maybeReassignModel(req: Request) {
  const isOpenAIToAnthropic =
    req.inboundApi === "openai" && req.outboundApi === "anthropic";
  if (!isOpenAIToAnthropic) return;

  const bigModel = process.env.CLAUDE_BIG_MODEL || "claude-v1-100k";
  const contextSize = req.promptTokens! + req.outputTokens!;
  if (contextSize <= 8500) {
    // Small model is the default already set in `transformOutboundPayload`
    return;
  }

  req.log.debug(
    { model: bigModel, contextSize },
    "Using Claude 100k model for OpenAI-to-Anthropic request"
  );
  req.body.model = bigModel;
}
@@ -1,36 +0,0 @@
import { countTokens } from "../../../tokenization";
import { RequestPreprocessor } from ".";
import { openAIMessagesToClaudePrompt } from "./transform-outbound-payload";
/**
 * Counts the tokens in the incoming prompt and stores the result on
 * `req.promptTokens` (plus the full counter output on `req.debug`).
 * Tokenization is done against the *outbound* API's tokenizer, so
 * OpenAI-formatted messages are first flattened to a Claude prompt string
 * when the request is being translated to Anthropic.
 */
export const checkPromptSize: RequestPreprocessor = async (req) => {
  // OpenAI clients send a `messages` array; Anthropic clients send a
  // single `prompt` string.
  const prompt =
    req.inboundApi === "openai" ? req.body.messages : req.body.prompt;

  let result;
  if (req.outboundApi === "openai") {
    result = await countTokens({ req, prompt, service: "openai" });
  } else {
    // If we're doing OpenAI-to-Anthropic, we need to convert the messages to a
    // prompt first before counting tokens, as that process affects the token
    // count.
    let promptStr =
      req.inboundApi === "anthropic"
        ? prompt
        : openAIMessagesToClaudePrompt(prompt);
    result = await countTokens({
      req,
      prompt: promptStr,
      service: "anthropic",
    });
  }
  req.promptTokens = result.token_count;

  // TODO: Remove once token counting is stable
  req.log.debug({ result }, "Counted prompt tokens");
  // Merge the counter's full output into the per-request debug payload.
  req.debug = req.debug ?? {};
  req.debug = {
    ...req.debug,
    ...result,
  };
};
+1 -2
View File
@@ -4,7 +4,7 @@ import type { ProxyReqCallback } from "http-proxy";
// Express middleware (runs before http-proxy-middleware, can be async) // Express middleware (runs before http-proxy-middleware, can be async)
export { createPreprocessorMiddleware } from "./preprocess"; export { createPreprocessorMiddleware } from "./preprocess";
export { checkPromptSize } from "./check-prompt-size"; export { checkContextSize } from "./check-context-size";
export { setApiFormat } from "./set-api-format"; export { setApiFormat } from "./set-api-format";
export { transformOutboundPayload } from "./transform-outbound-payload"; export { transformOutboundPayload } from "./transform-outbound-payload";
@@ -15,7 +15,6 @@ export { blockZoomerOrigins } from "./block-zoomer-origins";
export { finalizeBody } from "./finalize-body"; export { finalizeBody } from "./finalize-body";
export { languageFilter } from "./language-filter"; export { languageFilter } from "./language-filter";
export { limitCompletions } from "./limit-completions"; export { limitCompletions } from "./limit-completions";
export { limitOutputTokens } from "./limit-output-tokens";
export { removeOriginHeaders } from "./remove-origin-headers"; export { removeOriginHeaders } from "./remove-origin-headers";
export { transformKoboldPayload } from "./transform-kobold-payload"; export { transformKoboldPayload } from "./transform-kobold-payload";
@@ -1,46 +0,0 @@
import { Request } from "express";
import { config } from "../../../config";
import { isCompletionRequest } from "../common";
import { ProxyRequestMiddleware } from ".";
/**
 * Enforce a maximum number of tokens requested from the model. Clamps the
 * user's requested max output tokens to the configured per-API ceiling and
 * writes the result back into the outbound payload.
 */
export const limitOutputTokens: ProxyRequestMiddleware = (_proxyReq, req) => {
  // TODO: do all of this shit in the zod validator
  if (isCompletionRequest(req)) {
    const requestedMax = Number.parseInt(getMaxTokensFromRequest(req));
    const apiMax =
      req.outboundApi === "openai"
        ? config.maxOutputTokensOpenAI
        : config.maxOutputTokensAnthropic;

    // parseInt yields NaN for a missing/garbage value, and NaN is still
    // `typeof "number"`, so a typeof check can never catch it — test with
    // Number.isNaN so we don't write NaN into the payload.
    let maxTokens = Number.isNaN(requestedMax) ? apiMax : requestedMax;
    maxTokens = Math.min(maxTokens, apiMax);

    if (req.outboundApi === "openai") {
      req.body.max_tokens = maxTokens;
    } else if (req.outboundApi === "anthropic") {
      req.body.max_tokens_to_sample = maxTokens;
    }

    if (requestedMax !== maxTokens) {
      req.log.info(
        { requestedMax, configMax: apiMax, final: maxTokens },
        "Limiting user's requested max output tokens"
      );
    }
  }
};
/** Reads the outbound payload's max-output-tokens field for the target API. */
function getMaxTokensFromRequest(req: Request) {
  const api = req.outboundApi;
  if (api === "anthropic") return req.body?.max_tokens_to_sample;
  if (api === "openai") return req.body?.max_tokens;
  throw new Error(`Unknown service: ${api}`);
}
+3 -3
View File
@@ -2,7 +2,7 @@ import { RequestHandler } from "express";
import { handleInternalError } from "../common"; import { handleInternalError } from "../common";
import { import {
RequestPreprocessor, RequestPreprocessor,
checkPromptSize, checkContextSize,
setApiFormat, setApiFormat,
transformOutboundPayload, transformOutboundPayload,
} from "."; } from ".";
@@ -17,8 +17,8 @@ export const createPreprocessorMiddleware = (
): RequestHandler => { ): RequestHandler => {
const preprocessors: RequestPreprocessor[] = [ const preprocessors: RequestPreprocessor[] = [
setApiFormat(apiFormat), setApiFormat(apiFormat),
checkPromptSize,
transformOutboundPayload, transformOutboundPayload,
checkContextSize,
...(additionalPreprocessors ?? []), ...(additionalPreprocessors ?? []),
]; ];
@@ -30,7 +30,7 @@ export const createPreprocessorMiddleware = (
next(); next();
} catch (error) { } catch (error) {
req.log.error(error, "Error while executing request preprocessor"); req.log.error(error, "Error while executing request preprocessor");
handleInternalError(error as Error, req, res, "proxy_preprocessor_error"); handleInternalError(error as Error, req, res);
} }
}; };
}; };
@@ -1,14 +1,12 @@
import { Request } from "express"; import { Request } from "express";
import { z } from "zod"; import { z } from "zod";
import { config } from "../../../config";
import { OpenAIPromptMessage } from "../../../tokenization";
import { isCompletionRequest } from "../common"; import { isCompletionRequest } from "../common";
import { RequestPreprocessor } from "."; import { RequestPreprocessor } from ".";
import { OpenAIPromptMessage } from "../../../tokenization/openai";
/** const CLAUDE_OUTPUT_MAX = config.maxOutputTokensAnthropic;
* The maximum number of tokens an Anthropic prompt can have before we switch to const OPENAI_OUTPUT_MAX = config.maxOutputTokensOpenAI;
* the larger claude-100k context model.
*/
const CLAUDE_100K_TOKEN_THRESHOLD = 8200;
// https://console.anthropic.com/docs/api/reference#-v1-complete // https://console.anthropic.com/docs/api/reference#-v1-complete
const AnthropicV1CompleteSchema = z.object({ const AnthropicV1CompleteSchema = z.object({
@@ -17,7 +15,10 @@ const AnthropicV1CompleteSchema = z.object({
required_error: required_error:
"No prompt found. Are you sending an OpenAI-formatted request to the Claude endpoint?", "No prompt found. Are you sending an OpenAI-formatted request to the Claude endpoint?",
}), }),
max_tokens_to_sample: z.coerce.number(), max_tokens_to_sample: z.coerce
.number()
.int()
.transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
stop_sequences: z.array(z.string()).optional(), stop_sequences: z.array(z.string()).optional(),
stream: z.boolean().optional().default(false), stream: z.boolean().optional().default(false),
temperature: z.coerce.number().optional().default(1), temperature: z.coerce.number().optional().default(1),
@@ -38,6 +39,8 @@ const OpenAIV1ChatCompletionSchema = z.object({
{ {
required_error: required_error:
"No prompt found. Are you sending an Anthropic-formatted request to the OpenAI endpoint?", "No prompt found. Are you sending an Anthropic-formatted request to the OpenAI endpoint?",
invalid_type_error:
"Messages were not formatted correctly. Refer to the OpenAI Chat API documentation for more information.",
} }
), ),
temperature: z.number().optional().default(1), temperature: z.number().optional().default(1),
@@ -51,7 +54,12 @@ const OpenAIV1ChatCompletionSchema = z.object({
.optional(), .optional(),
stream: z.boolean().optional().default(false), stream: z.boolean().optional().default(false),
stop: z.union([z.string(), z.array(z.string())]).optional(), stop: z.union([z.string(), z.array(z.string())]).optional(),
max_tokens: z.coerce.number().optional(), max_tokens: z.coerce
.number()
.int()
.optional()
.default(16)
.transform((v) => Math.min(v, OPENAI_OUTPUT_MAX)),
frequency_penalty: z.number().optional().default(0), frequency_penalty: z.number().optional().default(0),
presence_penalty: z.number().optional().default(0), presence_penalty: z.number().optional().default(0),
logit_bias: z.any().optional(), logit_bias: z.any().optional(),
@@ -61,20 +69,19 @@ const OpenAIV1ChatCompletionSchema = z.object({
/** Transforms an incoming request body to one that matches the target API. */ /** Transforms an incoming request body to one that matches the target API. */
export const transformOutboundPayload: RequestPreprocessor = async (req) => { export const transformOutboundPayload: RequestPreprocessor = async (req) => {
const sameService = req.inboundApi === req.outboundApi; const sameService = req.inboundApi === req.outboundApi;
const alreadyTransformed = req.retryCount > 0;
const notTransformable = !isCompletionRequest(req); const notTransformable = !isCompletionRequest(req);
if (notTransformable) { if (alreadyTransformed || notTransformable) {
return; return;
} }
if (sameService) { if (sameService) {
// Just validate, don't transform.
const validator = const validator =
req.outboundApi === "openai" req.outboundApi === "openai"
? OpenAIV1ChatCompletionSchema ? OpenAIV1ChatCompletionSchema
: AnthropicV1CompleteSchema; : AnthropicV1CompleteSchema;
const result = validator.safeParse(req.body); const result = validator.safeParse(req.body);
if (!result.success) { if (!result.success) {
req.log.error( req.log.error(
{ issues: result.error.issues, body: req.body }, { issues: result.error.issues, body: req.body },
@@ -82,14 +89,12 @@ export const transformOutboundPayload: RequestPreprocessor = async (req) => {
); );
throw result.error; throw result.error;
} }
req.body = result.data;
validatePromptSize(req);
return; return;
} }
if (req.inboundApi === "openai" && req.outboundApi === "anthropic") { if (req.inboundApi === "openai" && req.outboundApi === "anthropic") {
req.body = openaiToAnthropic(req.body, req); req.body = await openaiToAnthropic(req.body, req);
validatePromptSize(req);
return; return;
} }
@@ -98,7 +103,7 @@ export const transformOutboundPayload: RequestPreprocessor = async (req) => {
); );
}; };
function openaiToAnthropic(body: any, req: Request) { async function openaiToAnthropic(body: any, req: Request) {
const result = OpenAIV1ChatCompletionSchema.safeParse(body); const result = OpenAIV1ChatCompletionSchema.safeParse(body);
if (!result.success) { if (!result.success) {
req.log.error( req.log.error(
@@ -118,26 +123,6 @@ function openaiToAnthropic(body: any, req: Request) {
const { messages, ...rest } = result.data; const { messages, ...rest } = result.data;
const prompt = openAIMessagesToClaudePrompt(messages); const prompt = openAIMessagesToClaudePrompt(messages);
// No longer defaulting to `claude-v1.2` because it seems to be in the process
// of being deprecated. `claude-v1` is the new default.
// If you have keys that can still use `claude-v1.2`, you can set the
// CLAUDE_BIG_MODEL and CLAUDE_SMALL_MODEL environment variables in your .env
// file.
const CLAUDE_BIG = process.env.CLAUDE_BIG_MODEL || "claude-v1-100k";
const CLAUDE_SMALL = process.env.CLAUDE_SMALL_MODEL || "claude-v1";
const contextTokens = Number(req.promptTokens ?? 0) + Number(rest.max_tokens);
const model =
(contextTokens ?? 0) > CLAUDE_100K_TOKEN_THRESHOLD
? CLAUDE_BIG
: CLAUDE_SMALL;
req.log.debug(
{ contextTokens, model, CLAUDE_100K_TOKEN_THRESHOLD },
"Selected Claude model"
);
let stops = rest.stop let stops = rest.stop
? Array.isArray(rest.stop) ? Array.isArray(rest.stop)
? rest.stop ? rest.stop
@@ -153,7 +138,11 @@ function openaiToAnthropic(body: any, req: Request) {
return { return {
...rest, ...rest,
model, // Model may be overridden in `calculate-context-size.ts` to avoid having
// a circular dependency (`calculate-context-size.ts` needs an already-
// transformed request body to count tokens, but this function would like
// to know the count to select a model).
model: process.env.CLAUDE_SMALL_MODEL || "claude-v1",
prompt: prompt, prompt: prompt,
max_tokens_to_sample: rest.max_tokens, max_tokens_to_sample: rest.max_tokens,
stop_sequences: stops, stop_sequences: stops,
@@ -181,41 +170,3 @@ export function openAIMessagesToClaudePrompt(messages: OpenAIPromptMessage[]) {
.join("") + "\n\nAssistant:" .join("") + "\n\nAssistant:"
); );
} }
function validatePromptSize(req: Request) {
const promptTokens = req.promptTokens || 0;
const model = req.body.model;
let maxTokensForModel = 0;
if (model.match(/gpt-3.5/)) {
maxTokensForModel = 4096;
} else if (model.match(/gpt-4/)) {
maxTokensForModel = 8192;
} else if (model.match(/gpt-4-32k/)) {
maxTokensForModel = 32768;
} else if (model.match(/claude-(?:instant-)?v1(?:\.\d)?(?:-100k)/)) {
// Claude models don't throw an error if you exceed the token limit and
// instead just become extremely slow and give schizo results, so we will be
// more conservative with the token limit for them.
maxTokensForModel = 100000 * 0.98;
} else if (model.match(/claude-(?:instant-)?v1(?:\.\d)?$/)) {
maxTokensForModel = 9000 * 0.98;
} else {
// I don't trust my regular expressions enough to throw an error here so
// we just log a warning and allow 100k tokens.
req.log.warn({ model }, "Unknown model, using 100k token limit.");
maxTokensForModel = 100000;
}
if (req.debug) {
req.debug.calculated_max_tokens = maxTokensForModel;
}
z.number()
.max(
maxTokensForModel,
`Prompt is too long for model ${model} (${promptTokens} tokens, max ${maxTokensForModel})`
)
.parse(promptTokens);
req.log.debug({ promptTokens, maxTokensForModel }, "Prompt size validated");
}
@@ -282,7 +282,7 @@ function convertEventsToFinalResponse(events: string[], req: Request) {
* the final SSE event before the "DONE" event, so we can reuse that * the final SSE event before the "DONE" event, so we can reuse that
*/ */
const lastEvent = events[events.length - 2].toString(); const lastEvent = events[events.length - 2].toString();
const data = JSON.parse(lastEvent.slice("data: ".length)); const data = JSON.parse(lastEvent.slice(lastEvent.indexOf("data: ") + "data: ".length));
const response: AnthropicCompletionResponse = { const response: AnthropicCompletionResponse = {
...data, ...data,
log_id: req.id, log_id: req.id,
+13 -21
View File
@@ -269,7 +269,7 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
} }
} else if (statusCode === 401) { } else if (statusCode === 401) {
// Key is invalid or was revoked // Key is invalid or was revoked
keyPool.disable(req.key!); keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `API key is invalid or revoked. ${tryAgainMessage}`; errorPayload.proxy_note = `API key is invalid or revoked. ${tryAgainMessage}`;
} else if (statusCode === 429) { } else if (statusCode === 429) {
// OpenAI uses this for a bunch of different rate-limiting scenarios. // OpenAI uses this for a bunch of different rate-limiting scenarios.
@@ -341,11 +341,8 @@ function maybeHandleMissingPreambleError(
"Request failed due to missing preamble. Key will be marked as such for subsequent requests." "Request failed due to missing preamble. Key will be marked as such for subsequent requests."
); );
keyPool.update(req.key!, { requiresPreamble: true }); keyPool.update(req.key!, { requiresPreamble: true });
if (config.queueMode !== "none") { reenqueueRequest(req);
reenqueueRequest(req); throw new RetryableError("Claude request re-enqueued to add preamble.");
throw new RetryableError("Claude request re-enqueued to add preamble.");
}
errorPayload.proxy_note = `This Claude key requires special prompt formatting. Try again; the proxy will reformat your prompt next time.`;
} else { } else {
errorPayload.proxy_note = `Proxy received unrecognized error from Anthropic. Check the specific error for more information.`; errorPayload.proxy_note = `Proxy received unrecognized error from Anthropic. Check the specific error for more information.`;
} }
@@ -357,11 +354,8 @@ function handleAnthropicRateLimitError(
) { ) {
if (errorPayload.error?.type === "rate_limit_error") { if (errorPayload.error?.type === "rate_limit_error") {
keyPool.markRateLimited(req.key!); keyPool.markRateLimited(req.key!);
if (config.queueMode !== "none") { reenqueueRequest(req);
reenqueueRequest(req); throw new RetryableError("Claude rate-limited request re-enqueued.");
throw new RetryableError("Claude rate-limited request re-enqueued.");
}
errorPayload.proxy_note = `There are too many in-flight requests for this key. Try again later.`;
} else { } else {
errorPayload.proxy_note = `Unrecognized rate limit error from Anthropic. Key may be over quota.`; errorPayload.proxy_note = `Unrecognized rate limit error from Anthropic. Key may be over quota.`;
} }
@@ -375,26 +369,24 @@ function handleOpenAIRateLimitError(
const type = errorPayload.error?.type; const type = errorPayload.error?.type;
if (type === "insufficient_quota") { if (type === "insufficient_quota") {
// Billing quota exceeded (key is dead, disable it) // Billing quota exceeded (key is dead, disable it)
keyPool.disable(req.key!); keyPool.disable(req.key!, "quota");
errorPayload.proxy_note = `Assigned key's quota has been exceeded. ${tryAgainMessage}`; errorPayload.proxy_note = `Assigned key's quota has been exceeded. ${tryAgainMessage}`;
} else if (type === "access_terminated") { } else if (type === "access_terminated") {
// Account banned (key is dead, disable it) // Account banned (key is dead, disable it)
keyPool.disable(req.key!); keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `Assigned key has been banned by OpenAI for policy violations. ${tryAgainMessage}`; errorPayload.proxy_note = `Assigned key has been banned by OpenAI for policy violations. ${tryAgainMessage}`;
} else if (type === "billing_not_active") { } else if (type === "billing_not_active") {
// Billing is not active (key is dead, disable it) // Billing is not active (key is dead, disable it)
keyPool.disable(req.key!); keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `Assigned key was deactivated by OpenAI. ${tryAgainMessage}`; errorPayload.proxy_note = `Assigned key was deactivated by OpenAI. ${tryAgainMessage}`;
} else if (type === "requests" || type === "tokens") { } else if (type === "requests" || type === "tokens") {
// Per-minute request or token rate limit is exceeded, which we can retry // Per-minute request or token rate limit is exceeded, which we can retry
keyPool.markRateLimited(req.key!); keyPool.markRateLimited(req.key!);
if (config.queueMode !== "none") { // I'm aware this is confusing -- throwing this class of error will cause
reenqueueRequest(req); // the proxy response handler to return without terminating the request,
// This is confusing, but it will bubble up to the top-level response // so that it can be placed back in the queue.
// handler and cause the request to go back into the request queue. reenqueueRequest(req);
throw new RetryableError("Rate-limited request re-enqueued."); throw new RetryableError("Rate-limited request re-enqueued.");
}
errorPayload.proxy_note = `Assigned key's '${type}' rate limit has been exceeded. Try again later.`;
} else { } else {
// OpenAI probably overloaded // OpenAI probably overloaded
errorPayload.proxy_note = `This is likely a temporary error with OpenAI. Try again in a few seconds.`; errorPayload.proxy_note = `This is likely a temporary error with OpenAI. Try again in a few seconds.`;
-2
View File
@@ -14,7 +14,6 @@ import {
finalizeBody, finalizeBody,
languageFilter, languageFilter,
limitCompletions, limitCompletions,
limitOutputTokens,
removeOriginHeaders, removeOriginHeaders,
} from "./middleware/request"; } from "./middleware/request";
import { import {
@@ -93,7 +92,6 @@ const rewriteRequest = (
const rewriterPipeline = [ const rewriterPipeline = [
addKey, addKey,
languageFilter, languageFilter,
limitOutputTokens,
limitCompletions, limitCompletions,
blockZoomerOrigins, blockZoomerOrigins,
removeOriginHeaders, removeOriginHeaders,
+25 -31
View File
@@ -16,7 +16,6 @@
*/ */
import type { Handler, Request } from "express"; import type { Handler, Request } from "express";
import { config, DequeueMode } from "../config";
import { keyPool, SupportedModel } from "../key-management"; import { keyPool, SupportedModel } from "../key-management";
import { logger } from "../logger"; import { logger } from "../logger";
import { AGNAI_DOT_CHAT_IP } from "./rate-limit"; import { AGNAI_DOT_CHAT_IP } from "./rate-limit";
@@ -27,31 +26,39 @@ export type QueuePartition = "claude" | "turbo" | "gpt-4";
const queue: Request[] = []; const queue: Request[] = [];
const log = logger.child({ module: "request-queue" }); const log = logger.child({ module: "request-queue" });
let dequeueMode: DequeueMode = "fair";
/** Maximum number of queue slots for Agnai.chat requests. */ /** Maximum number of queue slots for Agnai.chat requests. */
const AGNAI_CONCURRENCY_LIMIT = 15; const AGNAI_CONCURRENCY_LIMIT = 15;
/** Maximum number of queue slots for individual users. */ /** Maximum number of queue slots for individual users. */
const USER_CONCURRENCY_LIMIT = 1; const USER_CONCURRENCY_LIMIT = 1;
const sameIpPredicate = (incoming: Request) => (queued: Request) => /**
queued.ip === incoming.ip; * Returns a unique identifier for a request. This is used to determine if a
* request is already in the queue.
* This can be (in order of preference):
* - user token assigned by the proxy operator
* - x-risu-tk header, if the request is from RisuAI.xyz
* - IP address
*/
function getIdentifier(req: Request) {
if (req.user) {
return req.user.token;
}
if (req.risuToken) {
return req.risuToken;
}
return req.ip;
}
const sameUserPredicate = (incoming: Request) => (queued: Request) => { const sameUserPredicate = (incoming: Request) => (queued: Request) => {
const incomingUser = incoming.user ?? { token: incoming.ip }; const queuedId = getIdentifier(queued);
const queuedUser = queued.user ?? { token: queued.ip }; const incomingId = getIdentifier(incoming);
return queuedUser.token === incomingUser.token; return queuedId === incomingId;
}; };
export function enqueue(req: Request) { export function enqueue(req: Request) {
let enqueuedRequestCount = 0; const enqueuedRequestCount = queue.filter(sameUserPredicate(req)).length;
let isGuest = req.user?.token === undefined; let isGuest = req.user?.token === undefined;
if (isGuest) {
enqueuedRequestCount = queue.filter(sameIpPredicate(req)).length;
} else {
enqueuedRequestCount = queue.filter(sameUserPredicate(req)).length;
}
// All Agnai.chat requests come from the same IP, so we allow them to have // All Agnai.chat requests come from the same IP, so we allow them to have
// more spots in the queue. Can't make it unlimited because people will // more spots in the queue. Can't make it unlimited because people will
// intentionally abuse it. // intentionally abuse it.
@@ -150,18 +157,9 @@ export function dequeue(partition: QueuePartition): Request | undefined {
return undefined; return undefined;
} }
let req: Request; const req = modelQueue.reduce((prev, curr) =>
prev.startTime < curr.startTime ? prev : curr
if (dequeueMode === "fair") { );
// Dequeue the request that has been waiting the longest
req = modelQueue.reduce((prev, curr) =>
prev.startTime < curr.startTime ? prev : curr
);
} else {
// Dequeue a random request
const index = Math.floor(Math.random() * modelQueue.length);
req = modelQueue[index];
}
queue.splice(queue.indexOf(req), 1); queue.splice(queue.indexOf(req), 1);
if (req.onAborted) { if (req.onAborted) {
@@ -283,10 +281,6 @@ export function getQueueLength(partition: QueuePartition | "all" = "all") {
export function createQueueMiddleware(proxyMiddleware: Handler): Handler { export function createQueueMiddleware(proxyMiddleware: Handler): Handler {
return (req, res, next) => { return (req, res, next) => {
if (config.queueMode === "none") {
return proxyMiddleware(req, res, next);
}
req.proceed = () => { req.proceed = () => {
proxyMiddleware(req, res, next); proxyMiddleware(req, res, next);
}; };
+8 -3
View File
@@ -2,6 +2,7 @@ import { Request, Response, NextFunction } from "express";
import { config } from "../config"; import { config } from "../config";
export const AGNAI_DOT_CHAT_IP = "157.230.249.32"; export const AGNAI_DOT_CHAT_IP = "157.230.249.32";
const RATE_LIMIT_ENABLED = Boolean(config.modelRateLimit); const RATE_LIMIT_ENABLED = Boolean(config.modelRateLimit);
const RATE_LIMIT = Math.max(1, config.modelRateLimit); const RATE_LIMIT = Math.max(1, config.modelRateLimit);
const ONE_MINUTE_MS = 60 * 1000; const ONE_MINUTE_MS = 60 * 1000;
@@ -52,7 +53,11 @@ export const getUniqueIps = () => {
return lastAttempts.size; return lastAttempts.size;
}; };
export const ipLimiter = (req: Request, res: Response, next: NextFunction) => { export const ipLimiter = async (
req: Request,
res: Response,
next: NextFunction
) => {
if (!RATE_LIMIT_ENABLED) { if (!RATE_LIMIT_ENABLED) {
next(); next();
return; return;
@@ -68,7 +73,7 @@ export const ipLimiter = (req: Request, res: Response, next: NextFunction) => {
// If user is authenticated, key rate limiting by their token. Otherwise, key // If user is authenticated, key rate limiting by their token. Otherwise, key
// rate limiting by their IP address. Mitigates key sharing. // rate limiting by their IP address. Mitigates key sharing.
const rateLimitKey = req.user?.token || req.ip; const rateLimitKey = req.user?.token || req.risuToken || req.ip;
const { remaining, reset } = getStatus(rateLimitKey); const { remaining, reset } = getStatus(rateLimitKey);
res.set("X-RateLimit-Limit", config.modelRateLimit.toString()); res.set("X-RateLimit-Limit", config.modelRateLimit.toString());
@@ -83,7 +88,7 @@ export const ipLimiter = (req: Request, res: Response, next: NextFunction) => {
type: "proxy_rate_limited", type: "proxy_rate_limited",
message: `This proxy is rate limited to ${ message: `This proxy is rate limited to ${
config.modelRateLimit config.modelRateLimit
} model requests per minute. Please try again in ${Math.ceil( } prompts per minute. Please try again in ${Math.ceil(
tryAgainInMs / 1000 tryAgainInMs / 1000
)} seconds.`, )} seconds.`,
}, },
+17 -7
View File
@@ -6,14 +6,24 @@ equivalent OpenAI requests. */
import * as express from "express"; import * as express from "express";
import { gatekeeper } from "./auth/gatekeeper"; import { gatekeeper } from "./auth/gatekeeper";
import { checkRisuToken } from "./auth/check-risu-token";
import { kobold } from "./kobold"; import { kobold } from "./kobold";
import { openai } from "./openai"; import { openai } from "./openai";
import { anthropic } from "./anthropic"; import { anthropic } from "./anthropic";
const router = express.Router(); const proxyRouter = express.Router();
proxyRouter.use(
router.use(gatekeeper); express.json({ limit: "1536kb" }),
router.use("/kobold", kobold); express.urlencoded({ extended: true, limit: "1536kb" })
router.use("/openai", openai); );
router.use("/anthropic", anthropic); proxyRouter.use(gatekeeper);
export { router as proxyRouter }; proxyRouter.use(checkRisuToken);
proxyRouter.use((req, _res, next) => {
req.startTime = Date.now();
req.retryCount = 0;
next();
});
proxyRouter.use("/kobold", kobold);
proxyRouter.use("/openai", openai);
proxyRouter.use("/anthropic", anthropic);
export { proxyRouter as proxyRouter };
+10 -21
View File
@@ -2,6 +2,7 @@ import { assertConfigIsValid, config } from "./config";
import "source-map-support/register"; import "source-map-support/register";
import express from "express"; import express from "express";
import cors from "cors"; import cors from "cors";
import path from "path";
import pinoHttp from "pino-http"; import pinoHttp from "pino-http";
import childProcess from "child_process"; import childProcess from "child_process";
import { logger } from "./logger"; import { logger } from "./logger";
@@ -35,10 +36,6 @@ app.use(
'res.headers["set-cookie"]', 'res.headers["set-cookie"]',
"req.headers.authorization", "req.headers.authorization",
'req.headers["x-api-key"]', 'req.headers["x-api-key"]',
'req.headers["x-forwarded-for"]',
'req.headers["x-real-ip"]',
'req.headers["true-client-ip"]',
'req.headers["cf-connecting-ip"]',
// Don't log the prompt text on transform errors // Don't log the prompt text on transform errors
"body.messages", "body.messages",
"body.prompt", "body.prompt",
@@ -48,25 +45,19 @@ app.use(
}) })
); );
app.get("/health", (_req, res) => res.sendStatus(200));
app.use((req, _res, next) => {
req.startTime = Date.now();
req.retryCount = 0;
next();
});
app.use(cors());
app.use(
express.json({ limit: "10mb" }),
express.urlencoded({ extended: true, limit: "10mb" })
);
// TODO: Detect (or support manual configuration of) whether the app is behind // TODO: Detect (or support manual configuration of) whether the app is behind
// a load balancer/reverse proxy, which is necessary to determine request IP // a load balancer/reverse proxy, which is necessary to determine request IP
// addresses correctly. // addresses correctly.
app.set("trust proxy", true); app.set("trust proxy", true);
// routes app.set("view engine", "ejs");
app.set("views", path.join(__dirname, "views"));
app.get("/health", (_req, res) => res.sendStatus(200));
app.use(cors());
app.use(checkOrigin); app.use(checkOrigin);
// routes
app.get("/", handleInfoPage); app.get("/", handleInfoPage);
app.use("/admin", adminRouter); app.use("/admin", adminRouter);
app.use("/proxy", proxyRouter); app.use("/proxy", proxyRouter);
@@ -111,10 +102,8 @@ async function start() {
logQueue.start(); logQueue.start();
} }
if (config.queueMode !== "none") { logger.info("Starting request queue...");
logger.info("Starting request queue..."); startRequestQueue();
startRequestQueue();
}
app.listen(PORT, async () => { app.listen(PORT, async () => {
logger.info({ port: PORT }, "Now listening for connections."); logger.info({ port: PORT }, "Now listening for connections.");
-160
View File
@@ -1,160 +0,0 @@
import { spawn, ChildProcess } from "child_process";
import { join } from "path";
import { logger } from "../logger";
const TOKENIZER_SOCKET = "tcp://localhost:5555";
const log = logger.child({ module: "claude-ipc" });
const pythonLog = logger.child({ module: "claude-python" });
let tokenizer: ChildProcess;
let initialized = false;
let socket: any; // zeromq.Dealer, not sure how to import it safely as it is optional
export async function init() {
log.info("Initializing Claude tokenizer IPC");
try {
tokenizer = await launchTokenizer();
const zmq = await import("zeromq");
socket = new zmq.Dealer({ sendTimeout: 500 });
socket.connect(TOKENIZER_SOCKET);
await socket.send(["init"]);
const response = await socket.receive();
if (response.toString() !== "ok") {
throw new Error("Unexpected init response");
}
// Start message pump
processMessages();
// Test tokenizer
const result = await requestTokenCount({
requestId: "init-test",
prompt: "test prompt",
});
if (result !== 2) {
log.error({ result }, "Unexpected test token count");
throw new Error("Unexpected test token count");
}
initialized = true;
} catch (err) {
log.error({ err: err.message }, "Failed to initialize Claude tokenizer");
if (process.env.NODE_ENV !== "production") {
console.error(
`\nClaude tokenizer failed to initialize.\nIf you want to use the tokenizer, see the Optional Dependencies documentation.\n`
);
}
return false;
}
log.info("Claude tokenizer IPC ready");
return true;
}
const pendingRequests = new Map<
string,
{ resolve: (tokens: number) => void }
>();
export async function requestTokenCount({
requestId,
prompt,
}: {
requestId: string;
prompt: string;
}) {
if (!socket) {
throw new Error("Claude tokenizer is not initialized");
}
log.debug({ requestId, chars: prompt.length }, "Requesting token count");
await socket.send(["tokenize", requestId, prompt]);
log.debug({ requestId }, "Waiting for socket response");
return new Promise<number>(async (resolve, reject) => {
const resolveFn = (tokens: number) => {
log.debug({ requestId, tokens }, "Received token count");
pendingRequests.delete(requestId);
resolve(tokens);
};
pendingRequests.set(requestId, { resolve: resolveFn });
const timeout = initialized ? 500 : 10000;
setTimeout(() => {
if (pendingRequests.has(requestId)) {
pendingRequests.delete(requestId);
const err = "Tokenizer deadline exceeded";
log.warn({ requestId }, err);
reject(new Error(err));
}
}, timeout);
});
}
async function processMessages() {
if (!socket) {
throw new Error("Claude tokenizer is not initialized");
}
log.debug("Starting message loop");
for await (const [requestId, tokens] of socket) {
const request = pendingRequests.get(requestId.toString());
if (!request) {
log.error({ requestId }, "No pending request found for incoming message");
continue;
}
request.resolve(Number(tokens.toString()));
}
}
async function launchTokenizer() {
return new Promise<ChildProcess>((resolve, reject) => {
let resolved = false;
const python = process.platform === "win32" ? "python" : "python3";
const proc = spawn(python, [
"-u",
join(__dirname, "tokenization", "claude-tokenizer.py"),
]);
if (!proc) {
reject(new Error("Failed to spawn Claude tokenizer"));
}
function cleanup() {
socket?.close();
socket = undefined!;
tokenizer = undefined!;
}
proc.stdout!.on("data", (data) => {
pythonLog.info(data.toString().trim());
});
proc.stderr!.on("data", (data) => {
pythonLog.error(data.toString().trim());
});
proc.on("error", (err) => {
pythonLog.error({ err }, "Claude tokenizer error");
cleanup();
if (!resolved) {
resolved = true;
reject(err);
}
});
proc.on("close", (code) => {
pythonLog.info(`Claude tokenizer exited with code ${code}`);
cleanup();
if (code !== 0 && !resolved) {
resolved = true;
reject(new Error("Claude tokenizer exited immediately"));
}
});
// Wait a moment to catch any immediate errors (missing imports, etc)
setTimeout(() => {
if (!resolved) {
resolved = true;
resolve(proc);
}
}, 200);
});
}
-54
View File
@@ -1,54 +0,0 @@
"""
This is a small process running alongside the main NodeJS server intended to
tokenize prompts for Claude, as currently Anthropic only ships a Python
implemetnation for their tokenizer.
ZeroMQ is used for IPC between the NodeJS server and this process.
"""
import zmq
import anthropic
def create_socket():
context = zmq.Context()
socket = context.socket(zmq.ROUTER)
socket.bind("tcp://*:5555")
return context, socket
def init(socket):
print("claude-tokenizer.py: starting")
try:
while True:
message = socket.recv_multipart()
routing_id, command = message
if command == b"init":
print("claude-tokenizer.py: initialized")
socket.send_multipart([routing_id, b"ok"])
break
except Exception as e:
print("claude-tokenizer.py: failed to initialize")
return
message_processor(socket)
def message_processor(socket):
while True:
try:
message = socket.recv_multipart()
routing_id, command, request_id, payload = message
payload = payload.decode("utf-8")
if command == b"exit":
print("claude-tokenizer.py: exiting")
break
elif command == b"tokenize":
token_count = anthropic.count_tokens(payload)
socket.send_multipart([routing_id, request_id, str(token_count).encode("utf-8")])
else:
print("claude-tokenizer.py: unknown message type")
except Exception as e:
print(f"claude-tokenizer.py: failed to process message ({e})")
break
if __name__ == "__main__":
context, socket = create_socket()
init(socket)
socket.close()
context.term()
+27
View File
@@ -0,0 +1,27 @@
import { getTokenizer } from "@anthropic-ai/tokenizer";
import { Tiktoken } from "tiktoken/lite";
let encoder: Tiktoken;
export function init() {
// they export a `countTokens` function too but it instantiates a new
// tokenizer every single time and it is not fast...
encoder = getTokenizer();
return true;
}
export function getTokenCount(prompt: string, _model: string) {
// Don't try tokenizing if the prompt is massive to prevent DoS.
// 500k characters should be sufficient for all supported models.
if (prompt.length > 500000) {
return {
tokenizer: "length fallback",
token_count: 100000,
};
}
return {
tokenizer: "@anthropic-ai/tokenizer",
token_count: encoder.encode(prompt.normalize("NFKC"), "all").length,
};
}
+1
View File
@@ -1 +1,2 @@
export { OpenAIPromptMessage } from "./openai";
export { init, countTokens } from "./tokenizer"; export { init, countTokens } from "./tokenizer";
+6 -5
View File
@@ -12,7 +12,7 @@ export function init() {
return true; return true;
} }
// Implmentation based and tested against: // Tested against:
// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb // https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
export function getTokenCount(messages: any[], model: string) { export function getTokenCount(messages: any[], model: string) {
@@ -28,10 +28,11 @@ export function getTokenCount(messages: any[], model: string) {
for (const key of Object.keys(message)) { for (const key of Object.keys(message)) {
{ {
const value = message[key]; const value = message[key];
// Break if we get a huge message or exceed the token limit to prevent DoS // Break if we get a huge message or exceed the token limit to prevent
// 100k tokens allows for future 100k GPT-4 models and 250k characters is // DoS.
// just a sanity check // 100k tokens allows for future 100k GPT-4 models and 500k characters
if (value.length > 250000 || numTokens > 100000) { // is just a sanity check
if (value.length > 500000 || numTokens > 100000) {
numTokens = 100000; numTokens = 100000;
return { return {
tokenizer: "tiktoken (prompt length limit exceeded)", tokenizer: "tiktoken (prompt length limit exceeded)",
+12 -83
View File
@@ -1,35 +1,21 @@
import { Request } from "express"; import { Request } from "express";
import childProcess from "child_process";
import { config } from "../config"; import { config } from "../config";
import { logger } from "../logger";
import { import {
init as initIpc, init as initClaude,
requestTokenCount as requestClaudeTokenCount, getTokenCount as getClaudeTokenCount,
} from "./claude-ipc"; } from "./claude";
import { import {
init as initEncoder, init as initOpenAi,
getTokenCount as getOpenAITokenCount, getTokenCount as getOpenAITokenCount,
OpenAIPromptMessage, OpenAIPromptMessage,
} from "./openai"; } from "./openai";
let canTokenizeClaude = false;
export async function init() { export async function init() {
if (config.anthropicKey) { if (config.anthropicKey) {
if (!isPythonInstalled()) { initClaude();
const skipWarning = !!process.env.DISABLE_MISSING_PYTHON_WARNING;
process.env.MISSING_PYTHON_WARNING = skipWarning ? "" : "true";
} else {
canTokenizeClaude = await initIpc();
if (!canTokenizeClaude) {
logger.warn(
"Anthropic key is set, but tokenizer is not available. Claude prompts will use a naive estimate for token count."
);
}
}
} }
if (config.openaiKey) { if (config.openaiKey) {
initEncoder(); initOpenAi();
} }
} }
@@ -50,51 +36,15 @@ export async function countTokens({
prompt, prompt,
}: TokenCountRequest): Promise<TokenCountResult> { }: TokenCountRequest): Promise<TokenCountResult> {
const time = process.hrtime(); const time = process.hrtime();
switch (service) { switch (service) {
case "anthropic": case "anthropic":
if (!canTokenizeClaude) {
const result = guesstimateTokens(prompt);
return {
token_count: result,
tokenizer: "guesstimate (claude-ipc disabled)",
tokenization_duration_ms: getElapsedMs(time),
};
}
// If the prompt is absolutely massive (possibly malicious) don't even try
if (prompt.length > 500000) {
return {
token_count: guesstimateTokens(JSON.stringify(prompt)),
tokenizer: "guesstimate (prompt too long)",
tokenization_duration_ms: getElapsedMs(time),
};
}
try {
const result = await requestClaudeTokenCount({
requestId: String(req.id),
prompt,
});
return {
token_count: result,
tokenizer: "claude-ipc",
tokenization_duration_ms: getElapsedMs(time),
};
} catch (e: any) {
req.log.error("Failed to tokenize with claude_tokenizer", e);
const result = guesstimateTokens(prompt);
return {
token_count: result,
tokenizer: `guesstimate (claude-ipc failed: ${e.message})`,
tokenization_duration_ms: getElapsedMs(time),
};
}
case "openai":
const result = getOpenAITokenCount(prompt, req.body.model);
return { return {
...result, ...getClaudeTokenCount(prompt, req.body.model),
tokenization_duration_ms: getElapsedMs(time),
};
case "openai":
return {
...getOpenAITokenCount(prompt, req.body.model),
tokenization_duration_ms: getElapsedMs(time), tokenization_duration_ms: getElapsedMs(time),
}; };
default: default:
@@ -106,24 +56,3 @@ function getElapsedMs(time: [number, number]) {
const diff = process.hrtime(time); const diff = process.hrtime(time);
return diff[0] * 1000 + diff[1] / 1e6; return diff[0] * 1000 + diff[1] / 1e6;
} }
/**
 * Cheap character-count-based estimate of the Claude token count for a
 * prompt, used when the real tokenizer is unavailable or the prompt is
 * too large to tokenize safely.
 */
function guesstimateTokens(prompt: string) {
  // From Anthropic's docs:
  // The maximum length of prompt that Claude can see is its context window.
  // Claude's context window is currently ~6500 words / ~8000 tokens /
  // ~28000 Unicode characters.
  // That implies ~0.28 tokens per character, but in practice that is a
  // substantial underestimate in some cases, so pad the ratio upward.
  const TOKENS_PER_CHAR = 0.325;
  return Math.ceil(prompt.length * TOKENS_PER_CHAR);
}
/**
 * Probes for a usable Python interpreter by invoking `--version` on the
 * platform-appropriate binary name. Returns true when the command exits
 * successfully, false (with a debug log entry) otherwise.
 */
function isPythonInstalled() {
  const interpreter = process.platform === "win32" ? "python" : "python3";
  try {
    childProcess.execSync(`${interpreter} --version`, { stdio: "ignore" });
    return true;
  } catch (err) {
    logger.debug({ err: err.message }, "Python not installed.");
    return false;
  }
}
+3
View File
@@ -10,6 +10,8 @@ declare global {
inboundApi: AIService | "kobold"; inboundApi: AIService | "kobold";
/** Denotes the format of the request being proxied to the API. */ /** Denotes the format of the request being proxied to the API. */
outboundApi: AIService; outboundApi: AIService;
/** If the request comes from a RisuAI.xyz user, this is their token. */
risuToken?: string;
user?: User; user?: User;
isStreaming?: boolean; isStreaming?: boolean;
startTime: number; startTime: number;
@@ -19,6 +21,7 @@ declare global {
proceed: () => void; proceed: () => void;
heartbeatInterval?: NodeJS.Timeout; heartbeatInterval?: NodeJS.Timeout;
promptTokens?: number; promptTokens?: number;
outputTokens?: number;
// TODO: remove later // TODO: remove later
debug: Record<string, any>; debug: Record<string, any>;
} }
+6
View File
@@ -0,0 +1,6 @@
<%# Shared admin footer partial: navigation links back to the admin index
    and logout, then closes the <body>/<html> elements opened by the
    admin-header partial. %>
<hr />
<footer>
  <a href="/admin">Index</a> | <a href="/admin/logout">Logout</a>
</footer>
</body>
</html>
+61
View File
@@ -0,0 +1,61 @@
<%# Shared admin header partial: emits the document head (CSRF token meta
    tag read by the client-side fetch() calls, page title, and the inline
    styles shared by all admin views) and opens <body>. The matching
    admin-footer partial closes the document. %>
<!DOCTYPE html>
<html>
<head>
  <meta charset="utf-8">
  <meta name="csrf-token" content="<%= csrfToken %>">
  <title><%= title %></title>
  <style>
    /* Pagination control rendered below the user table. */
    .pagination {
      list-style-type: none;
      padding: 0;
    }
    .pagination li {
      display: inline-block;
    }
    .pagination li a {
      display: block;
      padding: 0.5em 1em;
      text-decoration: none;
    }
    .pagination li.active a {
      background-color: #58739c;
      color: #fff;
    }
    table {
      border-collapse: collapse;
      border: 1px solid #ccc;
    }
    table td, table th {
      border: 1px solid #ccc;
      padding: 0.25em 0.5em;
    }
    /* Highlights the column currently used for sorting. */
    th.active {
      background-color: #e0e6f6;
    }
    /* Ban/unban action cells: make the whole cell a click target. */
    td.actions {
      padding: 0;
      text-align: center;
    }
    td.actions a {
      text-decoration: none;
      padding: 0.5em;
      height: 100%;
      width: 100%;
    }
    td.actions:hover {
      background-color: #ccc;
    }
    /* Collapse the table to a single column on narrow screens. */
    @media (max-width: 600px) {
      table {
        width: 100%;
      }
      table td, table th {
        display: block;
        width: 100%;
      }
    }
  </style>
</head>
<body style="font-family: sans-serif; background-color: #f0f0f0; padding: 1em;">
+23
View File
@@ -0,0 +1,23 @@
<%# Page-size selector partial. The chosen size is persisted in a "perPage"
    cookie scoped to /admin; a perPage value in the query string takes
    precedence over the cookie. %>
<div>
  <label for="pageSize">Page Size</label>
  <select id="pageSize" onchange="setPageSize(this.value)" style="margin-bottom: 1rem;">
    <option value="10" <% if (pageSize === 10) { %>selected<% } %>>10</option>
    <option value="20" <% if (pageSize === 20) { %>selected<% } %>>20</option>
    <option value="50" <% if (pageSize === 50) { %>selected<% } %>>50</option>
    <option value="100" <% if (pageSize === 100) { %>selected<% } %>>100</option>
    <option value="200" <% if (pageSize === 200) { %>selected<% } %>>200</option>
  </select>
</div>
<script>
  // Resolve the effective page size: query string first, then cookie,
  // defaulting to 10. Always returns a number (previously the cookie path
  // returned a raw string while the query-string path returned a number).
  function getPageSize() {
    var match =
      window.location.search.match(/perPage=(\d+)/) ||
      document.cookie.match(/perPage=(\d+)/);
    return match ? parseInt(match[1], 10) : 10;
  }
  function setPageSize(size) {
    document.cookie = "perPage=" + size + "; path=/admin";
    window.location.reload();
  }
  document.getElementById("pageSize").value = getPageSize();
</script>
+18
View File
@@ -0,0 +1,18 @@
<%- include("../_partials/admin-header", { title: "Create User - OAI Reverse Proxy Admin" }) %>
<!--
-->
<h1>Create User Token</h1>
<%# POSTing creates a new user token server-side; the page re-renders with
    newToken set and the fresh token first in recentUsers. %>
<form action="/admin/manage/create-user" method="post">
  <input type="hidden" name="_csrf" value="<%= csrfToken %>" />
  <input type="submit" value="Create" />
</form>
<% if (newToken) { %>
<p>Just created <code><%= recentUsers[0].token %></code>.</p>
<% } %>
<%# Fixed mismatched closing tag: this heading was closed with an h2 tag. %>
<h3>Recent Tokens</h3>
<ul>
  <% recentUsers.forEach(function(user) { %>
  <li><a href="/admin/manage/view-user/<%= user.token %>"><%= user.token %></a></li>
  <% }) %>
</ul>
<%- include("../_partials/admin-footer") %>
+28
View File
@@ -0,0 +1,28 @@
<%- include("../_partials/admin-header", { title: "Export Users - OAI Reverse Proxy Admin" }) %>
<h1>Export Users</h1>
<p>
  Export users to JSON. The JSON will be an array of objects under the key
  <code>users</code>. You can use this JSON to import users later.
</p>
<script>
  // Fetches the user list as a blob via XHR and triggers a browser
  // download by clicking a synthetic <a download> element.
  function exportUsers() {
    var request = new XMLHttpRequest();
    request.open("GET", "/admin/manage/export-users.json", true);
    request.responseType = "blob";
    request.addEventListener("load", function () {
      if (request.status !== 200) return;
      var payload = new Blob([request.response], { type: "application/json" });
      var downloadUrl = URL.createObjectURL(payload);
      var link = document.createElement("a");
      link.href = downloadUrl;
      link.download = "users.json";
      document.body.appendChild(link);
      link.click();
      link.remove();
    });
    request.send();
  }
</script>
<button onclick="exportUsers()">Export</button>
<%- include("../_partials/admin-footer") %>
+44
View File
@@ -0,0 +1,44 @@
<%- include("../_partials/admin-header", { title: "Import Users - OAI Reverse Proxy Admin" }) %>
<h1>Import Users</h1>
<p>
  Import users from JSON. The JSON should be an array of objects under the key
  <code>users</code>. Each object should have the following fields:
</p>
<ul>
  <li><code>token</code> (required): a unique identifier for the user</li>
  <li><code>ip</code> (optional): IP addresses the user has connected from</li>
  <li>
    <code>type</code> (optional): either <code>normal</code> or
    <code>special</code>
  </li>
  <li>
    <code>promptCount</code> (optional): the number of times the user has sent a
    prompt
  </li>
  <li>
    <code>tokenCount</code> (optional): the number of tokens the user has
    consumed (not yet implemented)
  </li>
  <li>
    <code>createdAt</code> (optional): the timestamp when the user was created
  </li>
  <li>
    <code>disabledAt</code> (optional): the timestamp when the user was disabled
  </li>
  <li>
    <code>disabledReason</code> (optional): the reason the user was disabled
  </li>
</ul>
<p>
  If a user with the same token already exists, the existing user will be
  updated with the new values.
</p>
<%# NOTE(review): the CSRF token rides in the query string here, presumably
    because the multipart body is not available to the CSRF middleware —
    confirm against the route setup. %>
<form action="/admin/manage/import-users?_csrf=<%= csrfToken %>" method="post" enctype="multipart/form-data">
  <input type="file" name="users" />
  <input type="submit" value="Import" />
</form>
<%# Fixed: removed a stray duplicate closing form tag that followed the
    form element. %>
<% if (imported > 0) { %>
<p>Imported <code><%= imported %></code> users.</p>
<% } %>
<%- include("../_partials/admin-footer") %>
+20
View File
@@ -0,0 +1,20 @@
<%- include("../_partials/admin-header", { title: "OAI Reverse Proxy Admin" }) %>
<h1>OAI Reverse Proxy Admin</h1>
<%# Warn loudly when no persistence backend is configured, since user
    records will be lost when the server restarts. %>
<% if (!isPersistenceEnabled) { %>
<p style="color: red; background-color: #eedddd; padding: 1em">
  <strong>⚠️ Users will be lost when the server restarts because persistence is
  not configured.</strong><br />
  <br />Be sure to export your users and import them again after restarting the
  server if you want to keep them.<br />
  <br /> See the <a target="_blank"
  href="https://gitgud.io/khanon/oai-reverse-proxy/-/blob/main/docs/user-management.md#firebase-realtime-database">
  user management documentation</a> to learn how to set up persistence.
</p>
<% } %>
<%# Entry points for the user-management views. %>
<ul>
  <li><a href="/admin/manage/list-users">List Users</a></li>
  <li><a href="/admin/manage/create-user">Create User</a></li>
  <li><a href="/admin/manage/import-users">Import Users</a></li>
  <li><a href="/admin/manage/export-users">Export Users</a></li>
</ul>
<%- include("../_partials/admin-footer") %>
+105
View File
@@ -0,0 +1,105 @@
<%- include("../_partials/admin-header", { title: "Users - OAI Reverse Proxy Admin" }) %>
<h1>User Token List</h1>
<%# NOTE(review): this hidden _csrf input appears unused — the scripts below
    read the csrf-token meta tag emitted by the header partial instead. %>
<input type="hidden" name="_csrf" value="<%= csrfToken %>" />
<% if (users.length === 0) { %>
<p>No users found.</p>
<% } else { %>
<table>
  <thead>
    <tr>
      <th>Token</th>
      <th <% if (sort.includes("ip")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=ip">IPs</a></th>
      <th <% if (sort.includes("promptCount")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=promptCount">Prompts</a></th>
      <th>Type</th>
      <th <% if (sort.includes("createdAt")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=createdAt">Created (UTC)</a></th>
      <th <% if (sort.includes("lastUsedAt")) { %>class="active"<% } %> ><a href="/admin/manage/list-users?sort=lastUsedAt">Last Used (UTC)</a></th>
      <th colspan="2">Banned?</th>
    </tr>
  </thead>
  <tbody>
    <% users.forEach(function(user){ %>
    <tr>
      <td>
        <code><a href="/admin/manage/view-user/<%= user.token %>"><%= user.token %></a></code>
      </td>
      <td><%= user.ip.length %></td>
      <td><%= user.promptCount %></td>
      <td><%= user.type %></td>
      <td><%= user.createdAt %></td>
      <td><%= user.lastUsedAt ?? "never" %></td>
      <%# Fixed malformed markup: the actions cell previously enclosed the
          ban-status cell instead of closing before it. %>
      <td class="actions">
        <% if (user.disabledAt) { %>
        <a title="Unban" href="#" class="unban" data-token="<%= user.token %>">🔄️</a>
        <% } else { %>
        <a title="Ban" href="#" class="ban" data-token="<%= user.token %>">🚫</a>
        <% } %>
      </td>
      <td><%= user.disabledAt ? "Yes" : "No" %> <%= user.disabledReason ? `(${user.disabledReason})` : "" %></td>
    </tr>
    <% }); %>
  </tbody>
</table>
<ul class="pagination">
  <% if (page > 1) { %>
  <li><a href="/admin/manage/list-users?sort=<%= sort %>&page=<%= page - 1 %>">&laquo;</a></li>
  <% } %> <% for (var i = 1; i <= pageCount; i++) { %>
  <li <% if (i === page) { %>class="active"<% } %>><a href="/admin/manage/list-users?sort=<%= sort %>&page=<%= i %>"><%= i %></a></li>
  <% } %> <% if (page < pageCount) { %>
  <li><a href="/admin/manage/list-users?sort=<%= sort %>&page=<%= page + 1 %>">&raquo;</a></li>
  <% } %>
</ul>
<p>Showing <%= page * pageSize - pageSize + 1 %> to <%= users.length + page * pageSize - pageSize %> of <%= totalCount %> users.</p>
<%- include("../_partials/pagination") %>
<% } %>
<script>
  // Ban: confirm, prompt for a reason, then POST it with the CSRF token
  // read from the <meta> tag; reload to reflect the new state.
  document.querySelectorAll("td.actions a.ban").forEach(function (a) {
    a.addEventListener("click", function (e) {
      e.preventDefault();
      var token = a.getAttribute("data-token");
      if (confirm("Are you sure you want to ban this user?")) {
        let reason = prompt("Reason for ban:");
        fetch(
          "/admin/manage/disable-user/" + token,
          {
            method: "POST",
            credentials: "same-origin",
            body: JSON.stringify({ reason, _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") }),
            headers: { "Content-Type": "application/json" }
          }).then(() => window.location.reload());
      }
    });
  });
  // Unban: same flow as ban but without a reason.
  document.querySelectorAll("td.actions a.unban").forEach(function (a) {
    a.addEventListener("click", function (e) {
      e.preventDefault();
      var token = a.getAttribute("data-token");
      if (confirm("Are you sure you want to unban this user?")) {
        fetch(
          "/admin/manage/reactivate-user/" + token,
          {
            method: "POST",
            credentials: "same-origin",
            body: JSON.stringify({ _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") }),
            headers: { "Content-Type": "application/json" }
          }
        ).then(() => window.location.reload());
      }
    });
  });
</script>
<script>
  // Render 13-digit epoch-millisecond cells as "YYYY-MM-DD HH:MM:SS".
  document.querySelectorAll("td").forEach(function(td) {
    if (td.innerText.match(/^\d{13}$/)) {
      // Fixed: the original `return 'never'` was a no-op inside forEach;
      // actually display "never" for an all-zero timestamp.
      if (td.innerText == 0) { td.innerText = "never"; return; }
      var date = new Date(parseInt(td.innerText));
      td.innerText = date.toISOString().replace("T", " ").replace(/\.\d+Z$/, "");
    }
  });
</script>
<%- include("../_partials/admin-footer") %>
+13
View File
@@ -0,0 +1,13 @@
<%- include("../_partials/admin-header", { title: "Login" }) %>
<h1>Login</h1>
<% if (failed) { %>
<p style="color: red;">Please try again.</p>
<% } %>
<%# Posts the admin key along with the server-rendered CSRF token. %>
<form action="/admin/login" method="post">
  <input type="hidden" name="_csrf" value="<%= csrfToken %>" />
  <label for="token">Admin Key</label>
  <input type="password" name="token" />
  <input type="submit" value="Login" />
</form>
<%# NOTE(review): this page closes the document itself instead of including
    the admin-footer partial — presumably to hide the admin nav/logout links
    from unauthenticated visitors; confirm this is intentional. %>
</body>
</html>
+64
View File
@@ -0,0 +1,64 @@
<%- include("../_partials/admin-header", { title: "View User - OAI Reverse Proxy Admin" }) %>
<h1>View User</h1>
<%# NOTE(review): values are rendered with the raw-output EJS tag, so fields
    such as disabledReason are not HTML-escaped — confirm they are trusted
    (admin-entered) before relying on this. %>
<table class="table table-striped">
  <thead>
    <tr>
      <th scope="col">Key</th>
      <th scope="col">Value</th>
    </tr>
  </thead>
  <%# Fixed malformed markup: the thead element was never closed and the
      Token row was missing its closing tr tag. %>
  <tbody>
    <tr>
      <th scope="row">Token</th>
      <td><%- user.token %></td>
    </tr>
    <tr>
      <th scope="row">Type</th>
      <td><%- user.type %></td>
    </tr>
    <tr>
      <th scope="row">Prompt Count</th>
      <td><%- user.promptCount %></td>
    </tr>
    <tr>
      <th scope="row">Token Count</th>
      <td><%- user.tokenCount %></td>
    </tr>
    <tr>
      <th scope="row">Created At</th>
      <td><%- user.createdAt %></td>
    </tr>
    <tr>
      <th scope="row">Last Used At</th>
      <td><%- user.lastUsedAt || "never" %></td>
    </tr>
    <tr>
      <th scope="row">Disabled At</th>
      <td><%- user.disabledAt %></td>
    </tr>
    <tr>
      <th scope="row">Disabled Reason</th>
      <td><%- user.disabledReason %></td>
    </tr>
    <tr>
      <th scope="row">IPs</th>
      <td>
        <a href="#" id="ip-list-toggle">Show all (<%- user.ip.length %>)</a>
        <ol id="ip-list" style="display:none; padding-left:1em; margin: 0;">
          <% user.ip.forEach((ip) => { %>
          <li><code><%- ip %></code></li>
          <% }) %>
        </ol>
      </td>
    </tr>
  </tbody>
</table>
<script>
  // Reveal the full IP list (hidden by default) and hide the toggle link.
  document.getElementById("ip-list-toggle").addEventListener("click", (e) => {
    e.preventDefault();
    document.getElementById("ip-list").style.display = "block";
    document.getElementById("ip-list-toggle").style.display = "none";
  });
</script>
<%- include("../_partials/admin-footer") %>