1 Commits

Author SHA1 Message Date
nai-degen 8d5059534a starts adding cohere api format and schemas 2024-06-09 12:46:02 -05:00
163 changed files with 3788 additions and 15344 deletions
+54 -101
View File
@@ -8,32 +8,12 @@
# Use production mode unless you are developing locally.
NODE_ENV=production
# Detail level of diagnostic logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info
# ------------------------------------------------------------------------------
# General settings:
# The title displayed on the info page.
# SERVER_TITLE=Coom Tunnel
# URL for the image displayed on the login page.
# If not set, no image will be displayed.
# LOGIN_IMAGE_URL=https://example.com/your-logo.png
# Whether to enable the token-based or password-based login for the main info page.
# Defaults to true. Set to false to disable login and make the info page public.
# ENABLE_INFO_PAGE_LOGIN=true
# Authentication mode for the service info page. (token | password)
# If 'token', any valid user token is used (requires GATEKEEPER='user_token' mode).
# If 'password', SERVICE_INFO_PASSWORD is used.
# Defaults to 'token' if ENABLE_INFO_PAGE_LOGIN is true.
# SERVICE_INFO_AUTH_MODE=token
# Password for the service info page if SERVICE_INFO_AUTH_MODE is 'password'.
# SERVICE_INFO_PASSWORD=your-service-info-password
# The route name used to proxy requests to APIs, relative to the Web site root.
# PROXY_ENDPOINT_ROUTE=/proxy
@@ -44,76 +24,36 @@ NODE_ENV=production
# Max number of context tokens a user can request at once.
# Increase this if your proxy allows GPT 32k or 128k context
# MAX_CONTEXT_TOKENS_OPENAI=32768
# MAX_CONTEXT_TOKENS_ANTHROPIC=32768
# MAX_CONTEXT_TOKENS_OPENAI=16384
# Max number of output tokens a user can request at once.
# MAX_OUTPUT_TOKENS_OPENAI=1024
# MAX_OUTPUT_TOKENS_ANTHROPIC=1024
# MAX_OUTPUT_TOKENS_OPENAI=400
# MAX_OUTPUT_TOKENS_ANTHROPIC=400
# Whether to show the estimated cost of consumed tokens on the info page.
# SHOW_TOKEN_COSTS=false
# Whether to automatically check API keys for validity.
# Disabled by default in local development mode, but enabled in production.
# Note: CHECK_KEYS is disabled by default in local development mode, but enabled
# by default in production mode.
# CHECK_KEYS=true
# Which model types users are allowed to access.
# The following model families are recognized:
# turbo | gpt4 | gpt4-32k | gpt4-turbo | gpt4o | o1 | dall-e | claude
# | claude-opus | gemini-flash | gemini-pro | gemini-ultra | mistral-tiny |
# | mistral-small | mistral-medium | mistral-large | aws-claude |
# | aws-claude-opus | gcp-claude | gcp-claude-opus | azure-turbo | azure-gpt4
# | azure-gpt4-32k | azure-gpt4-turbo | azure-gpt4o | azure-o1 | azure-dall-e
# | azure-gpt45 | azure-o1-mini | azure-o3-mini | deepseek | xai | o3 | o4-mini | gpt41 | gpt41-mini | gpt41-nano
# By default, all models are allowed
# To disallow any, uncomment the line below and edit it
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt45,gpt4-turbo,gpt4o,claude,claude-opus,gemini-flash,gemini-pro,gemini-ultra,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,gcp-claude,gcp-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o,azure-gpt45,azure-o1-mini,azure-o3-mini,deepseek
# turbo | gpt4 | gpt4-32k | gpt4-turbo | gpt4o | dall-e | claude | claude-opus | gemini-pro | mistral-tiny | mistral-small | mistral-medium | mistral-large | aws-claude | aws-claude-opus | azure-turbo | azure-gpt4 | azure-gpt4-32k | azure-gpt4-turbo | azure-gpt4o | azure-dall-e
# By default, all models are allowed except for 'dall-e' / 'azure-dall-e'.
# To allow DALL-E image generation, uncomment the line below and add 'dall-e' or
# 'azure-dall-e' to the list of allowed model families.
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,gpt4o,claude,claude-opus,gemini-pro,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o
# Which services can be used to process prompts containing images via multimodal
# models. The following services are recognized:
# openai | anthropic | aws | gcp | azure | google-ai | mistral-ai | xai
# openai | anthropic | aws | azure | google-ai | mistral-ai
# Do not enable this feature unless all users are trusted, as you will be liable
# for any user-submitted images containing illegal content.
# By default, no image services are allowed and image prompts are rejected.
# ALLOWED_VISION_SERVICES=
# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
# PROMPT_LOGGING=false
# Specifies the number of proxies or load balancers in front of the server.
# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
# For any other deployments, please see config.ts as the correct configuration
# depends on your setup. Misconfiguring this value can result in problems
# accurately tracking IP addresses and enforcing rate limits.
# TRUSTED_PROXIES=1
# Whether cookies should be set without the Secure flag, for hosts that don't
# support SSL. True by default in development, false in production.
# USE_INSECURE_COOKIES=false
# Reorganizes requests in the queue according to their token count, placing
# larger prompts further back. The penalty is determined by (promptTokens *
# TOKENS_PUNISHMENT_FACTOR). A value of 1.0 adds one second per 1000 tokens.
# When there is no queue or it is very short, the effect is negligible (this
# setting only reorders the queue, it does not artificially delay requests).
# TOKENS_PUNISHMENT_FACTOR=0.0
# Captcha verification settings. Refer to docs/pow-captcha.md for guidance.
# CAPTCHA_MODE=none
# POW_TOKEN_HOURS=24
# POW_TOKEN_MAX_IPS=2
# POW_DIFFICULTY_LEVEL=low
# POW_CHALLENGE_TIMEOUT=30
# -------------------------------------------------------------------------------
# Blocking settings:
# Allows blocking requests depending on content, referers, or IP addresses.
# This is a convenience feature; if you need more robust functionality it is
# highly recommended to put this application behind nginx or Cloudflare, as they
# will have better performance.
# IP addresses or CIDR blocks from which requests will be blocked.
# IP_BLACKLIST=10.0.0.1/24
# URLs from which requests will be blocked.
@@ -122,13 +62,35 @@ NODE_ENV=production
# BLOCK_MESSAGE="You must be over the age of majority in your country to use this service."
# Destination to redirect blocked requests to.
# BLOCK_REDIRECT="https://roblox.com/"
# Comma-separated list of phrases that will be rejected. Surround phrases with
# quotes if they contain commas. You can use regular expression tokens.
# Avoid overly broad phrases, as they will trigger on any match in the entire prompt.
# Comma-separated list of phrases that will be rejected. Only whole words are matched.
# Surround phrases with quotes if they contain commas.
# Avoid short or common phrases as this tests the entire prompt.
# REJECT_PHRASES="phrase one,phrase two,"phrase three, which has a comma",phrase four"
# Message to show when requests are rejected.
# REJECT_MESSAGE="You can't say that here."
# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
# PROMPT_LOGGING=false
# The port and network interface to listen on.
# PORT=7860
# BIND_ADDRESS=0.0.0.0
# Whether cookies should be set without the Secure flag, for hosts that don't support SSL.
# USE_INSECURE_COOKIES=false
# Detail level of logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info
# Captcha verification settings. Refer to docs/pow-captcha.md for guidance.
# CAPTCHA_MODE=none
# POW_TOKEN_HOURS=24
# POW_TOKEN_MAX_IPS=2
# POW_DIFFICULTY_LEVEL=low
# POW_CHALLENGE_TIMEOUT=30
# ------------------------------------------------------------------------------
# Optional settings for user management, access control, and quota enforcement:
# See `docs/user-management.md` for more information and setup instructions.
@@ -136,11 +98,8 @@ NODE_ENV=production
# Which access control method to use. (none | proxy_key | user_token)
# GATEKEEPER=none
# Which persistence method to use. (memory | firebase_rtdb | sqlite)
# Which persistence method to use. (memory | firebase_rtdb)
# GATEKEEPER_STORE=memory
# If using sqlite store, path to the SQLite database file for user data.
# Defaults to data/user-store.sqlite in the project directory.
# SQLITE_USER_STORE_PATH=data/user-store.sqlite3
# Maximum number of unique IPs a user can connect from. (0 for unlimited)
# MAX_IPS_PER_USER=0
@@ -151,8 +110,14 @@ NODE_ENV=production
# ALLOW_NICKNAME_CHANGES=true
# Default token quotas for each model family. (0 for unlimited)
# Specify as TOKEN_QUOTA_MODEL_FAMILY=value (replacing dashes with underscores).
# eg. TOKEN_QUOTA_TURBO=0, TOKEN_QUOTA_GPT4=1000000, TOKEN_QUOTA_GPT4_32K=100000
# Specify as TOKEN_QUOTA_MODEL_FAMILY=value, replacing dashes with underscores.
# TOKEN_QUOTA_TURBO=0
# TOKEN_QUOTA_GPT4=0
# TOKEN_QUOTA_GPT4_32K=0
# TOKEN_QUOTA_GPT4_TURBO=0
# TOKEN_QUOTA_CLAUDE=0
# TOKEN_QUOTA_GEMINI_PRO=0
# TOKEN_QUOTA_AWS_CLAUDE=0
# "Tokens" for image-generation models are counted at a rate of 100000 tokens
# per US$1.00 generated, which is similar to the cost of GPT-4 Turbo.
# DALL-E 3 costs around US$0.10 per image (10000 tokens).
@@ -163,22 +128,12 @@ NODE_ENV=production
# Leave unset to never automatically refresh quotas.
# QUOTA_REFRESH_PERIOD=daily
# -------------------------------------------------------------------------------
# HTTP agent settings:
# If you need to change how the proxy makes requests to other servers, such
# as when checking keys or forwarding users' requests to external services,
# you can configure an alternative HTTP agent. Otherwise the default OS settings
# will be used.
# The name of the network interface to use. The first external IPv4 address
# belonging to this interface will be used for outgoing requests.
# HTTP_AGENT_INTERFACE=enp0s3
# The URL of a proxy server to use. Supports SOCKS4, SOCKS5, HTTP, and HTTPS.
# Note that if your proxy server issues a self-signed certificate, you may need
# NODE_EXTRA_CA_CERTS set to the path to your certificate. You will need to set
# that variable in your environment, not in this file.
# HTTP_AGENT_PROXY_URL=http://test:test@127.0.0.1:8000
# Specifies the number of proxies or load balancers in front of the server.
# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
# For any other deployments, please see config.ts as the correct configuration
# depends on your setup. Misconfiguring this value can result in problems
# accurately tracking IP addresses and enforcing rate limits.
# TRUSTED_PROXIES=1
# ------------------------------------------------------------------------------
# Secrets and keys:
@@ -187,25 +142,23 @@ NODE_ENV=production
# You can add multiple API keys by separating them with a comma.
# For AWS credentials, separate the access key ID, secret key, and region with a colon.
# For GCP credentials, separate the project ID, client email, region, and private key with a colon.
OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
ANTHROPIC_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
GOOGLE_AI_KEY=AIzaxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# See `docs/aws-configuration.md` for more information; there may be additional steps required to set up AWS.
AWS_CREDENTIALS=myaccesskeyid:mysecretkey:us-east-1,anotheraccesskeyid:anothersecretkey:us-west-2
# See `docs/azure-configuration.md` for more information; there may be additional steps required to set up Azure.
AZURE_CREDENTIALS=azure-resource-name:deployment-id:api-key,another-azure-resource-name:another-deployment-id:another-api-key
GCP_CREDENTIALS=project-id:client-email:region:private-key
# With proxy_key gatekeeper, the password users must provide to access the API.
# PROXY_KEY=your-secret-key
# With user_token gatekeeper, the admin password used to manage users.
# ADMIN_KEY=your-very-secret-key
# Restrict access to the admin interface to specific IP addresses, specified
# as a comma-separated list of CIDR ranges.
# To restrict access to the admin interface to specific IP addresses, set the
# ADMIN_WHITELIST environment variable to a comma-separated list of CIDR blocks.
# ADMIN_WHITELIST=0.0.0.0/0
# With firebase_rtdb gatekeeper storage, the Firebase project credentials.
# FIREBASE_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# FIREBASE_RTDB_URL=https://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.firebaseio.com
+1 -1
View File
@@ -7,5 +7,5 @@
build
greeting.md
node_modules
.windsurfrules
http-client.private.env.json
-33
View File
@@ -1,33 +0,0 @@
You are a Senior Full Stack Developer and an Expert in ReactJS, NextJS, JavaScript, TypeScript, HTML, CSS and modern UI/UX frameworks (e.g., TailwindCSS, Shadcn, Radix). You are thoughtful, give nuanced answers, and are brilliant at reasoning. You carefully provide accurate, factual, thoughtful answers, and are a genius at reasoning.
- Follow the user's requirements carefully & to the letter.
- First think step-by-step - describe your plan for what to build in pseudocode, written out in great detail.
- Confirm, then write code!
- Always write correct, best-practice, DRY (Don't Repeat Yourself), bug-free, fully functional and working code that is aligned with the rules listed below under Code Implementation Guidelines.
- Focus on readable, easy-to-follow code over performant code.
- Fully implement all requested functionality.
- Leave NO todos, placeholders or missing pieces.
- Ensure code is complete! Verify it is thoroughly finalised.
- Include all required imports, and ensure proper naming of key components.
- Be concise. Minimize any other prose.
- If you think there might not be a correct answer, you say so.
- If you do not know the answer, say so, instead of guessing.
### Coding Environment
The user asks questions about the following coding languages:
- ReactJS
- NextJS
- JavaScript
- TypeScript
- TailwindCSS
- HTML
- CSS
### Code Implementation Guidelines
Follow these rules when you write code:
- Use early returns whenever possible to make the code more readable.
- Always use Tailwind classes for styling HTML elements; avoid using CSS or style tags.
- Use “class:” instead of the ternary operator in class tags whenever possible.
- Use descriptive variable and function/const names. Also, event functions should be named with a “handle” prefix, like “handleClick” for onClick and “handleKeyDown” for onKeyDown.
- Implement accessibility features on elements. For example, a tag should have a tabindex=“0”, aria-label, on:click, and on:keydown, and similar attributes.
- Use consts instead of functions, for example, “const toggle = () =>”. Also, define a type if possible.
-321
View File
@@ -1,321 +0,0 @@
# Project Codebase Guide
This document serves as a guide and index for the project codebase, designed to help developers and AI agents quickly understand its structure, components, and how to contribute.
## Table of Contents
1. [Project Overview](#project-overview)
2. [Directory Structure](#directory-structure)
3. [Core Components](#core-components)
* [Configuration (`src/config.ts`)](#configuration)
* [Server Entry Point (`src/server.ts`)](#server-entry-point)
* [Proxy Layer (`src/proxy/`)](#proxy-layer)
* [User Management (`src/user/`)](#user-management)
* [Admin Interface (`src/admin/`)](#admin-interface)
* [Shared Utilities (`src/shared/`)](#shared-utilities)
4. [Proxy Functionality](#proxy-functionality)
* [Routing (`src/proxy/routes.ts`)](#proxy-routing)
* [Supported Models & Providers](#supported-models--providers)
* [Middleware (`src/proxy/middleware/`)](#proxy-middleware)
* [Adding New Models](#adding-new-models)
* [Adding New APIs/Providers](#adding-new-apisproviders)
5. [Model Management](#model-management)
* [Model Family Definitions](#model-family-definitions)
* [Adding OpenAI Models](#adding-openai-models)
* [Model Mapping & Routing](#model-mapping--routing)
* [Service Information](#service-information)
* [Step-by-Step Guide for Adding a New Model](#step-by-step-guide-for-adding-a-new-model)
* [Model Patterns and Versioning](#model-patterns-and-versioning)
* [Response Format Handling](#response-format-handling)
6. [Key Management](#key-management)
* [Key Pool System](#key-pool-system)
* [Provider-Specific Key Management](#provider-specific-key-management)
* [Key Rotation and Health Checks](#key-rotation-and-health-checks)
7. [Data Management](#data-management)
* [Database (`src/shared/database/`)](#database)
* [File Storage (`src/shared/file-storage/`)](#file-storage)
8. [Authentication & Authorization](#authentication--authorization)
9. [Logging & Monitoring](#logging--monitoring)
10. [Deployment](#deployment)
11. [Contributing](#contributing)
## Project Overview
This project provides a proxy layer for various Large Language Models (LLMs) and potentially other AI APIs. It aims to offer a unified interface, manage API keys securely, handle rate limiting, usage tracking, and potentially add features like response caching or prompt modification.
## Directory Structure
```
.
├── .env.example # Example environment variables
├── .gitattributes # Git attributes
├── .gitignore # Git ignore rules
├── .husky/ # Git hooks
├── .prettierrc # Code formatting rules
├── CODEBASE_GUIDE.md # This file
├── README.md # Project README
├── data/ # Data files (e.g., SQLite DB)
├── docker/ # Docker configuration
├── docs/ # Documentation files
├── http-client.env.json # HTTP client environment
├── package-lock.json # NPM lock file
├── package.json # Project dependencies and scripts
├── patches/ # Patches for dependencies
├── public/ # Static assets served by the web server
├── render.yaml # Render deployment configuration
├── scripts/ # Utility scripts
├── src/ # Source code
│ ├── admin/ # Admin interface logic
│ ├── config.ts # Application configuration
│ ├── info-page.ts # Logic for the info page
│ ├── logger.ts # Logging setup
│ ├── proxy/ # Core proxy logic for different providers
│ ├── server.ts # Express server setup and main entry point
│ ├── service-info.ts # Service information logic
│ ├── shared/ # Shared utilities, types, and modules
│ └── user/ # User management logic
├── tsconfig.json # TypeScript configuration
```
## Core Components
### Configuration (`src/config.ts`)
* Loads environment variables and defines application settings.
* Contains configuration for database connections, API keys (placeholders/retrieval methods), logging levels, rate limits, etc.
* Uses `dotenv` and potentially a schema validation library (like Zod) to ensure required variables are present.
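As a rough illustration (not the actual contents of `src/config.ts`), loading and validating environment variables with `dotenv` and Zod might look like the sketch below; the schema and defaults shown here are assumptions, with only the variable names taken from `.env.example`.

```typescript
import dotenv from "dotenv";
import { z } from "zod";

dotenv.config();

// Hypothetical schema; the real config covers many more settings.
const EnvSchema = z.object({
  NODE_ENV: z.enum(["development", "production"]).default("production"),
  PORT: z.coerce.number().default(7860),
  OPENAI_KEY: z.string().optional(),
  MAX_OUTPUT_TOKENS_OPENAI: z.coerce.number().default(400),
});

// Fails fast at startup if a variable is present but malformed.
export const config = EnvSchema.parse(process.env);
```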
### Server Entry Point (`src/server.ts`)
* Initializes the Express application.
* Sets up core middleware (e.g., body parsing, CORS, logging).
* Mounts routers for different parts of the application (admin, user, proxy).
* Starts the HTTP server.
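A minimal sketch of such an entry point, with illustrative paths and routers that are not taken from the codebase:

```typescript
import express from "express";
import cors from "cors";

const app = express();

// Core middleware.
app.use(cors());
app.use(express.json());

// Mount the application routers; the real routers live under src/admin, src/user, and src/proxy.
// app.use("/admin", adminRouter);
// app.use("/user", userRouter);
// app.use("/proxy", proxyRouter);

app.listen(7860, () => console.log("Server listening on port 7860"));
```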
### Proxy Layer (`src/proxy/`)
* The heart of the application, handling requests to downstream AI APIs.
* Contains individual modules for each supported provider (e.g., `openai.ts`, `anthropic.ts`).
* Handles request transformation, authentication against the target API, and response handling.
* Uses middleware for common proxy tasks.
### User Management (`src/user/`)
* Handles user registration, login, session management, and potentially API key generation/management for end-users.
* Likely interacts with the database (`src/shared/database/`).
### Admin Interface (`src/admin/`)
* Provides an interface for administrators to manage users, monitor usage, configure settings, etc.
* May have its own set of routes and views.
### Shared Utilities (`src/shared/`)
* Contains reusable code across different modules.
* `api-schemas/`: Zod schemas for API request/response validation.
* `database/`: Database connection, schemas (e.g., Prisma), and query logic.
* `errors.ts`: Custom error classes.
* `key-management/`: Logic for managing API keys (if applicable).
* `models.ts`: Core data models/types used throughout the application.
* `prompt-logging/`: Logic for logging prompts and responses.
* `tokenization/`: Utilities for counting tokens.
* `utils.ts`: General utility functions.
## Proxy Functionality
### Proxy Routing (`src/proxy/routes.ts`)
* Defines the API endpoints for the proxy service (e.g., `/v1/chat/completions`).
* Maps incoming requests to the appropriate provider-specific handler based on the request path, headers, or body content (e.g., model requested).
* Applies relevant middleware (authentication, rate limiting, queuing, etc.).
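For illustration, a provider-dispatching router might be wired up roughly as below; the handler and middleware names are assumptions, not the project's actual exports.

```typescript
import { Router } from "express";
// Hypothetical imports; the real handlers live in src/proxy/*.ts.
import { openaiRouter } from "./openai";
import { anthropicRouter } from "./anthropic";
import { gatekeeper } from "./middleware/gatekeeper";
import { rateLimit } from "./middleware/rate-limit";

const proxyRouter = Router();

// Shared middleware runs before any provider-specific handler.
proxyRouter.use(gatekeeper);
proxyRouter.use(rateLimit);

// Each provider is mounted on its own sub-path.
proxyRouter.use("/openai", openaiRouter);
proxyRouter.use("/anthropic", anthropicRouter);

export { proxyRouter };
```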
### Supported Models & Providers
* **OpenAI:** Handled in `src/proxy/openai.ts`. Supports models like GPT-4, GPT-3.5-turbo, as well as o-series models (o1, o1-mini, o1-pro, o3, o3-mini, o3-pro, o4-mini). Handles chat completions and potentially image generation (`src/proxy/openai-image.ts`).
* **Anthropic:** Handled in `src/proxy/anthropic.ts`. Supports Claude models. May use AWS Bedrock (`src/proxy/aws-claude.ts`) or Anthropic's direct API.
* **Google AI / Vertex AI:** Handled in `src/proxy/google-ai.ts` and `src/proxy/gcp.ts`. Supports Gemini models (gemini-flash, gemini-pro, gemini-ultra).
* **Mistral AI:** Handled in `src/proxy/mistral-ai.ts`. Supports Mistral models via their API or potentially AWS (`src/proxy/aws-mistral.ts`).
* **Azure OpenAI:** Handled in `src/proxy/azure.ts`. Provides an alternative endpoint for OpenAI models via Azure.
* **Deepseek:** Handled in `src/proxy/deepseek.ts`.
* **Xai:** Handled in `src/proxy/xai.ts`.
* **AWS (General):** `src/proxy/aws.ts` might contain shared AWS logic (e.g., authentication).
### Middleware (`src/proxy/middleware/`)
* **`gatekeeper.ts`:** Likely handles initial request validation, authentication, and authorization checks before hitting provider logic. Checks origin (`check-origin.ts`), potentially custom tokens (`check-risu-token.ts`).
* **`rate-limit.ts`:** Implements rate limiting logic, potentially per-user or per-key.
* **`queue.ts`:** Manages request queuing, possibly to handle concurrency limits or prioritize requests.
### Adding New Models
1. **Identify the Provider:** Determine if the new model belongs to an existing provider (e.g., a new OpenAI model) or a new one.
2. **Update Provider Logic (if existing):**
* Modify the relevant provider file (e.g., `src/proxy/openai.ts`).
* Update model lists or logic that selects/validates models.
* Adjust any request/response transformations if the new model has a different API schema.
* Update model information in shared files like `src/shared/models.ts` if necessary.
3. **Update Routing (if necessary):** Modify `src/proxy/routes.ts` if the new model requires a different endpoint or routing logic.
4. **Configuration:** Add any new API keys or configuration parameters to `.env.example` and `src/config.ts`.
5. **Testing:** Add unit or integration tests for the new model.
### Adding New APIs/Providers
1. **Create Provider Module:** Create a new file in `src/proxy/` (e.g., `src/proxy/new-provider.ts`).
2. **Implement Handler:**
* Write the core logic to handle requests for this provider. This typically involves:
* Receiving the standardized request from the router.
* Transforming the request into the format expected by the new provider's API.
* Authenticating with the new provider's API (fetching keys from config).
* Making the API call (consider using a robust HTTP client like `axios` or `node-fetch`).
* Handling streaming responses if applicable (using helpers from `src/shared/streaming.ts`).
* Transforming the provider's response back into a standardized format.
* Handling errors gracefully.
3. **Add Routing:**
* Import the new handler in `src/proxy/routes.ts`.
* Add new routes or modify existing routing logic to direct requests to the new handler based on model name, path, or other criteria.
* Apply necessary middleware (gatekeeper, rate limiter, queue).
4. **Create Key Management:**
* Create a new directory in `src/shared/key-management/` for the provider.
* Implement provider-specific key management (key checkers, token counters).
5. **Configuration:**
* Add configuration variables (API keys, base URLs) to `.env.example` and `src/config.ts`.
* Update `src/config.ts` to load and validate the new variables.
6. **Model Information:** Add details about the new provider and its models to `src/shared/models.ts` or similar shared locations.
7. **Tokenization (if applicable):** If token counting is needed, add or update tokenization logic in `src/shared/tokenization/`.
8. **Testing:** Implement thorough tests for the new provider integration.
9. **Documentation:** Update this guide and any other relevant documentation.
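Tying steps 1 and 2 above together, a skeleton for a new provider handler might look roughly like this; every name, URL, and field below is illustrative rather than taken from the codebase.

```typescript
import axios from "axios";
import type { Request, Response } from "express";

// Hypothetical handler for a fictional "new-provider" API.
export async function handleNewProviderRequest(req: Request, res: Response) {
  // 1. Transform the standardized (OpenAI-style) request into the provider's format.
  const upstreamBody = {
    prompt: (req.body.messages ?? []).map((m: any) => m.content).join("\n"),
    max_tokens: req.body.max_tokens,
  };

  // 2. Authenticate with the provider and forward the request.
  const upstream = await axios.post(
    "https://api.new-provider.example/v1/generate",
    upstreamBody,
    { headers: { Authorization: `Bearer ${process.env.NEW_PROVIDER_KEY}` } }
  );

  // 3. Transform the provider's response back into an OpenAI-compatible shape.
  res.json({
    choices: [{ index: 0, message: { role: "assistant", content: upstream.data.text } }],
  });
}
```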
## Model Management
### Model Family Definitions
* **Model Family Definitions:** The project uses a family-based approach to group similar models together. These are defined in `src/shared/models.ts`.
* Each model is part of a model family (e.g., "gpt4", "claude", "gemini-pro") which helps with routing, key management, and feature support.
* The `MODEL_FAMILIES` array contains all supported model families, and the `MODEL_FAMILY_SERVICE` mapping connects each family to its provider service.
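As a simplified sketch of that pattern (the real definitions in `src/shared/models.ts` cover far more families), the grouping might look like:

```typescript
// Illustrative subset; the real lists are much longer.
type ModelFamily = "turbo" | "gpt4" | "claude" | "gemini-pro" | "mistral-small";
type LLMService = "openai" | "anthropic" | "google-ai" | "mistral-ai";

const MODEL_FAMILIES: ModelFamily[] = ["turbo", "gpt4", "claude", "gemini-pro", "mistral-small"];

// Maps each family to the provider service responsible for it.
const MODEL_FAMILY_SERVICE: Record<ModelFamily, LLMService> = {
  turbo: "openai",
  gpt4: "openai",
  claude: "anthropic",
  "gemini-pro": "google-ai",
  "mistral-small": "mistral-ai",
};
```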
### Adding OpenAI Models
When adding new OpenAI models to the codebase, there are several files that must be updated:
1. **Update Model Types (`src/shared/models.ts`):**
- Add the new model to the `OpenAIModelFamily` type
- Add the model to the `MODEL_FAMILIES` array
- Add the Azure variants for the model if applicable
- Add the model to `MODEL_FAMILY_SERVICE` mapping
- Update `OPENAI_MODEL_FAMILY_MAP` with regex patterns to match the model names
2. **Update Context Size Limits (`src/proxy/middleware/request/preprocessors/validate-context-size.ts`):**
- Add regex matching for the new model
- Set the appropriate context token limit for the model
3. **Update Token Cost Tracking (`src/shared/stats.ts`):**
- Add pricing information for the new model in the `getTokenCostUsd` function
- Include both input and output prices in the comments for clarity
4. **Update Feature Support Checks (`src/proxy/openai.ts`):**
- If the model supports special features like the reasoning API parameter (`isO1Model` function), update the appropriate function
- For model feature detection, prefer using regex patterns over explicit lists when possible, as this handles date-stamped versions better
5. **Update Display Names (`src/info-page.ts`):**
- Add friendly display names for the new models in the `MODEL_FAMILY_FRIENDLY_NAME` object
6. **Update Key Management Provider Files:**
- For OpenAI keys in `src/shared/key-management/openai/provider.ts`, add token counters for the new models
- For Azure OpenAI keys in `src/shared/key-management/azure/provider.ts`, add token counters for the Azure versions
### Model Patterns and Versioning
The codebase handles several patterns for model naming and versioning:
1. **Date-stamped Models:** Many models include date stamps (e.g., `gpt-4-0125-preview`). The regex patterns in `OPENAI_MODEL_FAMILY_MAP` account for these with patterns like `^gpt-4o(-\\d{4}-\\d{2}-\\d{2})?$`.
2. **O-Series Models:** OpenAI's o-series models (o1, o1-mini, o1-pro, o3, o3-mini, o3-pro, o4-mini) follow a different naming convention. The codebase handles these with dedicated model families and regex patterns.
3. **Preview/Non-Preview Variants:** Some models have preview variants (e.g., `gpt-4.5-preview`). The regex patterns in `OPENAI_MODEL_FAMILY_MAP` account for these with patterns like `^gpt-4\\.5(-preview)?(-\\d{4}-\\d{2}-\\d{2})?$`.
When adding new models, try to follow the existing patterns for consistency.
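For example, a family resolver built on regex patterns like the ones quoted above could look roughly like this (a sketch, not the actual `OPENAI_MODEL_FAMILY_MAP`):

```typescript
// Illustrative patterns; the real map covers many more families.
const FAMILY_PATTERNS: Record<string, RegExp> = {
  gpt4o: /^gpt-4o(-\d{4}-\d{2}-\d{2})?$/,
  gpt45: /^gpt-4\.5(-preview)?(-\d{4}-\d{2}-\d{2})?$/,
  o1: /^o1(-mini|-pro)?(-\d{4}-\d{2}-\d{2})?$/,
};

function getModelFamily(model: string): string | undefined {
  for (const [family, pattern] of Object.entries(FAMILY_PATTERNS)) {
    if (pattern.test(model)) return family;
  }
  return undefined;
}

// getModelFamily("gpt-4o-2024-05-13") === "gpt4o"
```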
### Response Format Handling
The codebase includes special handling for different API response formats:
1. **Chat vs. Text Completions:** There's transformation logic in `openai.ts` to convert between chat completions and text completions formats (`transformTurboInstructResponse`).
2. **Newer API Formats:** For newer APIs like the Responses API, there's transformation logic (`transformResponsesApiResponse`) to convert responses to a format compatible with existing clients.
When adding support for new models or APIs, consider whether transformation is needed to maintain compatibility with existing clients.
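As a rough sketch of what such a transformation can involve (the field mapping below is an assumption, not the project's actual `transformTurboInstructResponse`):

```typescript
// Converts a text-completion style response into a chat-completion shape so
// existing chat clients can consume it. Simplified for illustration.
function textToChatCompletion(textResponse: any) {
  return {
    id: textResponse.id,
    object: "chat.completion",
    created: textResponse.created,
    model: textResponse.model,
    choices: (textResponse.choices ?? []).map((c: any) => ({
      index: c.index,
      finish_reason: c.finish_reason,
      message: { role: "assistant", content: c.text },
    })),
    usage: textResponse.usage,
  };
}
```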
## Key Management
### Key Pool System
The project uses a sophisticated key pool system (`src/shared/key-management/key-pool.ts`) to manage API keys for different providers. Key features include:
* **Key Selection:** The system selects the appropriate key based on model family, region preferences, and other criteria.
* **Rotation:** Keys are rotated to distribute usage and avoid hitting rate limits.
* **Health Checks:** Keys are checked periodically to ensure they're still valid and within rate limits.
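A heavily simplified sketch of what key selection along those lines can look like (the field names and strategy here are assumptions, not the pool's actual implementation):

```typescript
// Illustrative key shape; the real pool tracks much more state per key.
interface ProviderKey {
  hash: string;
  isDisabled: boolean;
  rateLimitedUntil: number; // epoch ms
  lastUsed: number;         // epoch ms
  modelFamilies: string[];
}

function selectKey(keys: ProviderKey[], family: string): ProviderKey | undefined {
  const now = Date.now();
  return keys
    .filter((k) => !k.isDisabled && k.rateLimitedUntil <= now && k.modelFamilies.includes(family))
    .sort((a, b) => a.lastUsed - b.lastUsed)[0]; // least-recently-used first
}
```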
### Provider-Specific Key Management
Each provider has its own key management module in `src/shared/key-management/`:
* **Key Checkers:** Each provider implements key checkers to validate keys and check their status.
* **Token Counters:** Providers implement token counting logic specific to their pricing model.
* **Models Support:** Keys are associated with specific model families they support.
When adding a new model or provider, you'll need to update or create the appropriate key management files.
### Key Rotation and Health Checks
The key pool system includes logic for:
* **Rotation Strategy:** Keys are selected based on a prioritization strategy (`prioritize-keys.ts`).
* **Disabling Unhealthy Keys:** Keys that fail health checks are temporarily disabled.
* **Rate Limit Awareness:** The system tracks usage to avoid hitting provider rate limits.
## Data Management
### Database (`src/shared/database/`)
* Likely uses Prisma or a similar ORM.
* Defines database schemas (e.g., for users, API keys, usage logs).
* Provides functions for interacting with the database.
* Configuration is managed in `src/config.ts`.
### File Storage (`src/shared/file-storage/`)
* May be used for storing logs, cached data, or user-uploaded files.
* Could integrate with local storage or cloud providers (e.g., S3, GCS).
## Authentication & Authorization
* **User Auth:** Handled in `src/user/` potentially using sessions (`src/shared/with-session.ts`) or JWTs.
* **Proxy Auth:** The `gatekeeper.ts` middleware likely verifies incoming requests to the proxy endpoints. This could involve checking:
* Custom API keys stored in the database (`src/shared/database/`).
* Specific tokens (`check-risu-token.ts`).
* HMAC signatures (`src/shared/hmac-signing.ts`).
* Origin checks (`check-origin.ts`).
* **Downstream Auth:** Each provider module (`src/proxy/*.ts`) handles authentication with the actual AI service API using keys from the configuration.
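Of the proxy-auth checks listed above, the HMAC signature verification is the most generic; a minimal sketch using Node's `crypto` module is shown below (the message format and key handling in `src/shared/hmac-signing.ts` may differ):

```typescript
import crypto from "crypto";

// Generic constant-time HMAC-SHA256 verification sketch.
function verifyHmac(message: string, signatureHex: string, secret: string): boolean {
  const expected = crypto.createHmac("sha256", secret).update(message).digest();
  const received = Buffer.from(signatureHex, "hex");
  return expected.length === received.length && crypto.timingSafeEqual(expected, received);
}
```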
## Logging & Monitoring
* **Logging:** Configured in `src/logger.ts`, likely using a library like `pino` or `winston`. Logs requests, errors, and important events.
* **Prompt Logging:** Specific logic for logging prompts and responses might exist in `src/shared/prompt-logging/`.
* **Stats/Monitoring:** `src/shared/stats.ts` might handle collecting and exposing application metrics.
## Deployment
* **Docker:** The project likely includes Docker configuration for containerized deployment.
* **Render:** The `render.yaml` file suggests the project is or can be deployed on Render.
* **Environment Variables:** The `.env.example` file provides a template for required environment variables in production.
## Contributing
When contributing to this project:
1. **Follow Coding Standards:** Use the established patterns and standards in the codebase. The `.prettierrc` file defines code formatting rules.
2. **Update Documentation:** Keep this guide updated when adding new components or changing existing ones.
3. **Add Tests:** Ensure your changes are tested appropriately.
4. **Update Configuration:** If your changes require new environment variables, update `.env.example`.
*This guide provides a high-level overview. For detailed information, refer to the specific source code files.*
+36 -38
View File
@@ -1,20 +1,16 @@
# OAI Reverse Proxy - just a shitty fork
# OAI Reverse Proxy
Reverse proxy server for various LLM APIs.
### Table of Contents
<!-- TOC -->
* [OAI Reverse Proxy](#oai-reverse-proxy)
* [Table of Contents](#table-of-contents)
* [What is this?](#what-is-this)
* [Features](#features)
* [Usage Instructions](#usage-instructions)
* [Personal Use (single-user)](#personal-use-single-user)
* [Updating](#updating)
* [Local Development](#local-development)
* [Self-hosting](#self-hosting)
* [Building](#building)
* [Forking](#forking)
<!-- TOC -->
- [What is this?](#what-is-this)
- [Features](#features)
- [Usage Instructions](#usage-instructions)
- [Self-hosting](#self-hosting)
- [Alternatives](#alternatives)
- [Huggingface (outdated, not advised)](#huggingface-outdated-not-advised)
- [Render (outdated, not advised)](#render-outdated-not-advised)
- [Local Development](#local-development)
## What is this?
This project allows you to run a reverse proxy server for various LLM APIs.
@@ -23,8 +19,7 @@ This project allows you to run a reverse proxy server for various LLM APIs.
- [x] Support for multiple APIs
- [x] [OpenAI](https://openai.com/)
- [x] [Anthropic](https://www.anthropic.com/)
- [x] [AWS Bedrock](https://aws.amazon.com/bedrock/) (Claude4 is fucked, dont care)
- [x] [Vertex AI (GCP)](https://cloud.google.com/vertex-ai/)
- [x] [AWS Bedrock](https://aws.amazon.com/bedrock/)
- [x] [Google MakerSuite/Gemini API](https://ai.google.dev/)
- [x] [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
- [x] Translation from OpenAI-formatted prompts to any other API, including streaming responses
@@ -33,40 +28,43 @@ This project allows you to run a reverse proxy server for various LLM APIs.
- [x] Simple role-based permissions
- [x] Per-model token quotas
- [x] Temporary user accounts
- [x] Event audit logging
- [x] Optional full logging of prompts and completions
- [x] Prompt and completion logging
- [x] Abuse detection and prevention
- [x] IP address and user token model invocation rate limits
- [x] IP blacklists
- [x] Proof-of-work challenge for access by anonymous users
---
## Usage Instructions
If you'd like to run your own instance of this server, you'll need to deploy it somewhere and configure it with your API keys. A few easy options are provided below, though you can also deploy it to any other service you'd like if you know what you're doing and the service supports Node.js.
### Personal Use (single-user)
If you just want to run the proxy server to use yourself without hosting it for others:
1. Install [Node.js](https://nodejs.org/en/download/) >= 18.0.0
2. Clone this repository
3. Create a `.env` file in the root of the project and add your API keys. See the [.env.example](./.env.example) file for an example.
4. Install dependencies with `npm install`
5. Run `npm run build`
6. Run `npm start`
#### Updating
You must re-run `npm install` and `npm run build` whenever you pull new changes from the repository.
#### Local Development
Use `npm run start:dev` to run the proxy in development mode with watch mode enabled. Use `npm run type-check` to run the type checker across the project.
### Self-hosting
[See here for instructions on how to self-host the application on your own VPS or local machine and expose it to the internet for others to use.](./docs/self-hosting.md)
[See here for instructions on how to self-host the application on your own VPS or local machine.](./docs/self-hosting.md)
**Ensure you set the `TRUSTED_PROXIES` environment variable according to your deployment.** Refer to [.env.example](./.env.example) and [config.ts](./src/config.ts) for more information.
### Alternatives
Fiz and Sekrit are working on some alternative ways to deploy this conveniently. While I'm not involved in this effort beyond providing technical advice regarding my code, I'll link to their work here for convenience: [Sekrit's rentry](https://rentry.org/sekrit)
### Huggingface (outdated, not advised)
[See here for instructions on how to deploy to a Huggingface Space.](./docs/deploy-huggingface.md)
### Render (outdated, not advised)
[See here for instructions on how to deploy to Render.com.](./docs/deploy-render.md)
## Local Development
To run the proxy locally for development or testing, install Node.js >= 18.0.0 and follow the steps below.
1. Clone the repo
2. Install dependencies with `npm install`
3. Create a `.env` file in the root of the project and add your API keys. See the [.env.example](./.env.example) file for an example.
4. Start the server in development mode with `npm run start:dev`.
You can also use `npm run start:dev:tsc` to enable project-wide type checking at the cost of slower startup times. `npm run type-check` can be used to run type checking without starting the server.
## Building
To build the project, run `npm run build`. This will compile the TypeScript code to JavaScript and output it to the `build` directory. You should run this whenever you pull new changes from the repository.
To build the project, run `npm run build`. This will compile the TypeScript code to JavaScript and output it to the `build` directory.
Note that if you are trying to build the server on a very memory-constrained (<= 1GB) VPS, you may need to run the build with `NODE_OPTIONS=--max_old_space_size=2048 npm run build` to avoid running out of memory during the build process, assuming you have swap enabled. The application itself should run fine on a 512MB VPS for most reasonable traffic levels.
## Forking
If you are forking the repository on GitGud, you may wish to disable GitLab CI/CD or you will be spammed with emails about failed builds due to not having any CI runners. You can do this by going to *Settings > General > Visibility, project features, permissions* and then disabling the "CI/CD" feature.
+2 -1
View File
@@ -17,8 +17,9 @@ ARG GREETING_URL
RUN if [ -n "$GREETING_URL" ]; then \
curl -sL "$GREETING_URL" > greeting.md; \
fi
COPY . .
COPY package*.json greeting.md* ./
RUN npm install
COPY . .
RUN npm run build
RUN --mount=type=secret,id=_env,dst=/etc/secrets/.env cat /etc/secrets/.env >> .env
EXPOSE 10000
+1 -1
View File
@@ -1,6 +1,6 @@
# Deploy to Render.com
**⚠️ This method is no longer supported or recommended and may not work. Please use the [self-hosting instructions](./self-hosting.md) instead.**
**⚠️ This method is no longer recommended. Please use the [self-hosting instructions](./self-hosting.md) instead.**
Render.com offers a free tier that includes 750 hours of compute time per month. This is enough to run a single proxy instance 24/7. Instances shut down after 15 minutes without traffic but start up again automatically when a request is received. You can use something like https://app.checklyhq.com/ to ping your proxy every 15 minutes to keep it alive.
-35
View File
@@ -1,35 +0,0 @@
# Configuring the proxy for Vertex AI (GCP)
The proxy supports GCP models via the `/proxy/gcp/claude` endpoint. There are a few extra steps necessary to use GCP compared to the other supported APIs.
- [Setting keys](#setting-keys)
- [Setup Vertex AI](#setup-vertex-ai)
- [Supported model IDs](#supported-model-ids)
## Setting keys
Use the `GCP_CREDENTIALS` environment variable to set the GCP API keys.
Like other APIs, you can provide multiple keys separated by commas. Each GCP key, however, is a set of credentials including the project id, client email, region and private key. These are separated by a colon (`:`).
For example:
```
GCP_CREDENTIALS=my-first-project:xxx@yyy.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----,my-first-project2:xxx2@yyy.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----
```
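As a sketch of how these colon-separated credential sets could be split apart (an illustration, not the proxy's actual parsing code):

```typescript
interface GcpCredential {
  projectId: string;
  clientEmail: string;
  region: string;
  privateKey: string;
}

// Splits GCP_CREDENTIALS into individual credential sets. Each set has four
// colon-separated fields; the private key itself contains no colons.
function parseGcpCredentials(raw: string): GcpCredential[] {
  return raw.split(",").map((entry) => {
    const [projectId, clientEmail, region, privateKey] = entry.split(":");
    return { projectId, clientEmail, region, privateKey };
  });
}
```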
## Setup Vertex AI
1. Go to [https://cloud.google.com/vertex-ai](https://cloud.google.com/vertex-ai) and sign up for a GCP account. ($150 free credits without credit card or $300 free credits with credit card, credits expire in 90 days)
2. Go to [https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com](https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com) to enable Vertex AI API.
3. Go to [https://console.cloud.google.com/vertex-ai](https://console.cloud.google.com/vertex-ai) and navigate to Model Garden to apply for access to the Claude models.
4. Create a [Service Account](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts/create?walkthrough_id=iam--create-service-account#step_index=1) , and make sure to grant the role of "Vertex AI User" or "Vertex AI Administrator".
5. On the service account page you just created, create a new key and select "JSON". The JSON file will be downloaded automatically.
6. The required credential is in the JSON file you just downloaded.
## Supported model IDs
Users can send these model IDs to the proxy to invoke the corresponding models.
- **Claude**
- `claude-3-haiku@20240307`
- `claude-3-sonnet@20240229`
- `claude-3-opus@20240229`
- `claude-3-5-sonnet@20240620`
+1 -1
View File
@@ -129,7 +129,7 @@ also significantly reduce hash rates on mobile devices.
- Intel Core i9-13900K (Chrome, in VM limited to 4 cores): 12.2 - 13.0 H/s
- iPad Pro (M2) (Safari, 6 workers): 8.0 - 10 H/s
- Thermal throttles early. 8 cores is normal concurrency, but unstable.
- iPhone 15 Pro Max (Safari): 4.0 - 4.6 H/s
- iPhone 13 Pro (Safari): 4.0 - 4.6 H/s
- Samsung Galaxy S10e (Chrome): 3.6 - 3.8 H/s
- This is a 2019 phone almost matching an iPhone five years newer because of
bad Safari performance.
-12
View File
@@ -12,7 +12,6 @@ Several of these features require you to set secrets in your environment. If usi
- [Memory](#memory)
- [Firebase Realtime Database](#firebase-realtime-database)
- [Firebase setup instructions](#firebase-setup-instructions)
- [SQLite Database](#sqlite-database)
- [Whitelisting admin IP addresses](#whitelisting-admin-ip-addresses)
## No user management (`GATEKEEPER=none`)
@@ -64,17 +63,6 @@ To use Firebase Realtime Database to persist user data, set the following enviro
The proxy server will attempt to connect to your Firebase Realtime Database at startup and will throw an error if it cannot connect. If you see this error, check that your `FIREBASE_RTDB_URL` and `FIREBASE_KEY` secrets are set correctly.
### SQLite Database
To use a local SQLite database file to persist user data, set the following environment variables:
- `GATEKEEPER_STORE`: Set this to `sqlite`.
- `SQLITE_USER_STORE_PATH` (Optional): Specifies the path to the SQLite database file.
- If not set, it defaults to `data/user-store.sqlite` within the project directory.
- Ensure that the directory where the SQLite file will be created (e.g., the `data/` directory) is writable by the application process.
Using SQLite provides a simple way to persist user data locally without relying on external services. User data will be saved to the specified file and will be available across server restarts.
## Whitelisting admin IP addresses
You can add your own IP ranges to the `ADMIN_WHITELIST` environment variable for additional security.
+425 -1510
View File
File diff suppressed because it is too large
+12 -19
View File
@@ -5,11 +5,10 @@
"scripts": {
"build": "tsc && copyfiles -u 1 src/**/*.ejs build",
"database:migrate": "ts-node scripts/migrate.ts",
"postinstall": "patch-package",
"prepare": "husky install",
"start": "node --trace-deprecation --trace-warnings build/server.js",
"start": "node build/server.js",
"start:dev": "nodemon --watch src --exec ts-node --transpile-only src/server.ts",
"start:debug": "ts-node --inspect --transpile-only src/server.ts",
"start:replit": "tsc && node build/server.js",
"start:watch": "nodemon --require source-map-support/register build/server.js",
"type-check": "tsc --noEmit"
},
@@ -21,14 +20,14 @@
"dependencies": {
"@anthropic-ai/tokenizer": "^0.0.4",
"@aws-crypto/sha256-js": "^5.2.0",
"@huggingface/jinja": "^0.3.0",
"@node-rs/argon2": "^1.8.3",
"@smithy/eventstream-codec": "^2.1.3",
"@smithy/eventstream-serde-node": "^2.1.3",
"@smithy/protocol-http": "^3.2.1",
"@smithy/signature-v4": "^2.1.3",
"@smithy/types": "^2.10.1",
"@smithy/util-utf8": "^2.1.1",
"axios": "^1.7.4",
"axios": "^1.3.5",
"better-sqlite3": "^10.0.0",
"check-disk-space": "^3.4.0",
"cookie-parser": "^1.4.6",
@@ -37,35 +36,30 @@
"csrf-csrf": "^2.3.0",
"dotenv": "^16.3.1",
"ejs": "^3.1.10",
"express": "^4.19.3",
"express": "^4.18.2",
"express-session": "^1.17.3",
"firebase-admin": "^12.5.0",
"firebase-admin": "^12.1.0",
"glob": "^10.3.12",
"googleapis": "^122.0.0",
"http-proxy": "1.18.1",
"http-proxy-middleware": "^3.0.2",
"http-proxy-middleware": "^3.0.0-beta.1",
"ipaddr.js": "^2.1.0",
"memorystore": "^1.6.7",
"multer": "^1.4.5-lts.1",
"node-schedule": "^2.1.1",
"patch-package": "^8.0.0",
"pino": "^8.11.0",
"pino-http": "^8.3.3",
"proxy-agent": "^6.4.0",
"sanitize-html": "^2.13.0",
"sharp": "^0.34.2",
"sanitize-html": "2.12.1",
"sharp": "^0.32.6",
"showdown": "^2.1.0",
"source-map-support": "^0.5.21",
"stream-json": "^1.8.0",
"tiktoken": "^1.0.10",
"tinyws": "^0.1.0",
"uuid": "^9.0.0",
"zlib": "^1.0.5",
"zod": "^3.22.3",
"zod-error": "^1.5.0"
},
"devDependencies": {
"@smithy/types": "^3.3.0",
"@types/better-sqlite3": "^7.6.10",
"@types/cookie-parser": "^1.4.3",
"@types/cors": "^2.8.13",
@@ -78,7 +72,7 @@
"@types/stream-json": "^1.7.7",
"@types/uuid": "^9.0.1",
"concurrently": "^8.0.1",
"esbuild": "^0.25.5",
"esbuild": "^0.17.16",
"esbuild-register": "^3.4.2",
"husky": "^8.0.3",
"nodemon": "^3.0.1",
@@ -89,8 +83,7 @@
"typescript": "^5.4.2"
},
"overrides": {
"node-fetch@2.x": {
"whatwg-url": "14.x"
}
"postcss": "^8.4.31",
"follow-redirects": "^1.15.4"
}
}
-23
View File
@@ -1,23 +0,0 @@
# Patches
Contains monkey patches for certain packages, applied using `patch-package`.
## `http-proxy+1.18.1.patch`
Modifies the `http-proxy` package to work around an incompatibility with
body-parser and SOCKS5 proxies due to some esoteric stream handling behavior
when `socks-proxy-agent` is used instead of a generic http.Agent.
Modification involves adjusting the `buffer` property on ProxyServer's `options`
object to be a function that returns a stream instead of a stream itself. This
allows us to give it a function which produces a new Readable from the already-
parsed request body.
With the old implementation we would need to create an entirely new ProxyServer
instance for each request, which is not ideal under heavy load.
`http-proxy` hasn't been updated in six years so it's unlikely that this patch
will be broken by future updates, but it's still pinned to 1.18.1 for now.
### See also
https://github.com/chimurai/http-proxy-middleware/issues/40
https://github.com/chimurai/http-proxy-middleware/issues/299
https://github.com/http-party/node-http-proxy/pull/1027
-13
View File
@@ -1,13 +0,0 @@
diff --git a/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js b/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
index 7ae7355..c825c27 100644
--- a/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
+++ b/node_modules/http-proxy/lib/http-proxy/passes/web-incoming.js
@@ -167,7 +167,7 @@ module.exports = {
}
}
- (options.buffer || req).pipe(proxyReq);
+ (options.buffer(req) || req).pipe(proxyReq);
proxyReq.on('response', function(proxyRes) {
if(server) { server.emit('proxyRes', proxyRes, req, res); }
+2 -1
View File
@@ -30,6 +30,7 @@ self.onmessage = async (event) => {
nonce = data.nonce;
const c = data.challenge;
// decode salt to Uint8Array
const salt = new Uint8Array(c.s.length / 2);
for (let i = 0; i < c.s.length; i += 2) {
salt[i / 2] = parseInt(c.s.slice(i, i + 2), 16);
@@ -98,7 +99,7 @@ const solve = async () => {
self.postMessage({ type: "solved", nonce: solution.nonce });
active = false;
} else {
if (Date.now() - lastNotify >= 500) {
if (Date.now() - lastNotify > 1000) {
console.log("Last nonce", nonce, "Hashes", hashesSinceLastNotify);
self.postMessage({ type: "progress", hashes: hashesSinceLastNotify });
lastNotify = Date.now();
-33
View File
@@ -230,39 +230,6 @@ Content-Type: application/json
]
}
###
# @name Proxy / GCP Claude -- Native Completion
POST {{proxy-host}}/proxy/gcp/claude/v1/complete
Authorization: Bearer {{proxy-key}}
anthropic-version: 2023-01-01
Content-Type: application/json
{
"model": "claude-v2",
"max_tokens_to_sample": 10,
"temperature": 0,
"stream": true,
"prompt": "What is genshin impact\n\n:Assistant:"
}
###
# @name Proxy / GCP Claude -- OpenAI-to-Anthropic API Translation
POST {{proxy-host}}/proxy/gcp/claude/chat/completions
Authorization: Bearer {{proxy-key}}
Content-Type: application/json
{
"model": "gpt-3.5-turbo",
"max_tokens": 50,
"stream": true,
"messages": [
{
"role": "user",
"content": "What is genshin impact?"
}
]
}
###
# @name Proxy / Azure OpenAI -- Native Chat Completions
POST {{proxy-host}}/proxy/azure/openai/chat/completions
-2
View File
@@ -51,8 +51,6 @@ function getRandomModelFamily() {
"mistral-large",
"aws-claude",
"aws-claude-opus",
"gcp-claude",
"gcp-claude-opus",
"azure-turbo",
"azure-gpt4",
"azure-gpt4-32k",
-118
View File
@@ -1,118 +0,0 @@
// uses the aws sdk to sign a request, then uses axios to send it to the bedrock REST API manually
import axios from "axios";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID!;
const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY!;
// Copied from amazon bedrock docs
// List models
// ListFoundationModels
// Service: Amazon Bedrock
// List of Bedrock foundation models that you can use. For more information, see Foundation models in the
// Bedrock User Guide.
// Request Syntax
// GET /foundation-models?
// byCustomizationType=byCustomizationType&byInferenceType=byInferenceType&byOutputModality=byOutputModality&byProvider=byProvider
// HTTP/1.1
// URI Request Parameters
// The request uses the following URI parameters.
// byCustomizationType (p. 38)
// List by customization type.
// Valid Values: FINE_TUNING
// byInferenceType (p. 38)
// List by inference type.
// Valid Values: ON_DEMAND | PROVISIONED
// byOutputModality (p. 38)
// List by output modality type.
// Valid Values: TEXT | IMAGE | EMBEDDING
// byProvider (p. 38)
// A Bedrock model provider.
// Pattern: ^[a-z0-9-]{1,63}$
// Request Body
// The request does not have a request body
// Run inference on a text model
// Send an invoke request to run inference on a Titan Text G1 - Express model. We set the accept
// parameter to accept any content type in the response.
// POST https://bedrock.us-east-1.amazonaws.com/model/amazon.titan-text-express-v1/invoke
// -H accept: */*
// -H content-type: application/json
// Payload
// {"inputText": "Hello world"}
// Example response
// Response for the above request.
// -H content-type: application/json
// Payload
// <the model response>
const AMZ_REGION = "us-east-1";
const AMZ_HOST = "invoke-bedrock.us-east-1.amazonaws.com";
async function listModels() {
const httpRequest = new HttpRequest({
method: "GET",
protocol: "https:",
hostname: AMZ_HOST,
path: "/foundation-models",
headers: { ["Host"]: AMZ_HOST },
});
const signedRequest = await signRequest(httpRequest);
const response = await axios.get(
`https://${signedRequest.hostname}${signedRequest.path}`,
{ headers: signedRequest.headers }
);
console.log(response.data);
}
async function invokeModel() {
const model = "anthropic.claude-v1";
const httpRequest = new HttpRequest({
method: "POST",
protocol: "https:",
hostname: AMZ_HOST,
path: `/model/${model}/invoke`,
headers: {
["Host"]: AMZ_HOST,
["accept"]: "*/*",
["content-type"]: "application/json",
},
body: JSON.stringify({
temperature: 0.5,
prompt: "\n\nHuman:Hello world\n\nAssistant:",
max_tokens_to_sample: 10,
}),
});
console.log("httpRequest", httpRequest);
const signedRequest = await signRequest(httpRequest);
const response = await axios.post(
`https://${signedRequest.hostname}${signedRequest.path}`,
signedRequest.body,
{ headers: signedRequest.headers }
);
console.log(response.status);
console.log(response.headers);
console.log(response.data);
console.log("full url", response.request.res.responseUrl);
}
async function signRequest(request: HttpRequest) {
const signer = new SignatureV4({
sha256: Sha256,
credentials: {
accessKeyId: AWS_ACCESS_KEY_ID,
secretAccessKey: AWS_SECRET_ACCESS_KEY,
},
region: AMZ_REGION,
service: "bedrock",
});
return await signer.sign(request, { signingDate: new Date() });
}
// listModels();
// invokeModel();
-53
View File
@@ -1,53 +0,0 @@
const axios = require("axios");
function randomInteger(max) {
return Math.floor(Math.random() * max + 1);
}
async function testQueue() {
const requests = Array(10).fill(undefined).map(async function() {
const maxTokens = randomInteger(2000);
const headers = {
"Authorization": "Bearer test",
"Content-Type": "application/json",
"X-Forwarded-For": `${randomInteger(255)}.${randomInteger(255)}.${randomInteger(255)}.${randomInteger(255)}`,
};
const payload = {
model: "gpt-4o-mini-2024-07-18",
max_tokens: 20 + maxTokens,
stream: false,
messages: [{role: "user", content: "You are being benchmarked regarding your reliability at outputting exact, machine-comprehensible data. Output the sentence \"The quick brown fox jumps over the lazy dog.\" Do not precede it with quotemarks or any form of preamble, and do not output anything after the sentence."}],
temperature: 0,
};
try {
const response = await axios.post(
"http://localhost:7860/proxy/openai/v1/chat/completions",
payload,
{ headers }
);
if (response.status !== 200) {
console.error(`Request ${maxTokens} finished with status code ${response.status} and response`, response.data);
return;
}
const content = response.data.choices[0].message.content;
console.log(
`Request ${maxTokens} `,
content === "The quick brown fox jumps over the lazy dog." ? "OK" : `mangled: ${content}`
);
} catch (error) {
const msg = error.response;
console.error(`Error in req ${maxTokens}:`, error.message, msg || "");
}
});
await Promise.all(requests);
console.log("All requests finished");
}
testQueue();
-38
View File
@@ -13,7 +13,6 @@ import { eventsApiRouter } from "./api/events";
import { usersApiRouter } from "./api/users";
import { usersWebRouter as webRouter } from "./web/manage";
import { logger } from "../logger";
import { keyPool } from "../shared/key-management";
const adminRouter = Router();
@@ -37,43 +36,6 @@ adminRouter.use(injectCsrfToken);
adminRouter.use("/users", authorize({ via: "header" }), usersApiRouter);
adminRouter.use("/events", authorize({ via: "header" }), eventsApiRouter);
// Special endpoint to validate organization verification status for all OpenAI keys
// This checks both gpt-image-1 and o3 streaming access which require verified organizations
adminRouter.post("/validate-gpt-image-keys", authorize({ via: "header" }), async (req, res) => {
try {
logger.info("Manual validation of organization verification status initiated");
// Use the specialized validation function that tests each key's organization verification
// status using o3 streaming and waits for the results
const results = await keyPool.validateGptImageAccess();
logger.info({
total: results.total,
verified: results.verified.length,
removed: results.removed.length,
errors: results.errors.length
}, "Manual organization verification check completed");
return res.json({
success: true,
message: "Organization verification check completed",
results: {
total: results.total,
verified: results.verified.length,
removed: results.removed.length,
errors: results.errors.length,
// Only include hashes, not full keys
verified_keys: results.verified,
removed_keys: results.removed,
error_details: results.errors
}
});
} catch (error) {
logger.error({ error }, "Error validating organization verification status for OpenAI keys");
return res.status(500).json({ error: "Failed to validate keys", details: error.message });
}
});
adminRouter.use(checkCsrfToken);
adminRouter.use(injectLocals);
adminRouter.use("/", loginRouter);
+23 -95
View File
@@ -17,7 +17,7 @@ import {
} from "../../shared/users/schema";
import { getLastNImages } from "../../shared/file-storage/image-history";
import { blacklists, parseCidrs, whitelists } from "../../shared/cidr";
import { invalidatePowChallenges } from "../../user/web/pow-captcha";
import { invalidatePowHmacKey } from "../../user/web/pow-captcha";
const router = Router();
@@ -132,11 +132,10 @@ router.post("/create-user", (req, res) => {
)
.transform((data: any) => {
const expiresAt = Date.now() + data.temporaryUserDuration * 60 * 1000;
const tokenLimits = MODEL_FAMILIES.reduce((limits, modelFamily) => {
const quotaValue = data[`temporaryUserQuota_${modelFamily}`];
limits[modelFamily] = typeof quotaValue === 'number' ? quotaValue : 0;
const tokenLimits = MODEL_FAMILIES.reduce((limits, model) => {
limits[model] = data[`temporaryUserQuota_${model}`];
return limits;
}, {} as any);
}, {} as UserTokenCounts);
return { ...data, expiresAt, tokenLimits };
});
@@ -190,70 +189,7 @@ router.post("/import-users", upload.single("users"), (req, res) => {
if (!req.file) throw new HttpError(400, "No file uploaded");
const data = JSON.parse(req.file.buffer.toString());
// Transform old token count format to new format
const transformedUsers = data.users.map((user: any) => {
if (user.tokenCounts) {
const transformedTokenCounts: any = {};
for (const [family, value] of Object.entries(user.tokenCounts)) {
if (typeof value === 'number') {
// Old format: just a number (legacy_total)
transformedTokenCounts[family] = {
input: 0,
output: 0,
legacy_total: value
};
} else if (typeof value === 'object' && value !== null) {
// New format or partially new format
const transformedCounts: { input: number; output: number; legacy_total?: number } = {
input: (value as any).input || 0,
output: (value as any).output || 0
};
if ((value as any).legacy_total !== undefined) {
transformedCounts.legacy_total = (value as any).legacy_total;
}
transformedTokenCounts[family] = transformedCounts;
}
}
user.tokenCounts = transformedTokenCounts;
}
// Handle tokenLimits - should be flat numbers
if (user.tokenLimits) {
const transformedTokenLimits: any = {};
for (const [family, value] of Object.entries(user.tokenLimits)) {
if (typeof value === 'number') {
// Already in correct format
transformedTokenLimits[family] = value;
} else if (typeof value === 'object' && value !== null) {
// Old format with input/output/legacy_total - sum them up
const val = value as any;
transformedTokenLimits[family] = (val.input ?? 0) + (val.output ?? 0) + (val.legacy_total ?? 0);
}
}
user.tokenLimits = transformedTokenLimits;
}
// Handle tokenRefresh - should be flat numbers
if (user.tokenRefresh) {
const transformedTokenRefresh: any = {};
for (const [family, value] of Object.entries(user.tokenRefresh)) {
if (typeof value === 'number') {
// Already in correct format
transformedTokenRefresh[family] = value;
} else if (typeof value === 'object' && value !== null) {
// Old format with input/output/legacy_total - sum them up
const val = value as any;
transformedTokenRefresh[family] = (val.input ?? 0) + (val.output ?? 0) + (val.legacy_total ?? 0);
}
}
user.tokenRefresh = transformedTokenRefresh;
}
return user;
});
const result = z.array(UserPartialSchema).safeParse(transformedUsers);
const result = z.array(UserPartialSchema).safeParse(data.users);
if (!result.success) throw new HttpError(400, result.error.toString());
const upserts = result.data.map((user) => userStore.upsertUser(user));
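For context, a minimal sketch of the two user-import shapes that the transformation being removed above converts between. The field names come from that snippet; the token value and the concrete numbers are purely illustrative:
// Legacy import shape: flat numbers per model family (illustrative values).
const legacyUser = {
  token: "example-user-token",
  tokenCounts: { turbo: 1500, gpt4: 0 },
  tokenLimits: { turbo: 100000, gpt4: 50000 },
};
// Shape produced by the removed transformation: per-family objects for
// tokenCounts, while tokenLimits/tokenRefresh stay (or are summed back to)
// flat numbers.
const transformedUser = {
  token: "example-user-token",
  tokenCounts: {
    turbo: { input: 0, output: 0, legacy_total: 1500 },
    gpt4: { input: 0, output: 0, legacy_total: 0 },
  },
  tokenLimits: { turbo: 100000, gpt4: 50000 },
};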
@@ -332,14 +268,7 @@ router.post("/maintenance", (req, res) => {
let flash = { type: "", message: "" };
switch (action) {
case "recheck": {
const checkable: LLMService[] = [
"openai",
"anthropic",
"aws",
"gcp",
"azure",
"google-ai"
];
const checkable: LLMService[] = ["openai", "anthropic", "aws", "azure"];
checkable.forEach((s) => keyPool.recheck(s));
const keyCount = keyPool
.list()
@@ -388,7 +317,7 @@ router.post("/maintenance", (req, res) => {
user.disabledReason = "Admin forced expiration.";
userStore.upsertUser(user);
});
invalidatePowChallenges();
invalidatePowHmacKey();
flash.type = "success";
flash.message = `${temps.length} temporary users marked for expiration.`;
break;
@@ -409,20 +338,24 @@ router.post("/maintenance", (req, res) => {
case "setDifficulty": {
const selected = req.body["pow-difficulty"];
const valid = ["low", "medium", "high", "extreme"];
const isNumber = Number.isInteger(Number(selected));
if (!selected || !valid.includes(selected) && !isNumber) {
throw new HttpError(400, "Invalid difficulty " + selected);
if (!selected || !valid.includes(selected)) {
throw new HttpError(400, "Invalid difficulty" + selected);
}
config.powDifficultyLevel = isNumber ? Number(selected) : selected;
invalidatePowChallenges();
config.powDifficultyLevel = selected;
break;
}
case "generateTempIpReport": {
const tempUsers = userStore
.getUsers()
.filter((u) => u.type === "temporary");
const ipv4RangeMap = new Map<string, Set<string>>();
const ipv6RangeMap = new Map<string, Set<string>>();
const ipv4RangeMap: Map<string, Set<string>> = new Map<
string,
Set<string>
>();
const ipv6RangeMap: Map<string, Set<string>> = new Map<
string,
Set<string>
>();
tempUsers.forEach((u) => {
u.ip.forEach((ip) => {
@@ -432,14 +365,14 @@ router.post("/maintenance", (req, res) => {
const subnet =
parsed.toNormalizedString().split(".").slice(0, 3).join(".") +
".0/24";
const userSet = ipv4RangeMap.get(subnet) || new Set();
const userSet = ipv4RangeMap.get(subnet) || new Set<string>();
userSet.add(u.token);
ipv4RangeMap.set(subnet, userSet);
} else if (parsed.kind() === "ipv6") {
const subnet =
parsed.toNormalizedString().split(":").slice(0, 4).join(":") +
"::/48";
const userSet = ipv6RangeMap.get(subnet) || new Set();
const userSet = ipv6RangeMap.get(subnet) || new Set<string>();
userSet.add(u.token);
ipv6RangeMap.set(subnet, userSet);
}
@@ -611,14 +544,9 @@ router.post("/generate-stats", (req, res) => {
function getSumsForUser(user: User) {
const sums = MODEL_FAMILIES.reduce(
(s, model) => {
const counts = user.tokenCounts[model] ?? { input: 0, output: 0 };
// Ensure inputTokens and outputTokens are numbers, defaulting to 0 if NaN or undefined
const inputTokens = Number(counts.input) || 0;
const outputTokens = Number(counts.output) || 0;
// We could also consider legacy_total here if input and output are 0
// For now, sumTokens and sumCost will be based on current input/output.
s.sumTokens += inputTokens + outputTokens;
s.sumCost += getTokenCostUsd(model, inputTokens, outputTokens);
const tokens = user.tokenCounts[model] ?? 0;
s.sumTokens += tokens;
s.sumCost += getTokenCostUsd(model, tokens);
return s;
},
{ sumTokens: 0, sumCost: 0, prettyUsage: "" }
+9 -29
View File
@@ -38,20 +38,15 @@
<h3>Difficulty Level</h3>
<div>
<label for="difficulty">Difficulty Level:</label>
<select name="difficulty" id="difficulty" onchange="difficultyChanged(event)">
<span id="currentDifficulty">Current: <%= difficulty %></span>
<select name="difficulty" id="difficulty">
<option value="low">Low</option>
<option value="medium">Medium</option>
<option value="high">High</option>
<option value="extreme">Extreme</option>
<option value="custom">Custom</option>
</select>
<div id="custom-difficulty-container" style="display: none">
<label for="customDifficulty">Hashes required (average):</label>
<input type="number" id="customDifficulty" value="0" min="1" max="1000000000" />
</div>
<button onclick='doAction("setDifficulty")'>Update Difficulty</button>
</div>
<div><span id="currentDifficulty">Current Difficulty: <%= difficulty %></span></div>
<% } %>
<form id="maintenanceForm" action="/admin/manage/maintenance" method="post">
<input id="_csrf" type="hidden" name="_csrf" value="<%= csrfToken %>" />
@@ -68,14 +63,14 @@
<div>
<h2>IP Whitelists and Blacklists</h2>
<p>
You can specify IP ranges to whitelist or blacklist from accessing the proxy. Entries can be specified as single
addresses or
<a href="https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation">CIDR notation</a>. IPv6 is
supported but not recommended for use with the current version of the proxy.
You can specify IP ranges to whitelist or blacklist from accessing the proxy. Note that changes here are not
persisted across server restarts. If you want to make changes permanent, you can copy the values to your deployment
configuration.
</p>
<p>
<strong>Note:</strong> Changes here are not persisted across server restarts. If you want to make changes permanent,
you can copy the values to your deployment configuration.
Entries can be specified as single addresses or
<a href="https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation">CIDR notation</a>. IPv6 is
supported but not recommended for use with the current version of the proxy.
</p>
<% for (let i = 0; i < whitelists.length; i++) { %>
<%- include("partials/admin-cidr-widget", { list: whitelists[i] }) %>
@@ -104,25 +99,10 @@
</div>
<script>
function difficultyChanged(event) {
const value = event.target.value;
if (value === "custom") {
document.getElementById("custom-difficulty-container").style.display = "block";
} else {
document.getElementById("custom-difficulty-container").style.display = "none";
}
}
function doAction(action) {
document.getElementById("hiddenAction").value = action;
if (action === "setDifficulty") {
const selected = document.getElementById("difficulty").value;
const hiddenDifficulty = document.getElementById("hiddenDifficulty");
if (selected === "custom") {
hiddenDifficulty.value = document.getElementById("customDifficulty").value;
} else {
hiddenDifficulty.value = selected;
}
document.getElementById("hiddenDifficulty").value = document.getElementById("difficulty").value;
}
document.getElementById("maintenanceForm").submit();
}
+5 -11
View File
@@ -18,19 +18,13 @@
</li>
<li>
<code>tokenCounts</code> (optional): the number of tokens the user has
consumed. This should be an object with model family keys (e.g. <code>turbo</code>,
<code>gpt4</code>, <code>claude</code>), each containing an object with
<code>input</code> and <code>output</code> token counts.
consumed. This should be an object with keys <code>turbo</code>,
<code>gpt4</code>, and <code>claude</code>.
</li>
<li>
<code>tokenLimits</code> (optional): the maximum number of tokens the user can
consume. This should be an object with model family keys (e.g. <code>turbo</code>,
<code>gpt4</code>, <code>claude</code>), each containing a single number
representing the total token quota.
</li>
<li>
<code>tokenRefresh</code> (optional): the amount of tokens to refresh when quotas
are reset. Same format as <code>tokenLimits</code>.
<code>tokenLimits</code> (optional): the number of tokens the user can
consume. This should be an object with keys <code>turbo</code>,
<code>gpt4</code>, and <code>claude</code>.
</li>
<li>
<code>createdAt</code> (optional): the timestamp when the user was created
+1 -1
View File
@@ -43,7 +43,7 @@
<legend>Bulk Quota Management</legend>
<p>
<button id="refresh-quotas" type="button" onclick="submitForm('resetQuotas')">Refresh All Quotas</button>
Immediately refreshes all users' quotas by the configured amounts.
Resets all users' quotas to the values set in the <code>TOKEN_QUOTA_*</code> environment variables.
</p>
<p>
<button id="clear-token-counts" type="button" onclick="submitForm('resetCounts')">Clear All Token Counts</button>
+9 -18
View File
@@ -101,10 +101,6 @@
<% ["nickname", "type", "disabledAt", "disabledReason", "maxIps", "adminNote"].forEach(function (key) { %>
<input type="hidden" name="<%- key %>" value="<%- user[key] %>" />
<% }); %>
<!-- tokenRefresh_ keys are dynamically generated -->
<% Object.entries(quota).forEach(([family]) => { %>
<input type="hidden" name="tokenRefresh_<%- family %>" value="<%- user.tokenRefresh[family] || quota[family] %>" />
<% }); %>
</form>
<h3>Quota Information</h3>
@@ -115,7 +111,7 @@
<button type="submit" class="btn btn-primary">Refresh Quotas for User</button>
</form>
<% } %>
<%- include("partials/shared_quota-info", { quota, user, showRefreshEdit: true }) %>
<%- include("partials/shared_quota-info", { quota, user }) %>
<p><a href="/admin/manage/list-users">Back to User List</a></p>
@@ -126,25 +122,18 @@
const token = a.dataset.token;
const field = a.dataset.field;
const existingValue = document.querySelector(`#current-values input[name=${field}]`).value;
let value = prompt(`Enter new value for '${field}':`, existingValue);
let value = prompt(`Enter new value for '${field}'':`, existingValue);
if (value !== null) {
if (value === "") {
value = null;
}
const payload = { _csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content") };
if (field.startsWith("tokenRefresh_")) {
const family = field.slice("tokenRefresh_".length);
payload.tokenRefresh = { [family]: Number(value) };
} else {
payload[field] = value;
}
fetch(`/admin/manage/edit-user/${token}`, {
method: "POST",
credentials: "same-origin",
body: JSON.stringify(payload),
body: JSON.stringify({
[field]: value,
_csrf: document.querySelector("meta[name=csrf-token]").getAttribute("content"),
}),
headers: { "Content-Type": "application/json", Accept: "application/json" },
})
.then((res) => Promise.all([res.ok, res.json()]))
@@ -152,7 +141,9 @@
const url = new URL(window.location.href);
const params = new URLSearchParams();
if (!ok) {
alert(`Failed to edit user: ${json.message}`);
params.set("flash", `error: ${json.error.message}`);
} else {
params.set("flash", `success: User's ${field} updated.`);
}
url.search = params.toString();
window.location.assign(url);
+65 -178
View File
@@ -29,40 +29,10 @@ type Config = {
* same but the APIs are different. Vertex is the GCP product for enterprise.
**/
googleAIKey?: string;
/**
* Comma-delimited list of Google AI experimental model names that are
* allowed to bypass the experimental model block. By default, all models
* containing "exp" are blocked, but specific models listed here will be
* permitted.
*
* @example "gemini-2.0-flash-exp,gemini-exp-1206"
*/
allowedExpModels?: string;
/**
* Comma-delimited list of Mistral AI API keys.
*/
mistralAIKey?: string;
/**
* Comma-delimited list of Deepseek API keys.
*/
deepseekKey?: string;
/**
* Comma-delimited list of Xai (Grok) API keys.
*/
xaiKey?: string;
/**
* Comma-delimited list of Cohere API keys.
*/
cohereKey?: string;
/**
* Comma-delimited list of Qwen API keys.
*/
qwenKey?: string;
/**
* Comma-delimited list of Moonshot API keys.
*/
moonshotKey?: string;
/**
* Comma-delimited list of AWS credentials. Each credential item should be a
* colon-delimited list of access key, secret key, and AWS region.
@@ -75,13 +45,6 @@ type Config = {
* @example `AWS_CREDENTIALS=access_key_1:secret_key_1:us-east-1,access_key_2:secret_key_2:us-west-2`
*/
awsCredentials?: string;
/**
* Comma-delimited list of GCP credentials. Each credential item should be a
* colon-delimited list of access key, secret key, and GCP region.
*
* @example `GCP_CREDENTIALS=project1:1@1.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----,project2:2@2.com:us-east5:-----BEGIN PRIVATE KEY-----xxx-----END PRIVATE KEY-----`
*/
gcpCredentials?: string;
/**
* Comma-delimited list of Azure OpenAI credentials. Each credential item
* should be a colon-delimited list of Azure resource name, deployment ID, and
@@ -103,6 +66,11 @@ type Config = {
* management mode is set to 'user_token'.
*/
adminKey?: string;
/**
* The password required to view the service info/status page. If not set, the
* info page will be publicly accessible.
*/
serviceInfoPassword?: string;
/**
* Which user management mode to use.
* - `none`: No user management. Proxy is open to all requests with basic
@@ -119,14 +87,10 @@ type Config = {
* - `memory`: Users are stored in memory and are lost on restart (default)
* - `firebase_rtdb`: Users are stored in a Firebase Realtime Database;
* requires `firebaseKey` and `firebaseRtdbUrl` to be set.
* - `sqlite`: Users are stored in an SQLite database; requires
* `sqliteUserStorePath` to be set.
*/
gatekeeperStore: "memory" | "firebase_rtdb" | "sqlite";
gatekeeperStore: "memory" | "firebase_rtdb";
/** URL of the Firebase Realtime Database if using the Firebase RTDB store. */
firebaseRtdbUrl?: string;
/** Path to the SQLite database file for storing user data. */
sqliteUserStorePath?: string;
/**
* Base64-encoded Firebase service account key if using the Firebase RTDB
* store. Note that you should encode the *entire* JSON key file, not just the
@@ -385,7 +349,7 @@ type Config = {
*
* Defaults to no services, meaning image prompts are disabled. Use a comma-
* separated list. Available services are:
* openai,anthropic,google-ai,mistral-ai,aws,gcp,azure,xai
* openai,anthropic,google-ai,mistral-ai,aws,azure
*/
allowedVisionServices: LLMService[];
/**
@@ -407,51 +371,6 @@ type Config = {
* Takes precedence over the adminWhitelist.
*/
ipBlacklist: string[];
/**
* If set, pushes requests further back into the queue according to their
* token costs by factor*tokens*milliseconds (or more intuitively
* factor*thousands_of_tokens*seconds).
* Accepts floats.
*/
tokensPunishmentFactor: number;
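Taking the comment's formula at face value, the pushback in milliseconds is simply the factor multiplied by the request's token count; a minimal sketch assuming that reading (the helper name is illustrative and not part of the codebase):
// factor * tokens * 1 ms is the same as factor * thousands_of_tokens * 1 s.
function queuePushbackMs(tokensPunishmentFactor: number, promptTokens: number): number {
  return tokensPunishmentFactor * promptTokens;
}
// e.g. a factor of 2.0 and a 3,000-token request is pushed back by ~6,000 ms (6 s).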
/**
* Configuration for HTTP requests made by the proxy to other servers, such
* as when checking keys or forwarding users' requests to external services.
* If not set, all requests will be made using the default agent.
*
* If set, the proxy may make requests to other servers using the specified
* settings. This is useful if you wish to route users' requests through
* another proxy or VPN, or if you have multiple network interfaces and want
* to use a specific one for outgoing requests.
*/
httpAgent?: {
/**
* The name of the network interface to use. The first external IPv4 address
* belonging to this interface will be used for outgoing requests.
*/
interface?: string;
/**
* The URL of a proxy server to use. Supports SOCKS4, SOCKS5, HTTP, and
* HTTPS. If not set, the proxy will be made using the default agent.
* - SOCKS4: `socks4://some-socks-proxy.com:9050`
* - SOCKS5: `socks5://username:password@some-socks-proxy.com:9050`
* - HTTP: `http://proxy-server-over-tcp.com:3128`
* - HTTPS: `https://proxy-server-over-tls.com:3129`
*
* **Note:** If your proxy server issues a certificate, you may need to set
* `NODE_EXTRA_CA_CERTS` to the path to your certificate, otherwise this
* application will reject TLS connections.
*/
proxyUrl?: string;
};
/** URL for the image on the login page. Defaults to empty string (no image). */
loginImageUrl?: string;
/** Whether to enable the token-based login page for the service info page. Defaults to true. */
enableInfoPageLogin?: boolean;
/** Authentication mode for the service info page. (token | password) */
serviceInfoAuthMode: "token" | "password";
/** Password for the service info page if serviceInfoAuthMode is 'password'. */
serviceInfoPassword?: string;
};
// To change configs, create a file called .env in the root directory.
@@ -461,19 +380,13 @@ export const config: Config = {
bindAddress: getEnvWithDefault("BIND_ADDRESS", "0.0.0.0"),
openaiKey: getEnvWithDefault("OPENAI_KEY", ""),
anthropicKey: getEnvWithDefault("ANTHROPIC_KEY", ""),
qwenKey: getEnvWithDefault("QWEN_KEY", ""),
googleAIKey: getEnvWithDefault("GOOGLE_AI_KEY", ""),
allowedExpModels: getEnvWithDefault("ALLOWED_EXP_MODELS", ""),
mistralAIKey: getEnvWithDefault("MISTRAL_AI_KEY", ""),
deepseekKey: getEnvWithDefault("DEEPSEEK_KEY", ""),
xaiKey: getEnvWithDefault("XAI_KEY", ""),
cohereKey: getEnvWithDefault("COHERE_KEY", ""),
moonshotKey: getEnvWithDefault("MOONSHOT_KEY", ""),
awsCredentials: getEnvWithDefault("AWS_CREDENTIALS", ""),
gcpCredentials: getEnvWithDefault("GCP_CREDENTIALS", ""),
azureCredentials: getEnvWithDefault("AZURE_CREDENTIALS", ""),
proxyKey: getEnvWithDefault("PROXY_KEY", ""),
adminKey: getEnvWithDefault("ADMIN_KEY", ""),
serviceInfoPassword: getEnvWithDefault("SERVICE_INFO_PASSWORD", ""),
sqliteDataPath: getEnvWithDefault(
"SQLITE_DATA_PATH",
path.join(DATA_DIR, "database.sqlite")
@@ -481,11 +394,7 @@ export const config: Config = {
eventLogging: getEnvWithDefault("EVENT_LOGGING", false),
eventLoggingTrim: getEnvWithDefault("EVENT_LOGGING_TRIM", 5),
gatekeeper: getEnvWithDefault("GATEKEEPER", "none"),
gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory") as Config["gatekeeperStore"],
sqliteUserStorePath: getEnvWithDefault(
"SQLITE_USER_STORE_PATH",
path.join(DATA_DIR, "user-store.sqlite")
),
gatekeeperStore: getEnvWithDefault("GATEKEEPER_STORE", "memory"),
maxIpsPerUser: getEnvWithDefault("MAX_IPS_PER_USER", 0),
maxIpsAutoBan: getEnvWithDefault("MAX_IPS_AUTO_BAN", false),
captchaMode: getEnvWithDefault("CAPTCHA_MODE", "none"),
@@ -498,23 +407,40 @@ export const config: Config = {
firebaseKey: getEnvWithDefault("FIREBASE_KEY", undefined),
textModelRateLimit: getEnvWithDefault("TEXT_MODEL_RATE_LIMIT", 4),
imageModelRateLimit: getEnvWithDefault("IMAGE_MODEL_RATE_LIMIT", 4),
maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 32768),
maxContextTokensOpenAI: getEnvWithDefault("MAX_CONTEXT_TOKENS_OPENAI", 16384),
maxContextTokensAnthropic: getEnvWithDefault(
"MAX_CONTEXT_TOKENS_ANTHROPIC",
32768
0
),
maxOutputTokensOpenAI: getEnvWithDefault(
["MAX_OUTPUT_TOKENS_OPENAI", "MAX_OUTPUT_TOKENS"],
1024
400
),
maxOutputTokensAnthropic: getEnvWithDefault(
["MAX_OUTPUT_TOKENS_ANTHROPIC", "MAX_OUTPUT_TOKENS"],
1024
),
allowedModelFamilies: getEnvWithDefault(
"ALLOWED_MODEL_FAMILIES",
getDefaultModelFamilies()
400
),
allowedModelFamilies: getEnvWithDefault("ALLOWED_MODEL_FAMILIES", [
"turbo",
"gpt4",
"gpt4-32k",
"gpt4-turbo",
"gpt4o",
"claude",
"claude-opus",
"gemini-pro",
"mistral-tiny",
"mistral-small",
"mistral-medium",
"mistral-large",
"aws-claude",
"aws-claude-opus",
"azure-turbo",
"azure-gpt4",
"azure-gpt4-32k",
"azure-gpt4-turbo",
"azure-gpt4o",
]),
rejectPhrases: parseCsv(getEnvWithDefault("REJECT_PHRASES", "")),
rejectMessage: getEnvWithDefault(
"REJECT_MESSAGE",
@@ -566,15 +492,6 @@ export const config: Config = {
getEnvWithDefault("ADMIN_WHITELIST", "0.0.0.0/0,::/0")
),
ipBlacklist: parseCsv(getEnvWithDefault("IP_BLACKLIST", "")),
tokensPunishmentFactor: getEnvWithDefault("TOKENS_PUNISHMENT_FACTOR", 0.0),
httpAgent: {
interface: getEnvWithDefault("HTTP_AGENT_INTERFACE", undefined),
proxyUrl: getEnvWithDefault("HTTP_AGENT_PROXY_URL", undefined),
},
loginImageUrl: getEnvWithDefault("LOGIN_IMAGE_URL", ""),
enableInfoPageLogin: getEnvWithDefault("ENABLE_INFO_PAGE_LOGIN", true),
serviceInfoAuthMode: getEnvWithDefault("SERVICE_INFO_AUTH_MODE", "token") as Config["serviceInfoAuthMode"],
serviceInfoPassword: getEnvWithDefault("SERVICE_INFO_PASSWORD", undefined),
} as const;
function generateSigningKey() {
@@ -591,10 +508,7 @@ function generateSigningKey() {
config.anthropicKey,
config.googleAIKey,
config.mistralAIKey,
config.deepseekKey,
config.xaiKey,
config.awsCredentials,
config.gcpCredentials,
config.azureCredentials,
];
if (secrets.filter((s) => s).length === 0) {
@@ -613,7 +527,7 @@ function generateSigningKey() {
}
const signingKey = generateSigningKey();
export const SECRET_SIGNING_KEY = signingKey;
export const COOKIE_SECRET = signingKey;
export async function assertConfigIsValid() {
if (process.env.MODEL_RATE_LIMIT !== undefined) {
@@ -696,41 +610,6 @@ export async function assertConfigIsValid() {
);
}
if (config.gatekeeperStore === "sqlite" && !config.sqliteUserStorePath) {
throw new Error(
"SQLite user store requires `SQLITE_USER_STORE_PATH` to be set."
);
}
if (Object.values(config.httpAgent || {}).filter(Boolean).length === 0) {
delete config.httpAgent;
} else if (config.httpAgent) {
if (config.httpAgent.interface && config.httpAgent.proxyUrl) {
throw new Error(
"Cannot set both `HTTP_AGENT_INTERFACE` and `HTTP_AGENT_PROXY_URL`."
);
}
}
if (config.enableInfoPageLogin) {
if (!["token", "password"].includes(config.serviceInfoAuthMode)) {
throw new Error(
`Invalid SERVICE_INFO_AUTH_MODE: ${config.serviceInfoAuthMode}. Must be 'token' or 'password'.`
);
}
if (config.serviceInfoAuthMode === "password" && !config.serviceInfoPassword) {
throw new Error(
"SERVICE_INFO_AUTH_MODE is 'password' but SERVICE_INFO_PASSWORD is not set."
);
}
// If service info login is token-based, gatekeeper must be 'user_token' mode for getUser() to be effective.
if (config.serviceInfoAuthMode === "token" && config.gatekeeper !== "user_token") {
throw new Error(
"SERVICE_INFO_AUTH_MODE is 'token' for info page login, but GATEKEEPER is not 'user_token'. User token authentication will not work."
);
}
}
// Ensure forks which add new secret-like config keys don't unwittingly expose
// them to users.
for (const key of getKeys(config)) {
@@ -744,16 +623,15 @@ export async function assertConfigIsValid() {
`Config key "${key}" may be sensitive but is exposed. Add it to SENSITIVE_KEYS or OMITTED_KEYS.`
);
}
await maybeInitializeFirebase();
}
/**
* Config keys that are masked on the info page, but not hidden as their
* presence may be relevant to the user due to privacy implications.
*/
export const SENSITIVE_KEYS: (keyof Config)[] = [
"googleSheetsSpreadsheetId",
"httpAgent",
];
export const SENSITIVE_KEYS: (keyof Config)[] = ["googleSheetsSpreadsheetId"];
/**
* Config keys that are not displayed on the info page at all, generally because
@@ -766,17 +644,12 @@ export const OMITTED_KEYS = [
"openaiKey",
"anthropicKey",
"googleAIKey",
"deepseekKey",
"xaiKey",
"cohereKey",
"qwenKey",
"moonshotKey",
"mistralAIKey",
"awsCredentials",
"gcpCredentials",
"azureCredentials",
"proxyKey",
"adminKey",
"serviceInfoPassword",
"rejectPhrases",
"rejectMessage",
"showTokenCosts",
@@ -785,7 +658,6 @@ export const OMITTED_KEYS = [
"firebaseKey",
"firebaseRtdbUrl",
"sqliteDataPath",
"sqliteUserStorePath",
"eventLogging",
"eventLoggingTrim",
"gatekeeperStore",
@@ -804,9 +676,6 @@ export const OMITTED_KEYS = [
"adminWhitelist",
"ipBlacklist",
"powTokenPurgeHours",
"loginImageUrl",
"enableInfoPageLogin",
"serviceInfoPassword",
] satisfies (keyof Config)[];
type OmitKeys = (typeof OMITTED_KEYS)[number];
@@ -867,9 +736,7 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
"ANTHROPIC_KEY",
"GOOGLE_AI_KEY",
"AWS_CREDENTIALS",
"GCP_CREDENTIALS",
"AZURE_CREDENTIALS",
"QWEN_KEY",
].includes(String(env))
) {
return value as unknown as T;
@@ -886,6 +753,32 @@ function getEnvWithDefault<T>(env: string | string[], defaultValue: T): T {
}
}
let firebaseApp: firebase.app.App | undefined;
async function maybeInitializeFirebase() {
if (!config.gatekeeperStore.startsWith("firebase")) {
return;
}
const firebase = await import("firebase-admin");
const firebaseKey = Buffer.from(config.firebaseKey!, "base64").toString();
const app = firebase.initializeApp({
credential: firebase.credential.cert(JSON.parse(firebaseKey)),
databaseURL: config.firebaseRtdbUrl,
});
await app.database().ref("connection-test").set(Date.now());
firebaseApp = app;
}
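As the function above implies, FIREBASE_KEY must be the entire service-account JSON file encoded as base64, not just a field from it. One way to produce that value, as a sketch (the file name is an assumption):
import fs from "fs";
// Encode the whole service-account JSON file and paste the output into FIREBASE_KEY.
console.log(fs.readFileSync("service-account.json").toString("base64"));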
export function getFirebaseApp(): firebase.app.App {
if (!firebaseApp) {
throw new Error("Firebase app not initialized.");
}
return firebaseApp;
}
function parseCsv(val: string): string[] {
if (!val) return [];
@@ -893,9 +786,3 @@ function parseCsv(val: string): string[] {
const matches = val.match(regex) || [];
return matches.map((item) => item.replace(/^"|"$/g, "").trim());
}
function getDefaultModelFamilies(): ModelFamily[] {
return MODEL_FAMILIES.filter(
(f) => !f.includes("o1-pro") && !f.includes("o3-pro")
) as ModelFamily[];
}
+129 -253
View File
@@ -1,8 +1,4 @@
/* ──────────────────────────────────────────────────────────────
Login-gated info page
drop-in replacement for src/info-page.ts
──────────────────────────────────────────────────────────── */
/** This whole module kinda sucks */
import fs from "fs";
import express, { Router, Request, Response } from "express";
import showdown from "showdown";
@@ -12,166 +8,41 @@ import { getLastNImages } from "./shared/file-storage/image-history";
import { keyPool } from "./shared/key-management";
import { MODEL_FAMILY_SERVICE, ModelFamily } from "./shared/models";
import { withSession } from "./shared/with-session";
import { injectCsrfToken, checkCsrfToken } from "./shared/inject-csrf";
import { getUser } from "./shared/users/user-store";
/* ──────────────── TYPES: extend express-session ──────────── */
declare module "express-session" {
interface Session {
infoPageAuthed?: boolean;
}
}
/* ──────────────── misc constants ─────────────────────────── */
const INFO_PAGE_TTL = 2_000; // ms
const LOGIN_ROUTE = "/";
import { checkCsrfToken, injectCsrfToken } from "./shared/inject-csrf";
const INFO_PAGE_TTL = 2000;
const MODEL_FAMILY_FRIENDLY_NAME: { [f in ModelFamily]: string } = {
qwen: "Qwen",
cohere: "Cohere",
deepseek: "Deepseek",
xai: "Grok",
moonshot: "Moonshot",
turbo: "GPT-4o Mini / 3.5 Turbo",
turbo: "GPT-3.5 Turbo",
gpt4: "GPT-4",
"gpt4-32k": "GPT-4 32k",
"gpt4-turbo": "GPT-4 Turbo",
gpt4o: "GPT-4o",
gpt41: "GPT-4.1",
"gpt41-mini": "GPT-4.1 Mini",
"gpt41-nano": "GPT-4.1 Nano",
gpt5: "GPT-5",
"gpt5-mini": "GPT-5 Mini",
"gpt5-nano": "GPT-5 Nano",
"gpt5-chat-latest": "GPT-5 Chat Latest",
gpt45: "GPT-4.5",
o1: "OpenAI o1",
"o1-mini": "OpenAI o1 mini",
"o1-pro": "OpenAI o1 pro",
"o3-pro": "OpenAI o3 pro",
"o3-mini": "OpenAI o3 mini",
"o3": "OpenAI o3",
"o4-mini": "OpenAI o4 mini",
"codex-mini": "OpenAI Codex Mini",
"dall-e": "DALL-E",
"gpt-image": "GPT Image",
claude: "Claude (Sonnet)",
"claude-opus": "Claude (Opus)",
"gemini-flash": "Gemini Flash",
"gemini-pro": "Gemini Pro",
"gemini-ultra": "Gemini Ultra",
"mistral-tiny": "Mistral 7B",
"mistral-small": "Mistral Nemo",
"mistral-small": "Mixtral Small", // Originally 8x7B, but that now refers to the older open-weight version. Mixtral Small is a newer closed-weight update to the 8x7B model.
"mistral-medium": "Mistral Medium",
"mistral-large": "Mistral Large",
"aws-claude": "AWS Claude (Sonnet)",
"aws-claude-opus": "AWS Claude (Opus)",
"aws-mistral-tiny": "AWS Mistral 7B",
"aws-mistral-small": "AWS Mistral Nemo",
"aws-mistral-medium": "AWS Mistral Medium",
"aws-mistral-large": "AWS Mistral Large",
"gcp-claude": "GCP Claude (Sonnet)",
"gcp-claude-opus": "GCP Claude (Opus)",
"azure-turbo": "Azure GPT-3.5 Turbo",
"azure-gpt4": "Azure GPT-4",
"azure-gpt4-32k": "Azure GPT-4 32k",
"azure-gpt4-turbo": "Azure GPT-4 Turbo",
"azure-gpt4o": "Azure GPT-4o",
"azure-gpt45": "Azure GPT-4.5",
"azure-gpt41": "Azure GPT-4.1",
"azure-gpt41-mini": "Azure GPT-4.1 Mini",
"azure-gpt41-nano": "Azure GPT-4.1 Nano",
"azure-gpt5": "Azure GPT-5",
"azure-gpt5-mini": "Azure GPT-5 Mini",
"azure-gpt5-nano": "Azure GPT-5 Nano",
"azure-gpt5-chat-latest": "Azure GPT-5 Chat Latest",
"azure-o1": "Azure o1",
"azure-o1-mini": "Azure o1 mini",
"azure-o1-pro": "Azure o1 pro",
"azure-o3-pro": "Azure o3 pro",
"azure-o3-mini": "Azure o3 mini",
"azure-o3": "Azure o3",
"azure-o4-mini": "Azure o4 mini",
"azure-codex-mini": "Azure Codex Mini",
"azure-dall-e": "Azure DALL-E",
"azure-gpt-image": "Azure GPT Image",
};
const converter = new showdown.Converter();
/* optional markdown greeting */
const customGreeting = fs.existsSync("greeting.md")
? `<div id="servergreeting">${fs.readFileSync("greeting.md", "utf8")}</div>`
? `\n## Server Greeting\n${fs.readFileSync("greeting.md", "utf8")}`
: "";
/* ──────────────── Login page ──────────────────────── */
function renderLoginPage(csrf: string, error?: string) {
const errBlock = error
? `<div class="error-message">${escapeHtml(error)}</div>`
: "";
const pageTitle = getServerTitle();
return `<!DOCTYPE html>
<html>
<head>
<title>${pageTitle} Login</title>
<style>
body{font-family:Arial, sans-serif;display:flex;justify-content:center;
align-items:center;height:100vh;margin:0;padding:20px;background:#f5f5f5;}
.login-container{background:#fff;border-radius:8px;box-shadow:0 4px 8px rgba(0,0,0,.1);
padding:30px;width:100%;max-width:400px;text-align:center;}
.logo-image{max-width:200px;margin-bottom:20px;}
.form-group{margin-bottom:20px;}
input[type=text], input[type=password]{width:100%;padding:10px;border:1px solid #ddd;border-radius:4px;
box-sizing:border-box;font-size:16px;}
button{background:#4caf50;color:#fff;border:none;padding:12px 20px;border-radius:4px;
cursor:pointer;font-size:16px;width:100%;}
button:hover{background:#45a049;}
.error-message{color:#f44336;margin-bottom:15px;}
@media (prefers-color-scheme: dark) {
body { background: #2c2c2c; color: #e0e0e0; }
.login-container { background: #383838; box-shadow: 0 4px 12px rgba(0,0,0,0.4); border: 1px solid #4a4a4a; }
input[type=text], input[type=password] { background: #4a4a4a; color: #e0e0e0; border: 1px solid #5a5a5a; }
input[type=text]::placeholder, input[type=password]::placeholder { color: #999; }
button { background: #007bff; } /* Using a blue for dark mode button */
button:hover { background: #0056b3; }
.error-message { color: #ff8a80; } /* Lighter red for errors in dark mode */
}
</style>
</head>
<body>
<div class="login-container">
${config.loginImageUrl ? `<img src="${config.loginImageUrl}" alt="Logo" class="logo-image">` : ''}
${errBlock}
<form method="POST" action="${LOGIN_ROUTE}">
<div class="form-group">
${config.serviceInfoAuthMode === "password"
? `<input type="password" id="password" name="password" required placeholder="Service Password">`
: `<input type="text" id="token" name="token" required placeholder="Your token">`}
<input type="hidden" name="_csrf" value="${csrf}">
</div>
<button type="submit">Access Dashboard</button>
</form>
</div>
</body>
</html>`;
}
/* ──────────────── login-required middleware ──────────────── */
function requireLogin(
req: Request,
res: Response,
next: express.NextFunction
) {
if (req.session?.infoPageAuthed) return next();
return res.send(renderLoginPage(res.locals.csrfToken));
}
/* ──────────────── INFO PAGE CACHING ──────────────────────── */
let infoPageHtml: string | undefined;
let infoPageLastUpdated = 0;
export function handleInfoPage(req: Request, res: Response) {
export const handleInfoPage = (req: Request, res: Response) => {
if (infoPageLastUpdated + INFO_PAGE_TTL > Date.now()) {
return res.send(infoPageHtml);
}
@@ -186,46 +57,60 @@ export function handleInfoPage(req: Request, res: Response) {
infoPageLastUpdated = Date.now();
res.send(infoPageHtml);
}
};
/* ──────────────── RENDER FULL INFO PAGE ──────────────────── */
export function renderPage(info: ServiceInfo) {
const title = getServerTitle();
const headerHtml = buildInfoPageHeader(info);
return `<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="robots" content="noindex" />
<title>${title}</title>
<link rel="stylesheet" href="/res/css/reset.css" />
<link rel="stylesheet" href="/res/css/sakura.css" />
<link rel="stylesheet" href="/res/css/sakura-dark.css"
media="screen and (prefers-color-scheme: dark)" />
<style>
body{font-family:sans-serif;padding:1em;max-width:900px;margin:0;}
.self-service-links{display:flex;justify-content:center;margin-bottom:1em;
padding:0.5em;font-size:0.8em;}
.self-service-links a{margin:0 0.5em;}
</style>
</head>
<body>
${headerHtml}
<hr/>
${getSelfServiceLinks()}
<h2>Service Info</h2>
<pre>${JSON.stringify(info, null, 2)}</pre>
</body>
<head>
<meta charset="utf-8" />
<meta name="robots" content="noindex" />
<title>${title}</title>
<link rel="stylesheet" href="/res/css/reset.css" media="screen" />
<link rel="stylesheet" href="/res/css/sakura.css" media="screen" />
<link rel="stylesheet" href="/res/css/sakura-dark.css" media="screen and (prefers-color-scheme: dark)" />
<style>
body {
font-family: sans-serif;
padding: 1em;
max-width: 900px;
margin: 0;
}
.self-service-links {
display: flex;
justify-content: center;
margin-bottom: 1em;
padding: 0.5em;
font-size: 0.8em;
}
.self-service-links a {
margin: 0 0.5em;
}
</style>
</head>
<body>
${headerHtml}
<hr />
${getSelfServiceLinks()}
<h2>Service Info</h2>
<pre>${JSON.stringify(info, null, 2)}</pre>
</body>
</html>`;
}
/* ──────────────── header & helper functions ──────────────── */
/* (all copied verbatim from original file) */
/**
* If the server operator provides a `greeting.md` file, it will be included in
* the rendered info page.
**/
function buildInfoPageHeader(info: ServiceInfo) {
const title = getServerTitle();
// TODO: use some templating engine instead of this mess
let infoBody = `# ${title}`;
if (config.promptLogging) {
infoBody += `\n## Prompt Logging Enabled
This proxy keeps full logs of all prompts and AI responses. Prompt logs are anonymous and do not contain IP addresses or timestamps.
@@ -244,9 +129,9 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon
for (const modelFamily of config.allowedModelFamilies) {
const service = MODEL_FAMILY_SERVICE[modelFamily];
const hasKeys = keyPool.list().some(
(k) => k.service === service && k.modelFamilies.includes(modelFamily)
);
const hasKeys = keyPool.list().some((k) => {
return k.service === service && k.modelFamilies.includes(modelFamily);
});
const wait = info[modelFamily]?.estimatedQueueTime;
if (hasKeys && wait) {
@@ -257,7 +142,9 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon
}
infoBody += "\n\n" + waits.join(" / ");
infoBody += customGreeting;
infoBody += buildRecentImageSection();
return converter.makeHtml(infoBody);
@@ -265,60 +152,63 @@ This proxy keeps full logs of all prompts and AI responses. Prompt logs are anon
function getSelfServiceLinks() {
if (config.gatekeeper !== "user_token") return "";
const links = [["Check your user token", "/user/lookup"]];
if (config.captchaMode !== "none") {
links.unshift(["Request a user token", "/user/captcha"]);
}
return `<div class="self-service-links">${links
.map(([t, l]) => `<a href="${l}">${t}</a>`)
.map(([text, link]) => `<a target="_blank" href="${link}">${text}</a>`)
.join(" | ")}</div>`;
}
function getServerTitle() {
if (process.env.SERVER_TITLE) return process.env.SERVER_TITLE;
if (process.env.SPACE_ID)
// Use manually set title if available
if (process.env.SERVER_TITLE) {
return process.env.SERVER_TITLE;
}
// Huggingface
if (process.env.SPACE_ID) {
return `${process.env.SPACE_AUTHOR_NAME} / ${process.env.SPACE_TITLE}`;
if (process.env.RENDER)
}
// Render
if (process.env.RENDER) {
return `Render / ${process.env.RENDER_SERVICE_NAME}`;
return "Tunnel";
}
return "OAI Reverse Proxy";
}
function buildRecentImageSection() {
const imageModels: ModelFamily[] = [
"azure-dall-e",
"dall-e",
"gpt-image",
"azure-gpt-image",
];
// Condition 1: Is the feature enabled via config?
// Condition 2: Is at least one relevant image model family allowed in config?
const dalleModels: ModelFamily[] = ["azure-dall-e", "dall-e"];
if (
!config.showRecentImages ||
imageModels.every((f) => !config.allowedModelFamilies.includes(f))
dalleModels.every((f) => !config.allowedModelFamilies.includes(f))
) {
return ""; // Exit if feature is disabled or no relevant models are allowed
}
// Condition 3: Are there any actual images to display?
const recentImages = getLastNImages(12).reverse();
if (recentImages.length === 0) {
// If the feature is enabled and models are allowed, but no images exist,
// do not render the section, including its title.
return "";
}
// If all conditions pass (feature enabled, models allowed, images exist), build and return the HTML
let html = `<h2>Recent Image Generations</h2>`;
html += `<div style="display:flex;flex-wrap:wrap;" id="recent-images">`;
let html = `<h2>Recent DALL-E Generations</h2>`;
const recentImages = getLastNImages(12).reverse();
if (recentImages.length === 0) {
html += `<p>No images yet.</p>`;
return html;
}
html += `<div style="display: flex; flex-wrap: wrap;" id="recent-images">`;
for (const { url, prompt } of recentImages) {
const thumbUrl = url.replace(/\.png$/, "_t.jpg");
const escapedPrompt = escapeHtml(prompt);
html += `<div style="margin:0.5em" class="recent-image">
<a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}"
alt="${escapedPrompt}" style="max-width:150px;max-height:150px;"/></a></div>`;
html += `<div style="margin: 0.5em;" class="recent-image">
<a href="${url}" target="_blank"><img src="${thumbUrl}" title="${escapedPrompt}" alt="${escapedPrompt}" style="max-width: 150px; max-height: 150px;" /></a>
</div>`;
}
html += `</div><p style="clear:both;text-align:center;">
<a href="/user/image-history">View all recent images</a></p>`;
html += `</div>`;
html += `<p style="clear: both; text-align: center;"><a href="/user/image-history">View all recent images</a></p>`;
return html;
}
@@ -333,71 +223,57 @@ function escapeHtml(unsafe: string) {
.replace(/]/g, "&#93;");
}
function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
try {
const [u, s] = spaceId.split("/");
return `https://${u}-${s.replace(/_/g, "-")}.hf.space`;
} catch {
const [username, spacename] = spaceId.split("/");
return `https://${username}-${spacename.replace(/_/g, "-")}.hf.space`;
} catch (e) {
return "";
}
}
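Both variants of the helper above build the hf.space URL by joining the author and space name with a hyphen and replacing underscores in the space name; for example (space ID is illustrative):
// getExternalUrlForHuggingfaceSpaceId("someuser/my_cool_space")
//   -> "https://someuser-my-cool-space.hf.space"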
/* ──────────────── ROUTER ─────────────────────────────────── */
const infoPageRouter = Router();
infoPageRouter.use(
express.json({ limit: "1mb" }),
express.urlencoded({ extended: true, limit: "1mb" }),
withSession,
injectCsrfToken,
checkCsrfToken
);
/* login attempt */
infoPageRouter.post(LOGIN_ROUTE, (req, res) => {
if (config.serviceInfoAuthMode === "password") {
const password = (req.body.password || "").trim();
// Simple string comparison; for production, consider a timing-safe comparison library
if (config.serviceInfoPassword && password === config.serviceInfoPassword) {
req.session!.infoPageAuthed = true;
return res.redirect("/");
} else {
return res
.status(401)
.send(renderLoginPage(res.locals.csrfToken, "Invalid password. Please try again."));
}
} else {
// Token-based authentication (using any valid user token)
const token = (req.body.token || "").trim();
const user = getUser(token); // returns undefined if invalid
if (user && !user.disabledAt) {
// Only allow access if user exists AND is not disabled
req.session!.infoPageAuthed = true;
return res.redirect("/");
} else if (user && user.disabledAt) {
// User exists but is disabled
const reason = user.disabledReason || "Your account has been disabled";
return res
.status(401)
.send(renderLoginPage(res.locals.csrfToken, `Access denied: ${reason}`));
} else {
// User doesn't exist
return res
.status(401)
.send(renderLoginPage(res.locals.csrfToken, "Invalid token. Please try again."));
}
function checkIfUnlocked(
req: Request,
res: Response,
next: express.NextFunction
) {
if (config.serviceInfoPassword?.length && !req.session?.unlocked) {
return res.redirect("/unlock-info");
}
});
/* GET / either login form or info page */
if (config.enableInfoPageLogin) {
infoPageRouter.get(LOGIN_ROUTE, requireLogin, handleInfoPage);
} else {
infoPageRouter.get(LOGIN_ROUTE, handleInfoPage);
next();
}
/* ─── Removed the public /status route : simply not added ─── */
const infoPageRouter = Router();
if (config.serviceInfoPassword?.length) {
infoPageRouter.use(
express.json({ limit: "1mb" }),
express.urlencoded({ extended: true, limit: "1mb" })
);
infoPageRouter.use(withSession);
infoPageRouter.use(injectCsrfToken, checkCsrfToken);
infoPageRouter.post("/unlock-info", (req, res) => {
if (req.body.password !== config.serviceInfoPassword) {
return res.status(403).send("Incorrect password");
}
req.session!.unlocked = true;
res.redirect("/");
});
infoPageRouter.get("/unlock-info", (_req, res) => {
if (_req.session?.unlocked) return res.redirect("/");
res.send(`
<form method="post" action="/unlock-info">
<h1>Unlock Service Info</h1>
<input type="hidden" name="_csrf" value="${res.locals.csrfToken}" />
<input type="password" name="password" placeholder="Password" />
<button type="submit">Unlock</button>
</form>
`);
});
infoPageRouter.use(checkIfUnlocked);
}
infoPageRouter.get("/", handleInfoPage);
infoPageRouter.get("/status", (req, res) => {
res.json(buildInfo(req.protocol + "://" + req.get("host"), false));
});
export { infoPageRouter };
-9
View File
@@ -1,9 +0,0 @@
import { NextFunction, Request, Response } from "express";
export function addV1(req: Request, res: Response, next: NextFunction) {
// Clients don't consistently use the /v1 prefix so we'll add it for them.
if (!req.path.startsWith("/v1/") && !req.path.match(/^\/(v1alpha|v1beta)\//)) {
req.url = `/v1${req.url}`;
}
next();
}
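The deleted middleware above only prefixes paths that are missing a version segment; a quick illustration of the mapping implied by its check (paths are examples):
// "/chat/completions"     -> "/v1/chat/completions"   (prefix added)
// "/v1/chat/completions"  -> unchanged (already starts with /v1/)
// "/v1beta/models"        -> unchanged (/v1alpha/ and /v1beta/ are left alone)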
+170 -199
View File
@@ -1,16 +1,22 @@
import { Request, RequestHandler, Router } from "express";
import { Request, Response, RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addKey,
addAnthropicPreamble,
createPreprocessorMiddleware,
finalizeBody,
createOnProxyReqHandler,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { claudeModels } from "../shared/claude-models";
import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";
import {
ProxyResHandlerWithBody,
createOnProxyResHandler,
} from "./middleware/response";
import { sendErrorToClient } from "./middleware/response/error-generator";
let modelsCache: any = null;
let modelsCacheTime = 0;
@@ -20,32 +26,40 @@ const getModelsResponse = () => {
return modelsCache;
}
if (!config.anthropicKey) return { object: "list", data: [], has_more: false, first_id: null, last_id: null };
if (!config.anthropicKey) return { object: "list", data: [] };
const date = new Date()
const models = claudeModels.map(model => ({
// Common
id: model.anthropicId,
owned_by: "anthropic",
// Anthropic
type: "model",
display_name: model.displayName,
created_at: date.toISOString(),
// OpenAI
const claudeVariants = [
"claude-v1",
"claude-v1-100k",
"claude-instant-v1",
"claude-instant-v1-100k",
"claude-v1.3",
"claude-v1.3-100k",
"claude-v1.2",
"claude-v1.0",
"claude-instant-v1.1",
"claude-instant-v1.1-100k",
"claude-instant-v1.0",
"claude-2",
"claude-2.0",
"claude-2.1",
"claude-3-haiku-20240307",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
];
const models = claudeVariants.map((id) => ({
id,
object: "model",
created: date.getTime(),
}));
created: new Date().getTime(),
owned_by: "anthropic",
permission: [],
root: "claude",
parent: null,
}));
modelsCache = {
// Common
object: "list",
data: models,
// Anthropic
has_more: false,
first_id: models[0]?.id,
last_id: models[models.length - 1]?.id,
};
modelsCacheTime = date.getTime();
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
};
@@ -54,7 +68,8 @@ const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
const anthropicBlockingResponseHandler: ProxyResHandlerWithBody = async (
/** Only used for non-streaming requests. */
const anthropicResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
@@ -107,6 +122,12 @@ export function transformAnthropicChatResponseToAnthropicText(
};
}
/**
* Transforms a model response from the Anthropic API to match those from the
* OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
* is only used for non-streaming requests as streaming requests are handled
* on-the-fly.
*/
function transformAnthropicTextResponseToOpenAI(
anthropicBody: Record<string, any>,
req: Request
@@ -157,187 +178,75 @@ export function transformAnthropicChatResponseToOpenAI(
};
}
/**
* If a client using the OpenAI compatibility endpoint requests an actual OpenAI
* model, reassigns it to Sonnet.
*/
function maybeReassignModel(req: Request) {
const model = req.body.model;
if (model.includes("claude")) return; // use whatever model the user requested
req.body.model = "claude-3-5-sonnet-latest";
}
/**
* If client requests more than 4096 output tokens the request must have a
* particular version header.
* https://docs.anthropic.com/en/release-notes/api#july-15th-2024
*
* Also adds the required beta header for 1-hour cache duration if requested.
* Also validates Claude 4.1 Opus parameters (temperature/top_p).
*/
function setAnthropicBetaHeader(req: Request) {
// Validate Claude 4.1 Opus parameters before processing
validateClaude41OpusParameters(req);
const { max_tokens_to_sample } = req.body;
// Initialize beta headers array
const betaHeaders: string[] = [];
// Add max tokens beta header if needed
if (max_tokens_to_sample > 4096) {
betaHeaders.push("max-tokens-3-5-sonnet-2024-07-15");
}
// Add extended cache TTL beta header if 1h cache is requested
if (req.body.cache_control?.ttl === "1h") {
betaHeaders.push("extended-cache-ttl-2025-04-11");
}
// Set the combined beta headers if any were added
if (betaHeaders.length > 0) {
req.headers["anthropic-beta"] = betaHeaders.join(",");
}
}
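For illustration, a request body that trips both branches of the function above (values are made up) and the combined header it would produce:
const body = {
  model: "claude-3-5-sonnet-latest",
  max_tokens_to_sample: 8192,
  cache_control: { ttl: "1h" },
};
// With this body, the function above sets:
// req.headers["anthropic-beta"] === "max-tokens-3-5-sonnet-2024-07-15,extended-cache-ttl-2025-04-11"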
/**
* Adds web search tool for Claude-3.5 and Claude-3.7 models when enable_web_search is true
*
* Supports all optional parameters documented in the Claude API:
* - max_uses: Limit the number of searches per request
* - allowed_domains: Only include results from these domains
* - blocked_domains: Never include results from these domains
* - user_location: Localize search results
*/
function addWebSearchTool(req: Request) {
// Check if this is a Claude model that supports web search and if web search is enabled
const isClaude35 = req.body.model?.includes("claude-3-5") || req.body.model?.includes("claude-3.5");
const isClaude37 = req.body.model?.includes("claude-3-7") || req.body.model?.includes("claude-3.7");
const isClaude4 = req.body.model?.includes("claude-sonnet-4") || req.body.model?.includes("claude-opus-4");
const useWebSearch = (isClaude35 || isClaude37 || isClaude4) && Boolean(req.body.enable_web_search);
if (useWebSearch) {
// Create the base web search tool
const webSearchTool: any = {
'type': 'web_search_20250305',
'name': 'web_search',
};
// Add optional parameters if provided by the client
// max_uses: Limit the number of searches per request
if (typeof req.body.web_search_max_uses === 'number') {
webSearchTool.max_uses = req.body.web_search_max_uses;
delete req.body.web_search_max_uses;
}
// allowed_domains: Only include results from these domains
if (Array.isArray(req.body.web_search_allowed_domains)) {
webSearchTool.allowed_domains = req.body.web_search_allowed_domains;
delete req.body.web_search_allowed_domains;
}
// blocked_domains: Never include results from these domains
if (Array.isArray(req.body.web_search_blocked_domains)) {
webSearchTool.blocked_domains = req.body.web_search_blocked_domains;
delete req.body.web_search_blocked_domains;
}
// user_location: Localize search results
if (req.body.web_search_user_location) {
webSearchTool.user_location = req.body.web_search_user_location;
delete req.body.web_search_user_location;
}
// Add the web search tool to the tools array
req.body.tools = [...(req.body.tools || []), webSearchTool];
}
// Delete custom parameters as they're not standard Claude API parameters
delete req.body.enable_web_search;
delete req.body.reasoning_effort;
}
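As a sketch of the rewrite performed above, an incoming body using the custom web-search parameters (all values illustrative) and the body after the custom fields are stripped and the tool is appended:
const incoming = {
  model: "claude-3-5-sonnet-latest",
  messages: [],
  enable_web_search: true,
  web_search_max_uses: 3,
  web_search_allowed_domains: ["example.com"],
};
const afterAddWebSearchTool = {
  model: "claude-3-5-sonnet-latest",
  messages: [],
  tools: [
    {
      type: "web_search_20250305",
      name: "web_search",
      max_uses: 3,
      allowed_domains: ["example.com"],
    },
  ],
};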
function selectUpstreamPath(manager: ProxyReqManager) {
const req = manager.request;
const pathname = req.url.split("?")[0];
req.log.debug({ pathname }, "Anthropic path filter");
const isText = req.outboundApi === "anthropic-text";
const isChat = req.outboundApi === "anthropic-chat";
if (isChat && pathname === "/v1/complete") {
manager.setPath("/v1/messages");
}
if (isText && pathname === "/v1/chat/completions") {
manager.setPath("/v1/complete");
}
if (isChat && pathname === "/v1/chat/completions") {
manager.setPath("/v1/messages");
}
if (isChat && ["sonnet", "opus"].includes(req.params.type)) {
manager.setPath("/v1/messages");
}
}
const anthropicProxy = createQueuedProxyMiddleware({
target: "https://api.anthropic.com",
mutations: [selectUpstreamPath, addKey, finalizeBody],
blockingResponseHandler: anthropicBlockingResponseHandler,
const anthropicProxy = createQueueMiddleware({
proxyMiddleware: createProxyMiddleware({
target: "https://api.anthropic.com",
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({
pipeline: [addKey, addAnthropicPreamble, finalizeBody],
}),
proxyRes: createOnProxyResHandler([anthropicResponseHandler]),
error: handleProxyError,
},
// Abusing pathFilter to rewrite the paths dynamically.
pathFilter: (pathname, req) => {
const isText = req.outboundApi === "anthropic-text";
const isChat = req.outboundApi === "anthropic-chat";
if (isChat && pathname === "/v1/complete") {
req.url = "/v1/messages";
}
if (isText && pathname === "/v1/chat/completions") {
req.url = "/v1/complete";
}
if (isChat && pathname === "/v1/chat/completions") {
req.url = "/v1/messages";
}
if (isChat && ["sonnet", "opus"].includes(req.params.type)) {
req.url = "/v1/messages";
}
return true;
},
}),
});
const nativeAnthropicChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "anthropic" },
{ afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);
const nativeTextPreprocessor = createPreprocessorMiddleware({
inApi: "anthropic-text",
outApi: "anthropic-text",
service: "anthropic",
});
const nativeTextPreprocessor = createPreprocessorMiddleware(
{
inApi: "anthropic-text",
outApi: "anthropic-text",
service: "anthropic",
},
{ afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);
const textToChatPreprocessor = createPreprocessorMiddleware(
{
inApi: "anthropic-text",
outApi: "anthropic-chat",
service: "anthropic",
},
{ afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);
const textToChatPreprocessor = createPreprocessorMiddleware({
inApi: "anthropic-text",
outApi: "anthropic-chat",
service: "anthropic",
});
/**
* Routes text completion prompts to anthropic-chat if they need translation
* (claude-3 based models do not support the old text completion endpoint).
*/
const preprocessAnthropicTextRequest: RequestHandler = (req, res, next) => {
const model = req.body.model;
const isClaude4Model = model?.includes("claude-sonnet-4") || model?.includes("claude-opus-4");
if (model?.startsWith("claude-3") || isClaude4Model) {
if (req.body.model?.startsWith("claude-3")) {
textToChatPreprocessor(req, res, next);
} else {
nativeTextPreprocessor(req, res, next);
}
};
const oaiToTextPreprocessor = createPreprocessorMiddleware(
{
inApi: "openai",
outApi: "anthropic-text",
service: "anthropic",
},
{ afterTransform: [setAnthropicBetaHeader] }
);
const oaiToTextPreprocessor = createPreprocessorMiddleware({
inApi: "openai",
outApi: "anthropic-text",
service: "anthropic",
});
const oaiToChatPreprocessor = createPreprocessorMiddleware(
{
inApi: "openai",
outApi: "anthropic-chat",
service: "anthropic",
},
{ afterTransform: [setAnthropicBetaHeader, addWebSearchTool] }
);
const oaiToChatPreprocessor = createPreprocessorMiddleware({
inApi: "openai",
outApi: "anthropic-chat",
service: "anthropic",
});
/**
* Routes an OpenAI prompt to either the legacy Claude text completion endpoint
@@ -345,9 +254,7 @@ const oaiToChatPreprocessor = createPreprocessorMiddleware(
*/
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
maybeReassignModel(req);
const model = req.body.model;
const isClaude4 = model?.includes("claude-sonnet-4") || model?.includes("claude-opus-4");
if (model?.includes("claude-3") || isClaude4) {
if (req.body.model?.includes("claude-3")) {
oaiToChatPreprocessor(req, res, next);
} else {
oaiToTextPreprocessor(req, res, next);
@@ -360,7 +267,11 @@ anthropicRouter.get("/v1/models", handleModelRequest);
anthropicRouter.post(
"/v1/messages",
ipLimiter,
nativeAnthropicChatPreprocessor,
createPreprocessorMiddleware({
inApi: "anthropic-chat",
outApi: "anthropic-chat",
service: "anthropic",
}),
anthropicProxy
);
// Anthropic text completion endpoint. Translates to Anthropic chat completion
@@ -380,5 +291,65 @@ anthropicRouter.post(
preprocessOpenAICompatRequest,
anthropicProxy
);
// Temporarily force Anthropic Text to Anthropic Chat for frontends which do not
// yet support the new model. Forces claude-3. Will be removed once common
// frontends have been updated.
anthropicRouter.post(
"/v1/:type(sonnet|opus)/:action(complete|messages)",
ipLimiter,
handleAnthropicTextCompatRequest,
createPreprocessorMiddleware({
inApi: "anthropic-text",
outApi: "anthropic-chat",
service: "anthropic",
}),
anthropicProxy
);
function handleAnthropicTextCompatRequest(
req: Request,
res: Response,
next: any
) {
const type = req.params.type;
const action = req.params.action;
const alreadyInChatFormat = Boolean(req.body.messages);
const compatModel = `claude-3-${type}-20240229`;
req.log.info(
{ type, inputModel: req.body.model, compatModel, alreadyInChatFormat },
"Handling Anthropic compatibility request"
);
if (action === "messages" || alreadyInChatFormat) {
return sendErrorToClient({
req,
res,
options: {
title: "Unnecessary usage of compatibility endpoint",
message: `Your client seems to already support the new Claude API format. This endpoint is intended for clients that do not yet support the new format.\nUse the normal \`/anthropic\` proxy endpoint instead.`,
format: "unknown",
statusCode: 400,
reqId: req.id,
obj: {
requested_endpoint: "/anthropic/" + type,
correct_endpoint: "/anthropic",
},
},
});
}
req.body.model = compatModel;
next();
}
/**
* If a client using the OpenAI compatibility endpoint requests an actual OpenAI
* model, reassigns it to Claude 3 Sonnet.
*/
function maybeReassignModel(req: Request) {
const model = req.body.model;
if (!model.startsWith("gpt-")) return;
req.body.model = "claude-3-sonnet-20240229";
}
export const anthropic = anthropicRouter;
-341
View File
@@ -1,341 +0,0 @@
import { Request, RequestHandler, Router } from "express";
import { v4 } from "uuid";
import {
transformAnthropicChatResponseToAnthropicText,
transformAnthropicChatResponseToOpenAI,
} from "./anthropic";
import { ipLimiter } from "./rate-limit";
import {
createPreprocessorMiddleware,
finalizeSignedRequest,
signAwsRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";
const awsBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
switch (`${req.inboundApi}<-${req.outboundApi}`) {
case "openai<-anthropic-text":
req.log.info("Transforming Anthropic Text back to OpenAI format");
newBody = transformAwsTextResponseToOpenAI(body, req);
break;
case "openai<-anthropic-chat":
req.log.info("Transforming AWS Anthropic Chat back to OpenAI format");
newBody = transformAnthropicChatResponseToOpenAI(body);
break;
case "anthropic-text<-anthropic-chat":
req.log.info("Transforming AWS Anthropic Chat back to Text format");
newBody = transformAnthropicChatResponseToAnthropicText(body);
break;
}
// AWS does not always confirm the model in the response, so we have to add it
if (!newBody.model && req.body.model) {
newBody.model = req.body.model;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
function transformAwsTextResponseToOpenAI(
awsBody: Record<string, any>,
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
return {
id: "aws-" + v4(),
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage: {
prompt_tokens: req.promptTokens,
completion_tokens: req.outputTokens,
total_tokens: totalTokens,
},
choices: [
{
message: {
role: "assistant",
content: awsBody.completion?.trim(),
},
finish_reason: awsBody.stop_reason,
index: 0,
},
],
};
}
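// Illustrative sketch (not part of the commit): the transform above reshapes an AWS
// Anthropic text completion into an OpenAI chat completion. The request stub is
// hypothetical and carries only the fields the function reads.
const awsTextExampleReq = {
  body: { model: "anthropic.claude-v2:1" },
  promptTokens: 10,
  outputTokens: 5,
} as any;
const openAiShaped = transformAwsTextResponseToOpenAI(
  { completion: " Hello there. ", stop_reason: "stop_sequence" },
  awsTextExampleReq
);
// openAiShaped.choices[0].message.content === "Hello there."
// openAiShaped.usage.total_tokens === 15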
const awsClaudeProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
mutations: [signAwsRequest, finalizeSignedRequest],
blockingResponseHandler: awsBlockingResponseHandler,
});
const nativeTextPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
const textToChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes text completion prompts to aws anthropic-chat if they need translation
* (claude-3 based models do not support the old text completion endpoint).
*/
const preprocessAwsTextRequest: RequestHandler = (req, res, next) => {
if (req.body.model?.includes("claude-3")) {
textToChatPreprocessor(req, res, next);
} else {
nativeTextPreprocessor(req, res, next);
}
};
const oaiToAwsTextPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
const oaiToAwsChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes an OpenAI prompt to either the legacy Claude text completion endpoint
* or the new Claude chat completion endpoint, based on the requested model.
*/
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
if (req.body.model?.includes("claude-3")) {
oaiToAwsChatPreprocessor(req, res, next);
} else {
oaiToAwsTextPreprocessor(req, res, next);
}
};
const awsClaudeRouter = Router();
// Native(ish) Anthropic text completion endpoint.
awsClaudeRouter.post(
"/v1/complete",
ipLimiter,
preprocessAwsTextRequest,
awsClaudeProxy
);
// Native Anthropic chat completion endpoint.
awsClaudeRouter.post(
"/v1/messages",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
),
awsClaudeProxy
);
// OpenAI-to-AWS Anthropic compatibility endpoint.
awsClaudeRouter.post(
"/v1/chat/completions",
ipLimiter,
preprocessOpenAICompatRequest,
awsClaudeProxy
);
/**
* Tries to deal with:
* - frontends sending AWS model names even when they want to use the OpenAI-
* compatible endpoint
* - frontends sending Anthropic model names that AWS doesn't recognize
* - frontends sending OpenAI model names because they expect the proxy to
* translate them
*
* If the client sends an AWS model ID, it will be used verbatim. Otherwise,
* various strategies are used to try to map a non-AWS model name to an AWS model ID.
*/
function maybeReassignModel(req: Request) {
// Validate Claude 4.1 Opus parameters before processing
validateClaude41OpusParameters(req);
const model = req.body.model;
// If it looks like an AWS model, use it as-is
if (model.includes("anthropic.claude")) {
return;
}
// Anthropic model names can look like:
// - claude-v1
// - claude-2.1
// - claude-3-5-sonnet-20240620 (old format: number-model)
// - claude-3-opus-latest (old format: number-model)
// - claude-sonnet-4-20250514 (new format: model-number)
// - claude-opus-4-latest (new format: model-number)
// - anthropic.claude-3-sonnet-20240229-v1:0 (AWS format with old naming)
// - anthropic.claude-sonnet-4-20250514-v1:0 (AWS format with new naming)
const pattern =
/^(?:anthropic\.)?claude-(?:(?:(instant-)?(v)?(\d+)([.-](\d))?(-\d+k)?(-sonnet-|-opus-|-haiku-)?(latest|\d*))|(?:(sonnet-|opus-|haiku-)(\d+)([.-](\d))?(-\d+k)?-(latest|\d+)))(?:-v\d+(?::\d+)?)?$/i;
const match = model.match(pattern);
if (!match) {
throw new Error(`Provided model name (${model}) doesn't resemble a Claude model ID.`);
}
// Check which format matched (old or new)
// New format: claude-sonnet-4-20250514 or anthropic.claude-sonnet-4-20250514-v1:0
// Old format: claude-3-sonnet-20240229 or anthropic.claude-3-sonnet-20240229-v1:0
const isNewFormat = !!match[9];
let major, minor, name, rev;
if (isNewFormat) {
// New format: claude-sonnet-4-20250514
// match[9] = sonnet-/opus-/haiku-
// match[10] = 4 (major version)
// match[12] = minor version (if any, from [.-](\d) pattern)
// match[14] = revision (latest or date)
const modelType = match[9]?.match(/([a-z]+)/)?.[1] || "";
name = modelType;
major = match[10];
minor = match[12];
rev = match[14];
// Special case: if revision is a single digit and no minor version,
// treat revision as minor version (e.g., claude-opus-4-1 -> version 4.1)
if (!minor && rev && /^\d$/.test(rev)) {
minor = rev;
rev = undefined;
}
// Handle instant case for completeness
const instant = match[1];
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
}
} else {
// Old format: claude-3-sonnet-20240229
// match[1] = instant- (if any)
// match[3] = 3 (major version)
// match[5] = minor version (if any)
// match[7] = -sonnet-/-opus-/-haiku- (if any)
// match[8] = revision (latest or date)
const instant = match[1];
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
}
major = match[3];
minor = match[5];
name = match[7]?.match(/([a-z]+)/)?.[1] || "";
rev = match[8];
}
const ver = minor ? `${major}.${minor}` : major;
switch (ver) {
case "1":
case "1.0":
req.body.model = "anthropic.claude-v1";
return;
case "2":
case "2.0":
req.body.model = "anthropic.claude-v2";
return;
case "2.1":
req.body.model = "anthropic.claude-v2:1";
return;
case "3":
case "3.0":
// there is only one snapshot for all Claude 3 models so there is no need
// to check the revision
switch (name) {
case "sonnet":
req.body.model = "anthropic.claude-3-sonnet-20240229-v1:0";
return;
case "haiku":
req.body.model = "anthropic.claude-3-haiku-20240307-v1:0";
return;
case "opus":
req.body.model = "anthropic.claude-3-opus-20240229-v1:0";
return;
}
break;
case "3.5":
switch (name) {
case "sonnet":
switch (rev) {
case "20241022":
case "latest":
req.body.model = "anthropic.claude-3-5-sonnet-20241022-v2:0";
return;
case "20240620":
req.body.model = "anthropic.claude-3-5-sonnet-20240620-v1:0";
return;
}
break;
case "haiku":
switch (rev) {
case "20241022":
case "latest":
req.body.model = "anthropic.claude-3-5-haiku-20241022-v1:0";
return;
}
break;
case "opus":
// No Claude 3.5 Opus snapshot has been announced; add a mapping here if one is released.
break;
}
case "3.7":
switch (name) {
case "sonnet":
req.body.model = "anthropic.claude-3-7-sonnet-20250219-v1:0";
return;
}
break;
case "4":
case "4.0":
// Mapping "claude-4-..." variants to their actual AWS Bedrock IDs
// as defined in src/shared/claude-models.ts.
switch (name) {
case "sonnet":
req.body.model = "anthropic.claude-sonnet-4-20250514-v1:0";
return;
case "opus":
req.body.model = "anthropic.claude-opus-4-20250514-v1:0";
return;
// No case for "haiku" here, as "claude-4-haiku" is not defined
// in claude-models.ts. It will fall through and throw an error.
}
break;
case "4.1":
// Mapping "claude-4.1-..." variants to their actual AWS Bedrock IDs
// as defined in src/shared/claude-models.ts.
switch (name) {
case "opus":
req.body.model = "anthropic.claude-opus-4-1-20250805-v1:0";
return;
// No sonnet or haiku variants for 4.1 yet
}
break;
}
throw new Error(`Provided model name (${model}) could not be mapped to a known AWS Claude model ID.`);
}
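// Hedged reference (not part of the commit): example inputs and the Bedrock IDs the
// function above maps them to, per its switch cases. Bedrock IDs containing
// "anthropic.claude" pass through unchanged; names the regex cannot parse throw.
const exampleAwsModelMappings: Record<string, string> = {
  "claude-2.1": "anthropic.claude-v2:1",
  "claude-3-haiku-20240307": "anthropic.claude-3-haiku-20240307-v1:0",
  "claude-3-5-sonnet-latest": "anthropic.claude-3-5-sonnet-20241022-v2:0",
  "claude-3-7-sonnet-20250219": "anthropic.claude-3-7-sonnet-20250219-v1:0",
  "claude-sonnet-4-20250514": "anthropic.claude-sonnet-4-20250514-v1:0",
  "claude-opus-4-1-20250805": "anthropic.claude-opus-4-1-20250805-v1:0",
};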
export const awsClaude = awsClaudeRouter;
-95
View File
@@ -1,95 +0,0 @@
import { Request, Router } from "express";
import {
detectMistralInputApi,
transformMistralTextToMistralChat,
} from "./mistral-ai";
import { ipLimiter } from "./rate-limit";
import { ProxyResHandlerWithBody } from "./middleware/response";
import {
createPreprocessorMiddleware,
finalizeSignedRequest,
signAwsRequest,
} from "./middleware/request";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
const awsMistralBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
if (req.inboundApi === "mistral-ai" && req.outboundApi === "mistral-text") {
newBody = transformMistralTextToMistralChat(body);
}
// AWS does not always confirm the model in the response, so we have to add it
if (!newBody.model && req.body.model) {
newBody.model = req.body.model;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const awsMistralProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
mutations: [signAwsRequest, finalizeSignedRequest],
blockingResponseHandler: awsMistralBlockingResponseHandler,
});
function maybeReassignModel(req: Request) {
const model = req.body.model;
// If it looks like an AWS model, use it as-is
if (model.startsWith("mistral.")) {
return;
}
// Mistral 7B Instruct
else if (model.includes("7b")) {
req.body.model = "mistral.mistral-7b-instruct-v0:2";
}
// Mistral 8x7B Instruct
else if (model.includes("8x7b")) {
req.body.model = "mistral.mixtral-8x7b-instruct-v0:1";
}
// Mistral Large (Feb 2024)
else if (model.includes("large-2402")) {
req.body.model = "mistral.mistral-large-2402-v1:0";
}
// Mistral Large 2 (July 2024)
else if (model.includes("large")) {
req.body.model = "mistral.mistral-large-2407-v1:0";
}
// Mistral Small (Feb 2024)
else if (model.includes("small")) {
req.body.model = "mistral.mistral-small-2402-v1:0";
} else {
throw new Error(
`Can't map '${model}' to a supported AWS model ID; make sure you are requesting a Mistral model supported by Amazon Bedrock`
);
}
}
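// Hedged reference (not part of the commit): examples of the substring-based mapping
// performed above. Names already starting with "mistral." are used verbatim; names
// matching none of the substrings throw.
const exampleAwsMistralMappings: Record<string, string> = {
  "mistral-7b-instruct": "mistral.mistral-7b-instruct-v0:2",
  "mistral-large-2402": "mistral.mistral-large-2402-v1:0",
  "mistral-large-latest": "mistral.mistral-large-2407-v1:0",
  "mistral-small-latest": "mistral.mistral-small-2402-v1:0",
};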
const nativeMistralChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "mistral-ai", outApi: "mistral-ai", service: "aws" },
{
beforeTransform: [detectMistralInputApi],
afterTransform: [maybeReassignModel],
}
);
const awsMistralRouter = Router();
awsMistralRouter.post(
"/v1/chat/completions",
ipLimiter,
nativeMistralChatPreprocessor,
awsMistralProxy
);
export const awsMistral = awsMistralRouter;
+318 -81
View File
@@ -1,98 +1,335 @@
/* Shared code between AWS Claude and AWS Mistral endpoints. */
import { Request, Response, Router } from "express";
import { Request, RequestHandler, Response, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { config } from "../config";
import { addV1 } from "./add-v1";
import { awsClaude } from "./aws-claude";
import { awsMistral } from "./aws-mistral";
import { AwsBedrockKey, keyPool } from "../shared/key-management";
import { claudeModels, findByAwsId } from "../shared/claude-models";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
createPreprocessorMiddleware,
signAwsRequest,
finalizeSignedRequest,
createOnProxyReqHandler,
} from "./middleware/request";
import {
ProxyResHandlerWithBody,
createOnProxyResHandler,
} from "./middleware/response";
import { transformAnthropicChatResponseToAnthropicText, transformAnthropicChatResponseToOpenAI } from "./anthropic";
import { sendErrorToClient } from "./middleware/response/error-generator";
const awsRouter = Router();
awsRouter.get(["/:vendor?/v1/models", "/:vendor?/models"], handleModelsRequest);
awsRouter.use("/claude", addV1, awsClaude);
awsRouter.use("/mistral", addV1, awsMistral);
const LATEST_AWS_V2_MINOR_VERSION = "1";
let modelsCache: any = null;
let modelsCacheTime = 0;
const getModelsResponse = () => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
const MODELS_CACHE_TTL = 10000;
let modelsCache: Record<string, any> = {};
let modelsCacheTime: Record<string, number> = {};
function handleModelsRequest(req: Request, res: Response) {
if (!config.awsCredentials) return { object: "list", data: [] };
const vendor = req.params.vendor?.length
? req.params.vendor === "claude"
? "anthropic"
: req.params.vendor
: "all";
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
const variants = [
"anthropic.claude-v2",
"anthropic.claude-v2:1",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-sonnet-20240229-v1:0",
"anthropic.claude-3-opus-20240229-v1:0",
];
const cacheTime = modelsCacheTime[vendor] || 0;
if (new Date().getTime() - cacheTime < MODELS_CACHE_TTL) {
return res.json(modelsCache[vendor]);
const models = variants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "anthropic",
permission: [],
root: "claude",
parent: null,
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
/** Only used for non-streaming requests. */
const awsResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
const availableAwsModelIds = new Set<string>();
for (const key of keyPool.list()) {
if (key.isDisabled || key.service !== "aws") continue;
(key as AwsBedrockKey).modelIds.forEach((id) => availableAwsModelIds.add(id));
let newBody = body;
switch (`${req.inboundApi}<-${req.outboundApi}`) {
case "openai<-anthropic-text":
req.log.info("Transforming Anthropic Text back to OpenAI format");
newBody = transformAwsTextResponseToOpenAI(body, req);
break;
case "openai<-anthropic-chat":
req.log.info("Transforming AWS Anthropic Chat back to OpenAI format");
newBody = transformAnthropicChatResponseToOpenAI(body);
break;
case "anthropic-text<-anthropic-chat":
req.log.info("Transforming AWS Anthropic Chat back to Text format");
newBody = transformAnthropicChatResponseToAnthropicText(body);
break;
}
const mistralMappings = new Map([
["mistral.mistral-7b-instruct-v0:2", "Mistral 7B Instruct"],
["mistral.mixtral-8x7b-instruct-v0:1", "Mixtral 8x7B Instruct"],
["mistral.mistral-large-2402-v1:0", "Mistral Large 2402"],
["mistral.mistral-large-2407-v1:0", "Mistral Large 2407"],
["mistral.mistral-small-2402-v1:0", "Mistral Small 2402"],
]);
// AWS does not always confirm the model in the response, so we have to add it
if (!newBody.model && req.body.model) {
newBody.model = req.body.model;
}
const date = new Date();
const claudeModelsList = claudeModels
.filter(model => availableAwsModelIds.has(model.awsId))
.map(model => ({
id: model.anthropicId,
owned_by: "anthropic",
type: "model",
display_name: model.displayName,
created_at: date.toISOString(),
object: "model",
created: date.getTime(),
permission: [],
root: "anthropic",
parent: null,
}));
const mistralModelsList = Array.from(mistralMappings.keys())
.filter(id => availableAwsModelIds.has(id))
.map(id => {
return {
id,
owned_by: "mistral",
type: "model",
display_name: mistralMappings.get(id) || id.split('.')[1],
created_at: date.toISOString(),
object: "model",
created: date.getTime(),
permission: [],
root: "mistral",
parent: null,
};
});
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const allModels = [...claudeModelsList, ...mistralModelsList];
const filteredModels = vendor === "all"
? allModels
: allModels.filter(m => m.root === vendor);
modelsCache[vendor] = {
object: "list",
data: filteredModels,
has_more: false,
first_id: filteredModels[0]?.id,
last_id: filteredModels[filteredModels.length - 1]?.id,
/**
* Transforms a model response from the Anthropic API to match those from the
* OpenAI API, for users using Claude via the OpenAI-compatible endpoint. This
* is only used for non-streaming requests as streaming requests are handled
* on-the-fly.
*/
function transformAwsTextResponseToOpenAI(
awsBody: Record<string, any>,
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
return {
id: "aws-" + v4(),
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage: {
prompt_tokens: req.promptTokens,
completion_tokens: req.outputTokens,
total_tokens: totalTokens,
},
choices: [
{
message: {
role: "assistant",
content: awsBody.completion?.trim(),
},
finish_reason: awsBody.stop_reason,
index: 0,
},
],
};
modelsCacheTime[vendor] = date.getTime();
}
return res.json(modelsCache[vendor]);
const awsProxy = createQueueMiddleware({
beforeProxy: signAwsRequest,
proxyMiddleware: createProxyMiddleware({
target: "bad-target-will-be-rewritten",
router: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([awsResponseHandler]),
error: handleProxyError,
},
}),
});
const nativeTextPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
const textToChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "anthropic-text", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes text completion prompts to aws anthropic-chat if they need translation
* (claude-3 based models do not support the old text completion endpoint).
*/
const preprocessAwsTextRequest: RequestHandler = (req, res, next) => {
if (req.body.model?.includes("claude-3")) {
textToChatPreprocessor(req, res, next);
} else {
nativeTextPreprocessor(req, res, next);
}
};
const oaiToAwsTextPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-text", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
const oaiToAwsChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes an OpenAI prompt to either the legacy Claude text completion endpoint
* or the new Claude chat completion endpoint, based on the requested model.
*/
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
if (req.body.model?.includes("claude-3")) {
oaiToAwsChatPreprocessor(req, res, next);
} else {
oaiToAwsTextPreprocessor(req, res, next);
}
};
const awsRouter = Router();
awsRouter.get("/v1/models", handleModelRequest);
// Native(ish) Anthropic text completion endpoint.
awsRouter.post("/v1/complete", ipLimiter, preprocessAwsTextRequest, awsProxy);
// Native Anthropic chat completion endpoint.
awsRouter.post(
"/v1/messages",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "aws" },
{ afterTransform: [maybeReassignModel] }
),
awsProxy
);
// Temporary force-Claude3 endpoint
awsRouter.post(
"/v1/sonnet/:action(complete|messages)",
ipLimiter,
handleCompatibilityRequest,
createPreprocessorMiddleware({
inApi: "anthropic-text",
outApi: "anthropic-chat",
service: "aws",
}),
awsProxy
);
// OpenAI-to-AWS Anthropic compatibility endpoint.
awsRouter.post(
"/v1/chat/completions",
ipLimiter,
preprocessOpenAICompatRequest,
awsProxy
);
/**
* Tries to deal with:
* - frontends sending AWS model names even when they want to use the OpenAI-
* compatible endpoint
* - frontends sending Anthropic model names that AWS doesn't recognize
* - frontends sending OpenAI model names because they expect the proxy to
* translate them
*/
function maybeReassignModel(req: Request) {
const model = req.body.model;
// If client already specified an AWS Claude model ID, use it
if (model.includes("anthropic.claude")) {
return;
}
const pattern =
/^(claude-)?(instant-)?(v)?(\d+)(\.(\d+))?(-\d+k)?(-sonnet-?|-opus-?|-haiku-?)(\d*)/i;
const match = model.match(pattern);
// If there's no match, return the latest v2 model
if (!match) {
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
}
const instant = match[2];
const major = match[4];
const minor = match[6];
if (instant) {
req.body.model = "anthropic.claude-instant-v1";
return;
}
// There's only one v1 model
if (major === "1") {
req.body.model = "anthropic.claude-v1";
return;
}
// Try to map Anthropic API v2 models to AWS v2 models
if (major === "2") {
if (minor === "0") {
req.body.model = "anthropic.claude-v2";
return;
}
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
}
// AWS currently only supports one v3 model.
const variant = match[8]; // sonnet, opus, or haiku
const variantVersion = match[9];
if (major === "3") {
if (variant.includes("opus")) {
req.body.model = "anthropic.claude-3-opus-20240229-v1:0";
} else if (variant.includes("haiku")) {
req.body.model = "anthropic.claude-3-haiku-20240307-v1:0";
} else {
req.body.model = "anthropic.claude-3-sonnet-20240229-v1:0";
}
return;
}
// Fallback to latest v2 model
req.body.model = `anthropic.claude-v2:${LATEST_AWS_V2_MINOR_VERSION}`;
return;
}
export function handleCompatibilityRequest(
req: Request,
res: Response,
next: any
) {
const action = req.params.action;
const alreadyInChatFormat = Boolean(req.body.messages);
const compatModel = "anthropic.claude-3-sonnet-20240229-v1:0";
req.log.info(
{ inputModel: req.body.model, compatModel, alreadyInChatFormat },
"Handling AWS compatibility request"
);
if (action === "messages" || alreadyInChatFormat) {
return sendErrorToClient({
req,
res,
options: {
title: "Unnecessary usage of compatibility endpoint",
message: `Your client seems to already support the new Claude API format. This endpoint is intended for clients that do not yet support the new format.\nUse the normal \`/aws/claude\` proxy endpoint instead.`,
format: "unknown",
statusCode: 400,
reqId: req.id,
obj: {
requested_endpoint: "/aws/claude/sonnet",
correct_endpoint: "/aws/claude",
},
},
});
}
req.body.model = compatModel;
next();
}
export const aws = awsRouter;
+70 -18
View File
@@ -1,30 +1,73 @@
import { RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { generateModelList } from "./openai";
import { keyPool } from "../shared/key-management";
import {
AzureOpenAIModelFamily,
getAzureOpenAIModelFamily,
ModelFamily,
} from "../shared/models";
import { logger } from "../logger";
import { KNOWN_OPENAI_MODELS } from "./openai";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addAzureKey,
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeSignedRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
let modelsCache: any = null;
let modelsCacheTime = 0;
const handleModelRequest: RequestHandler = (_req, res) => {
function getModelsResponse() {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return res.status(200).json(modelsCache);
return modelsCache;
}
if (!config.azureCredentials) return { object: "list", data: [] };
let available = new Set<AzureOpenAIModelFamily>();
for (const key of keyPool.list()) {
if (key.isDisabled || key.service !== "azure") continue;
key.modelFamilies.forEach((family) =>
available.add(family as AzureOpenAIModelFamily)
);
}
const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
available = new Set([...available].filter((x) => allowed.has(x)));
const result = generateModelList("azure");
const models = KNOWN_OPENAI_MODELS.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "azure",
permission: [
{
id: "modelperm-" + id,
object: "model_permission",
created: new Date().getTime(),
organization: "*",
group: null,
is_blocking: false,
},
],
root: id,
parent: null,
})).filter((model) => available.has(getAzureOpenAIModelFamily(model.id)));
modelsCache = { object: "list", data: result };
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
res.status(200).json(modelsCache);
return modelsCache;
}
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
@@ -40,17 +83,26 @@ const azureOpenaiResponseHandler: ProxyResHandlerWithBody = async (
res.status(200).json({ ...body, proxy: body.proxy });
};
const azureOpenAIProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
const { hostname, protocol } = signedRequest;
return `${protocol}//${hostname}`;
},
mutations: [addAzureKey, finalizeSignedRequest],
blockingResponseHandler: azureOpenaiResponseHandler,
const azureOpenAIProxy = createQueueMiddleware({
beforeProxy: addAzureKey,
proxyMiddleware: createProxyMiddleware({
target: "will be set by router",
router: (req) => {
if (!req.signedRequest) throw new Error("signedRequest not set");
const { hostname, path } = req.signedRequest;
return `https://${hostname}${path}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([azureOpenaiResponseHandler]),
error: handleProxyError,
},
}),
});
const azureOpenAIRouter = Router();
azureOpenAIRouter.get("/v1/models", handleModelRequest);
azureOpenAIRouter.post(
-222
View File
@@ -1,222 +0,0 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { CohereKey, keyPool } from "../shared/key-management";
import { isCohereModel, normalizeMessages } from "../shared/api-schemas/cohere";
import { logger } from "../logger";
const log = logger.child({ module: "proxy", service: "cohere" });
let modelsCache: any = null;
let modelsCacheTime = 0;
const cohereResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
res.status(200).json({ ...body, proxy: body.proxy });
};
const getModelsResponse = async () => {
// Return cache if less than 1 minute old
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
try {
// Get a Cohere key directly
const modelToUse = "command"; // Use any Cohere model here - just for key selection
const cohereKey = keyPool.get(modelToUse, "cohere") as CohereKey;
if (!cohereKey || !cohereKey.key) {
log.warn("No valid Cohere key available for model listing");
throw new Error("No valid Cohere API key available");
}
// Fetch models directly from Cohere API
const response = await axios.get("https://api.cohere.com/v1/models", {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${cohereKey.key}`,
"Cohere-Version": "2022-12-06"
},
});
if (!response.data || !response.data.models) {
throw new Error("Unexpected response format from Cohere API");
}
// Extract models and filter by those that support the chat endpoint
const filteredModels = response.data.models
.filter((model: any) => {
return model.endpoints && model.endpoints.includes("chat");
})
.map((model: any) => ({
id: model.name,
name: model.name,
// Adding additional OpenAI-compatible fields
context_window: model.context_window_size || 4096,
max_tokens: model.max_tokens || 4096
}));
log.debug({ modelCount: filteredModels.length, models: filteredModels.map((m: any) => m.id) }, "Filtered models from Cohere API");
// Format response to ensure OpenAI compatibility
const models = {
object: "list",
data: filteredModels.map((model: any) => ({
id: model.id,
object: "model",
created: Math.floor(Date.now() / 1000),
owned_by: "cohere",
permission: [],
root: model.id,
parent: null,
context_length: model.context_window,
})),
};
log.debug({ modelCount: filteredModels.length }, "Retrieved models from Cohere API");
// Cache the response
modelsCache = models;
modelsCacheTime = new Date().getTime();
return models;
} catch (error) {
// Provide detailed logging for better troubleshooting
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error fetching Cohere models"
);
} else {
log.error({ error }, "Unknown error fetching Cohere models");
}
// Return empty list as fallback
return {
object: "list",
data: [],
};
}
};
const handleModelRequest: RequestHandler = async (_req, res) => {
try {
const models = await getModelsResponse();
res.status(200).json(models);
} catch (error) {
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error handling model request"
);
} else {
log.error({ error }, "Unknown error handling model request");
}
res.status(500).json({ error: "Failed to fetch models" });
}
};
// Function to prepare messages for Cohere API
function prepareMessages(req: Request) {
if (req.body.messages && Array.isArray(req.body.messages)) {
req.body.messages = normalizeMessages(req.body.messages);
}
}
// Function to remove parameters not supported by Cohere models
function removeUnsupportedParameters(req: Request) {
const model = req.body.model;
// Remove parameters that Cohere doesn't support
if (req.body.logit_bias !== undefined) {
delete req.body.logit_bias;
}
if (req.body.top_logprobs !== undefined) {
delete req.body.top_logprobs;
}
if (req.body.max_completion_tokens !== undefined) {
delete req.body.max_completion_tokens;
}
// Handle structured output format
if (req.body.response_format && req.body.response_format.schema) {
// Transform to Cohere's format if needed
const jsonSchema = req.body.response_format.schema;
req.body.response_format = {
type: "json_object",
schema: jsonSchema
};
}
// Logging for debugging
if (process.env.NODE_ENV !== 'production') {
log.debug({ body: req.body }, "Request after parameter cleanup");
}
}
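// Illustrative sketch (not part of the commit): effect of the parameter cleanup above
// on a hypothetical OpenAI-style request body.
const cohereCleanupExample = {
  body: {
    model: "command-r",
    logit_bias: { "50256": -100 },
    top_logprobs: 3,
    max_completion_tokens: 256,
    response_format: { schema: { type: "object" } },
  },
} as any;
removeUnsupportedParameters(cohereCleanupExample);
// logit_bias, top_logprobs and max_completion_tokens are removed, and
// response_format is rewritten to { type: "json_object", schema: { ... } }.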
// Set up count token functionality for Cohere models
function countCohereTokens(req: Request) {
const model = req.body.model;
if (isCohereModel(model)) {
// Count tokens using prompt tokens (simplified)
if (req.promptTokens) {
req.log.debug(
{ tokens: req.promptTokens },
"Estimated token count for Cohere prompt"
);
}
}
}
const cohereProxy = createQueuedProxyMiddleware({
mutations: [
addKey,
// Add Cohere-Version header to every request
(manager) => {
manager.setHeader("Cohere-Version", "2022-12-06");
},
finalizeBody
],
target: "https://api.cohere.ai/compatibility",
blockingResponseHandler: cohereResponseHandler,
});
const cohereRouter = Router();
cohereRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "cohere" },
{ afterTransform: [ prepareMessages, removeUnsupportedParameters, countCohereTokens ] }
),
cohereProxy
);
cohereRouter.post(
"/v1/embeddings",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "cohere" },
{ afterTransform: [] }
),
cohereProxy
);
cohereRouter.get("/v1/models", handleModelRequest);
export const cohere = cohereRouter;
-135
View File
@@ -1,135 +0,0 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { DeepseekKey, keyPool } from "../shared/key-management";
let modelsCache: any = null;
let modelsCacheTime = 0;
const deepseekResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const getModelsResponse = async () => {
// Return cache if less than 1 minute old
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
try {
// Get a Deepseek key directly using keyPool.get()
const modelToUse = "deepseek-chat"; // Use any Deepseek model here - just for key selection
const deepseekKey = keyPool.get(modelToUse, "deepseek") as DeepseekKey;
if (!deepseekKey || !deepseekKey.key) {
throw new Error("Failed to get valid Deepseek key");
}
// Fetch models from Deepseek API with authorization
const response = await axios.get("https://api.deepseek.com/models", {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${deepseekKey.key}`
},
});
// If successful, update the cache
if (response.data && response.data.data) {
modelsCache = {
object: "list",
data: response.data.data.map((model: any) => ({
id: model.id,
object: "model",
owned_by: "deepseek",
})),
};
} else {
throw new Error("Unexpected response format from Deepseek API");
}
} catch (error) {
console.error("Error fetching Deepseek models:", error);
throw error; // No fallback - error will be passed to caller
}
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = async (_req, res) => {
try {
const modelsResponse = await getModelsResponse();
res.status(200).json(modelsResponse);
} catch (error) {
console.error("Error in handleModelRequest:", error);
res.status(500).json({ error: "Failed to fetch models" });
}
};
const deepseekProxy = createQueuedProxyMiddleware({
mutations: [addKey, finalizeBody],
target: "https://api.deepseek.com/beta",
blockingResponseHandler: deepseekResponseHandler,
});
const deepseekRouter = Router();
// Combines all assistant messages at the end of the context into one and adds the
// beta 'prefix' option, making prefills work the same way they do for Claude.
function enablePrefill(req: Request) {
// Set the NO_DEEPSEEK_PREFILL environment variable to disable this behavior.
if (process.env.NO_DEEPSEEK_PREFILL) return
const msgs = req.body.messages;
if (msgs.at(-1)?.role !== 'assistant') return;
let i = msgs.length - 1;
let content = '';
while (i >= 0 && msgs[i].role === 'assistant') {
// maybe we should also add a newline between messages? no for now.
content = msgs[i--].content + content;
}
msgs.splice(i + 1, msgs.length, { role: 'assistant', content, prefix: true });
}
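// Hedged example (not part of the commit): how enablePrefill collapses a trailing
// run of assistant messages into a single prefix message.
const prefillExampleReq = {
  body: {
    model: "deepseek-chat",
    messages: [
      { role: "user", content: "Write a haiku." },
      { role: "assistant", content: "Cherry " },
      { role: "assistant", content: "blossoms fall" },
    ],
  },
} as any;
enablePrefill(prefillExampleReq);
// The two trailing assistant messages become one:
// { role: "assistant", content: "Cherry blossoms fall", prefix: true }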
function removeReasonerStuff(req: Request) {
if (req.body.model === "deepseek-reasoner") {
// https://api-docs.deepseek.com/guides/reasoning_model
delete req.body.presence_penalty;
delete req.body.frequency_penalty;
delete req.body.temperature;
delete req.body.top_p;
delete req.body.logprobs;
delete req.body.top_logprobs;
}
}
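// Minimal sketch (not part of the commit): sampling parameters that the reasoning
// model does not accept are stripped before the request is forwarded.
const reasonerExampleReq = {
  body: { model: "deepseek-reasoner", temperature: 0.7, top_p: 0.9, logprobs: true },
} as any;
removeReasonerStuff(reasonerExampleReq);
// Only { model: "deepseek-reasoner" } remains of the fields above; requests for
// other DeepSeek models are left untouched.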
deepseekRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "deepseek" },
{ afterTransform: [ enablePrefill, removeReasonerStuff ] }
),
deepseekProxy
);
deepseekRouter.get("/v1/models", handleModelRequest);
export const deepseek = deepseekRouter;
-13
View File
@@ -12,7 +12,6 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
// pass the _proxy_ key in this header too, instead of providing it as a
// Bearer token in the Authorization header. So we need to check both.
// Prefer the Authorization header if both are present.
// Google AI uses a key querystring parameter.
if (req.headers.authorization) {
const token = req.headers.authorization?.slice("Bearer ".length);
@@ -26,18 +25,6 @@ function getProxyAuthorizationFromRequest(req: Request): string | undefined {
return token;
}
if (req.headers["x-goog-api-key"]) {
const token = req.headers["x-goog-api-key"]?.toString();
delete req.headers["x-goog-api-key"];
return token;
}
if (req.query.key) {
const token = req.query.key?.toString();
delete req.query.key;
return token;
}
return undefined;
}
-257
View File
@@ -1,257 +0,0 @@
import { Request, RequestHandler, Router } from "express";
import { config } from "../config";
import { transformAnthropicChatResponseToOpenAI } from "./anthropic";
import { ipLimiter } from "./rate-limit";
import {
createPreprocessorMiddleware,
finalizeSignedRequest,
signGcpRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { validateClaude41OpusParameters } from "../shared/claude-4-1-validation";
const LATEST_GCP_SONNET_MINOR_VERSION = "20240229";
let modelsCache: any = null;
let modelsCacheTime = 0;
const getModelsResponse = () => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
if (!config.gcpCredentials) return { object: "list", data: [] };
// https://docs.anthropic.com/en/docs/about-claude/models
const variants = [
"claude-3-haiku@20240307",
"claude-3-5-haiku@20241022",
"claude-3-5-sonnet@20240620",
"claude-3-5-sonnet-v2@20241022",
"claude-3-7-sonnet@20250219",
"claude-sonnet-4@20250514",
"claude-opus-4@20250514",
"claude-opus-4-1@20250805",
];
const models = variants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "anthropic",
permission: [],
root: "claude",
parent: null,
}));
modelsCache = { object: "list", data: models };
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
const gcpBlockingResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
let newBody = body;
switch (`${req.inboundApi}<-${req.outboundApi}`) {
case "openai<-anthropic-chat":
req.log.info("Transforming Anthropic Chat back to OpenAI format");
newBody = transformAnthropicChatResponseToOpenAI(body);
break;
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const gcpProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
return `${signedRequest.protocol}//${signedRequest.hostname}`;
},
mutations: [signGcpRequest, finalizeSignedRequest],
blockingResponseHandler: gcpBlockingResponseHandler,
});
const oaiToChatPreprocessor = createPreprocessorMiddleware(
{ inApi: "openai", outApi: "anthropic-chat", service: "gcp" },
{ afterTransform: [maybeReassignModel] }
);
/**
* Routes an OpenAI prompt to either the legacy Claude text completion endpoint
* or the new Claude chat completion endpoint, based on the requested model.
*/
const preprocessOpenAICompatRequest: RequestHandler = (req, res, next) => {
oaiToChatPreprocessor(req, res, next);
};
const gcpRouter = Router();
gcpRouter.get("/v1/models", handleModelRequest);
// Native Anthropic chat completion endpoint.
gcpRouter.post(
"/v1/messages",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "anthropic-chat", outApi: "anthropic-chat", service: "gcp" },
{ afterTransform: [maybeReassignModel] }
),
gcpProxy
);
// OpenAI-to-GCP Anthropic compatibility endpoint.
gcpRouter.post(
"/v1/chat/completions",
ipLimiter,
preprocessOpenAICompatRequest,
gcpProxy
);
/**
* Tries to deal with:
* - frontends sending GCP model names even when they want to use the OpenAI-
* compatible endpoint
* - frontends sending Anthropic model names that GCP doesn't recognize
* - frontends sending OpenAI model names because they expect the proxy to
* translate them
*
* If the client sends a GCP model ID, it will be used verbatim. Otherwise,
* various strategies are used to try to map a non-GCP model name to a GCP model ID.
*/
function maybeReassignModel(req: Request) {
// Validate Claude 4.1 Opus parameters before processing
validateClaude41OpusParameters(req);
const model = req.body.model;
const DEFAULT_MODEL = "claude-3-5-sonnet-v2@20241022";
// If it looks like an GCP model, use it as-is
if (model.startsWith("claude-") && model.includes("@")) {
return;
}
// Anthropic model names can look like:
// - claude-3-sonnet
// - claude-3.5-sonnet
// - claude-3-5-haiku
// - claude-3-5-haiku-latest
// - claude-3-5-sonnet-20240620
// - claude-opus-4-1 (new format)
// - claude-4.1-opus (alternative format)
const pattern = /^claude-(?:(\d+)[.-]?(\d)?-(sonnet|opus|haiku)(?:-(latest|\d+))?|(opus|sonnet|haiku)-(\d+)[.-]?(\d)?(?:-(latest|\d+))?)/i;
const match = model.match(pattern);
if (!match) {
req.body.model = DEFAULT_MODEL;
return;
}
// Handle both formats: claude-3-5-sonnet and claude-opus-4-1
const [_, major1, minor1, flavor1, rev1, flavor2, major2, minor2, rev2] = match;
let major, minor, flavor, rev;
if (major1) {
// Old format: claude-3-5-sonnet
major = major1;
minor = minor1;
flavor = flavor1;
rev = rev1;
} else {
// New format: claude-opus-4-1
major = major2;
minor = minor2;
flavor = flavor2;
rev = rev2;
}
const ver = minor ? `${major}.${minor}` : major;
switch (ver) {
case "3":
case "3.0":
switch (flavor) {
case "haiku":
req.body.model = "claude-3-haiku@20240307";
break;
case "opus":
req.body.model = "claude-3-opus@20240229";
break;
case "sonnet":
req.body.model = "claude-3-sonnet@20240229";
break;
default:
req.body.model = "claude-3-sonnet@20240229";
}
return;
case "3.5":
switch (flavor) {
case "haiku":
req.body.model = "claude-3-5-haiku@20241022";
return;
case "opus":
// no 3.5 opus yet
req.body.model = DEFAULT_MODEL;
return;
case "sonnet":
if (rev === "20240620") {
req.body.model = "claude-3-5-sonnet@20240620";
} else {
// includes -latest, edit if anthropic actually releases 3.5 sonnet v3
req.body.model = DEFAULT_MODEL;
}
return;
default:
req.body.model = DEFAULT_MODEL;
}
return;
case "3.7":
switch (flavor) {
case "sonnet":
req.body.model = "claude-3-7-sonnet@20250219";
return;
}
break;
case "4":
case "4.0":
switch (flavor) {
case "opus":
req.body.model = "claude-opus-4@20250514";
return;
case "sonnet":
req.body.model = "claude-sonnet-4@20250514";
return;
default:
req.body.model = DEFAULT_MODEL;
}
break;
case "4.1":
switch (flavor) {
case "opus":
req.body.model = "claude-opus-4-1@20250805";
return;
default:
req.body.model = DEFAULT_MODEL;
}
break;
default:
req.body.model = DEFAULT_MODEL;
}
}
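// Hedged reference (not part of the commit): example mappings produced by the function
// above, per its switch cases. Names that already look like GCP IDs (starting with
// "claude-" and containing "@") are used verbatim; anything unrecognized falls back
// to DEFAULT_MODEL.
const exampleGcpModelMappings: Record<string, string> = {
  "claude-3-haiku": "claude-3-haiku@20240307",
  "claude-3-5-sonnet-20240620": "claude-3-5-sonnet@20240620",
  "claude-3.7-sonnet": "claude-3-7-sonnet@20250219",
  "claude-sonnet-4": "claude-sonnet-4@20250514",
  "claude-opus-4-1": "claude-opus-4-1@20250805",
};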
export const gcp = gcpRouter;
+42 -203
View File
@@ -1,24 +1,25 @@
import { Request, RequestHandler, Router, Response, NextFunction } from "express";
import { Request, RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { v4 } from "uuid";
import { GoogleAIKey, keyPool } from "../shared/key-management";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeSignedRequest,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { addGoogleAIKey } from "./middleware/request/mutators/add-google-ai-key";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import axios from "axios";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
import { addGoogleAIKey } from "./middleware/request/preprocessors/add-google-ai-key";
let modelsCache: any = null;
let modelsCacheTime = 0;
// Cache for native Google AI models
let nativeModelsCache: any = null;
let nativeModelsCacheTime = 0;
// https://ai.google.dev/models/gemini
// TODO: list models https://ai.google.dev/tutorials/rest_quickstart#list_models
@@ -29,24 +30,10 @@ const getModelsResponse = () => {
if (!config.googleAIKey) return { object: "list", data: [] };
const keys = keyPool
.list()
.filter((k) => k.service === "google-ai") as GoogleAIKey[];
if (keys.length === 0) {
modelsCache = { object: "list", data: [] };
modelsCacheTime = new Date().getTime();
return modelsCache;
}
const googleAIVariants = ["gemini-pro", "gemini-1.0-pro", "gemini-1.5-pro"];
// Get all model IDs from keys, excluding any with "bard" in the name
const modelIds = Array.from(
new Set(keys.map((k) => k.modelIds).flat())
).filter((id) => id.startsWith("models/") && !id.includes("bard"));
// Strip "models/" prefix from IDs before creating model objects
const models = modelIds.map((id) => ({
// Strip "models/" prefix from ID for consistency with request processing
id: id.startsWith("models/") ? id.slice("models/".length) : id,
const models = googleAIVariants.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "google",
@@ -61,51 +48,12 @@ const getModelsResponse = () => {
return modelsCache;
};
// Function to fetch native models from Google AI API
const getNativeModelsResponse = async () => {
// Return cached value if it was refreshed in the last minute
if (new Date().getTime() - nativeModelsCacheTime < 1000 * 60) {
return nativeModelsCache;
}
/*
* The official Google API requires an API key. However, SillyTavern only needs
* a list of model IDs and does not care about any other model metadata. We
* can therefore generate a **synthetic** response from the keys already
* loaded into the proxy (same source we use for the OpenAI-compatible
* endpoint) and completely avoid the outbound request. This removes the
* need for the frontend to supply the proxy password as an API key and
* prevents 4xx/5xx errors when the real Google API is unreachable or the key
* is missing.
*/
const openaiStyle = getModelsResponse();
const models = (openaiStyle.data || []).map((m: any) => ({
// Google AI Studio returns names in the format "models/<id>"
name: `models/${m.id}`,
supportedGenerationMethods: ["generateContent"],
}));
nativeModelsCache = { models };
nativeModelsCacheTime = new Date().getTime();
return nativeModelsCache;
};
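// Illustrative sketch (not part of the commit): the synthetic shape returned above,
// assuming the OpenAI-style list contains a single "gemini-1.5-pro" entry.
const exampleNativeModelsResponse = {
  models: [
    {
      name: "models/gemini-1.5-pro",
      supportedGenerationMethods: ["generateContent"],
    },
  ],
};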
const handleModelRequest: RequestHandler = (_req: Request, res: any) => {
const handleModelRequest: RequestHandler = (_req, res) => {
res.status(200).json(getModelsResponse());
};
// Native Gemini API model list request
const handleNativeModelRequest: RequestHandler = async (_req: Request, res: any) => {
try {
const modelsResponse = await getNativeModelsResponse();
res.status(200).json(modelsResponse);
} catch (error) {
console.error("Error in handleNativeModelRequest:", error);
res.status(500).json({ error: "Failed to fetch models" });
}
};
const googleAIBlockingResponseHandler: ProxyResHandlerWithBody = async (
/** Only used for non-streaming requests. */
const googleAIResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
@@ -129,30 +77,8 @@ function transformGoogleAIResponse(
req: Request
): Record<string, any> {
const totalTokens = (req.promptTokens ?? 0) + (req.outputTokens ?? 0);
// Handle the case where content might have different structures
let content = "";
// Check if the response has the expected structure
if (resBody.candidates && resBody.candidates[0]) {
const candidate = resBody.candidates[0];
// Extract content text with multiple fallbacks
if (candidate.content?.parts && candidate.content.parts[0]?.text) {
// Regular format with parts array containing text
content = candidate.content.parts[0].text;
} else if (candidate.content?.text) {
// Alternate format with direct text property
content = candidate.content.text;
} else if (typeof candidate.content?.parts?.[0] === 'string') {
// Some formats might have string parts
content = candidate.content.parts[0];
}
// Apply cleanup to the content if needed
content = content.replace(/^(.{0,50}?): /, () => "");
}
const parts = resBody.candidates[0].content?.parts ?? [{ text: "" }];
const content = parts[0].text.replace(/^(.{0,50}?): /, () => "");
return {
id: "goo-" + v4(),
object: "chat.completion",
@@ -166,130 +92,43 @@ function transformGoogleAIResponse(
choices: [
{
message: { role: "assistant", content },
finish_reason: resBody.candidates?.[0]?.finishReason || "STOP",
finish_reason: resBody.candidates[0].finishReason,
index: 0,
},
],
};
}
const googleAIProxy = createQueuedProxyMiddleware({
target: ({ signedRequest }: { signedRequest: any }) => {
if (!signedRequest) throw new Error("Must sign request before proxying");
const { protocol, hostname } = signedRequest;
return `${protocol}//${hostname}`;
},
mutations: [addGoogleAIKey, finalizeSignedRequest],
blockingResponseHandler: googleAIBlockingResponseHandler,
const googleAIProxy = createQueueMiddleware({
beforeProxy: addGoogleAIKey,
proxyMiddleware: createProxyMiddleware({
target: "bad-target-will-be-rewritten",
router: ({ signedRequest }) => {
const { protocol, hostname, path } = signedRequest;
return `${protocol}//${hostname}${path}`;
},
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [finalizeSignedRequest] }),
proxyRes: createOnProxyResHandler([googleAIResponseHandler]),
error: handleProxyError,
},
}),
});
const googleAIRouter = Router();
googleAIRouter.get("/v1/models", handleModelRequest);
googleAIRouter.get("/:apiVersion(v1alpha|v1beta)/models", handleNativeModelRequest);
/**
* Processes the thinking budget for Gemini 2.5 Flash model.
* Validation has been disabled - budget is passed through without limits.
*/
function processThinkingBudget(req: Request) {
// Validation disabled - budget is passed through without any range limits
// Previously enforced 0-24576 token limit
}
function setStreamFlag(req: Request) {
const isStreaming = req.url.includes("streamGenerateContent");
if (isStreaming) {
req.body.stream = true;
req.isStreaming = true;
} else {
req.body.stream = false;
req.isStreaming = false;
}
}
/**
* Strips 'models/' prefix from the beginning of model IDs if present.
* No longer forces redirection to gemini-1.5-pro-latest for non-Gemini models.
**/
function maybeReassignModel(req: Request) {
// Ensure model is on body as a lot of middleware will expect it.
const model = req.body.model || req.url.split("/").pop()?.split(":").shift();
if (!model) {
throw new Error("You must specify a model with your request.");
}
req.body.model = model;
// Only strip the 'models/' prefix if present
if (model.startsWith("models/")) {
req.body.model = model.slice("models/".length);
req.log.info({ originalModel: model, updatedModel: req.body.model }, "Stripped 'models/' prefix from model ID");
}
// No longer redirecting non-Gemini models to gemini-1.5-pro-latest
// This allows the original model to be passed through to the API
// If it's an invalid model, the Google AI API will return the appropriate error
}
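// Hedged example (not part of the commit): the prefix stripping performed above.
const googleModelExampleReq = {
  body: { model: "models/gemini-1.5-pro" },
  log: { info: () => {} },
} as any;
maybeReassignModel(googleModelExampleReq);
// googleModelExampleReq.body.model === "gemini-1.5-pro"; IDs without the
// "models/" prefix are passed through unchanged.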
/**
* Middleware to check for and block requests to experimental models.
* This function is intended to be used as a RequestPreprocessor.
* It throws an error if an experimental model is detected, which should be
* caught by the proxy's onError handler.
*
* Models can be allowed through the ALLOWED_EXP_MODELS environment variable.
*/
function checkAndBlockExperimentalModels(req: Request) { // Changed signature
const modelId = req.body.model as string | undefined;
// Check if the model ID contains "exp" (case-insensitive)
if (modelId && modelId.toLowerCase().includes("exp")) {
// Check if this specific model is in the allowlist
const allowedModels = config.allowedExpModels
?.split(",")
.map(model => model.trim())
.filter(model => model.length > 0) || [];
const isAllowed = allowedModels.some(allowedModel =>
modelId.toLowerCase() === allowedModel.toLowerCase()
);
if (isAllowed) {
req.log.info({ modelId }, "Allowing experimental Google AI model via allowlist.");
return; // Allow the request to proceed
}
req.log.warn({ modelId }, "Blocking request to experimental Google AI model.");
const err: any = new Error("Experimental models are too unstable to be supported in proxy code. Please use preview models instead.");
err.statusCode = 400;
throw err;
}
// If no experimental model, do nothing, allowing request to proceed.
}
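// Minimal sketch (not part of the commit), using a hypothetical model ID: any ID
// containing "exp" is rejected unless it appears in the ALLOWED_EXP_MODELS allowlist.
const expExampleReq = {
  body: { model: "gemini-2.0-flash-exp" },
  log: { info: () => {}, warn: () => {} },
} as any;
try {
  checkAndBlockExperimentalModels(expExampleReq);
} catch (e) {
  // Thrown with statusCode 400 unless "gemini-2.0-flash-exp" is listed in
  // config.allowedExpModels (ALLOWED_EXP_MODELS).
}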
// Native Google AI chat completion endpoint
googleAIRouter.post(
"/:apiVersion(v1alpha|v1beta)/models/:modelId:(generateContent|streamGenerateContent)",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "google-ai", outApi: "google-ai", service: "google-ai" },
{
beforeTransform: [maybeReassignModel],
afterTransform: [checkAndBlockExperimentalModels, setStreamFlag, processThinkingBudget]
}
),
googleAIProxy
);
// OpenAI-to-Google AI compatibility endpoint.
googleAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "google-ai", service: "google-ai" },
{
afterTransform: [maybeReassignModel, checkAndBlockExperimentalModels, processThinkingBudget]
}
),
createPreprocessorMiddleware({
inApi: "openai",
outApi: "google-ai",
service: "google-ai",
}),
googleAIProxy
);
+17 -58
View File
@@ -1,6 +1,6 @@
import { Request, Response } from "express";
import http from "http";
import { Socket } from "net";
import httpProxy from "http-proxy";
import { ZodError } from "zod";
import { generateErrorMessage } from "zod-error";
import { HttpError } from "../../shared/errors";
@@ -12,13 +12,10 @@ const OPENAI_CHAT_COMPLETION_ENDPOINT = "/v1/chat/completions";
const OPENAI_TEXT_COMPLETION_ENDPOINT = "/v1/completions";
const OPENAI_EMBEDDINGS_ENDPOINT = "/v1/embeddings";
const OPENAI_IMAGE_COMPLETION_ENDPOINT = "/v1/images/generations";
const OPENAI_RESPONSES_ENDPOINT = "/v1/responses";
const ANTHROPIC_COMPLETION_ENDPOINT = "/v1/complete";
const ANTHROPIC_MESSAGES_ENDPOINT = "/v1/messages";
const ANTHROPIC_SONNET_COMPAT_ENDPOINT = "/v1/sonnet";
const ANTHROPIC_OPUS_COMPAT_ENDPOINT = "/v1/opus";
const GOOGLE_AI_ALPHA_COMPLETION_ENDPOINT = "/v1alpha/models";
const GOOGLE_AI_BETA_COMPLETION_ENDPOINT = "/v1beta/models";
export function isTextGenerationRequest(req: Request) {
return (
@@ -26,13 +23,10 @@ export function isTextGenerationRequest(req: Request) {
[
OPENAI_CHAT_COMPLETION_ENDPOINT,
OPENAI_TEXT_COMPLETION_ENDPOINT,
OPENAI_RESPONSES_ENDPOINT,
ANTHROPIC_COMPLETION_ENDPOINT,
ANTHROPIC_MESSAGES_ENDPOINT,
ANTHROPIC_SONNET_COMPAT_ENDPOINT,
ANTHROPIC_OPUS_COMPAT_ENDPOINT,
GOOGLE_AI_ALPHA_COMPLETION_ENDPOINT,
GOOGLE_AI_BETA_COMPLETION_ENDPOINT,
].some((endpoint) => req.path.startsWith(endpoint))
);
}
@@ -76,23 +70,16 @@ export function sendProxyError(
});
}
/**
* Handles errors thrown during preparation of a proxy request (before it is
* sent to the upstream API), typically due to validation, quota, or other
* pre-flight checks. Depending on the error class, this function will send an
* appropriate error response to the client, streaming it if necessary.
*/
export const handleProxyError: httpProxy.ErrorCallback = (err, req, res) => {
req.log.error(err, `Error during http-proxy-middleware request`);
classifyErrorAndSend(err, req as Request, res as Response);
};
export const classifyErrorAndSend = (
err: Error,
req: Request,
res: Response | Socket
res: Response
) => {
if (res instanceof Socket) {
// We should always have an Express response object here, but http-proxy's
// ErrorCallback type says it could be just a Socket.
req.log.error(err, "Caught error while proxying request to target but cannot send error response to client.");
return res.destroy();
}
try {
const { statusCode, statusMessage, userMessage, ...errorDetails } =
classifyError(err);
@@ -234,28 +221,9 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
switch (format) {
case "openai":
case "mistral-ai":
// Few possible values:
// - choices[0].message.content
// - choices[0].message with no content if model is invoking a tool
return body.choices?.[0]?.message?.content || "";
case "openai-responses":
// Handle the original Responses API format
if (body.output && Array.isArray(body.output)) {
// Look for a message type in the output array
for (const item of body.output) {
if (item.type === "message" && item.content && Array.isArray(item.content)) {
// Extract text content from each content item
return item.content
.filter((contentItem: any) => contentItem.type === "output_text")
.map((contentItem: any) => contentItem.text)
.join("");
}
}
}
// If we've been transformed to chat completion format already
return body.choices?.[0]?.message?.content || "";
case "mistral-text":
return body.outputs?.[0]?.text || "";
// Can be null if the model wants to invoke tools rather than return a
// completion.
return body.choices[0].message.content || "";
case "openai-text":
return body.choices[0].text;
case "anthropic-chat":
@@ -284,15 +252,7 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
if ("choices" in body) {
return body.choices[0].message.content;
}
const text = body.candidates[0].content?.parts?.[0]?.text;
if (!text) {
req.log.warn(
{ body: JSON.stringify(body) },
"Received empty Google AI text completion"
);
return "";
}
return text;
return body.candidates[0].content.parts[0].text;
case "openai-image":
return body.data?.map((item: any) => item.url).join("\n");
default:
@@ -300,23 +260,22 @@ export function getCompletionFromBody(req: Request, body: Record<string, any>) {
}
}
export function getModelFromBody(req: Request, resBody: Record<string, any>) {
export function getModelFromBody(req: Request, body: Record<string, any>) {
const format = req.outboundApi;
switch (format) {
case "openai":
case "openai-text":
case "openai-responses":
return resBody.model;
case "mistral-ai":
case "mistral-text":
return body.model;
case "openai-image":
case "google-ai":
// These formats don't have a model in the response body.
return req.body.model;
case "anthropic-chat":
case "anthropic-text":
// Anthropic confirms the model in the response, but AWS Claude doesn't.
return resBody.model || req.body.model;
return body.model || req.body.model;
case "google-ai":
// Google doesn't confirm the model in the response.
return req.body.model;
default:
assertNever(format);
}
+34 -26
View File
@@ -1,38 +1,43 @@
import type { Request } from "express";
import type { ClientRequest } from "http";
import type { ProxyReqCallback } from "http-proxy";
import { ProxyReqManager } from "./proxy-req-manager";
export { createOnProxyReqHandler } from "./onproxyreq-factory";
export {
createPreprocessorMiddleware,
createEmbeddingsPreprocessorMiddleware,
} from "./preprocessor-factory";
// Preprocessors (runs before request is queued, usually body transformation/validation)
// Express middleware (runs before http-proxy-middleware, can be async)
export { addAzureKey } from "./preprocessors/add-azure-key";
export { applyQuotaLimits } from "./preprocessors/apply-quota-limits";
export { blockZoomerOrigins } from "./preprocessors/block-zoomer-origins";
export { countPromptTokens } from "./preprocessors/count-prompt-tokens";
export { languageFilter } from "./preprocessors/language-filter";
export { setApiFormat } from "./preprocessors/set-api-format";
export { signAwsRequest } from "./preprocessors/sign-aws-request";
export { transformOutboundPayload } from "./preprocessors/transform-outbound-payload";
export { validateContextSize } from "./preprocessors/validate-context-size";
export { validateModelFamily } from "./preprocessors/validate-model-family";
export { validateVision } from "./preprocessors/validate-vision";
// Proxy request mutators (runs every time request is dequeued, before proxying, usually for auth/signing)
export { addKey, addKeyForEmbeddingsRequest } from "./mutators/add-key";
export { addAzureKey } from "./mutators/add-azure-key";
export { finalizeBody } from "./mutators/finalize-body";
export { finalizeSignedRequest } from "./mutators/finalize-signed-request";
export { signAwsRequest } from "./mutators/sign-aws-request";
export { signGcpRequest } from "./mutators/sign-vertex-ai-request";
export { stripHeaders } from "./mutators/strip-headers";
// http-proxy-middleware callbacks (runs on onProxyReq, cannot be async)
export { addAnthropicPreamble } from "./onproxyreq/add-anthropic-preamble";
export { addKey, addKeyForEmbeddingsRequest } from "./onproxyreq/add-key";
export { blockZoomerOrigins } from "./onproxyreq/block-zoomer-origins";
export { checkModelFamily } from "./onproxyreq/check-model-family";
export { finalizeBody } from "./onproxyreq/finalize-body";
export { finalizeSignedRequest } from "./onproxyreq/finalize-signed-request";
export { stripHeaders } from "./onproxyreq/strip-headers";
/**
* Middleware that runs prior to the request being queued or handled by
* http-proxy-middleware. You will not have access to the proxied
* request/response objects since they have not yet been sent to the API.
* Middleware that runs prior to the request being handled by http-proxy-
* middleware.
*
* User will have been authenticated by the proxy's gatekeeper, but the request
* won't have been assigned an upstream API key yet.
* Async functions can be used here, but you will not have access to the proxied
* request/response objects, nor the data set by ProxyRequestMiddleware
* functions as they have not yet been run.
*
* User will have been authenticated by the time this middleware runs, but your
* request won't have been assigned an API key yet.
*
* Note that these functions only run once ever per request, even if the request
* is automatically retried by the request queue middleware.
@@ -40,14 +45,17 @@ export { stripHeaders } from "./mutators/strip-headers";
export type RequestPreprocessor = (req: Request) => void | Promise<void>;
/**
* Middleware that runs immediately before the request is proxied to the
* upstream API, after dequeueing the request from the request queue.
* Callbacks that run immediately before the request is sent to the API in
* response to http-proxy-middleware's `proxyReq` event.
*
* Because these middleware may be run multiple times per request if a retryable
* error occurs and the request put back in the queue, they must be idempotent.
* A change manager is provided to allow the middleware to make changes to the
* request which can be automatically reverted.
* Async functions cannot be used here as HPM's event emitter is not async and
* will not wait for the promise to resolve before sending the request.
*
* Note that these functions may be run multiple times per request if the
* first attempt is rate limited and the request is automatically retried by the
* request queue middleware.
*/
export type ProxyReqMutator = (
changeManager: ProxyReqManager
) => void | Promise<void>;
export type HPMRequestCallback = ProxyReqCallback<ClientRequest, Request>;
export const forceModel = (model: string) => (req: Request) =>
void (req.body.model = model);
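To make the mutator contract above concrete, here is a minimal hypothetical ProxyReqMutator (not part of this diff). It only uses the manager surface shown elsewhere in this changeset (request, setHeader).
// Hypothetical example: stamps the upstream request on every dequeue. Because
// changes go through the manager, they can be reverted if the request is
// re-queued after a retryable error, keeping retries idempotent.
const tagUpstreamAttempt: ProxyReqMutator = (manager) => {
  const req = manager.request;
  manager.setHeader("x-proxy-retry-count", String(req.retryCount ?? 0));
};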
@@ -1,47 +0,0 @@
import { keyPool } from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";
export const addGoogleAIKey: ProxyReqMutator = (manager) => {
const req = manager.request;
const inboundValid =
req.inboundApi === "openai" || req.inboundApi === "google-ai";
const outboundValid = req.outboundApi === "google-ai";
const serviceValid = req.service === "google-ai";
if (!inboundValid || !outboundValid || !serviceValid) {
throw new Error("addGoogleAIKey called on invalid request");
}
const model = req.body.model;
const key = keyPool.get(model, "google-ai");
manager.setKey(key);
req.log.info(
{ key: key.hash, model, stream: req.isStreaming },
"Assigned Google AI API key to request"
);
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:generateContent?key=$API_KEY
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}
const payload = { ...req.body, stream: undefined, model: undefined };
// For OpenAI -> Google conversion we don't actually have the API version
const apiVersion = req.params.apiVersion || "v1beta"
// TODO: this isn't actually signed, so the manager API is a little unclear.
// With the ProxyReqManager refactor, it's probably no longer necessary to
// do this because we can modify the path using Manager.setPath.
manager.setSignedRequest({
method: "POST",
protocol: "https:",
hostname: "generativelanguage.googleapis.com",
path: `/${apiVersion}/models/${model}:${
req.isStreaming ? "streamGenerateContent?alt=sse&" : "generateContent?"
}key=${key.key}`,
headers: {
["host"]: `generativelanguage.googleapis.com`,
["content-type"]: "application/json",
},
body: JSON.stringify(payload),
});
};
@@ -1,67 +0,0 @@
import type { ProxyReqMutator } from "../index";
/** Finalize the rewritten request body. Must be the last mutator. */
export const finalizeBody: ProxyReqMutator = (manager) => {
const req = manager.request;
if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
// For image generation requests, remove stream flag.
if (req.outboundApi === "openai-image") {
delete req.body.stream;
}
// For anthropic text to chat requests, remove undefined prompt.
if (req.outboundApi === "anthropic-chat") {
delete req.body.prompt;
}
// For OpenAI Responses API, ensure messages is in the correct format
if (req.outboundApi === "openai-responses") {
// Format messages for the Responses API
if (req.body.messages) {
req.log.info("Formatting messages for Responses API in finalizeBody");
// The Responses API expects input to be an array, not an object
req.body.input = req.body.messages;
delete req.body.messages;
} else if (req.body.input && req.body.input.messages) {
req.log.info("Reformatting input.messages for Responses API in finalizeBody");
// If input already exists but contains a messages object, replace input with the messages array
req.body.input = req.body.input.messages;
}
// Final check to ensure max_completion_tokens is converted to max_output_tokens
if (req.body.max_completion_tokens) {
req.log.info("Converting max_completion_tokens to max_output_tokens in finalizeBody");
if (!req.body.max_output_tokens) {
req.body.max_output_tokens = req.body.max_completion_tokens;
}
delete req.body.max_completion_tokens;
}
// Final check to ensure max_tokens is converted to max_output_tokens
if (req.body.max_tokens) {
req.log.info("Converting max_tokens to max_output_tokens in finalizeBody");
if (!req.body.max_output_tokens) {
req.body.max_output_tokens = req.body.max_tokens;
}
delete req.body.max_tokens;
}
// Remove all parameters not supported by Responses API
const unsupportedParams = [
'frequency_penalty',
'presence_penalty',
];
for (const param of unsupportedParams) {
if (req.body[param] !== undefined) {
req.log.info(`Removing unsupported parameter for Responses API: ${param}`);
delete req.body[param];
}
}
}
const serialized =
typeof req.body === "string" ? req.body : JSON.stringify(req.body);
manager.setHeader("Content-Length", String(Buffer.byteLength(serialized)));
manager.setBody(serialized);
}
};
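As a worked example of the Responses API fix-ups above (hypothetical request body, not taken from this diff):
// Input as received from an OpenAI-style client:
const before = {
  model: "gpt-4o", // placeholder model
  messages: [{ role: "user", content: "Hi" }],
  max_tokens: 256,
  frequency_penalty: 0.5,
};
// What finalizeBody produces when outboundApi === "openai-responses":
const after = {
  model: "gpt-4o",
  input: [{ role: "user", content: "Hi" }], // messages moved to input
  max_output_tokens: 256,                   // max_tokens converted
};                                          // frequency_penalty removed as unsupported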
@@ -1,32 +0,0 @@
import { ProxyReqMutator } from "../index";
/**
* For AWS/GCP/Azure/Google requests, the body is signed earlier in the request
* pipeline, before the proxy middleware. This function just assigns the path
* and headers to the proxy request.
*/
export const finalizeSignedRequest: ProxyReqMutator = (manager) => {
const req = manager.request;
if (!req.signedRequest) {
throw new Error("Expected req.signedRequest to be set");
}
// The path depends on the selected model and the assigned key's region.
manager.setPath(req.signedRequest.path);
// Amazon doesn't want extra headers, so we need to remove all of them and
// reassign only the ones specified in the signed request.
const headers = req.signedRequest.headers;
Object.keys(headers).forEach((key) => {
manager.removeHeader(key);
});
Object.entries(req.signedRequest.headers).forEach(([key, value]) => {
manager.setHeader(key, value);
});
const serialized =
typeof req.signedRequest.body === "string"
? req.signedRequest.body
: JSON.stringify(req.signedRequest.body);
manager.setHeader("Content-Length", String(Buffer.byteLength(serialized)));
manager.setBody(serialized);
};
@@ -1,159 +0,0 @@
import express, { Request } from "express";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
import {
AnthropicV1TextSchema,
AnthropicV1MessagesSchema,
} from "../../../../shared/api-schemas";
import { AwsBedrockKey, keyPool } from "../../../../shared/key-management";
import {
AWSMistralV1ChatCompletionsSchema,
AWSMistralV1TextCompletionsSchema,
} from "../../../../shared/api-schemas/mistral-ai";
import { ProxyReqMutator } from "../index";
const AMZ_HOST =
process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";
/**
* Signs an outgoing AWS request with the appropriate headers and modifies the
* request object in place to fix the path.
* This happens AFTER request transformation.
*/
export const signAwsRequest: ProxyReqMutator = async (manager) => {
const req = manager.request;
const { model, stream } = req.body;
const key = keyPool.get(model, "aws") as AwsBedrockKey;
manager.setKey(key);
let system = req.body.system ?? "";
if (Array.isArray(system)) {
system = system
.map((m: { type: string; text: string }) => m.text)
.join("\n");
req.body.system = system;
}
const credential = getCredentialParts(req);
const host = AMZ_HOST.replace("%REGION%", credential.region);
// AWS only uses 2023-06-01 and does not actually check this header, but we
// set it so that the stream adapter always selects the correct transformer.
manager.setHeader("anthropic-version", "2023-06-01");
// If our key has an inference profile compatible with the requested model,
// we want to use the inference profile instead of the model ID when calling
// InvokeModel as that will give us higher rate limits.
const profile =
key.inferenceProfileIds.find((p) => p.includes(model)) || model;
// Uses the AWS SDK to sign a request, then modifies our HPM proxy request
// with the headers generated by the SDK.
const newRequest = new HttpRequest({
method: "POST",
protocol: "https:",
hostname: host,
path: `/model/${profile}/invoke${stream ? "-with-response-stream" : ""}`,
headers: {
["Host"]: host,
["content-type"]: "application/json",
},
body: JSON.stringify(getStrictlyValidatedBodyForAws(req)),
});
if (stream) {
newRequest.headers["x-amzn-bedrock-accept"] = "application/json";
} else {
newRequest.headers["accept"] = "*/*";
}
const { body, inboundApi, outboundApi } = req;
req.log.info(
{ key: key.hash, model: body.model, profile, inboundApi, outboundApi },
"Assigned AWS credentials to request"
);
manager.setSignedRequest(await sign(newRequest, getCredentialParts(req)));
};
type Credential = {
accessKeyId: string;
secretAccessKey: string;
region: string;
};
function getCredentialParts(req: express.Request): Credential {
const [accessKeyId, secretAccessKey, region] = req.key!.key.split(":");
if (!accessKeyId || !secretAccessKey || !region) {
req.log.error(
{ key: req.key!.hash },
"AWS_CREDENTIALS isn't correctly formatted; refer to the docs"
);
throw new Error("The key assigned to this request is invalid.");
}
return { accessKeyId, secretAccessKey, region };
}
async function sign(request: HttpRequest, credential: Credential) {
const { accessKeyId, secretAccessKey, region } = credential;
const signer = new SignatureV4({
sha256: Sha256,
credentials: { accessKeyId, secretAccessKey },
region,
service: "bedrock",
});
return signer.sign(request);
}
function getStrictlyValidatedBodyForAws(req: Readonly<Request>): unknown {
// AWS uses vendor API formats but imposes additional (more strict) validation
// rules, namely that extraneous parameters are not allowed. We will validate
// using the vendor's zod schema but apply `.strip` to ensure that any
// extraneous parameters are removed.
let strippedParams: Record<string, unknown> = {};
switch (req.outboundApi) {
case "anthropic-text":
strippedParams = AnthropicV1TextSchema.pick({
prompt: true,
max_tokens_to_sample: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
})
.strip()
.parse(req.body);
break;
case "anthropic-chat":
strippedParams = AnthropicV1MessagesSchema.pick({
messages: true,
system: true,
max_tokens: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
tools: true,
tool_choice: true,
thinking: true
})
.strip()
.parse(req.body);
strippedParams.anthropic_version = "bedrock-2023-05-31";
break;
case "mistral-ai":
strippedParams = AWSMistralV1ChatCompletionsSchema.parse(req.body);
break;
case "mistral-text":
strippedParams = AWSMistralV1TextCompletionsSchema.parse(req.body);
break;
default:
throw new Error("Unexpected outbound API for AWS.");
}
return strippedParams;
}
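A small worked example of the inference-profile selection above (hypothetical key data, not from this diff):
// If the assigned key carries a matching inference profile, it is preferred
// over the bare model ID for higher rate limits; otherwise the model ID is used.
const inferenceProfileIds = ["us.anthropic.claude-3-5-sonnet-20240620-v1:0"];
const model = "anthropic.claude-3-5-sonnet-20240620-v1:0";
const profile = inferenceProfileIds.find((p) => p.includes(model)) || model;
// profile === "us.anthropic.claude-3-5-sonnet-20240620-v1:0"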
@@ -1,78 +0,0 @@
import { AnthropicV1MessagesSchema } from "../../../../shared/api-schemas";
import { GcpKey, keyPool } from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";
import {
getCredentialsFromGcpKey,
refreshGcpAccessToken,
} from "../../../../shared/key-management/gcp/oauth";
const GCP_HOST = process.env.GCP_HOST || "%REGION%-aiplatform.googleapis.com";
export const signGcpRequest: ProxyReqMutator = async (manager) => {
const req = manager.request;
const serviceValid = req.service === "gcp";
if (!serviceValid) {
throw new Error("addVertexAIKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const { model } = req.body;
const key: GcpKey = keyPool.get(model, "gcp") as GcpKey;
if (!key.accessToken || Date.now() > key.accessTokenExpiresAt) {
const [token, durationSec] = await refreshGcpAccessToken(key);
keyPool.update(key, {
accessToken: token,
accessTokenExpiresAt: Date.now() + durationSec * 1000 * 0.95,
} as GcpKey);
// nb: key received by `get` is a clone and will not have the new access
// token we just set, so it must be manually updated.
key.accessToken = token;
}
manager.setKey(key);
req.log.info({ key: key.hash, model }, "Assigned GCP key to request");
// TODO: This should happen in transform-outbound-payload.ts
// TODO: Support tools
let strippedParams: Record<string, unknown>;
strippedParams = AnthropicV1MessagesSchema.pick({
messages: true,
system: true,
max_tokens: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
stream: true,
tools: true,
tool_choice: true,
thinking: true
})
.strip()
.parse(req.body);
strippedParams.anthropic_version = "vertex-2023-10-16";
const credential = await getCredentialsFromGcpKey(key);
const host = GCP_HOST.replace("%REGION%", credential.region);
// GCP doesn't use the anthropic-version header, but we set it to ensure the
// stream adapter selects the correct transformer.
manager.setHeader("anthropic-version", "2023-06-01");
manager.setSignedRequest({
method: "POST",
protocol: "https:",
hostname: host,
path: `/v1/projects/${credential.projectId}/locations/${credential.region}/publishers/anthropic/models/${model}:streamRawPredict`,
headers: {
["host"]: host,
["content-type"]: "application/json",
["authorization"]: `Bearer ${key.accessToken}`,
},
body: JSON.stringify(strippedParams),
});
};
@@ -1,33 +0,0 @@
import { ProxyReqMutator } from "../index";
/**
* Removes origin and referer headers before sending the request to the API for
* privacy reasons.
*/
export const stripHeaders: ProxyReqMutator = (manager) => {
manager.removeHeader("origin");
manager.removeHeader("referer");
// Some APIs refuse requests coming from browsers to discourage embedding
// API keys in client-side code, so we must remove all CORS/fetch headers.
Object.keys(manager.request.headers).forEach((key) => {
if (key.startsWith("sec-")) {
manager.removeHeader(key);
}
});
manager.removeHeader("tailscale-user-login");
manager.removeHeader("tailscale-user-name");
manager.removeHeader("tailscale-headers-info");
manager.removeHeader("tailscale-user-profile-pic");
manager.removeHeader("cf-connecting-ip");
manager.removeHeader("cf-ray");
manager.removeHeader("cf-visitor");
manager.removeHeader("cf-warp-tag-id");
manager.removeHeader("forwarded");
manager.removeHeader("true-client-ip");
manager.removeHeader("x-forwarded-for");
manager.removeHeader("x-forwarded-host");
manager.removeHeader("x-forwarded-proto");
manager.removeHeader("x-real-ip");
};
@@ -0,0 +1,45 @@
import {
applyQuotaLimits,
blockZoomerOrigins,
checkModelFamily,
HPMRequestCallback,
stripHeaders,
} from "./index";
type ProxyReqHandlerFactoryOptions = { pipeline: HPMRequestCallback[] };
/**
* Returns an http-proxy-middleware request handler that runs the given set of
* onProxyReq callback functions in sequence.
*
* These will run each time a request is proxied, including on automatic retries
* by the queue after encountering a rate limit.
*/
export const createOnProxyReqHandler = ({
pipeline,
}: ProxyReqHandlerFactoryOptions): HPMRequestCallback => {
const callbackPipeline = [
checkModelFamily,
applyQuotaLimits,
blockZoomerOrigins,
stripHeaders,
...pipeline,
];
return (proxyReq, req, res, options) => {
// The streaming flag must be set before any other onProxyReq handler runs,
// as it may influence the behavior of subsequent handlers.
// Image generation requests can't be streamed.
// TODO: this flag is set in too many places
req.isStreaming =
req.isStreaming || req.body.stream === true || req.body.stream === "true";
req.body.stream = req.isStreaming;
try {
for (const fn of callbackPipeline) {
fn(proxyReq, req, res, options);
}
} catch (error) {
proxyReq.destroy(error);
}
};
};
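A hypothetical wiring of the factory above into http-proxy-middleware (not part of this changeset); the returned function is a plain onProxyReq callback.
import { createProxyMiddleware } from "http-proxy-middleware";
// checkModelFamily, applyQuotaLimits, blockZoomerOrigins, and stripHeaders are
// prepended automatically; only service-specific callbacks go in the pipeline.
const exampleProxy = createProxyMiddleware({
  target: "https://api.openai.com",
  changeOrigin: true,
  onProxyReq: createOnProxyReqHandler({ pipeline: [addKey, finalizeBody] }),
});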
@@ -0,0 +1,33 @@
import { AnthropicKey, Key } from "../../../../shared/key-management";
import { isTextGenerationRequest } from "../../common";
import { HPMRequestCallback } from "../index";
/**
* Some keys require the prompt to start with `\n\nHuman:`. There is no way to
* know this without trying to send the request and seeing if it fails. If a
* key is marked as requiring a preamble, it will be added here.
*/
export const addAnthropicPreamble: HPMRequestCallback = (_proxyReq, req) => {
if (
!isTextGenerationRequest(req) ||
req.key?.service !== "anthropic" ||
req.outboundApi !== "anthropic-text"
) {
return;
}
let preamble = "";
let prompt = req.body.prompt;
assertAnthropicKey(req.key);
if (req.key.requiresPreamble && prompt) {
preamble = prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
req.log.debug({ key: req.key.hash, preamble }, "Adding preamble to prompt");
}
req.body.prompt = preamble + prompt;
};
function assertAnthropicKey(key: Key): asserts key is AnthropicKey {
if (key.service !== "anthropic") {
throw new Error(`Expected an Anthropic key, got '${key.service}'`);
}
}
@@ -2,12 +2,10 @@ import { AnthropicChatMessage } from "../../../../shared/api-schemas";
import { containsImageContent } from "../../../../shared/api-schemas/anthropic";
import { Key, OpenAIKey, keyPool } from "../../../../shared/key-management";
import { isEmbeddingsRequest } from "../../common";
import { HPMRequestCallback } from "../index";
import { assertNever } from "../../../../shared/utils";
import { ProxyReqMutator } from "../index";
export const addKey: ProxyReqMutator = (manager) => {
const req = manager.request;
export const addKey: HPMRequestCallback = (proxyReq, req) => {
let assignedKey: Key;
const { service, inboundApi, outboundApi, body } = req;
@@ -31,9 +29,7 @@ export const addKey: ProxyReqMutator = (manager) => {
}
if (inboundApi === outboundApi) {
// Pass streaming information for GPT-5 models that require verified keys for streaming
const isStreaming = body.stream === true;
assignedKey = keyPool.get(body.model, service, needsMultimodal, isStreaming);
assignedKey = keyPool.get(body.model, service, needsMultimodal);
} else {
switch (outboundApi) {
// If we are translating between API formats we may need to select a model
@@ -42,32 +38,26 @@ export const addKey: ProxyReqMutator = (manager) => {
// translation now reassigns the model earlier in the request pipeline.
case "anthropic-text":
case "anthropic-chat":
case "mistral-ai":
case "mistral-text":
case "google-ai":
assignedKey = keyPool.get(body.model, service);
assignedKey = keyPool.get("claude-v1", service, needsMultimodal);
break;
case "openai-text":
assignedKey = keyPool.get("gpt-3.5-turbo-instruct", service);
break;
case "openai-image":
// Use the actual model from the request body instead of defaulting to dall-e-3
// This ensures that gpt-image-1 requests get keys that are verified for gpt-image-1
assignedKey = keyPool.get(body.model, service);
break;
case "openai-responses":
assignedKey = keyPool.get(body.model, service);
assignedKey = keyPool.get("dall-e-3", service);
break;
case "openai":
case "google-ai":
case "mistral-ai":
throw new Error(
`Outbound API ${outboundApi} is not supported for ${inboundApi}`
`add-key should not be called for outbound API ${outboundApi}`
);
default:
assertNever(outboundApi);
}
}
manager.setKey(assignedKey);
req.key = assignedKey;
req.log.info(
{ key: assignedKey.hash, model: body.model, inboundApi, outboundApi },
"Assigned key to request"
@@ -76,42 +66,23 @@ export const addKey: ProxyReqMutator = (manager) => {
// TODO: KeyProvider should assemble all necessary headers
switch (assignedKey.service) {
case "anthropic":
manager.setHeader("X-API-Key", assignedKey.key);
if (!manager.request.headers["anthropic-version"]) {
manager.setHeader("anthropic-version", "2023-06-01");
}
proxyReq.setHeader("X-API-Key", assignedKey.key);
break;
case "openai":
const key: OpenAIKey = assignedKey as OpenAIKey;
if (key.organizationId && !key.key.includes("svcacct")) {
manager.setHeader("OpenAI-Organization", key.organizationId);
if (key.organizationId) {
proxyReq.setHeader("OpenAI-Organization", key.organizationId);
}
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "mistral-ai":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
proxyReq.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "azure":
const azureKey = assignedKey.key;
manager.setHeader("api-key", azureKey);
break;
case "deepseek":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "xai":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "cohere":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "qwen":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
break;
case "moonshot":
manager.setHeader("Authorization", `Bearer ${assignedKey.key}`);
proxyReq.setHeader("api-key", azureKey);
break;
case "aws":
case "gcp":
case "google-ai":
throw new Error("add-key should not be used for this service.");
default:
@@ -123,8 +94,10 @@ export const addKey: ProxyReqMutator = (manager) => {
* Special case for embeddings requests which don't go through the normal
* request pipeline.
*/
export const addKeyForEmbeddingsRequest: ProxyReqMutator = (manager) => {
const req = manager.request;
export const addKeyForEmbeddingsRequest: HPMRequestCallback = (
proxyReq,
req
) => {
if (!isEmbeddingsRequest(req)) {
throw new Error(
"addKeyForEmbeddingsRequest called on non-embeddings request"
@@ -135,18 +108,18 @@ export const addKeyForEmbeddingsRequest: ProxyReqMutator = (manager) => {
throw new Error("Embeddings requests must be from OpenAI");
}
manager.setBody({ input: req.body.input, model: "text-embedding-ada-002" });
req.body = { input: req.body.input, model: "text-embedding-ada-002" };
const key = keyPool.get("text-embedding-ada-002", "openai") as OpenAIKey;
manager.setKey(key);
req.key = key;
req.log.info(
{ key: key.hash, toApi: req.outboundApi },
"Assigned Turbo key to embeddings request"
);
manager.setHeader("Authorization", `Bearer ${key.key}`);
proxyReq.setHeader("Authorization", `Bearer ${key.key}`);
if (key.organizationId) {
manager.setHeader("OpenAI-Organization", key.organizationId);
proxyReq.setHeader("OpenAI-Organization", key.organizationId);
}
};
@@ -1,6 +1,6 @@
import { RequestPreprocessor } from "../index";
import { HPMRequestCallback } from "../index";
const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai,vip.jewproxy.tech,jewproxy.tech".split(",");
const DISALLOWED_ORIGIN_SUBSTRINGS = "janitorai.com,janitor.ai".split(",");
class ZoomerForbiddenError extends Error {
constructor(message: string) {
@@ -13,8 +13,8 @@ class ZoomerForbiddenError extends Error {
* Blocks requests from Janitor AI users with a fake, scary error message so I
* stop getting emails asking for tech support.
*/
export const blockZoomerOrigins: RequestPreprocessor = (req) => {
const origin = req.headers.origin || req.headers.referer || req.headers.host;
export const blockZoomerOrigins: HPMRequestCallback = (_proxyReq, req) => {
const origin = req.headers.origin || req.headers.referer;
if (origin && DISALLOWED_ORIGIN_SUBSTRINGS.some((s) => origin.includes(s))) {
// Venus-derivatives send a test prompt to check if the proxy is working.
// We don't want to block that just yet.
@@ -1,16 +1,14 @@
import { HPMRequestCallback } from "../index";
import { config } from "../../../../config";
import { ForbiddenError } from "../../../../shared/errors";
import { getModelFamilyForRequest } from "../../../../shared/models";
import { RequestPreprocessor } from "../index";
/**
* Ensures the selected model family is enabled by the proxy configuration.
*/
export const validateModelFamily: RequestPreprocessor = (req) => {
**/
export const checkModelFamily: HPMRequestCallback = (_proxyReq, req, res) => {
const family = getModelFamilyForRequest(req);
if (!config.allowedModelFamilies.includes(family)) {
throw new ForbiddenError(
`Model family '${family}' is not enabled on this proxy`
);
throw new ForbiddenError(`Model family '${family}' is not enabled on this proxy`);
}
};
@@ -0,0 +1,23 @@
import { fixRequestBody } from "http-proxy-middleware";
import type { HPMRequestCallback } from "../index";
/** Finalize the rewritten request body. Must be the last rewriter. */
export const finalizeBody: HPMRequestCallback = (proxyReq, req) => {
if (["POST", "PUT", "PATCH"].includes(req.method ?? "") && req.body) {
// For image generation requests, remove stream flag.
if (req.outboundApi === "openai-image") {
delete req.body.stream;
}
// For anthropic text to chat requests, remove undefined prompt.
if (req.outboundApi === "anthropic-chat") {
delete req.body.prompt;
}
const updatedBody = JSON.stringify(req.body);
proxyReq.setHeader("Content-Length", Buffer.byteLength(updatedBody));
(req as any).rawBody = Buffer.from(updatedBody);
// body-parser and http-proxy-middleware don't play nice together
fixRequestBody(proxyReq, req);
}
};
@@ -0,0 +1,26 @@
import type { HPMRequestCallback } from "../index";
/**
* For AWS/Azure/Google requests, the body is signed earlier in the request
* pipeline, before the proxy middleware. This function just assigns the path
* and headers to the proxy request.
*/
export const finalizeSignedRequest: HPMRequestCallback = (proxyReq, req) => {
if (!req.signedRequest) {
throw new Error("Expected req.signedRequest to be set");
}
// The path depends on the selected model and the assigned key's region.
proxyReq.path = req.signedRequest.path;
// Amazon doesn't want extra headers, so we need to remove all of them and
// reassign only the ones specified in the signed request.
proxyReq.getRawHeaderNames().forEach(proxyReq.removeHeader.bind(proxyReq));
Object.entries(req.signedRequest.headers).forEach(([key, value]) => {
proxyReq.setHeader(key, value);
});
// Don't use fixRequestBody here because it adds a content-length header.
// Amazon doesn't want that and it breaks the signature.
proxyReq.write(req.signedRequest.body);
};
@@ -0,0 +1,21 @@
import { HPMRequestCallback } from "../index";
/**
* Removes origin and referer headers before sending the request to the API for
* privacy reasons.
**/
export const stripHeaders: HPMRequestCallback = (proxyReq) => {
proxyReq.setHeader("origin", "");
proxyReq.setHeader("referer", "");
proxyReq.removeHeader("tailscale-user-login");
proxyReq.removeHeader("tailscale-user-name");
proxyReq.removeHeader("tailscale-headers-info");
proxyReq.removeHeader("tailscale-user-profile-pic")
proxyReq.removeHeader("cf-connecting-ip");
proxyReq.removeHeader("forwarded");
proxyReq.removeHeader("true-client-ip");
proxyReq.removeHeader("x-forwarded-for");
proxyReq.removeHeader("x-forwarded-host");
proxyReq.removeHeader("x-forwarded-proto");
proxyReq.removeHeader("x-real-ip");
};
@@ -4,15 +4,12 @@ import { initializeSseStream } from "../../../shared/streaming";
import { classifyErrorAndSend } from "../common";
import {
RequestPreprocessor,
blockZoomerOrigins,
countPromptTokens,
languageFilter,
setApiFormat,
transformOutboundPayload,
validateContextSize,
validateModelFamily,
validateVision,
applyQuotaLimits,
} from ".";
type RequestPreprocessorOptions = {
@@ -33,15 +30,14 @@ type RequestPreprocessorOptions = {
/**
* Returns a middleware function that processes the request body into the given
* API format, and then sequentially runs the given additional preprocessors.
* These should be used for validation and transformations that only need to
* happen once per request.
*
* These run first in the request lifecycle, a single time per request before it
* is added to the request queue. They aren't run again if the request is
* re-attempted after a rate limit.
*
* To run functions against requests every time they are re-attempted, write a
* ProxyReqMutator and pass it to createQueuedProxyMiddleware instead.
* To run a preprocessor on every re-attempt, pass it to createQueueMiddleware.
* It will run after these preprocessors, but before the request is sent to
* http-proxy-middleware.
*/
export const createPreprocessorMiddleware = (
apiFormat: Parameters<typeof setApiFormat>[0],
@@ -49,7 +45,6 @@ export const createPreprocessorMiddleware = (
): RequestHandler => {
const preprocessors: RequestPreprocessor[] = [
setApiFormat(apiFormat),
blockZoomerOrigins,
...(beforeTransform ?? []),
transformOutboundPayload,
countPromptTokens,
@@ -57,8 +52,6 @@ export const createPreprocessorMiddleware = (
...(afterTransform ?? []),
validateContextSize,
validateVision,
validateModelFamily,
applyQuotaLimits,
];
return async (...args) => executePreprocessors(preprocessors, args);
};
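As an illustration of the lifecycle described above, a hypothetical preprocessor passed via the afterTransform hook; the router, handler, and endpoint names are placeholders.
// Runs once per request, after transformation but before queueing.
const logResolvedModel: RequestPreprocessor = (req) => {
  req.log.info({ model: req.body.model }, "Resolved model (example preprocessor)");
};
// exampleRouter and exampleProxyHandler are placeholders for illustration.
exampleRouter.post(
  "/v1/chat/completions",
  createPreprocessorMiddleware(
    { inApi: "openai", outApi: "openai", service: "openai" },
    { afterTransform: [logResolvedModel] }
  ),
  exampleProxyHandler
);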
@@ -90,10 +83,10 @@ async function executePreprocessors(
next();
} catch (error) {
if (error.constructor.name === "ZodError") {
const issues = error?.issues
?.map((issue: ZodIssue) => `${issue.path.join(".")}: ${issue.message}`)
const msg = error?.issues
?.map((issue: ZodIssue) => issue.message)
.join("; ");
req.log.warn({ issues }, "Prompt failed preprocessor validation.");
req.log.info(msg, "Prompt validation failed.");
} else {
req.log.error(error, "Error while executing request preprocessor");
}
@@ -143,21 +136,14 @@ const handleTestMessage: RequestHandler = (req, res) => {
completion: "Hello!",
// anthropic chat
content: [{ type: "text", text: "Hello!" }],
// gemini
candidates: [
{
content: { parts: [{ text: "Hello!" }] },
finishReason: "stop",
},
],
proxy_note:
"SillyTavern connection test detected. Your prompt was not sent to the actual model and this response was generated by the proxy.",
"This response was generated by the proxy's test message handler and did not go to the API.",
});
}
};
function isTestMessage(body: any) {
const { messages, prompt, contents } = body;
const { messages, prompt } = body;
if (messages) {
return (
@@ -165,8 +151,6 @@ function isTestMessage(body: any) {
messages[0].role === "user" &&
messages[0].content === "Hi"
);
} else if (contents) {
return contents.length === 1 && contents[0].parts[0]?.text === "Hi";
} else {
return (
prompt?.trim() === "Human: Hi\n\nAssistant:" ||
@@ -3,16 +3,14 @@ import {
AzureOpenAIKey,
keyPool,
} from "../../../../shared/key-management";
import { ProxyReqMutator } from "../index";
import { RequestPreprocessor } from "../index";
export const addAzureKey: ProxyReqMutator = async (manager) => {
const req = manager.request;
export const addAzureKey: RequestPreprocessor = (req) => {
const validAPIs: APIFormat[] = ["openai", "openai-image"];
const apisValid = [req.outboundApi, req.inboundApi].every((api) =>
validAPIs.includes(api)
);
const serviceValid = req.service === "azure";
if (!apisValid || !serviceValid) {
throw new Error("addAzureKey called on invalid request");
}
@@ -24,15 +22,11 @@ export const addAzureKey: ProxyReqMutator = async (manager) => {
const model = req.body.model.startsWith("azure-")
? req.body.model
: `azure-${req.body.model}`;
// TODO: untracked mutation to body, I think this should just be a
// RequestPreprocessor because we don't need to do it every dequeue.
req.key = keyPool.get(model, "azure");
req.body.model = model;
const key = keyPool.get(model, "azure");
manager.setKey(key);
// Handles the sole Azure API deviation from the OpenAI spec (that I know of)
// TODO: this should also probably be a RequestPreprocessor
const notNullOrUndefined = (x: any) => x !== null && x !== undefined;
if ([req.body.logprobs, req.body.top_logprobs].some(notNullOrUndefined)) {
// OpenAI wants logprobs: true/false and top_logprobs: number
@@ -49,7 +43,7 @@ export const addAzureKey: ProxyReqMutator = async (manager) => {
}
req.log.info(
{ key: key.hash, model },
{ key: req.key.hash, model },
"Assigned Azure OpenAI key to request"
);
@@ -61,7 +55,7 @@ export const addAzureKey: ProxyReqMutator = async (manager) => {
const apiVersion =
req.outboundApi === "openai" ? "2023-09-01-preview" : "2024-02-15-preview";
manager.setSignedRequest({
req.signedRequest = {
method: "POST",
protocol: "https:",
hostname: `${resourceName}.openai.azure.com`,
@@ -72,7 +66,7 @@ export const addAzureKey: ProxyReqMutator = async (manager) => {
["api-key"]: apiKey,
},
body: JSON.stringify(req.body),
});
};
};
function getCredentialsFromKey(key: AzureOpenAIKey) {
@@ -0,0 +1,40 @@
import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
export const addGoogleAIKey: RequestPreprocessor = (req) => {
const apisValid = req.inboundApi === "openai" && req.outboundApi === "google-ai";
const serviceValid = req.service === "google-ai";
if (!apisValid || !serviceValid) {
throw new Error("addGoogleAIKey called on invalid request");
}
if (!req.body?.model) {
throw new Error("You must specify a model with your request.");
}
const model = req.body.model;
req.key = keyPool.get(model, "google-ai");
req.log.info(
{ key: req.key.hash, model },
"Assigned Google AI API key to request"
);
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:generateContent?key=$API_KEY
// https://generativelanguage.googleapis.com/v1beta/models/$MODEL_ID:streamGenerateContent?key=${API_KEY}
req.isStreaming = req.isStreaming || req.body.stream;
delete req.body.stream;
req.signedRequest = {
method: "POST",
protocol: "https:",
hostname: "generativelanguage.googleapis.com",
path: `/v1beta/models/${model}:${req.isStreaming ? "streamGenerateContent" : "generateContent"}?key=${req.key.key}`,
headers: {
["host"]: `generativelanguage.googleapis.com`,
["content-type"]: "application/json",
},
body: JSON.stringify(req.body),
};
};
@@ -1,6 +1,6 @@
import { hasAvailableQuota } from "../../../../shared/users/user-store";
import { isImageGenerationRequest, isTextGenerationRequest } from "../../common";
import { RequestPreprocessor } from "../index";
import { HPMRequestCallback } from "../index";
export class QuotaExceededError extends Error {
public quotaInfo: any;
@@ -11,7 +11,7 @@ export class QuotaExceededError extends Error {
}
}
export const applyQuotaLimits: RequestPreprocessor = (req) => {
export const applyQuotaLimits: HPMRequestCallback = (_proxyReq, req) => {
const subjectToQuota =
isTextGenerationRequest(req) || isImageGenerationRequest(req);
if (!subjectToQuota || !req.user) return;
@@ -34,4 +34,4 @@ export const applyQuotaLimits: RequestPreprocessor = (req) => {
}
);
}
};
};
@@ -1,18 +1,12 @@
import { RequestPreprocessor } from "../index";
import { countTokens } from "../../../../shared/tokenization";
import { assertNever } from "../../../../shared/utils";
import { OpenAIChatMessage } from "../../../../shared/api-schemas";
import { GoogleAIChatMessage } from "../../../../shared/api-schemas/google-ai";
import {
AnthropicChatMessage,
flattenAnthropicMessages,
} from "../../../../shared/api-schemas/anthropic";
import {
MistralAIChatMessage,
ContentItem,
isMistralVisionModel
} from "../../../../shared/api-schemas/mistral-ai";
import { isGrokVisionModel } from "../../../../shared/api-schemas/xai";
GoogleAIChatMessage,
MistralAIChatMessage,
OpenAIChatMessage,
} from "../../../../shared/api-schemas";
/**
* Given a request with an already-transformed body, counts the number of
@@ -24,13 +18,7 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
switch (service) {
case "openai": {
req.outputTokens = req.body.max_completion_tokens || req.body.max_tokens;
const prompt: OpenAIChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
}
case "openai-responses": {
req.outputTokens = req.body.max_completion_tokens || req.body.max_tokens;
req.outputTokens = req.body.max_tokens;
const prompt: OpenAIChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
break;
@@ -43,13 +31,10 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
}
case "anthropic-chat": {
req.outputTokens = req.body.max_tokens;
let system = req.body.system ?? "";
if (Array.isArray(system)) {
system = system
.map((m: { type: string; text: string }) => m.text)
.join("\n");
}
const prompt = { system, messages: req.body.messages };
const prompt = {
system: req.body.system ?? "",
messages: req.body.messages,
};
result = await countTokens({ req, prompt, service });
break;
}
@@ -65,50 +50,10 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
result = await countTokens({ req, prompt, service });
break;
}
case "mistral-ai":
case "mistral-text": {
case "mistral-ai": {
req.outputTokens = req.body.max_tokens;
// Handle multimodal content (vision) in Mistral models
const isVisionModel = isMistralVisionModel(req.body.model);
const messages = req.body.messages;
// Check if this is a vision request with images
const hasImageContent = Array.isArray(messages) && messages.some(
(msg: MistralAIChatMessage) => Array.isArray(msg.content) &&
msg.content.some((item: ContentItem) => item.type === "image_url")
);
// For vision content, we add a fixed token count per image
// This is an estimate as the actual token count depends on image size and complexity
const TOKENS_PER_IMAGE = 1200; // Conservative estimate
let imageTokens = 0;
if (hasImageContent && Array.isArray(messages)) {
// Count images in the request
for (const msg of messages) {
if (Array.isArray(msg.content)) {
const imageCount = msg.content.filter(
(item: ContentItem) => item.type === "image_url"
).length;
imageTokens += imageCount * TOKENS_PER_IMAGE;
}
}
req.log.debug(
{ imageCount: imageTokens / TOKENS_PER_IMAGE, tokenEstimate: imageTokens },
"Estimated token count for Mistral vision images"
);
}
const prompt: string | MistralAIChatMessage[] = messages ?? req.body.prompt;
const prompt: MistralAIChatMessage[] = req.body.messages;
result = await countTokens({ req, prompt, service });
// Add the image tokens to the total count
if (imageTokens > 0) {
result.token_count += imageTokens;
}
break;
}
case "openai-image": {
@@ -116,10 +61,6 @@ export const countPromptTokens: RequestPreprocessor = async (req) => {
result = await countTokens({ req, service });
break;
}
// Handle XAI (Grok) vision models
// Since it uses the OpenAI API format, it's caught in the "openai" case,
// but we need to add additional handling for image tokens after that
default:
assertNever(service);
}
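For concreteness, the image-token estimate above works out as follows (hypothetical request with two images):
const TOKENS_PER_IMAGE = 1200;                      // same conservative estimate as above
const imageCount = 2;                               // two image_url items across messages
const imageTokens = imageCount * TOKENS_PER_IMAGE;  // 2400
// result.token_count = text token count + 2400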
@@ -1,5 +1,4 @@
import { Request } from "express";
import { z } from "zod";
import { config } from "../../../../config";
import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
@@ -9,7 +8,6 @@ import {
OpenAIChatMessage,
flattenAnthropicMessages,
} from "../../../../shared/api-schemas";
import { GoogleAIV1GenerateContentSchema } from "../../../../shared/api-schemas/google-ai";
const rejectedClients = new Map<string, number>();
@@ -52,16 +50,14 @@ export const languageFilter: RequestPreprocessor = async (req) => {
}
};
/*
TODO: this is not type safe and does not raise errors if request body zod schema
is changed.
*/
function getPromptFromRequest(req: Request) {
const service = req.outboundApi;
const body = req.body;
switch (service) {
case "anthropic-chat":
return flattenAnthropicMessages(body.messages);
case "anthropic-text":
return body.prompt;
case "openai":
case "mistral-ai":
return body.messages
@@ -76,19 +72,11 @@ function getPromptFromRequest(req: Request) {
return `${msg.role}: ${text}`;
})
.join("\n\n");
case "anthropic-text":
case "openai-text":
case "openai-responses":
case "openai-image":
case "mistral-text":
return body.prompt;
case "google-ai": {
const b = body as z.infer<typeof GoogleAIV1GenerateContentSchema>;
return [
b.systemInstruction?.parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text),
...b.contents.flatMap((c) => c.parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text)),
].join("\n");
}
case "google-ai":
return body.prompt.text;
default:
assertNever(service);
}
@@ -4,22 +4,8 @@ import { LLMService } from "../../../../shared/models";
import { RequestPreprocessor } from "../index";
export const setApiFormat = (api: {
/**
* The API format the user made the request in and expects the response to be
* in.
*/
inApi: Request["inboundApi"];
/**
* The API format the proxy will make the request in and expects the response
* to be in. If different from `inApi`, the proxy will transform the user's
* request body to this format, and will transform the response body or stream
* events from this format.
*/
outApi: APIFormat;
/**
* The service the request will be sent to, which determines authentication
* and possibly the streaming transport.
*/
service: LLMService;
}): RequestPreprocessor => {
return function configureRequestApiFormat(req) {
@@ -0,0 +1,130 @@
import express from "express";
import { Sha256 } from "@aws-crypto/sha256-js";
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
import {
AnthropicV1TextSchema,
AnthropicV1MessagesSchema,
} from "../../../../shared/api-schemas";
import { keyPool } from "../../../../shared/key-management";
import { RequestPreprocessor } from "../index";
const AMZ_HOST =
process.env.AMZ_HOST || "bedrock-runtime.%REGION%.amazonaws.com";
/**
* Signs an outgoing AWS request with the appropriate headers and modifies the
* request object in place to fix the path.
* This happens AFTER request transformation.
*/
export const signAwsRequest: RequestPreprocessor = async (req) => {
const { model, stream } = req.body;
req.key = keyPool.get(model, "aws");
req.isStreaming = stream === true || stream === "true";
// same as addAnthropicPreamble for non-AWS requests, but has to happen here
if (req.outboundApi === "anthropic-text") {
let preamble = req.body.prompt.startsWith("\n\nHuman:") ? "" : "\n\nHuman:";
req.body.prompt = preamble + req.body.prompt;
}
// AWS uses mostly the same parameters as Anthropic, with a few removed params
// and much stricter validation on unused parameters. Rather than treating it
// as a separate schema we will use the anthropic ones and strip the unused
// parameters.
// TODO: This should happen in transform-outbound-payload.ts
let strippedParams: Record<string, unknown>;
if (req.outboundApi === "anthropic-chat") {
strippedParams = AnthropicV1MessagesSchema.pick({
messages: true,
system: true,
max_tokens: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
})
.strip()
.parse(req.body);
strippedParams.anthropic_version = "bedrock-2023-05-31";
} else {
strippedParams = AnthropicV1TextSchema.pick({
prompt: true,
max_tokens_to_sample: true,
stop_sequences: true,
temperature: true,
top_k: true,
top_p: true,
})
.strip()
.parse(req.body);
}
const credential = getCredentialParts(req);
const host = AMZ_HOST.replace("%REGION%", credential.region);
// AWS only uses 2023-06-01 and does not actually check this header, but we
// set it so that the stream adapter always selects the correct transformer.
req.headers["anthropic-version"] = "2023-06-01";
// Uses the AWS SDK to sign a request, then modifies our HPM proxy request
// with the headers generated by the SDK.
const newRequest = new HttpRequest({
method: "POST",
protocol: "https:",
hostname: host,
path: `/model/${model}/invoke${stream ? "-with-response-stream" : ""}`,
headers: {
["Host"]: host,
["content-type"]: "application/json",
},
body: JSON.stringify(strippedParams),
});
if (stream) {
newRequest.headers["x-amzn-bedrock-accept"] = "application/json";
} else {
newRequest.headers["accept"] = "*/*";
}
const { key, body, inboundApi, outboundApi } = req;
req.log.info(
{ key: key.hash, model: body.model, inboundApi, outboundApi },
"Assigned AWS credentials to request"
);
req.signedRequest = await sign(newRequest, getCredentialParts(req));
};
type Credential = {
accessKeyId: string;
secretAccessKey: string;
region: string;
};
function getCredentialParts(req: express.Request): Credential {
const [accessKeyId, secretAccessKey, region] = req.key!.key.split(":");
if (!accessKeyId || !secretAccessKey || !region) {
req.log.error(
{ key: req.key!.hash },
"AWS_CREDENTIALS isn't correctly formatted; refer to the docs"
);
throw new Error("The key assigned to this request is invalid.");
}
return { accessKeyId, secretAccessKey, region };
}
async function sign(request: HttpRequest, credential: Credential) {
const { accessKeyId, secretAccessKey, region } = credential;
const signer = new SignatureV4({
sha256: Sha256,
credentials: { accessKeyId, secretAccessKey },
region,
service: "bedrock",
});
return signer.sign(request);
}
@@ -1,10 +1,9 @@
import { Request } from "express";
import {
API_REQUEST_VALIDATORS,
API_REQUEST_TRANSFORMERS,
} from "../../../../shared/api-schemas";
import { BadRequestError } from "../../../../shared/errors";
import { fixMistralPrompt, isMistralVisionModel } from "../../../../shared/api-schemas/mistral-ai";
import { fixMistralPrompt } from "../../../../shared/api-schemas/mistral-ai";
import {
isImageGenerationRequest,
isTextGenerationRequest,
@@ -13,41 +12,41 @@ import { RequestPreprocessor } from "../index";
/** Transforms an incoming request body to one that matches the target API. */
export const transformOutboundPayload: RequestPreprocessor = async (req) => {
const sameService = req.inboundApi === req.outboundApi;
const alreadyTransformed = req.retryCount > 0;
const notTransformable =
!isTextGenerationRequest(req) && !isImageGenerationRequest(req);
if (alreadyTransformed) {
return;
} else if (notTransformable) {
// This is probably an indication of a bug in the proxy.
const { inboundApi, outboundApi, method, path } = req;
req.log.warn(
{ inboundApi, outboundApi, method, path },
"`transformOutboundPayload` called on a non-transformable request."
if (alreadyTransformed || notTransformable) return;
// TODO: this should be an APIFormatTransformer
if (req.inboundApi === "mistral-ai") {
const messages = req.body.messages;
req.body.messages = fixMistralPrompt(messages);
req.log.info(
{ old: messages.length, new: req.body.messages.length },
"Fixed Mistral prompt"
);
}
if (sameService) {
const result = API_REQUEST_VALIDATORS[req.inboundApi].safeParse(req.body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body: req.body },
"Request validation failed"
);
throw result.error;
}
req.body = result.data;
return;
}
applyMistralPromptFixes(req);
applyGoogleAIKeyTransforms(req);
applyOpenAIResponsesTransform(req);
// Native prompts are those which were already provided by the client in the
// target API format. We don't need to transform them.
const isNativePrompt = req.inboundApi === req.outboundApi;
if (isNativePrompt) {
const result = API_REQUEST_VALIDATORS[req.inboundApi].parse(req.body);
req.body = result;
return;
}
// Prompt requires translation from one API format to another.
const transformation = `${req.inboundApi}->${req.outboundApi}` as const;
const transFn = API_REQUEST_TRANSFORMERS[transformation];
if (transFn) {
req.log.info({ transformation }, "Transforming request...");
req.log.info({ transformation }, "Transforming request");
req.body = await transFn(req);
return;
}
@@ -56,182 +55,3 @@ export const transformOutboundPayload: RequestPreprocessor = async (req) => {
`${transformation} proxying is not supported. Make sure your client is configured to send requests in the correct format and to the correct endpoint.`
);
};
// Handle OpenAI Responses API transformation
function applyOpenAIResponsesTransform(req: Request): void {
if (req.outboundApi === "openai-responses") {
req.log.info("Transforming request to OpenAI Responses API format");
// Store the original body for reference if needed
const originalBody = { ...req.body };
// Map standard OpenAI chat completions format to Responses API format
// The main differences are:
// 1. Endpoint is /v1/responses instead of /v1/chat/completions
// 2. 'messages' field moves to 'input.messages'
// Move messages to input.messages
if (req.body.messages && !req.body.input) {
req.body.input = {
messages: req.body.messages
};
delete req.body.messages;
}
// Keep all the original properties of the request but ensure compatibility
// with Responses API specifics
if (!req.body.previousResponseId && req.body.conversation_id) {
req.body.previousResponseId = req.body.conversation_id;
delete req.body.conversation_id;
}
// Convert max_tokens to max_output_tokens if present and not already set
if (req.body.max_tokens && !req.body.max_output_tokens) {
req.body.max_output_tokens = req.body.max_tokens;
delete req.body.max_tokens;
}
// Set the correct tools format if needed
if (req.body.tools) {
// Tools structure is maintained but might need conversion if non-standard
if (!req.body.tools.some((tool: any) => tool.type === "function" || tool.type === "web_search")) {
req.body.tools = req.body.tools.map((tool: any) => ({
...tool,
type: tool.type || "function"
}));
}
}
req.log.info({
originalModel: originalBody.model,
newFormat: "openai-responses"
}, "Successfully transformed request to Responses API format");
}
}
// handles weird cases that don't fit into our abstractions
function applyMistralPromptFixes(req: Request): void {
if (req.inboundApi === "mistral-ai") {
// Mistral Chat is very similar to OpenAI but not identical and many clients
// don't properly handle the differences. We will try to validate the
// mistral prompt and try to fix it if it fails. It will be re-validated
// after this function returns.
const result = API_REQUEST_VALIDATORS["mistral-ai"].parse(req.body);
// Check if this is a vision model request
const isVisionModel = isMistralVisionModel(req.body.model);
// Check if the request contains image content
const hasImageContent = result.messages?.some((msg: {content: string | any[]}) =>
Array.isArray(msg.content) &&
msg.content.some((item: any) => item.type === "image_url")
);
// For vision requests, normalize the image_url format
if (hasImageContent && Array.isArray(result.messages)) {
// Process each message with image content
result.messages.forEach((msg: any) => {
if (Array.isArray(msg.content)) {
// Process each content item
msg.content.forEach((item: any) => {
if (item.type === "image_url") {
// Normalize the image_url field to a string format that Mistral expects
if (typeof item.image_url === "object") {
// If it's an object, extract the URL or base64 data
if (item.image_url.url) {
item.image_url = item.image_url.url;
} else if (item.image_url.data) {
item.image_url = item.image_url.data;
}
req.log.info(
{ model: req.body.model },
"Normalized object-format image_url to string format"
);
}
}
});
}
});
}
// Apply Mistral prompt fixes while preserving multimodal content
req.body.messages = fixMistralPrompt(result.messages);
req.log.info(
{
n: req.body.messages.length,
prev: result.messages.length,
isVisionModel,
hasImageContent
},
"Applied Mistral chat prompt fixes."
);
// If this is a vision model with image content, it MUST use the chat API
// and cannot be converted to text completions
if (hasImageContent) {
req.log.info(
{ model: req.body.model },
"Detected Mistral vision request with image content. Keeping as chat format."
);
return;
}
// If the prompt relies on `prefix: true` for the last message, we need to
// convert it to a text completions request because AWS Mistral support for
// this feature is broken.
// On Mistral La Plateforme, we can't do this because they don't expose
// a text completions endpoint.
const { messages } = req.body;
const lastMessage = messages && messages[messages.length - 1];
if (lastMessage?.role === "assistant" && req.service === "aws") {
// Enable prefix if the client forgot to, otherwise the template will insert
// an EOS token, which is very unlikely to be what the client wants.
lastMessage.prefix = true;
req.outboundApi = "mistral-text";
req.log.info(
"Native Mistral chat prompt relies on assistant message prefix. Converting to text completions request."
);
}
}
}
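// Illustrative example of the image_url normalization above, assuming an
// OpenAI-style multimodal content item (exact inbound shapes vary by client):
//
//   // before: { type: "image_url", image_url: { url: "https://example.com/cat.png" } }
//   // after:  { type: "image_url", image_url: "https://example.com/cat.png" }
//
// Base64 payloads under `image_url.data` are flattened to a string the same way.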
function toCamelCase(str: string): string {
return str.replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
}
function transformKeysToCamelCase(obj: any, hasTransformed = { value: false }): any {
if (Array.isArray(obj)) {
return obj.map(item => transformKeysToCamelCase(item, hasTransformed));
}
if (obj !== null && typeof obj === 'object') {
return Object.fromEntries(
Object.entries(obj).map(([key, value]) => {
const camelKey = toCamelCase(key);
if (camelKey !== key) {
hasTransformed.value = true;
}
return [
camelKey,
transformKeysToCamelCase(value, hasTransformed)
];
})
);
}
return obj;
}
function applyGoogleAIKeyTransforms(req: Request): void {
// Google (Gemini) API in their infinite wisdom accepts both snake_case and camelCase
// for some params even though in the docs they use snake_case.
// Some frontends (e.g. ST) send a mix of snake_case and camelCase, so we normalize all keys to camelCase.
if (req.outboundApi === "google-ai") {
const hasTransformed = { value: false };
req.body = transformKeysToCamelCase(req.body, hasTransformed);
if (hasTransformed.value) {
req.log.info("Applied Gemini camelCase -> snake_case transform");
}
}
}
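// Minimal sketch of the key normalization (parameter names are examples only):
//
//   transformKeysToCamelCase({ generation_config: { max_output_tokens: 100 } })
//   // -> { generationConfig: { maxOutputTokens: 100 } }
//
// Keys that are already camelCase pass through unchanged, so `hasTransformed`
// only flips when at least one key was actually rewritten.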
@@ -6,9 +6,8 @@ import { RequestPreprocessor } from "../index";
const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
// todo: make configurable
const GOOGLE_AI_MAX_CONTEXT = 2048000;
const MISTRAL_AI_MAX_CONTENT = 131072;
const GOOGLE_AI_MAX_CONTEXT = 32000;
const MISTRAL_AI_MAX_CONTENT = 32768;
/**
* Assigns `req.promptTokens` and `req.outputTokens` based on the request body
@@ -28,7 +27,6 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
switch (req.outboundApi) {
case "openai":
case "openai-text":
case "openai-responses":
proxyMax = OPENAI_MAX_CONTEXT;
break;
case "anthropic-chat":
@@ -39,7 +37,6 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
proxyMax = GOOGLE_AI_MAX_CONTEXT;
break;
case "mistral-ai":
case "mistral-text":
proxyMax = MISTRAL_AI_MAX_CONTENT;
break;
case "openai-image":
@@ -59,24 +56,6 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
modelMax = 16384;
} else if (model.match(/^gpt-4o/)) {
modelMax = 128000;
} else if (model.match(/^gpt-4.5/)) {
modelMax = 128000;
} else if (model.match(/^gpt-4\.1(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 1000000;
} else if (model.match(/^gpt-4\.1-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 1000000;
} else if (model.match(/^gpt-4\.1-nano(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 1000000;
} else if (model.match(/^gpt-5(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 400000;
} else if (model.match(/^gpt-5-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 400000;
} else if (model.match(/^gpt-5-nano(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 400000;
} else if (model.match(/^gpt-5-chat-latest$/)) {
modelMax = 400000;
} else if (model.match(/^chatgpt-4o/)) {
modelMax = 128000;
} else if (model.match(/gpt-4-turbo(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 131072;
} else if (model.match(/gpt-4-turbo(-preview)?$/)) {
@@ -85,24 +64,6 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
modelMax = 131072;
} else if (model.match(/^gpt-4(-\d{4})?-vision(-preview)?$/)) {
modelMax = 131072;
} else if (model.match(/^o3-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o3(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o4-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^codex-mini(-latest|-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000; // 200k context window for codex-mini-latest
} else if (model.match(/^o1(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o1-mini(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 128000;
} else if (model.match(/^o1-pro(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o3-pro(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 200000;
} else if (model.match(/^o1-preview(-\d{4}-\d{2}-\d{2})?$/)) {
modelMax = 128000;
} else if (model.match(/gpt-3.5-turbo/)) {
modelMax = 16384;
} else if (model.match(/gpt-4-32k/)) {
@@ -119,43 +80,17 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
modelMax = 200000;
} else if (model.match(/^claude-3/)) {
modelMax = 200000;
} else if (model.match(/^claude-(?:sonnet|opus)-4/)) {
modelMax = 200000;
} else if (model.match(/^gemini-/)) {
modelMax = 1024000;
} else if (model.match(/^gemini-\d{3}$/)) {
modelMax = GOOGLE_AI_MAX_CONTEXT;
} else if (model.match(/^mistral-(tiny|small|medium)$/)) {
modelMax = MISTRAL_AI_MAX_CONTENT;
} else if (model.match(/^anthropic\.claude-3/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude-(?:sonnet|opus)-4/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude-v2:\d/)) {
modelMax = 200000;
} else if (model.match(/^anthropic\.claude/)) {
// Not sure if AWS Claude has the same context limit as Anthropic Claude.
modelMax = 100000;
} else if (model.match(/^deepseek/)) {
modelMax = 64000;
} else if (model.match(/^kimi-k2/)) {
// Kimi K2 models have 131k context window
modelMax = 131000;
} else if (model.match(/moonshot/)) {
// Moonshot models typically have 200k context window
modelMax = 200000;
} else if (model.match(/command[\w-]*-03-202[0-9]/)) {
// Cohere's command-a-03 models have 256k context window
modelMax = 256000;
} else if (model.match(/command/) || model.match(/cohere/)) {
// Default for all other Cohere models
modelMax = 128000;
} else if (model.match(/^grok-4/)) {
modelMax = 256000;
} else if (model.match(/^grok/)) {
modelMax = 128000;
} else if (model.match(/^magistral/)) {
modelMax = 40000;
} else if (model.match(/tral/)) {
// catches mistral, mixtral, codestral, mathstral, etc. mistral models have
// no name convention and wildly different context windows so this is a
// catch-all
modelMax = MISTRAL_AI_MAX_CONTENT;
} else {
req.log.warn({ model }, "Unknown model, using 200k token limit.");
modelMax = 200000;
@@ -191,4 +126,4 @@ function assertRequestHasTokenCounts(
})
.nonstrict()
.parse({ promptTokens: req.promptTokens, outputTokens: req.outputTokens });
}
}
@@ -3,7 +3,6 @@ import { assertNever } from "../../../../shared/utils";
import { RequestPreprocessor } from "../index";
import { containsImageContent as containsImageContentOpenAI } from "../../../../shared/api-schemas/openai";
import { containsImageContent as containsImageContentAnthropic } from "../../../../shared/api-schemas/anthropic";
import { containsImageContent as containsImageContentGoogleAI } from "../../../../shared/api-schemas/google-ai";
import { ForbiddenError } from "../../../../shared/errors";
/**
@@ -23,18 +22,12 @@ export const validateVision: RequestPreprocessor = async (req) => {
case "openai":
hasImage = containsImageContentOpenAI(req.body.messages);
break;
case "openai-responses":
hasImage = containsImageContentOpenAI(req.body.messages);
break;
case "anthropic-chat":
hasImage = containsImageContentAnthropic(req.body.messages);
break;
case "google-ai":
hasImage = containsImageContentGoogleAI(req.body.contents);
break;
case "anthropic-text":
case "google-ai":
case "mistral-ai":
case "mistral-text":
case "openai-image":
case "openai-text":
return;
@@ -1,135 +0,0 @@
import { Request, Response } from "express";
import http from "http";
import ProxyServer from "http-proxy";
import { Readable } from "stream";
import {
createProxyMiddleware,
Options,
debugProxyErrorsPlugin,
proxyEventsPlugin,
} from "http-proxy-middleware";
import { ProxyReqMutator, stripHeaders } from "./index";
import { createOnProxyResHandler, ProxyResHandlerWithBody } from "../response";
import { createQueueMiddleware } from "../../queue";
import { getHttpAgents } from "../../../shared/network";
import { classifyErrorAndSend } from "../common";
/**
* Options for the `createQueuedProxyMiddleware` factory function.
*/
type ProxyMiddlewareFactoryOptions = {
/**
* Functions which receive a ProxyReqManager and can modify the request before
* it is proxied. The modifications will be automatically reverted if the
* request needs to be returned to the queue.
*/
mutations?: ProxyReqMutator[];
/**
* The target URL to proxy requests to. This can be a string or a function
* which accepts the request and returns a string.
*/
target: string | Options<Request>["router"];
/**
* A function which receives the proxy response and the JSON-decoded request
* body. Only fired for non-streaming responses; streaming responses are
* handled in `handle-streaming-response.ts`.
*/
blockingResponseHandler?: ProxyResHandlerWithBody;
};
/**
* Returns a middleware function that accepts incoming requests and places them
* into the request queue. When the request is dequeued, it is proxied to the
* target URL using the given options and middleware. Non-streaming responses
* are handled by the given `blockingResponseHandler`.
*/
export function createQueuedProxyMiddleware({
target,
mutations,
blockingResponseHandler,
}: ProxyMiddlewareFactoryOptions) {
const hpmTarget = typeof target === "string" ? target : "https://setbyrouter";
const hpmRouter = typeof target === "function" ? target : undefined;
const [httpAgent, httpsAgent] = getHttpAgents();
const agent = hpmTarget.startsWith("http:") ? httpAgent : httpsAgent;
const proxyMiddleware = createProxyMiddleware<Request, Response>({
target: hpmTarget,
router: hpmRouter,
agent,
changeOrigin: true,
toProxy: true,
selfHandleResponse: typeof blockingResponseHandler === "function",
// Disable HPM logger plugin (requires re-adding the other default plugins).
// Contrary to name, debugProxyErrorsPlugin is not just for debugging and
// fixes several error handling/connection close issues in http-proxy core.
ejectPlugins: true,
// Inferred (via Options<express.Request>) as Plugin<express.Request>, but
// the default plugins only allow http.IncomingMessage for TReq. They are
// compatible with express.Request, so we can use them. `Plugin` type is not
// exported for some reason.
plugins: [
debugProxyErrorsPlugin,
pinoLoggerPlugin,
proxyEventsPlugin,
] as any,
on: {
proxyRes: createOnProxyResHandler(
blockingResponseHandler ? [blockingResponseHandler] : []
),
error: classifyErrorAndSend,
},
buffer: ((req: Request) => {
// This is a hack/monkey patch and is not part of the official
// http-proxy-middleware package. See patches/http-proxy+1.18.1.patch.
let payload = req.body;
if (typeof payload === "string") {
payload = Buffer.from(payload);
}
const stream = new Readable();
stream.push(payload);
stream.push(null);
return stream;
}) as any,
});
return createQueueMiddleware({
mutations: [stripHeaders, ...(mutations ?? [])],
proxyMiddleware,
});
}
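// Hypothetical usage sketch; the target URL, mutator names, and response
// handler below are placeholders, not part of this module:
//
//   const openaiProxy = createQueuedProxyMiddleware({
//     target: "https://api.openai.com",
//     mutations: [addKey, finalizeBody],
//     blockingResponseHandler: openaiResponseHandler,
//   });
//   router.post("/v1/chat/completions", openaiProxy);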
type ProxiedResponse = http.IncomingMessage & Response & any;
function pinoLoggerPlugin(proxyServer: ProxyServer<Request>) {
proxyServer.on("error", (err, req, res, target) => {
req.log.error(
{ originalUrl: req.originalUrl, targetUrl: String(target), err },
"Error occurred while proxying request to target"
);
});
proxyServer.on("proxyReq", (proxyReq, req) => {
const { protocol, host, path } = proxyReq;
req.log.info(
{
from: req.originalUrl,
to: `${protocol}//${host}${path}`,
},
"Sending request to upstream API..."
);
});
proxyServer.on("proxyRes", (proxyRes: ProxiedResponse, req, _res) => {
const { protocol, host, path } = proxyRes.req;
req.log.info(
{
target: `${protocol}//${host}${path}`,
status: proxyRes.statusCode,
contentType: proxyRes.headers["content-type"],
contentEncoding: proxyRes.headers["content-encoding"],
contentLength: proxyRes.headers["content-length"],
transferEncoding: proxyRes.headers["transfer-encoding"],
},
"Got response from upstream API."
);
});
}
@@ -1,112 +0,0 @@
import { Request } from "express";
import { Key } from "../../../shared/key-management";
import { assertNever } from "../../../shared/utils";
/**
* Represents a change to the request that will be reverted if the request
* fails.
*/
interface ProxyReqMutation {
target: "header" | "path" | "body" | "api-key" | "signed-request";
key?: string;
originalValue: any | undefined;
}
/**
* Manages a request's headers, body, and path, allowing them to be modified
* before the request is proxied and automatically reverted if the request
* needs to be retried.
*/
export class ProxyReqManager {
private req: Request;
private mutations: ProxyReqMutation[] = [];
/**
* A read-only proxy of the request object. Avoid changing any properties
* here as they will persist across retries.
*/
public readonly request: Readonly<Request>;
constructor(req: Request) {
this.req = req;
this.request = new Proxy(req, {
get: (target, prop) => {
if (typeof prop === "string") return target[prop as keyof Request];
return undefined;
},
});
}
setHeader(name: string, newValue: string): void {
const originalValue = this.req.get(name);
this.mutations.push({ target: "header", key: name, originalValue });
this.req.headers[name.toLowerCase()] = newValue;
}
removeHeader(name: string): void {
const originalValue = this.req.get(name);
this.mutations.push({ target: "header", key: name, originalValue });
delete this.req.headers[name.toLowerCase()];
}
setBody(newBody: any): void {
const originalValue = this.req.body;
this.mutations.push({ target: "body", key: "body", originalValue });
this.req.body = newBody;
}
setKey(newKey: Key): void {
const originalValue = this.req.key;
this.mutations.push({ target: "api-key", key: "key", originalValue });
this.req.key = newKey;
}
setPath(newPath: string): void {
const originalValue = this.req.path;
this.mutations.push({ target: "path", key: "path", originalValue });
this.req.url = newPath;
}
setSignedRequest(newSignedRequest: typeof this.req.signedRequest): void {
const originalValue = this.req.signedRequest;
this.mutations.push({ target: "signed-request", key: "signedRequest", originalValue });
this.req.signedRequest = newSignedRequest;
}
hasChanged(): boolean {
return this.mutations.length > 0;
}
revert(): void {
for (const mutation of this.mutations.reverse()) {
switch (mutation.target) {
case "header":
if (mutation.originalValue === undefined) {
delete this.req.headers[mutation.key!.toLowerCase()];
continue;
} else {
this.req.headers[mutation.key!.toLowerCase()] =
mutation.originalValue;
}
break;
case "path":
this.req.url = mutation.originalValue;
break;
case "body":
this.req.body = mutation.originalValue;
break;
case "api-key":
// We don't reset the key here because it's not a property of the
// inbound request, so we'd only ever be reverting it to null.
break;
case "signed-request":
this.req.signedRequest = mutation.originalValue;
break;
default:
assertNever(mutation.target);
}
}
this.mutations = [];
}
}
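// Rough usage sketch (the sequence below is illustrative; real mutators are
// invoked by the queue/proxy plumbing with the manager already constructed):
//
//   const manager = new ProxyReqManager(req);
//   manager.setHeader("Authorization", "Bearer <upstream-key>");
//   manager.setPath("/v1/chat/completions");
//   // ...request is proxied...
//   // If the request must be retried, undo every tracked change first:
//   if (manager.hasChanged()) manager.revert();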
@@ -1,36 +0,0 @@
import util from "util";
import zlib from "zlib";
import { PassThrough } from "stream";
const BUFFER_DECODER_MAP = {
gzip: util.promisify(zlib.gunzip),
deflate: util.promisify(zlib.inflate),
br: util.promisify(zlib.brotliDecompress),
text: (data: Buffer) => data,
};
const STREAM_DECODER_MAP = {
gzip: zlib.createGunzip,
deflate: zlib.createInflate,
br: zlib.createBrotliDecompress,
text: () => new PassThrough(),
};
type SupportedContentEncoding = keyof typeof BUFFER_DECODER_MAP;
const isSupportedContentEncoding = (
encoding: string
): encoding is SupportedContentEncoding => encoding in BUFFER_DECODER_MAP;
export async function decompressBuffer(buf: Buffer, encoding: string = "text") {
if (isSupportedContentEncoding(encoding)) {
return (await BUFFER_DECODER_MAP[encoding](buf)).toString();
}
throw new Error(`Unsupported content-encoding: ${encoding}`);
}
export function getStreamDecompressor(encoding: string = "text") {
if (isSupportedContentEncoding(encoding)) {
return STREAM_DECODER_MAP[encoding]();
}
throw new Error(`Unsupported content-encoding: ${encoding}`);
}
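// Example of intended usage (buffers and response objects are hypothetical):
//
//   const text = await decompressBuffer(gzippedBuffer, "gzip"); // -> string
//   proxyRes.pipe(getStreamDecompressor(proxyRes.headers["content-encoding"]));
//
// Unsupported encodings throw instead of silently passing bytes through, so
// callers should catch the error and surface it to the client.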
+56 -124
View File
@@ -2,33 +2,36 @@ import express from "express";
import { APIFormat } from "../../../shared/key-management";
import { assertNever } from "../../../shared/utils";
import { initializeSseStream } from "../../../shared/streaming";
import http from "http";
/**
* Returns a Markdown-formatted message that renders semi-nicely in most chat
* frontends. For example:
*
* **Proxy error (HTTP 404 Not Found)**
* The proxy encountered an error while trying to send your prompt to the upstream service. Further technical details are provided below.
* ***
* *The requested Claude model might not exist, or the key might not be provisioned for it.*
* ```
* {
* "type": "error",
* "error": {
* "type": "not_found_error",
* "message": "model: some-invalid-model-id",
* },
* "proxy_note": "The requested Claude model might not exist, or the key might not be provisioned for it."
* }
* ```
*/
function getMessageContent(params: {
function getMessageContent({
title,
message,
obj,
}: {
title: string;
message: string;
obj?: Record<string, any>;
}) {
const { title, message, obj } = params;
/*
Constructs a Markdown-formatted message that renders semi-nicely in most chat
frontends. For example:
**Proxy error (HTTP 404 Not Found)**
The proxy encountered an error while trying to send your prompt to the upstream service. Further technical details are provided below.
***
*The requested Claude model might not exist, or the key might not be provisioned for it.*
```
{
"type": "error",
"error": {
"type": "not_found_error",
"message": "model: some-invalid-model-id",
},
"proxy_note": "The requested Claude model might not exist, or the key might not be provisioned for it."
}
```
*/
const note = obj?.proxy_note || obj?.error?.message || "";
const header = `### **${title}**`;
const friendlyMessage = note ? `${message}\n\n----\n\n*${note}*` : message;
@@ -62,17 +65,13 @@ type ErrorGeneratorOptions = {
format: APIFormat | "unknown";
title: string;
message: string;
obj?: Record<string, any>;
obj?: object;
reqId: string | number | object;
model?: string;
statusCode?: number;
};
/**
* Very crude inference of the request format based on the request body. Don't
* rely on this to be very accurate.
*/
function tryInferFormat(body: any): APIFormat | "unknown" {
export function tryInferFormat(body: any): APIFormat | "unknown" {
if (typeof body !== "object" || !body.model) {
return "unknown";
}
@@ -96,82 +95,47 @@ function tryInferFormat(body: any): APIFormat | "unknown" {
return "unknown";
}
/**
* Redacts the hostname from the error message if it contains a DNS resolution
* error. This is to avoid leaking upstream hostnames on DNS resolution errors,
* as those may contain sensitive information about the proxy's configuration.
*/
function redactHostname(options: ErrorGeneratorOptions): ErrorGeneratorOptions {
if (!options.message.includes("getaddrinfo")) return options;
const redacted = { ...options };
redacted.message = "Could not resolve hostname";
if (typeof redacted.obj?.error === "object") {
redacted.obj = {
...redacted.obj,
error: { message: "Could not resolve hostname" },
};
}
return redacted;
}
/**
* Generates an appropriately-formatted error response and sends it to the
* client over their requested transport (blocking or SSE stream).
*/
export function sendErrorToClient(params: {
export function sendErrorToClient({
options,
req,
res,
}: {
options: ErrorGeneratorOptions;
req: express.Request;
res: express.Response;
}) {
const { req, res } = params;
const options = redactHostname(params.options);
const { statusCode, message, title, obj: details } = options;
const { format: inputFormat } = options;
// Since we want to send the error in a format the client understands, we
// need to know the request format. `setApiFormat` might not have been called
// yet, so we'll try to infer it from the request body.
// This is an error thrown before we know the format of the request, so we
// can't send a response in the format the client expects.
const format =
options.format === "unknown" ? tryInferFormat(req.body) : options.format;
inputFormat === "unknown" ? tryInferFormat(req.body) : inputFormat;
if (format === "unknown") {
// Early middleware error (auth, rate limit) so we can only send something
// generic.
const code = statusCode || 400;
const hasDetails = details && Object.keys(details).length > 0;
return res.status(code).json({
error: {
message,
type: http.STATUS_CODES[code]!.replace(/\s+/g, "_").toLowerCase(),
},
...(hasDetails ? { details } : {}),
return res.status(options.statusCode || 400).json({
error: options.message,
details: options.obj,
});
}
// Cannot modify headers if client opted into streaming and made it into the
// proxy request queue, because that immediately starts an SSE stream.
const completion = buildSpoofedCompletion({ ...options, format });
const event = buildSpoofedSSE({ ...options, format });
const isStreaming =
req.isStreaming || req.body.stream === true || req.body.stream === "true";
if (!res.headersSent) {
res.setHeader("x-oai-proxy-error", title);
res.setHeader("x-oai-proxy-error-status", statusCode || 500);
res.setHeader("x-oai-proxy-error", options.title);
res.setHeader("x-oai-proxy-error-status", options.statusCode || 500);
}
// By this point, we know the request format. To get the error to display in
// chat clients' UIs, we'll send it as a 200 response as a spoofed completion
// from the language model. Depending on whether the client is streaming, we
// will either send an SSE event or a JSON response.
const isStreaming = req.isStreaming || String(req.body.stream) === "true";
if (isStreaming) {
// User can have opted into streaming but not made it into the queue yet,
// in which case the stream must be started first.
if (!res.headersSent) {
initializeSseStream(res);
}
res.write(buildSpoofedSSE({ ...options, format }));
res.write(event);
res.write(`data: [DONE]\n\n`);
res.end();
} else {
res.status(200).json(buildSpoofedCompletion({ ...options, format }));
res.status(200).json(completion);
}
}
@@ -194,21 +158,6 @@ export function buildSpoofedCompletion({
switch (format) {
case "openai":
case "openai-responses":
return {
id: "error-" + id,
object: "chat.completion",
created: Date.now(),
model,
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
choices: [
{
message: { role: "assistant", content },
finish_reason: title,
index: 0,
},
],
};
case "mistral-ai":
return {
id: "error-" + id,
@@ -224,11 +173,6 @@ export function buildSpoofedCompletion({
},
],
};
case "mistral-text":
return {
outputs: [{ text: content, stop_reason: title }],
model,
};
case "openai-text":
return {
id: "error-" + id,
@@ -260,7 +204,13 @@ export function buildSpoofedCompletion({
stop_sequence: null,
};
case "google-ai":
// TODO: Native Google AI non-streaming responses are not supported, this
// is an untested guess at what the response should look like.
return {
id: "error-" + id,
object: "chat.completion",
created: Date.now(),
model,
candidates: [
{
content: { parts: [{ text: content }], role: "model" },
@@ -298,15 +248,6 @@ export function buildSpoofedSSE({
switch (format) {
case "openai":
case "openai-responses":
event = {
id: "chatcmpl-" + id,
object: "chat.completion.chunk",
created: Date.now(),
model,
choices: [{ delta: { content }, index: 0, finish_reason: title }],
};
break;
case "mistral-ai":
event = {
id: "chatcmpl-" + id,
@@ -316,11 +257,6 @@ export function buildSpoofedSSE({
choices: [{ delta: { content }, index: 0, finish_reason: title }],
};
break;
case "mistral-text":
event = {
outputs: [{ text: content, stop_reason: title }],
};
break;
case "openai-text":
event = {
id: "cmpl-" + id,
@@ -350,10 +286,7 @@ export function buildSpoofedSSE({
};
break;
case "google-ai":
// TODO: google ai supports two streaming transports, SSE and JSON.
// we currently only support SSE.
// return JSON.stringify({
event = {
return JSON.stringify({
candidates: [
{
content: { parts: [{ text: content }], role: "model" },
@@ -363,8 +296,7 @@ export function buildSpoofedSSE({
safetyRatings: [],
},
],
};
break;
});
case "openai-image":
return JSON.stringify(obj);
default:
@@ -1,6 +1,19 @@
import util from "util";
import zlib from "zlib";
import { sendProxyError } from "../common";
import type { RawResponseBodyHandler } from "./index";
import { decompressBuffer } from "./compression";
const DECODER_MAP = {
gzip: util.promisify(zlib.gunzip),
deflate: util.promisify(zlib.inflate),
br: util.promisify(zlib.brotliDecompress),
};
const isSupportedContentEncoding = (
contentEncoding: string
): contentEncoding is keyof typeof DECODER_MAP => {
return contentEncoding in DECODER_MAP;
};
/**
* Handles the response from the upstream service and decodes the body if
@@ -22,49 +35,42 @@ export const handleBlockingResponse: RawResponseBodyHandler = async (
throw err;
}
return new Promise((resolve, reject) => {
return new Promise<string>((resolve, reject) => {
let chunks: Buffer[] = [];
proxyRes.on("data", (chunk) => chunks.push(chunk));
proxyRes.on("end", async () => {
const contentEncoding = proxyRes.headers["content-encoding"];
const contentType = proxyRes.headers["content-type"];
let body: string | Buffer = Buffer.concat(chunks);
const rejectWithMessage = function (msg: string, err: Error) {
const error = `${msg} (${err.message})`;
req.log.warn(
{ msg: error, stack: err.stack },
"Error in blocking response handler"
);
sendProxyError(req, res, 500, "Internal Server Error", { error });
return reject(error);
};
let body = Buffer.concat(chunks);
try {
body = await decompressBuffer(body, contentEncoding);
} catch (e) {
return rejectWithMessage(`Could not decode response body`, e);
const contentEncoding = proxyRes.headers["content-encoding"];
if (contentEncoding) {
if (isSupportedContentEncoding(contentEncoding)) {
const decoder = DECODER_MAP[contentEncoding];
// @ts-ignore - started failing after upgrading TypeScript, don't care
// as it was never a problem.
body = await decoder(body);
} else {
const error = `Proxy received response with unsupported content-encoding: ${contentEncoding}`;
req.log.warn({ contentEncoding, key: req.key?.hash }, error);
sendProxyError(req, res, 500, "Internal Server Error", {
error,
contentEncoding,
});
return reject(error);
}
}
try {
return resolve(tryParseAsJson(body, contentType));
if (proxyRes.headers["content-type"]?.includes("application/json")) {
const json = JSON.parse(body.toString());
return resolve(json);
}
return resolve(body.toString());
} catch (e) {
return rejectWithMessage("API responded with invalid JSON", e);
const msg = `Proxy received response with invalid JSON: ${e.message}`;
req.log.warn({ error: e.stack, key: req.key?.hash }, msg);
sendProxyError(req, res, 500, "Internal Server Error", { error: msg });
return reject(msg);
}
});
});
};
function tryParseAsJson(body: string, contentType?: string) {
// If the response is declared as JSON, it must parse or we will throw
if (contentType?.includes("application/json")) {
return JSON.parse(body);
}
// If it's not declared as JSON, we'll still try to parse it as JSON, since
// some APIs return the wrong content-type header in some cases.
// If it fails to parse, we'll just return the raw body without throwing.
try {
return JSON.parse(body);
} catch (e) {
return body;
}
}
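// For example, a body declared as JSON must parse (or this throws), while an
// undeclared body falls back to the raw string:
//
//   tryParseAsJson('{"ok":true}', "application/json"); // -> { ok: true }
//   tryParseAsJson("not json", "text/plain");          // -> "not json"
//   tryParseAsJson("not json", "application/json");    // -> throws SyntaxError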
@@ -1,5 +1,6 @@
import express from "express";
import { pipeline, Readable, Transform } from "stream";
import StreamArray from "stream-json/streamers/StreamArray";
import { StringDecoder } from "string_decoder";
import { promisify } from "util";
import type { logger } from "../../../logger";
@@ -17,45 +18,43 @@ import { getAwsEventStreamDecoder } from "./streaming/aws-event-stream-decoder";
import { EventAggregator } from "./streaming/event-aggregator";
import { SSEMessageTransformer } from "./streaming/sse-message-transformer";
import { SSEStreamAdapter } from "./streaming/sse-stream-adapter";
import { getStreamDecompressor } from "./compression";
const pipelineAsync = promisify(pipeline);
/**
* `handleStreamedResponse` consumes a streamed response from the upstream API,
* decodes chunk-by-chunk into a stream of events, transforms those events into
* the client's requested format, and forwards the result to the client.
*
* `handleStreamedResponse` consumes and transforms a streamed response from the
* upstream service, forwarding events to the client in their requested format.
* After the entire stream has been consumed, it resolves with the full response
* body so that subsequent middleware in the chain can process it as if it were
* a non-streaming response (to count output tokens, track usage, etc).
* a non-streaming response.
*
* In the event of an error, the request's streaming flag is unset and the
* request is bounced back to the non-streaming response handler. If the error
* is retryable, that handler will re-enqueue the request and also reset the
* streaming flag. Unfortunately the streaming flag is set and unset in multiple
* places, so it's hard to keep track of.
* In the event of an error, the request's streaming flag is unset and the non-
* streaming response handler is called instead.
*
* If the error is retryable, that handler will re-enqueue the request and also
* reset the streaming flag. Unfortunately the streaming flag is set and unset
* in multiple places, so it's hard to keep track of.
*/
export const handleStreamedResponse: RawResponseBodyHandler = async (
proxyRes,
req,
res
) => {
const { headers, statusCode } = proxyRes;
const { hash } = req.key!;
if (!req.isStreaming) {
throw new Error("handleStreamedResponse called for non-streaming request.");
}
if (statusCode! > 201) {
if (proxyRes.statusCode! > 201) {
req.isStreaming = false;
req.log.warn(
{ statusCode },
{ statusCode: proxyRes.statusCode, key: hash },
`Streaming request returned error status code. Falling back to non-streaming response handler.`
);
return handleBlockingResponse(proxyRes, req, res);
}
req.log.debug({ headers }, `Starting to proxy SSE stream.`);
req.log.debug({ headers: proxyRes.headers }, `Starting to proxy SSE stream.`);
// Typically, streaming will have already been initialized by the request
// queue to send heartbeat pings.
@@ -66,25 +65,18 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
const prefersNativeEvents = req.inboundApi === req.outboundApi;
const streamOptions = {
contentType: headers["content-type"],
contentType: proxyRes.headers["content-type"],
api: req.outboundApi,
logger: req.log,
};
// While the request is streaming, aggregator collects all events so that we
// can compile them into a single response object and publish that to the
// remaining middleware. Because we have an OpenAI transformer for every
// supported format, EventAggregator always consumes OpenAI events so that we
// only have to write one aggregator (OpenAI input) for each output format.
const aggregator = new EventAggregator(req);
const decompressor = getStreamDecompressor(headers["content-encoding"]);
// Decoder reads from the response bytes to produce a stream of plaintext.
// Decoder turns the raw response stream into a stream of events in some
// format (text/event-stream, vnd.amazon.event-stream, streaming JSON, etc).
const decoder = getDecoder({ ...streamOptions, input: proxyRes });
// Adapter consumes the decoded text and produces server-sent events so we
// have a standard event format for the client and to translate between API
// message formats.
// Adapter transforms the decoded events into server-sent events.
const adapter = new SSEStreamAdapter(streamOptions);
// Aggregator compiles all events into a single response object.
const aggregator = new EventAggregator({ format: req.outboundApi });
// Transformer converts server-sent events from one vendor's API message
// format to another.
const transformer = new SSEMessageTransformer({
@@ -106,7 +98,7 @@ export const handleStreamedResponse: RawResponseBodyHandler = async (
try {
await Promise.race([
handleAbortedStream(req, res),
pipelineAsync(proxyRes, decompressor, decoder, adapter, transformer),
pipelineAsync(proxyRes, decoder, adapter, transformer),
]);
req.log.debug(`Finished proxying SSE stream.`);
res.end();
@@ -173,13 +165,14 @@ function getDecoder(options: {
logger: typeof logger;
contentType?: string;
}) {
const { contentType, input, logger } = options;
const { api, contentType, input, logger } = options;
if (contentType?.includes("application/vnd.amazon.eventstream")) {
return getAwsEventStreamDecoder({ input, logger });
} else if (contentType?.includes("application/json")) {
throw new Error("JSON streaming not supported, request SSE instead");
} else if (api === "google-ai") {
return StreamArray.withParser();
} else {
// Ensures split chunks across multi-byte characters are handled correctly.
// Passthrough stream, but ensures split chunks across multi-byte characters
// are handled correctly.
const stringDecoder = new StringDecoder("utf8");
return new Transform({
readableObjectMode: true,
+126 -493
View File
@@ -1,12 +1,10 @@
/* This file is fucking horrendous, sorry */
// TODO: extract all per-service error response handling into its own modules
import { Request, Response } from "express";
import * as http from "http";
import { config } from "../../../config";
import { HttpError, RetryableError } from "../../../shared/errors";
import { keyPool, GoogleAIKey } from "../../../shared/key-management";
import { logger } from "../../../logger";
import { getOpenAIModelFamily, GoogleAIModelFamily } from "../../../shared/models";
import { keyPool } from "../../../shared/key-management";
import { getOpenAIModelFamily } from "../../../shared/models";
import { countTokens } from "../../../shared/tokenization";
import {
incrementPromptCount,
@@ -48,7 +46,7 @@ export type ProxyResHandlerWithBody = (
*/
body: string | Record<string, any>
) => Promise<void>;
export type ProxyResMiddleware = ProxyResHandlerWithBody[] | undefined;
export type ProxyResMiddleware = ProxyResHandlerWithBody[];
/**
* Returns a on.proxyRes handler that executes the given middleware stack after
@@ -72,22 +70,11 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
req: Request,
res: Response
) => {
// Proxied request has by now been sent to the upstream API, so we revert
// tracked mutations that were only needed to send the request.
// This generally means path adjustment, headers, and body serialization.
if (req.changeManager) {
req.changeManager.revert();
}
const initialHandler = req.isStreaming
const initialHandler: RawResponseBodyHandler = req.isStreaming
? handleStreamedResponse
: handleBlockingResponse;
let lastMiddleware = initialHandler.name;
if (Buffer.isBuffer(req.body)) {
req.body = JSON.parse(req.body.toString());
}
try {
const body = await initialHandler(proxyRes, req, res);
const middlewareStack: ProxyResMiddleware = [];
@@ -112,7 +99,7 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
saveImage,
logPrompt,
logEvent,
...(apiMiddleware ?? [])
...apiMiddleware
);
}
@@ -136,15 +123,15 @@ export const createOnProxyResHandler = (apiMiddleware: ProxyResMiddleware) => {
}
const { stack, message } = error;
const details = { stack, message, lastMiddleware, key: req.key?.hash };
const info = { stack, lastMiddleware, key: req.key?.hash };
const description = `Error while executing proxy response middleware: ${lastMiddleware} (${message})`;
if (res.headersSent) {
req.log.error(details, description);
req.log.error(info, description);
if (!res.writableEnded) res.end();
return;
} else {
req.log.error(details, description);
req.log.error(info, description);
res
.status(500)
.json({ error: "Internal server error", proxy_note: description });
@@ -175,64 +162,53 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
) => {
const statusCode = proxyRes.statusCode || 500;
const statusMessage = proxyRes.statusMessage || "Internal Server Error";
const service = req.key!.service;
// Not an error, continue to next response handler
let errorPayload: ProxiedErrorPayload;
if (statusCode < 400) return;
// Parse the error response body
let errorPayload: ProxiedErrorPayload;
try {
assertJsonResponse(body);
errorPayload = body;
} catch (parseError) {
const strBody = String(body).slice(0, 128);
req.log.error({ statusCode, strBody }, "Error body is not JSON");
// Likely Bad Gateway or Gateway Timeout from upstream's reverse proxy
const hash = req.key?.hash;
req.log.warn({ statusCode, statusMessage, key: hash }, parseError.message);
const details = {
const errorObject = {
error: parseError.message,
status: statusCode,
statusMessage,
proxy_note: `Proxy got back an error, but it was not in JSON format. This is likely a temporary problem with the upstream service. Response body: ${strBody}`,
proxy_note: `Proxy got back an error, but it was not in JSON format. This is likely a temporary problem with the upstream service.`,
};
sendProxyError(req, res, statusCode, statusMessage, details);
sendProxyError(req, res, statusCode, statusMessage, errorObject);
throw new HttpError(statusCode, parseError.message);
}
// Extract the error type from the response body depending on the service
if (service === "gcp") {
if (Array.isArray(errorPayload)) {
errorPayload = errorPayload[0];
}
}
const errorType =
errorPayload.error?.code ||
errorPayload.error?.type ||
getAwsErrorType(proxyRes.headers["x-amzn-errortype"]);
req.log.warn(
{ statusCode, statusMessage, errorType, errorPayload, key: req.key?.hash },
`API returned an error.`
{ statusCode, type: errorType, errorPayload, key: req.key?.hash },
`Received error response from upstream. (${proxyRes.statusMessage})`
);
// TODO: split upstream error handling into separate modules for each service,
// this is out of control.
// Try to convert response body to a ProxiedErrorPayload with message/type
const service = req.key!.service;
if (service === "aws") {
// Try to standardize the error format for AWS
errorPayload.error = { message: errorPayload.message, type: errorType };
delete errorPayload.message;
} else if (service === "gcp") {
if (errorPayload.error?.code) {
errorPayload.error = {
message: errorPayload.error.message,
type: errorPayload.error.status || errorPayload.error.code,
};
}
}
// Figure out what to do with the error
// TODO: separate error handling for each service
if (statusCode === 400) {
switch (service) {
case "openai":
case "google-ai":
case "mistral-ai":
case "azure":
const filteredCodes = ["content_policy_violation", "content_filter"];
@@ -244,54 +220,20 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
// same 429 billing error that other models return.
await handleOpenAIRateLimitError(req, errorPayload);
} else {
errorPayload.proxy_note = `The upstream API rejected the request. Check the error message for details.`;
errorPayload.proxy_note = `The upstream API rejected the request. Your prompt may be too long for ${req.body?.model}.`;
}
break;
case "deepseek":
await handleDeepseekBadRequestError(req, errorPayload);
break;
case "xai":
await handleXaiBadRequestError(req, errorPayload);
break;
case "anthropic":
case "aws":
case "gcp":
await handleAnthropicAwsBadRequestError(req, errorPayload);
break;
case "google-ai":
await handleGoogleAIBadRequestError(req, errorPayload);
break;
case "cohere":
errorPayload.proxy_note = `The upstream Cohere API rejected the request. Check the error message for details.`;
break;
case "qwen":
// No special handling yet
break;
case "moonshot":
errorPayload.proxy_note = `The Moonshot API rejected the request. Check the error message for details.`;
await handleAnthropicBadRequestError(req, errorPayload);
break;
default:
assertNever(service);
}
} else if (statusCode === 401) {
// Universal 401 handling - authentication failed, retry with different key
// Key is invalid or was revoked
keyPool.disable(req.key!, "revoked");
await reenqueueRequest(req);
throw new RetryableError(`${service} key authentication failed, retrying with different key.`);
} else if (statusCode === 402) {
// Deepseek specific - insufficient balance
if (service === "deepseek") {
keyPool.disable(req.key!, "quota");
await reenqueueRequest(req);
throw new RetryableError("Deepseek key has insufficient balance, retrying with different key.");
}
} else if (statusCode === 405) {
// Xai specific - insufficient balance
if (service === "xai") {
keyPool.disable(req.key!, "quota");
await reenqueueRequest(req);
throw new RetryableError("XAI key has insufficient balance, retrying with different key.");
}
errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
} else if (statusCode === 403) {
switch (service) {
case "anthropic":
@@ -299,11 +241,13 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
errorType === "permission_error" &&
errorPayload.error?.message?.toLowerCase().includes("multimodal")
) {
req.log.warn(
{ key: req.key?.hash },
"This Anthropic key does not support multimodal prompts."
);
keyPool.update(req.key!, { allowsMultimodality: false });
await reenqueueRequest(req);
throw new RetryableError(
"Claude request re-enqueued because key does not support multimodality."
);
throw new RetryableError("Claude request re-enqueued because key does not support multimodality.");
} else {
keyPool.disable(req.key!, "revoked");
errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
@@ -314,8 +258,7 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
case "UnrecognizedClientException":
// Key is invalid.
keyPool.disable(req.key!, "revoked");
await reenqueueRequest(req);
throw new RetryableError("AWS key is invalid, retrying with different key.");
errorPayload.proxy_note = `Assigned API key is invalid or revoked, please try again.`;
break;
case "AccessDeniedException":
const isModelAccessError =
@@ -332,16 +275,6 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
default:
errorPayload.proxy_note = `Received 403 error. Key may be invalid.`;
}
return;
case "mistral-ai":
case "gcp":
keyPool.disable(req.key!, "revoked");
await reenqueueRequest(req);
throw new RetryableError("GCP key is invalid, retrying with different key.");
case "moonshot":
keyPool.disable(req.key!, "revoked");
await reenqueueRequest(req);
throw new RetryableError("Moonshot key is invalid, retrying with different key.");
}
} else if (statusCode === 429) {
switch (service) {
@@ -354,9 +287,6 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
case "aws":
await handleAwsRateLimitError(req, errorPayload);
break;
case "gcp":
await handleGcpRateLimitError(req, errorPayload);
break;
case "azure":
case "mistral-ai":
await handleAzureRateLimitError(req, errorPayload);
@@ -364,30 +294,14 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
case "google-ai":
await handleGoogleAIRateLimitError(req, errorPayload);
break;
case "deepseek":
await handleDeepseekRateLimitError(req, errorPayload);
break;
case "xai":
await handleXaiRateLimitError(req, errorPayload);
break;
case "cohere":
await handleCohereRateLimitError(req, errorPayload);
break;
case "qwen":
// Similar handling to OpenAI for rate limits
await handleOpenAIRateLimitError(req, errorPayload);
break;
case "moonshot":
await handleMoonshotRateLimitError(req, errorPayload);
break;
default:
assertNever(service as never);
assertNever(service);
}
} else if (statusCode === 404) {
// Most likely model not found
switch (service) {
case "openai":
if (errorType === "model_not_found") {
if (errorPayload.error?.code === "model_not_found") {
const requestedModel = req.body.model;
const modelFamily = getOpenAIModelFamily(requestedModel);
errorPayload.proxy_note = `The key assigned to your prompt does not support the requested model (${requestedModel}, family: ${modelFamily}).`;
@@ -398,41 +312,28 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
}
break;
case "anthropic":
errorPayload.proxy_note = `The requested Claude model might not exist, or the key might not be provisioned for it.`;
break;
case "google-ai":
errorPayload.proxy_note = `The requested Google AI model might not exist, or the key might not be provisioned for it.`;
break;
case "mistral-ai":
errorPayload.proxy_note = `The requested Mistral AI model might not exist, or the key might not be provisioned for it.`;
break;
case "aws":
case "gcp":
errorPayload.proxy_note = `The requested AWS resource might not exist, or the key might not have access to it.`;
break;
case "azure":
case "deepseek":
case "xai":
case "cohere":
case "qwen":
errorPayload.proxy_note = `The key assigned to your prompt does not support the requested model.`;
errorPayload.proxy_note = `The assigned Azure deployment does not support the requested model.`;
break;
default:
assertNever(service as never);
}
} else if (statusCode === 503) {
switch (service) {
case "aws":
// Re-enqueue on any 503 from AWS Bedrock
req.log.warn(
{ key: req.key?.hash, errorType, errorPayload },
`AWS Bedrock service unavailable (503). Re-enqueueing request.`
);
await reenqueueRequest(req);
throw new RetryableError(
"AWS Bedrock service unavailable (503), re-enqueued request."
);
default:
errorPayload.proxy_note = `Upstream service unavailable. Try again later.`;
break;
assertNever(service);
}
} else {
errorPayload.proxy_note = `Unrecognized error from upstream service.`;
}
// Redact the OpenAI org id from the error message
// Some OAI errors contain the organization ID, which we don't want to reveal.
if (errorPayload.error?.message) {
errorPayload.error.message = errorPayload.error.message.replace(
/org-.{24}/gm,
@@ -440,14 +341,13 @@ const handleUpstreamErrors: ProxyResHandlerWithBody = async (
);
}
// Send the error to the client
sendProxyError(req, res, statusCode, statusMessage, errorPayload);
// Re-throw the error to bubble up to onProxyRes's handler for logging
// This is bubbled up to onProxyRes's handler for logging but will not trigger
// a write to the response as `sendProxyError` has just done that.
throw new HttpError(statusCode, errorPayload.error?.message);
};
async function handleAnthropicAwsBadRequestError(
async function handleAnthropicBadRequestError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
@@ -471,32 +371,25 @@ async function handleAnthropicAwsBadRequestError(
// {"type":"error","error":{"type":"invalid_request_error","message":"Your credit balance is too low to access the Claude API. Please go to Plans & Billing to upgrade or purchase credits."}}
const isOverQuota =
error?.message?.match(/usage blocked until/i) ||
error?.message?.match(/credit balance is too low/i) ||
error?.message?.match(/You will regain access on/i) ||
error?.message?.match(/reached your specified API usage limits/i);
error?.message?.match(/credit balance is too low/i);
if (isOverQuota) {
req.log.warn(
{ key: req.key?.hash, message: error?.message },
"Anthropic key has hit spending limit and will be disabled."
);
keyPool.disable(req.key!, "quota");
await reenqueueRequest(req);
throw new RetryableError("Claude key hit spending limit, retrying with different key.");
errorPayload.proxy_note = `Assigned key has hit its spending limit. ${error?.message}`;
return;
}
const isDisabled =
error?.message?.match(/organization has been disabled/i) ||
error?.message?.match(/^operation not allowed/i) ||
error?.message?.match(/credential is only authorized for use with Claude Code/i);
const isDisabled = error?.message?.match(/organization has been disabled/i);
if (isDisabled) {
req.log.warn(
{ key: req.key?.hash, message: error?.message },
"Anthropic/AWS key has been disabled."
"Anthropic key has been disabled."
);
keyPool.disable(req.key!, "revoked");
await reenqueueRequest(req);
throw new RetryableError("Claude key has been disabled, retrying with different key.");
errorPayload.proxy_note = `Assigned key has been disabled. (${error?.message})`;
return;
}
@@ -534,119 +427,6 @@ async function handleAwsRateLimitError(
}
}
async function handleGcpRateLimitError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
if (errorPayload.error?.type === "RESOURCE_EXHAUSTED") {
keyPool.markRateLimited(req.key!);
await reenqueueRequest(req);
throw new RetryableError("GCP rate-limited request re-enqueued.");
} else {
errorPayload.proxy_note = `Unrecognized 429 Too Many Requests error from GCP.`;
}
}
async function handleDeepseekRateLimitError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
keyPool.markRateLimited(req.key!);
await reenqueueRequest(req);
throw new RetryableError("Deepseek rate-limited request re-enqueued.");
}
async function handleDeepseekBadRequestError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
// Based on the checker code, a 400 response means the key is valid but there was some other error
errorPayload.proxy_note = `The API rejected the request. Check the error message for details.`;
}
async function handleXaiRateLimitError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
keyPool.markRateLimited(req.key!);
await reenqueueRequest(req);
throw new RetryableError("Xai rate-limited request re-enqueued.");
}
async function handleXaiBadRequestError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
// Based on the checker code, a 400 response means the key is valid but there was some other error
errorPayload.proxy_note = `The API rejected the request. Check the error message for details.`;
}
async function handleCohereRateLimitError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
// Mark the current key as rate limited
keyPool.markRateLimited(req.key!);
// Store the original request attempt count or initialize it
req.retryCount = (req.retryCount || 0) + 1;
// Only retry up to 3 times
if (req.retryCount <= 3) {
try {
// Add a small delay before retrying (1-5 seconds)
const delayMs = 1000 + Math.floor(Math.random() * 4000);
await new Promise(resolve => setTimeout(resolve, delayMs));
// Re-enqueue the request to try with a different key
await reenqueueRequest(req);
req.log.info({ attempt: req.retryCount }, "Cohere rate-limited request re-enqueued");
throw new RetryableError(`Cohere rate-limited request re-enqueued (attempt ${req.retryCount}/3).`);
} catch (error) {
if (error instanceof RetryableError) {
throw error; // Rethrow RetryableError to continue the flow
}
req.log.error({ error }, "Failed to re-enqueue rate-limited Cohere request");
}
}
// If we've already retried 3 times, show the error to the user
errorPayload.proxy_note = "Too many requests to the Cohere API. Please try again later.";
}
async function handleMoonshotRateLimitError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
// Mark the current key as rate limited
keyPool.markRateLimited(req.key!);
// Store the original request attempt count or initialize it
req.retryCount = (req.retryCount || 0) + 1;
// Only retry up to 3 times with different keys
if (req.retryCount <= 3) {
try {
// Add a small delay before retrying (2-6 seconds for Moonshot)
const delayMs = 2000 + Math.floor(Math.random() * 4000);
await new Promise(resolve => setTimeout(resolve, delayMs));
// Re-enqueue the request to try with a different key
await reenqueueRequest(req);
req.log.info({ attempt: req.retryCount }, "Moonshot rate-limited request re-enqueued");
throw new RetryableError(`Moonshot rate-limited request re-enqueued (attempt ${req.retryCount}/3).`);
} catch (error) {
if (error instanceof RetryableError) {
throw error; // Rethrow RetryableError to continue the flow
}
req.log.error({ error }, "Failed to re-enqueue rate-limited Moonshot request");
}
}
// If we've already retried 3 times, show the error to the user
errorPayload.proxy_note = "Too many requests to the Moonshot API. Please try again later.";
}
async function handleOpenAIRateLimitError(
req: Request,
errorPayload: ProxiedErrorPayload
@@ -657,20 +437,17 @@ async function handleOpenAIRateLimitError(
case "invalid_request_error": // this is the billing_hard_limit_reached error seen in some cases
// Billing quota exceeded (key is dead, disable it)
keyPool.disable(req.key!, "quota");
await reenqueueRequest(req);
throw new RetryableError("Google AI key quota exceeded, retrying with different key.");
errorPayload.proxy_note = `Assigned key's quota has been exceeded. Please try again.`;
break;
case "access_terminated":
// Account banned (key is dead, disable it)
keyPool.disable(req.key!, "revoked");
await reenqueueRequest(req);
throw new RetryableError("Google AI key banned for policy violations, retrying with different key.");
errorPayload.proxy_note = `Assigned key has been banned by OpenAI for policy violations. Please try again.`;
break;
case "billing_not_active":
// Key valid but account billing is delinquent
keyPool.disable(req.key!, "quota");
await reenqueueRequest(req);
throw new RetryableError("Google AI key billing not active, retrying with different key.");
errorPayload.proxy_note = `Assigned key has been disabled due to delinquent billing. Please try again.`;
break;
case "requests":
case "tokens":
@@ -684,8 +461,58 @@ async function handleOpenAIRateLimitError(
// Per-minute request or token rate limit is exceeded, which we can retry
await reenqueueRequest(req);
throw new RetryableError("Rate-limited request re-enqueued.");
// WIP/nonfunctional
// case "tokens_usage_based":
// // Weird new rate limit type that seems limited to preview models.
// // Distinct from `tokens` type. Can be per-minute or per-day.
//
// // I've seen reports of this error for 500k tokens/day and 10k tokens/min.
// // 10k tokens per minute is problematic, because this is much less than
// // GPT4-Turbo's max context size for a single prompt and is effectively a
// // cap on the max context size for just that key+model, which the app is
// // not able to deal with.
//
// // Similarly if there is a 500k tokens per day limit and 450k tokens have
// // been used today, the max context for that key becomes 50k tokens until
// // the next day and becomes progressively smaller as more tokens are used.
//
// // To work around these keys we will first retry the request a few times.
// // After that we will reject the request, and if it's a per-day limit we
// // will also disable the key.
//
// // "Rate limit reached for gpt-4-1106-preview in organization org-xxxxxxxxxxxxxxxxxxx on tokens_usage_based per day: Limit 500000, Used 460000, Requested 50000"
// // "Rate limit reached for gpt-4-1106-preview in organization org-xxxxxxxxxxxxxxxxxxx on tokens_usage_based per min: Limit 10000, Requested 40000"
//
// const regex =
// /Rate limit reached for .+ in organization .+ on \w+ per (day|min): Limit (\d+)(?:, Used (\d+))?, Requested (\d+)/;
// const [, period, limit, used, requested] =
// errorPayload.error?.message?.match(regex) || [];
//
// req.log.warn(
// { key: req.key?.hash, period, limit, used, requested },
// "Received `tokens_usage_based` rate limit error from OpenAI."
// );
//
// if (!period || !limit || !requested) {
// errorPayload.proxy_note = `Unrecognized rate limit error from OpenAI. (${errorPayload.error?.message})`;
// break;
// }
//
// if (req.retryCount < 2) {
// await reenqueueRequest(req);
// throw new RetryableError("Rate-limited request re-enqueued.");
// }
//
// if (period === "min") {
// errorPayload.proxy_note = `Assigned key can't be used for prompts longer than ${limit} tokens, and no other keys are available right now. Reduce the length of your prompt or try again in a few minutes.`;
// } else {
// errorPayload.proxy_note = `Assigned key has reached its per-day request limit for this model. Try another model.`;
// }
//
// keyPool.markRateLimited(req.key!);
// break;
default:
errorPayload.proxy_note = `This is likely a temporary error with the API. Try again in a few seconds.`;
errorPayload.proxy_note = `This is likely a temporary error with OpenAI. Try again in a few seconds.`;
break;
}
return errorPayload;
@@ -707,193 +534,17 @@ async function handleAzureRateLimitError(
}
}
//{"error":{"code":400,"message":"API Key not found. Please pass a valid API key.","status":"INVALID_ARGUMENT","details":[{"@type":"type.googleapis.com/google.rpc.ErrorInfo","reason":"API_KEY_INVALID","domain":"googleapis.com","metadata":{"service":"generativelanguage.googleapis.com"}}]}}
//{"error":{"code":400,"message":"Gemini API free tier is not available in your country. Please enable billing on your project in Google AI Studio.","status":"FAILED_PRECONDITION"}}
async function handleGoogleAIBadRequestError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
const error = errorPayload.error || {};
  // Google changes these error messages every few months, so match them loosely
  // rather than relying on the structured error fields.
const keyDeadMsgs = [
/please enable billing/i,
/API key not valid/i,
/API key expired/i,
/pass a valid API/i,
];
const text = JSON.stringify(error);
if (keyDeadMsgs.some((msg) => text.match(msg))) {
req.log.warn(
{ key: req.key?.hash, error: text },
"Google API key appears to be inoperative."
);
keyPool.disable(req.key!, "revoked");
await reenqueueRequest(req);
throw new RetryableError("Google API key inoperative, retrying with different key.");
} else {
req.log.warn(
{ key: req.key?.hash, error: text },
"Unknown Google API error."
);
errorPayload.proxy_note = `Unrecognized error from Google AI.`;
}
// const { message, status, details } = error;
//
// if (status === "INVALID_ARGUMENT") {
// const reason = details?.[0]?.reason;
// if (reason === "API_KEY_INVALID") {
// req.log.warn(
// { key: req.key?.hash, status, reason, msg: error.message },
// "Received `API_KEY_INVALID` error from Google AI. Check the configured API key."
// );
// keyPool.disable(req.key!, "revoked");
// errorPayload.proxy_note = `Assigned API key is invalid.`;
// }
// } else if (status === "FAILED_PRECONDITION") {
// if (message.match(/please enable billing/i)) {
// req.log.warn(
// { key: req.key?.hash, status, msg: error.message },
// "Cannot use key due to billing restrictions."
// );
// keyPool.disable(req.key!, "revoked");
// errorPayload.proxy_note = `Assigned API key cannot be used.`;
// }
// } else {
// req.log.warn(
// { key: req.key?.hash, status, msg: error.message },
// "Received unexpected 400 error from Google AI."
// );
// }
}
//{"error":{"code":429,"message":"Resource has been exhausted (e.g. check quota).","status":"RESOURCE_EXHAUSTED"}
//
async function handleGoogleAIRateLimitError(
req: Request,
errorPayload: ProxiedErrorPayload
) {
const status = errorPayload.error?.status;
const text = JSON.stringify(errorPayload.error);
const errorMessage = errorPayload.error?.message?.toLowerCase() || '';
// sometimes they block keys by rate limiting them to 0 requests per minute
// for some indefinite period of time
const keyDeadMsgs = [
/GenerateContentRequestsPerMinutePerProjectPerRegion/i,
/"quota_limit_value":"0"/i,
];
// Quota exhaustion indicators in error messages
const quotaExhaustedMsgs = [
/quota exceeded/i,
/free tier|free_tier/i,
/quota limit/i
];
// If we don't have a key in the request, we can't process rate limits
if (!req.key) {
errorPayload.proxy_note = `Rate limit error but no key was found in the request.`;
return;
}
switch (status) {
case "RESOURCE_EXHAUSTED": {
// Hard disabled keys - these are completely blocked
if (keyDeadMsgs.some((msg) => msg.test(text))) {
req.log.warn(
{ key: req.key.hash, error: text },
"Google API key appears to be completely disabled and will be removed from rotation."
);
keyPool.disable(req.key, "revoked");
errorPayload.proxy_note = `Assigned API key cannot be used.`;
return;
}
// Check if this is a quota exhaustion error rather than just a rate limit
const isQuotaExhausted = quotaExhaustedMsgs.some(pattern => pattern.test(text) || pattern.test(errorMessage));
if (isQuotaExhausted && req.body?.model) {
// Get model family for the current request
const modelName = req.body.model;
const isPro = modelName.includes('pro');
const isFlash = modelName.includes('flash');
const isUltra = modelName.includes('ultra');
req.log.warn(
{ key: req.key.hash, model: modelName, error: text },
"Google API key has exhausted its quota for this model family and will be marked as overquota."
);
// Create a filtered list of model families that excludes the over-quota family
let familyToRemove: GoogleAIModelFamily | null = null;
if (isPro) {
familyToRemove = 'gemini-pro';
errorPayload.proxy_note = `Assigned API key has exhausted quota for Gemini Pro models.`;
} else if (isFlash) {
familyToRemove = 'gemini-flash';
errorPayload.proxy_note = `Assigned API key has exhausted quota for Gemini Flash models.`;
} else if (isUltra) {
familyToRemove = 'gemini-ultra';
errorPayload.proxy_note = `Assigned API key has exhausted quota for Gemini Ultra models.`;
} else {
// If model family can't be determined, just mark as rate limited
keyPool.markRateLimited(req.key);
errorPayload.proxy_note = `Assigned API key has exhausted quota but model family couldn't be determined.`;
}
// Update the modelFamilies in the key if we identified a family to remove
if (familyToRemove) {
// Get current model families, filter out the one that's over quota
const updatedFamilies = [...req.key.modelFamilies].filter(f => f !== familyToRemove);
// Cast the key to GoogleAIKey type to access its specific properties
const googleKey = req.key as GoogleAIKey;
// Track which families are over quota for future rechecking
const overQuotaFamilies = googleKey.overQuotaFamilies || [];
if (!overQuotaFamilies.includes(familyToRemove)) {
overQuotaFamilies.push(familyToRemove);
}
// Mark the key as over quota but still usable for other model families
req.log.info(
{ key: req.key.hash, family: familyToRemove },
"Marking Google AI key as over quota for specific model family"
);
// First make a typed update object that includes only the properties we want to update
interface GoogleAIPartialUpdate {
modelFamilies: GoogleAIModelFamily[];
isOverQuota: boolean;
overQuotaFamilies: GoogleAIModelFamily[];
}
// Create a properly typed update
const update: GoogleAIPartialUpdate = {
modelFamilies: updatedFamilies as GoogleAIModelFamily[],
isOverQuota: true,
overQuotaFamilies
};
// Use the standard KeyPool interface
// This gets around the TypeScript issues by letting KeyPool handle routing
const clonedKey = { ...req.key }; // Make a clone since we'll be modifying it
keyPool.update(clonedKey, update as any);
}
// Re-enqueue with a different key
await reenqueueRequest(req);
throw new RetryableError("Quota-exhausted request re-enqueued with a different key.");
}
// Standard rate limiting - just mark as rate limited temporarily
req.log.debug({ key: req.key.hash, error: text }, "Google API request rate limited, will retry.");
keyPool.markRateLimited(req.key);
case "RESOURCE_EXHAUSTED":
keyPool.markRateLimited(req.key!);
await reenqueueRequest(req);
throw new RetryableError("Rate-limited request re-enqueued.");
}
default:
errorPayload.proxy_note = `Unrecognized rate limit error from Google AI (${status}). Please report this.`;
break;
@@ -913,12 +564,10 @@ const incrementUsage: ProxyResHandlerWithBody = async (_proxyRes, req) => {
},
`Incrementing usage for model`
);
// Get modelFamily for the key usage log
const modelFamilyForKeyPool = req.modelFamily!; // Should be set by getModelFamilyForRequest earlier
keyPool.incrementUsage(req.key!, modelFamilyForKeyPool, { input: req.promptTokens!, output: req.outputTokens! });
keyPool.incrementUsage(req.key!, model, tokensUsed);
if (req.user) {
incrementPromptCount(req.user.token);
incrementTokenCount(req.user.token, model, req.outboundApi, { input: req.promptTokens!, output: req.outputTokens! });
incrementTokenCount(req.user.token, model, req.outboundApi, tokensUsed);
}
}
};
@@ -944,24 +593,16 @@ const countResponseTokens: ProxyResHandlerWithBody = async (
const service = req.outboundApi;
const completion = getCompletionFromBody(req, body);
const tokens = await countTokens({ req, completion, service });
if (req.service === "openai" || req.service === "azure" || req.service === "deepseek" || req.service === "cohere" || req.service === "qwen") {
// O1 consumes (a significant amount of) invisible tokens for the chain-
// of-thought reasoning. We have no way to count these other than to check
// the response body.
tokens.reasoning_tokens =
body.usage?.completion_tokens_details?.reasoning_tokens;
}
req.log.debug(
{ service, prevOutputTokens: req.outputTokens, tokens },
{ service, tokens, prevOutputTokens: req.outputTokens },
`Counted tokens for completion`
);
if (req.tokenizerInfo) {
req.tokenizerInfo.completion_tokens = tokens;
}
req.outputTokens = tokens.token_count + (tokens.reasoning_tokens ?? 0);
req.outputTokens = tokens.token_count;
} catch (error) {
req.log.warn(
error,
@@ -976,30 +617,22 @@ const trackKeyRateLimit: ProxyResHandlerWithBody = async (proxyRes, req) => {
keyPool.updateRateLimits(req.key!, proxyRes.headers);
};
const omittedHeaders = new Set<string>([
// Omit content-encoding because we will always decode the response body
"content-encoding",
// Omit transfer-encoding because we are using response.json which will
// set a content-length header, which is not valid for chunked responses.
"transfer-encoding",
// Don't set cookies from upstream APIs because proxied requests are stateless
"set-cookie",
"openai-organization",
"x-request-id",
"x-ds-request-id",
"x-ds-trace-id",
"cf-ray",
]);
const copyHttpHeaders: ProxyResHandlerWithBody = async (
proxyRes,
_req,
res
) => {
// Hack: we don't copy headers since with chunked transfer we've already sent them.
if (_req.isChunkedTransfer) return;
Object.keys(proxyRes.headers).forEach((key) => {
if (omittedHeaders.has(key)) return;
// Omit content-encoding because we will always decode the response body
if (key === "content-encoding") {
return;
}
// We're usually using res.json() to send the response, which causes express
// to set content-length. That's not valid for chunked responses and some
// clients will reject it so we need to omit it.
if (key === "transfer-encoding") {
return;
}
res.setHeader(key, proxyRes.headers[key] as string);
});
};
@@ -1043,6 +676,6 @@ function getAwsErrorType(header: string | string[] | undefined) {
function assertJsonResponse(body: any): asserts body is Record<string, any> {
if (typeof body !== "object") {
throw new Error(`Expected response to be an object, got ${typeof body}`);
throw new Error("Expected response to be an object");
}
}
+12 -15
View File
@@ -11,8 +11,7 @@ import { ProxyResHandlerWithBody } from ".";
import { assertNever } from "../../../shared/utils";
import {
AnthropicChatMessage,
flattenAnthropicMessages,
GoogleAIChatMessage,
flattenAnthropicMessages, GoogleAIChatMessage,
MistralAIChatMessage,
OpenAIChatMessage,
} from "../../../shared/api-schemas";
@@ -72,21 +71,11 @@ const getPromptForRequest = (
// format.
switch (req.outboundApi) {
case "openai":
case "openai-responses":
return req.body.messages;
case "mistral-ai":
return req.body.messages;
case "anthropic-chat":
let system = req.body.system;
if (Array.isArray(system)) {
system = system
.map((m: { type: string; text: string }) => m.text)
.join("\n");
}
return { system, messages: req.body.messages };
return { system: req.body.system, messages: req.body.messages };
case "openai-text":
case "anthropic-text":
case "mistral-text":
return req.body.prompt;
case "openai-image":
return {
@@ -96,6 +85,8 @@ const getPromptForRequest = (
quality: req.body.quality,
revisedPrompt: responseBody.data[0].revised_prompt,
};
case "anthropic-text":
return req.body.prompt;
case "google-ai":
return { contents: req.body.contents };
default:
@@ -122,7 +113,9 @@ const flattenMessages = (
if (isGoogleAIChatPrompt(val)) {
return val.contents
.map(({ parts, role }) => {
const text = parts.filter(p => 'text' in p).map((p) => (p as { text: string }).text).join("\n");
const text = parts
.map((p) => p.text)
.join("\n");
return `${role}: ${text}`;
})
.join("\n");
@@ -150,7 +143,11 @@ const flattenMessages = (
function isGoogleAIChatPrompt(
val: unknown
): val is { contents: GoogleAIChatMessage[] } {
return typeof val === "object" && val !== null && "contents" in val;
return (
typeof val === "object" &&
val !== null &&
"contents" in val
);
}
function isAnthropicChatPrompt(
@@ -1,39 +0,0 @@
import { OpenAIChatCompletionStreamEvent } from "../index";
export type MistralChatCompletionResponse = {
choices: {
index: number;
message: { role: string; content: string };
finish_reason: string | null;
}[];
};
/**
* Given a list of OpenAI chat completion events, compiles them into a single
* finalized Mistral chat completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForMistralChat(
events: OpenAIChatCompletionStreamEvent[]
): MistralChatCompletionResponse {
let merged: MistralChatCompletionResponse = {
choices: [
{ index: 0, message: { role: "", content: "" }, finish_reason: "" },
],
};
merged = events.reduce((acc, event, i) => {
// The first event will only contain role assignment and response metadata
if (i === 0) {
acc.choices[0].message.role = event.choices[0].delta.role ?? "assistant";
return acc;
}
acc.choices[0].finish_reason = event.choices[0].finish_reason ?? "";
if (event.choices[0].delta.content) {
acc.choices[0].message.content += event.choices[0].delta.content;
}
return acc;
}, merged);
return merged;
}
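A hedged usage sketch of the aggregator above; the chunk shapes cover only the OpenAIChatCompletionStreamEvent fields it actually reads, and the contents are invented.

// Hypothetical stream chunks (fields the aggregator ignores are omitted).
const chunks = [
  { choices: [{ index: 0, delta: { role: "assistant" }, finish_reason: null }] },
  { choices: [{ index: 0, delta: { content: "Hello" }, finish_reason: null }] },
  { choices: [{ index: 0, delta: { content: " world" }, finish_reason: "stop" }] },
] as any[];
const merged = mergeEventsForMistralChat(chunks);
// merged.choices[0].message       -> { role: "assistant", content: "Hello world" }
// merged.choices[0].finish_reason -> "stop"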
@@ -1,33 +0,0 @@
import { OpenAIChatCompletionStreamEvent } from "../index";
export type MistralTextCompletionResponse = {
outputs: {
text: string;
stop_reason: string | null;
}[];
};
/**
* Given a list of OpenAI chat completion events, compiles them into a single
* finalized Mistral text completion response so that non-streaming middleware
* can operate on it as if it were a blocking response.
*/
export function mergeEventsForMistralText(
events: OpenAIChatCompletionStreamEvent[]
): MistralTextCompletionResponse {
let merged: MistralTextCompletionResponse = {
outputs: [{ text: "", stop_reason: "" }],
};
merged = events.reduce((acc, event, i) => {
// The first event will only contain role assignment and response metadata
if (i === 0) {
return acc;
}
acc.outputs[0].text += event.choices[0].delta.content ?? "";
acc.outputs[0].stop_reason = event.choices[0].finish_reason ?? "";
return acc;
}, merged);
return merged;
}
@@ -24,7 +24,7 @@ export function getAwsEventStreamDecoder(params: {
if (eventType === "chunk") {
result = input[eventType];
} else {
// AWS unmarshaller treats non-chunk events (errors and exceptions) oddly.
// AWS unmarshaller treats non-chunk (errors and exceptions) oddly.
result = { [eventType]: input[eventType] } as any;
}
return result;
@@ -1,4 +1,3 @@
import express from "express";
import { APIFormat } from "../../../../shared/key-management";
import { assertNever } from "../../../../shared/utils";
import {
@@ -7,13 +6,8 @@ import {
mergeEventsForAnthropicText,
mergeEventsForOpenAIChat,
mergeEventsForOpenAIText,
mergeEventsForMistralChat,
mergeEventsForMistralText,
AnthropicV2StreamEvent,
OpenAIChatCompletionStreamEvent,
mistralAIToOpenAI,
MistralAIStreamEvent,
MistralChatCompletionEvent,
} from "./index";
/**
@@ -21,71 +15,45 @@ import {
* compiles them into a single finalized response for downstream middleware.
*/
export class EventAggregator {
private readonly model: string;
private readonly requestFormat: APIFormat;
private readonly responseFormat: APIFormat;
private readonly format: APIFormat;
private readonly events: OpenAIChatCompletionStreamEvent[];
constructor({ body, inboundApi, outboundApi }: express.Request) {
constructor({ format }: { format: APIFormat }) {
this.events = [];
this.requestFormat = inboundApi;
this.responseFormat = outboundApi;
this.model = body.model;
this.format = format;
}
addEvent(
event:
| OpenAIChatCompletionStreamEvent
| AnthropicV2StreamEvent
| MistralAIStreamEvent
) {
addEvent(event: OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent) {
if (eventIsOpenAIEvent(event)) {
this.events.push(event);
} else {
// horrible special case. previously all transformers' target format was
// openai, so the event aggregator could conveniently assume all incoming
// events were in openai format.
// now we have added some transformers that convert between non-openai
// formats, so aggregator needs to know how to collapse for more than
// just openai.
// because writing aggregation logic for every possible output format is
// annoying, we will just transform any non-openai output events to openai
// format (even if the client did not request openai at all) so that we
// still only need to write aggregators for openai SSEs.
let openAIEvent: OpenAIChatCompletionStreamEvent | undefined;
switch (this.requestFormat) {
case "anthropic-text":
assertIsAnthropicV2Event(event);
openAIEvent = anthropicV2ToOpenAI({
data: `event: completion\ndata: ${JSON.stringify(event)}\n\n`,
lastPosition: -1,
index: 0,
fallbackId: event.log_id || "fallback-" + Date.now(),
fallbackModel: event.model || this.model || "fallback-claude-3",
})?.event;
break;
case "mistral-ai":
assertIsMistralChatEvent(event);
openAIEvent = mistralAIToOpenAI({
data: `data: ${JSON.stringify(event)}\n\n`,
lastPosition: -1,
index: 0,
fallbackId: "fallback-" + Date.now(),
fallbackModel: this.model || "fallback-mistral",
})?.event;
break;
}
if (openAIEvent) {
this.events.push(openAIEvent);
// now we have added anthropic-chat-to-text, so aggregator needs to know
// how to collapse events from two formats.
// because that is annoying, we will simply transform anthropic events to
// openai (even if the client didn't ask for openai) so we don't have to
// write aggregation logic for anthropic chat (which is also a troublesome
// stateful format).
const openAIEvent = anthropicV2ToOpenAI({
data: `event: completion\ndata: ${JSON.stringify(event)}\n\n`,
lastPosition: -1,
index: 0,
fallbackId: event.log_id || "event-aggregator-fallback",
fallbackModel: event.model || "claude-3-fallback",
});
if (openAIEvent.event) {
this.events.push(openAIEvent.event);
}
}
}
getFinalResponse() {
switch (this.responseFormat) {
switch (this.format) {
case "openai":
case "openai-responses":
case "google-ai":
case "mistral-ai":
return mergeEventsForOpenAIChat(this.events);
case "openai-text":
return mergeEventsForOpenAIText(this.events);
@@ -93,16 +61,10 @@ export class EventAggregator {
return mergeEventsForAnthropicText(this.events);
case "anthropic-chat":
return mergeEventsForAnthropicChat(this.events);
case "mistral-ai":
return mergeEventsForMistralChat(this.events);
case "mistral-text":
return mergeEventsForMistralText(this.events);
case "openai-image":
throw new Error(
`SSE aggregation not supported for ${this.responseFormat}`
);
throw new Error(`SSE aggregation not supported for ${this.format}`);
default:
assertNever(this.responseFormat);
assertNever(this.format);
}
}
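A rough usage sketch of the newer EventAggregator variant shown above (the one constructed from the Express request); the request object and event contents here are stand-ins, not real values.

// Minimal stand-in for the fields the constructor destructures.
const fakeReq = {
  body: { model: "gpt-4o" },
  inboundApi: "openai",
  outboundApi: "openai",
} as any;
const aggregator = new EventAggregator(fakeReq);
aggregator.addEvent({
  object: "chat.completion.chunk",
  choices: [{ index: 0, delta: { role: "assistant", content: "Hi" }, finish_reason: "stop" }],
} as any);
// With a responseFormat of "openai", this collapses the buffered chunks via mergeEventsForOpenAIChat.
const finalResponse = aggregator.getFinalResponse();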
@@ -116,17 +78,3 @@ function eventIsOpenAIEvent(
): event is OpenAIChatCompletionStreamEvent {
return event?.object === "chat.completion.chunk";
}
function assertIsAnthropicV2Event(event: any): asserts event is AnthropicV2StreamEvent {
if (!event?.completion) {
throw new Error(`Bad event for Anthropic V2 SSE aggregation`);
}
}
function assertIsMistralChatEvent(
event: any
): asserts event is MistralChatCompletionEvent {
if (!event?.choices) {
throw new Error(`Bad event for Mistral SSE aggregation`);
}
}
@@ -7,25 +7,6 @@ export type SSEResponseTransformArgs<S = Record<string, any>> = {
state?: S;
};
export type MistralChatCompletionEvent = {
choices: {
index: number;
message: { role: string; content: string };
stop_reason: string | null;
}[];
};
export type MistralTextCompletionEvent = {
outputs: { text: string; stop_reason: string | null }[];
};
export type MistralAIStreamEvent = {
"amazon-bedrock-invocationMetrics"?: {
inputTokenCount: number;
outputTokenCount: number;
invocationLatency: number;
firstByteLatency: number;
};
} & (MistralChatCompletionEvent | MistralTextCompletionEvent);
export type AnthropicV2StreamEvent = {
log_id?: string;
model?: string;
@@ -60,12 +41,8 @@ export { anthropicV2ToOpenAI } from "./transformers/anthropic-v2-to-openai";
export { anthropicChatToAnthropicV2 } from "./transformers/anthropic-chat-to-anthropic-v2";
export { anthropicChatToOpenAI } from "./transformers/anthropic-chat-to-openai";
export { googleAIToOpenAI } from "./transformers/google-ai-to-openai";
export { mistralAIToOpenAI } from "./transformers/mistral-ai-to-openai";
export { mistralTextToMistralChat } from "./transformers/mistral-text-to-mistral-chat";
export { passthroughToOpenAI } from "./transformers/passthrough-to-openai";
export { mergeEventsForOpenAIChat } from "./aggregators/openai-chat";
export { mergeEventsForOpenAIText } from "./aggregators/openai-text";
export { mergeEventsForAnthropicText } from "./aggregators/anthropic-text";
export { mergeEventsForAnthropicChat } from "./aggregators/anthropic-chat";
export { mergeEventsForMistralChat } from "./aggregators/mistral-chat";
export { mergeEventsForMistralText } from "./aggregators/mistral-text";
@@ -11,11 +11,8 @@ import {
googleAIToOpenAI,
OpenAIChatCompletionStreamEvent,
openAITextToOpenAIChat,
mistralAIToOpenAI,
mistralTextToMistralChat,
passthroughToOpenAI,
StreamingCompletionTransformer,
MistralChatCompletionEvent,
} from "./index";
type SSEMessageTransformerOptions = TransformOptions & {
@@ -38,9 +35,7 @@ export class SSEMessageTransformer extends Transform {
private readonly inputFormat: APIFormat;
private readonly transformFn: StreamingCompletionTransformer<
// TODO: Refactor transformers to not assume only OpenAI events as output
| OpenAIChatCompletionStreamEvent
| AnthropicV2StreamEvent
| MistralChatCompletionEvent
OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent
>;
private readonly log;
private readonly fallbackId: string;
@@ -126,17 +121,16 @@ function eventIsOpenAIEvent(
function getTransformer(
responseApi: APIFormat,
version?: string,
// In most cases, we are transforming back to OpenAI. Some responses can be
// translated between two non-OpenAI formats, eg Anthropic Chat -> Anthropic
// Text, or Mistral Text -> Mistral Chat.
// There's only one case where we're not transforming back to OpenAI, which is
// Anthropic Chat response -> Anthropic Text request. This parameter is only
// used for that case.
requestApi: APIFormat = "openai"
): StreamingCompletionTransformer<
| OpenAIChatCompletionStreamEvent
| AnthropicV2StreamEvent
| MistralChatCompletionEvent
OpenAIChatCompletionStreamEvent | AnthropicV2StreamEvent
> {
switch (responseApi) {
case "openai":
case "mistral-ai":
return passthroughToOpenAI;
case "openai-text":
return openAITextToOpenAIChat;
@@ -146,20 +140,12 @@ function getTransformer(
: anthropicV2ToOpenAI;
case "anthropic-chat":
return requestApi === "anthropic-text"
? anthropicChatToAnthropicV2 // User's legacy text prompt was converted to chat, and response must be converted back to text
? anthropicChatToAnthropicV2
: anthropicChatToOpenAI;
case "google-ai":
return googleAIToOpenAI;
case "mistral-ai":
return mistralAIToOpenAI;
case "mistral-text":
return requestApi === "mistral-ai"
? mistralTextToMistralChat // User's chat request was converted to text, and response must be converted back to chat
: mistralAIToOpenAI;
case "openai-image":
throw new Error(`SSE transformation not supported for ${responseApi}`);
case "openai-responses":
return passthroughToOpenAI;
default:
assertNever(responseApi);
}
@@ -2,6 +2,7 @@ import pino from "pino";
import { Transform, TransformOptions } from "stream";
import { Message } from "@smithy/eventstream-codec";
import { APIFormat } from "../../../../shared/key-management";
import { buildSpoofedSSE } from "../error-generator";
import { BadRequestError, RetryableError } from "../../../../shared/errors";
type SSEStreamAdapterOptions = TransformOptions & {
@@ -19,6 +20,7 @@ type SSEStreamAdapterOptions = TransformOptions & {
*/
export class SSEStreamAdapter extends Transform {
private readonly isAwsStream;
private readonly isGoogleStream;
private api: APIFormat;
private partialMessage = "";
private textDecoder = new TextDecoder("utf8");
@@ -28,6 +30,7 @@ export class SSEStreamAdapter extends Transform {
super({ ...options, objectMode: true });
this.isAwsStream =
options?.contentType === "application/vnd.amazon.eventstream";
this.isGoogleStream = options?.api === "google-ai";
this.api = options.api;
this.log = options.logger.child({ module: "sse-stream-adapter" });
}
@@ -52,10 +55,8 @@ export class SSEStreamAdapter extends Transform {
if ("completion" in eventObj) {
return ["event: completion", `data: ${event}`].join(`\n`);
} else if (eventObj.type) {
return [`event: ${eventObj.type}`, `data: ${event}`].join(`\n`);
} else {
return `data: ${event}`;
return [`event: ${eventObj.type}`, `data: ${event}`].join(`\n`);
}
}
// noinspection FallThroughInSwitchStatementJS -- non-JSON data is unexpected
@@ -107,12 +108,44 @@ export class SSEStreamAdapter extends Transform {
}
}
/** Processes an incoming array element from the Google AI JSON stream. */
protected processGoogleObject(data: any): string | null {
// Sometimes data has fields key and value, sometimes it's just the
// candidates array.
const candidates = data.value?.candidates ?? data.candidates ?? [{}];
try {
const hasParts = candidates[0].content?.parts?.length > 0;
if (hasParts) {
return `data: ${JSON.stringify(data.value ?? data)}\n`;
} else {
this.log.error({ event: data }, "Received bad Google AI event");
return `data: ${buildSpoofedSSE({
format: "google-ai",
title: "Proxy stream error",
message:
"The proxy received malformed or unexpected data from Google AI while streaming.",
obj: data,
reqId: "proxy-sse-adapter-message",
model: "",
})}`;
}
} catch (error) {
error.lastEvent = data;
this.emit("error", error);
}
return null;
}
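As a concrete illustration of the two shapes the comment in processGoogleObject refers to, both of these hypothetical stream elements resolve to the same candidates array (text and roles invented):

// Wrapped form: the element carries key/value fields.
const wrapped = {
  key: 0,
  value: { candidates: [{ content: { parts: [{ text: "Hello" }], role: "model" }, index: 0 }] },
};
// Bare form: the element is the candidates payload itself.
const bare = {
  candidates: [{ content: { parts: [{ text: "Hello" }], role: "model" }, index: 0 }],
};
// In both cases `data.value?.candidates ?? data.candidates` yields the same array,
// and the element is re-emitted as an SSE `data:` line.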
_transform(data: any, _enc: string, callback: (err?: Error | null) => void) {
try {
if (this.isAwsStream) {
// `data` is a Message object
const message = this.processAwsMessage(data);
if (message) this.push(message + "\n\n");
} else if (this.isGoogleStream) {
// `data` is an element from the Google AI JSON stream
const message = this.processGoogleObject(data);
if (message) this.push(message + "\n\n");
} else {
// `data` is a string, but possibly only a partial message
const fullMessages = (this.partialMessage + data).split(
@@ -34,7 +34,7 @@ export const anthropicChatToOpenAI: StreamingCompletionTransformer = (
model: params.fallbackModel,
choices: [
{
index: 0,
index: params.index,
delta: { content: deltaEvent.delta.text },
finish_reason: null,
},
@@ -9,7 +9,7 @@ const log = logger.child({
type GoogleAIStreamEvent = {
candidates: {
content?: { parts?: { text: string }[]; role: string };
content: { parts: { text: string }[]; role: string };
finishReason?: "STOP" | "MAX_TOKENS" | "SAFETY" | "RECITATION" | "OTHER";
index: number;
tokenCount?: number;
@@ -34,15 +34,9 @@ export const googleAIToOpenAI: StreamingCompletionTransformer = (params) => {
return { position: -1 };
}
const parts = completionEvent.candidates[0].content?.parts || [];
const parts = completionEvent.candidates[0].content.parts;
let content = parts[0]?.text ?? "";
if (isSafetyStop(completionEvent)) {
content = `[Proxy Warning] Gemini safety filter triggered: ${JSON.stringify(
completionEvent.candidates[0].safetyRatings
)}`;
}
// If this is the first chunk, try stripping speaker names from the response
// e.g. "John: Hello" -> "Hello"
if (index === 0) {
@@ -66,14 +60,6 @@ export const googleAIToOpenAI: StreamingCompletionTransformer = (params) => {
return { position: -1, event: newEvent };
};
function isSafetyStop(completion: GoogleAIStreamEvent) {
const isSafetyStop = ["SAFETY", "OTHER"].includes(
completion.candidates[0].finishReason ?? ""
);
const hasNoContent = completion.candidates[0].content?.parts?.length === 0;
return isSafetyStop && hasNoContent;
}
function asCompletion(event: ServerSentEvent): GoogleAIStreamEvent | null {
try {
const parsed = JSON.parse(event.data) as GoogleAIStreamEvent;
@@ -1,76 +0,0 @@
import { logger } from "../../../../../logger";
import { MistralAIStreamEvent, SSEResponseTransformArgs } from "../index";
import { parseEvent, ServerSentEvent } from "../parse-sse";
const log = logger.child({
module: "sse-transformer",
transformer: "mistral-ai-to-openai",
});
export const mistralAIToOpenAI = (params: SSEResponseTransformArgs) => {
const { data } = params;
const rawEvent = parseEvent(data);
if (!rawEvent.data || rawEvent.data === "[DONE]") {
return { position: -1 };
}
const completionEvent = asCompletion(rawEvent);
if (!completionEvent) {
return { position: -1 };
}
if ("choices" in completionEvent) {
const newChatEvent = {
id: params.fallbackId,
object: "chat.completion.chunk" as const,
created: Date.now(),
model: params.fallbackModel,
choices: [
{
index: completionEvent.choices[0].index,
delta: { content: completionEvent.choices[0].message.content },
finish_reason: completionEvent.choices[0].stop_reason,
},
],
};
return { position: -1, event: newChatEvent };
} else if ("outputs" in completionEvent) {
const newTextEvent = {
id: params.fallbackId,
object: "chat.completion.chunk" as const,
created: Date.now(),
model: params.fallbackModel,
choices: [
{
index: 0,
delta: { content: completionEvent.outputs[0].text },
finish_reason: completionEvent.outputs[0].stop_reason,
},
],
};
return { position: -1, event: newTextEvent };
}
// should never happen
return { position: -1 };
};
function asCompletion(event: ServerSentEvent): MistralAIStreamEvent | null {
try {
const parsed = JSON.parse(event.data);
if (
(Array.isArray(parsed.choices) &&
parsed.choices[0].message !== undefined) ||
(Array.isArray(parsed.outputs) && parsed.outputs[0].text !== undefined)
) {
return parsed;
} else {
// noinspection ExceptionCaughtLocallyJS
throw new Error("Missing required fields");
}
} catch (error) {
log.warn({ error: error.stack, event }, "Received invalid data event");
}
return null;
}
@@ -1,63 +0,0 @@
import {
MistralChatCompletionEvent,
MistralTextCompletionEvent,
StreamingCompletionTransformer,
} from "../index";
import { parseEvent, ServerSentEvent } from "../parse-sse";
import { logger } from "../../../../../logger";
const log = logger.child({
module: "sse-transformer",
transformer: "mistral-text-to-mistral-chat",
});
/**
* Transforms an incoming Mistral Text SSE to an equivalent Mistral Chat SSE.
* This is generally used when a client sends a Mistral Chat prompt, but we
* convert it to Mistral Text before sending it to the API to work around
* some bugs in Mistral/AWS prompt templating. In these cases we need to convert
* the response back to Mistral Chat.
*/
export const mistralTextToMistralChat: StreamingCompletionTransformer<
MistralChatCompletionEvent
> = (params) => {
const { data } = params;
const rawEvent = parseEvent(data);
if (!rawEvent.data) {
return { position: -1 };
}
const textCompletion = asTextCompletion(rawEvent);
if (!textCompletion) {
return { position: -1 };
}
const chatEvent: MistralChatCompletionEvent = {
choices: [
{
index: 0,
message: { role: "assistant", content: textCompletion.outputs[0].text },
stop_reason: textCompletion.outputs[0].stop_reason,
},
],
};
return { position: -1, event: chatEvent };
};
function asTextCompletion(
event: ServerSentEvent
): MistralTextCompletionEvent | null {
try {
const parsed = JSON.parse(event.data);
if (Array.isArray(parsed.outputs) && parsed.outputs[0].text !== undefined) {
return parsed;
} else {
// noinspection ExceptionCaughtLocallyJS
throw new Error("Missing required fields");
}
} catch (error: any) {
log.warn({ error: error.stack, event }, "Received invalid data event");
}
return null;
}
+45 -114
View File
@@ -1,87 +1,48 @@
import { Request, RequestHandler, Router } from "express";
import { BadRequestError } from "../shared/errors";
import { RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { keyPool } from "../shared/key-management";
import {
getMistralAIModelFamily,
MistralAIModelFamily,
ModelFamily,
} from "../shared/models";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addKey,
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeBody,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
// Mistral can't settle on a single naming scheme and deprecates models within
// months of releasing them so this list is hard to keep up to date. 2024-07-28
// https://docs.mistral.ai/platform/endpoints
export const KNOWN_MISTRAL_AI_MODELS = [
/* Premier models */
// Mistral Large (top-tier reasoning model)
"mistral-large-latest",
"mistral-large-2411",
"mistral-large-2407",
"mistral-large-2402", // older version
// Pixtral Large (multimodal/vision model)
"pixtral-large-latest",
"pixtral-large-2411",
// Mistral Saba (language-specialized model)
"mistral-saba-latest",
"mistral-saba-2502",
// Codestral (code model)
"codestral-latest",
"codestral-2501",
"codestral-2405",
// Ministral models (edge models)
"ministral-8b-latest",
"ministral-8b-2410",
"ministral-3b-latest",
"ministral-3b-2410",
// Embedding & Moderation
"mistral-embed",
"mistral-embed-2312",
"mistral-moderation-latest",
"mistral-moderation-2411",
/* Free models */
// Mistral Small (with vision in latest version)
"mistral-small-latest",
"mistral-small-2503", // v3.1 with vision
"mistral-small-2402", // older version
"magistral-small-latest",
// Pixtral 12B (vision model)
"pixtral-12b-latest",
"pixtral-12b-2409",
/* Research & Open Models */
// Mistral Nemo
"open-mistral-nemo",
"open-mistral-nemo-2407",
// Earlier Mixtral & Mistral models
// Mistral 7b (open weight, legacy)
"open-mistral-7b",
"mistral-tiny-2312",
// Mixtral 8x7b (open weight, legacy)
"open-mixtral-8x7b",
"open-mixtral-8x22b",
"open-codestral-mamba",
"mathstral",
/* Other, too lazy to do it properly now */
"mistral-small-2312",
// Mixtral Small (newer 8x7b, closed weight)
"mistral-small-latest",
"mistral-small-2402",
// Mistral Medium
"mistral-medium-latest",
"mistral-medium-2312",
"mistral-medium-2505",
"magistral-medium-latest",
// Mistral Large
"mistral-large-latest",
"mistral-large-2402",
// Deprecated identifiers (2024-05-01)
"mistral-tiny",
"mistral-tiny-2312",
"mistral-small",
"mistral-medium",
];
let modelsCache: any = null;
@@ -128,28 +89,23 @@ const mistralAIResponseHandler: ProxyResHandlerWithBody = async (
throw new Error("Expected body to be an object");
}
let newBody = body;
if (req.inboundApi === "mistral-text" && req.outboundApi === "mistral-ai") {
newBody = transformMistralTextToMistralChat(body);
}
res.status(200).json({ ...newBody, proxy: body.proxy });
res.status(200).json({ ...body, proxy: body.proxy });
};
export function transformMistralTextToMistralChat(textBody: any) {
return {
...textBody,
choices: [
{ message: { content: textBody.outputs[0].text, role: "assistant" } },
],
outputs: undefined,
};
}
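A quick illustration of the transform above on a hypothetical Mistral text-completion body:

const textBody = {
  model: "mistral-large-2402",
  outputs: [{ text: "Hello!", stop_reason: "stop" }],
};
const chatBody = transformMistralTextToMistralChat(textBody);
// chatBody.choices[0].message -> { content: "Hello!", role: "assistant" }
// chatBody.outputs            -> undefined, so it is dropped when serialized to JSON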
const mistralAIProxy = createQueuedProxyMiddleware({
target: "https://api.mistral.ai",
mutations: [addKey, finalizeBody],
blockingResponseHandler: mistralAIResponseHandler,
const mistralAIProxy = createQueueMiddleware({
proxyMiddleware: createProxyMiddleware({
target: "https://api.mistral.ai",
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({
pipeline: [addKey, finalizeBody],
}),
proxyRes: createOnProxyResHandler([mistralAIResponseHandler]),
error: handleProxyError,
},
}),
});
const mistralAIRouter = Router();
@@ -158,37 +114,12 @@ mistralAIRouter.get("/v1/models", handleModelRequest);
mistralAIRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{
inApi: "mistral-ai",
outApi: "mistral-ai",
service: "mistral-ai",
},
{ beforeTransform: [detectMistralInputApi] }
),
createPreprocessorMiddleware({
inApi: "mistral-ai",
outApi: "mistral-ai",
service: "mistral-ai",
}),
mistralAIProxy
);
/**
* We can't determine if a request is Mistral text or chat just from the path
* because they both use the same endpoint. We need to check the request body
* for either `messages` or `prompt`.
* @param req
*/
export function detectMistralInputApi(req: Request) {
const { messages, prompt } = req.body;
if (messages) {
req.inboundApi = "mistral-ai";
req.outboundApi = "mistral-ai";
} else if (prompt && req.service === "mistral-ai") {
// Mistral La Plateforme doesn't expose a text completions endpoint.
throw new BadRequestError(
"Mistral (via La Plateforme API) does not support text completions. This format is only supported on Mistral via the AWS API."
);
} else if (prompt && req.service === "aws") {
req.inboundApi = "mistral-text";
req.outboundApi = "mistral-text";
}
}
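In other words, under the detection above (request bodies abbreviated):

// { messages: [...] }                             -> inboundApi/outboundApi "mistral-ai"
// { prompt: "..." } with req.service "mistral-ai" -> BadRequestError (no text completions on La Plateforme)
// { prompt: "..." } with req.service "aws"        -> inboundApi/outboundApi "mistral-text"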
export const mistralAI = mistralAIRouter;
-219
View File
@@ -1,219 +0,0 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { MoonshotKey, keyPool } from "../shared/key-management";
import { isMoonshotModel, isMoonshotVisionModel } from "../shared/api-schemas/moonshot";
import { logger } from "../logger";
const log = logger.child({ module: "proxy", service: "moonshot" });
let modelsCache: any = null;
let modelsCacheTime = 0;
const moonshotResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
res.status(200).json({ ...body, proxy: body.proxy });
};
const getModelsResponse = async () => {
// Return cache if less than 1 minute old
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
try {
const modelToUse = "moonshot-v1-8k";
const moonshotKey = keyPool.get(modelToUse, "moonshot") as MoonshotKey;
if (!moonshotKey || !moonshotKey.key) {
log.warn("No valid Moonshot key available for model listing");
throw new Error("No valid Moonshot API key available");
}
// Fetch models from Moonshot API
const response = await axios.get("https://api.moonshot.cn/v1/models", {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${moonshotKey.key}`
},
});
if (!response.data || !response.data.data) {
throw new Error("Unexpected response format from Moonshot API");
}
// Format response to ensure OpenAI compatibility
const models = {
object: "list",
data: response.data.data.map((model: any) => ({
id: model.id,
object: "model",
created: model.created || Math.floor(Date.now() / 1000),
owned_by: model.owned_by || "moonshot",
permission: model.permission || [],
root: model.root || model.id,
parent: model.parent || null,
})),
};
log.debug({ modelCount: models.data.length }, "Retrieved models from Moonshot API");
// Cache the response
modelsCache = models;
modelsCacheTime = new Date().getTime();
return models;
} catch (error) {
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error fetching Moonshot models"
);
} else {
log.error({ error }, "Unknown error fetching Moonshot models");
}
// Return a default list of known Moonshot models as fallback
return {
object: "list",
data: [
{ id: "moonshot-v1-8k", object: "model", created: 1678888000, owned_by: "moonshot" },
{ id: "moonshot-v1-32k", object: "model", created: 1678888000, owned_by: "moonshot" },
{ id: "moonshot-v1-128k", object: "model", created: 1678888000, owned_by: "moonshot" },
],
};
}
};
const handleModelRequest: RequestHandler = async (_req, res) => {
try {
const models = await getModelsResponse();
res.status(200).json(models);
} catch (error) {
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error handling model request"
);
} else {
log.error({ error }, "Unknown error handling model request");
}
res.status(500).json({ error: "Failed to fetch models" });
}
};
// Function to handle partial mode for Moonshot
function handlePartialMode(req: Request) {
if (!process.env.NO_MOONSHOT_PARTIAL && req.body.messages && Array.isArray(req.body.messages)) {
const msgs = req.body.messages;
if (msgs.at(-1)?.role !== 'assistant') return;
let i = msgs.length - 1;
let content = '';
while (i >= 0 && msgs[i].role === 'assistant') {
// Consolidate consecutive assistant messages
content = msgs[i--].content + content;
}
// Replace consecutive assistant messages with single message with partial: true
msgs.splice(i + 1, msgs.length, { role: 'assistant', content, partial: true });
log.debug("Consolidated assistant messages and enabled partial mode for Moonshot request");
}
}
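A worked example of the consolidation above, assuming NO_MOONSHOT_PARTIAL is unset (message contents invented):

// req.body.messages before:
//   [{ role: "user", content: "Write a poem" },
//    { role: "assistant", content: "Roses are red," },
//    { role: "assistant", content: " violets are blue" }]
// after handlePartialMode(req):
//   [{ role: "user", content: "Write a poem" },
//    { role: "assistant", content: "Roses are red, violets are blue", partial: true }]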
// Function to handle vision model content transformation
function handleVisionContent(req: Request) {
const model = req.body.model;
if (isMoonshotVisionModel(model) && req.body.messages) {
// Ensure vision content is properly formatted
req.body.messages = req.body.messages.map((msg: any) => {
if (msg.content && typeof msg.content === 'string') {
// Keep string content as is for non-vision requests
return msg;
}
return msg;
});
}
}
// Function to count tokens for Moonshot models
function countMoonshotTokens(req: Request) {
const model = req.body.model;
if (isMoonshotModel(model)) {
if (req.promptTokens) {
log.debug(
{ tokens: req.promptTokens, model },
"Estimated token count for Moonshot prompt"
);
}
}
}
// Handle rate limit errors for Moonshot
async function handleMoonshotRateLimitError(req: Request, error: any) {
if (error.response?.status === 429) {
log.warn({ model: req.body.model }, "Moonshot rate limit hit, rotating key");
const currentKey = req.key as MoonshotKey;
keyPool.markRateLimited(currentKey);
// Try to get a new key
const newKey = keyPool.get(req.body.model, "moonshot") as MoonshotKey;
if (newKey.hash !== currentKey.hash) {
req.key = newKey;
return true; // Retry with new key
}
}
return false;
}
const moonshotProxy = createQueuedProxyMiddleware({
mutations: [
addKey,
finalizeBody
],
target: "https://api.moonshot.cn",
blockingResponseHandler: moonshotResponseHandler,
});
const moonshotRouter = Router();
// Chat completions endpoint
moonshotRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "moonshot" },
{ afterTransform: [ handlePartialMode, handleVisionContent, countMoonshotTokens ] }
),
moonshotProxy
);
// Embeddings endpoint
moonshotRouter.post(
"/v1/embeddings",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "moonshot" },
{ afterTransform: [ countMoonshotTokens ] }
),
moonshotProxy
);
// Models endpoint
moonshotRouter.get("/v1/models", handleModelRequest);
export const moonshot = moonshotRouter;
+37 -123
View File
@@ -1,17 +1,24 @@
import { Request, RequestHandler, Router } from "express";
import { OpenAIImageGenerationResult } from "../shared/file-storage/mirror-generated-image";
import { generateModelList } from "./openai";
import { RequestHandler, Router, Request } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addKey,
createPreprocessorMiddleware,
finalizeBody,
createOnProxyReqHandler,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
import { generateModelList } from "./openai";
import { OpenAIImageGenerationResult } from "../shared/file-storage/mirror-generated-image";
const KNOWN_MODELS = ["dall-e-2", "dall-e-3", "gpt-image-1"];
const KNOWN_MODELS = ["dall-e-2", "dall-e-3"];
let modelListCache: any = null;
let modelListValid = 0;
@@ -19,9 +26,7 @@ const handleModelRequest: RequestHandler = (_req, res) => {
if (new Date().getTime() - modelListValid < 1000 * 60) {
return res.status(200).json(modelListCache);
}
const result = generateModelList("openai").filter((m: { id: string }) =>
KNOWN_MODELS.includes(m.id)
);
const result = generateModelList(KNOWN_MODELS);
modelListCache = { object: "list", data: result };
modelListValid = new Date().getTime();
res.status(200).json(modelListCache);
@@ -58,46 +63,27 @@ function transformResponseForChat(
req: Request
): Record<string, any> {
const prompt = imageBody.data[0].revised_prompt ?? req.body.prompt;
const isGptImage = req.body.model?.includes("gpt-image") || false;
const content = imageBody.data
.map((item) => {
const { url, b64_json } = item;
// The gpt-image-1 model always returns b64_json
// Format will depend on output_format parameter (defaults to png)
// For simplicity, we'll assume png if not specified
const format = req.body.output_format || "png";
if (b64_json) {
return `![${prompt}](data:image/${format};base64,${b64_json})`;
return `![${prompt}](data:image/png;base64,${b64_json})`;
} else {
return `![${prompt}](${url})`;
}
})
.join("\n\n");
// Prepare the usage information - gpt-image-1 includes detailed token usage
let usage = {
prompt_tokens: 0,
completion_tokens: req.outputTokens,
total_tokens: req.outputTokens,
};
// If this is a gpt-image-1 response, it includes detailed usage info
if (imageBody.usage) {
usage = {
prompt_tokens: imageBody.usage.input_tokens || 0,
completion_tokens: imageBody.usage.output_tokens || 0,
total_tokens: imageBody.usage.total_tokens || 0,
};
}
return {
id: req.body.model?.includes("gpt-image") ? "gptimage-" + req.id : "dalle-" + req.id,
id: "dalle-" + req.id,
object: "chat.completion",
created: Date.now(),
model: req.body.model,
usage,
usage: {
prompt_tokens: 0,
completion_tokens: req.outputTokens,
total_tokens: req.outputTokens,
},
choices: [
{
message: { role: "assistant", content },
@@ -108,82 +94,21 @@ function transformResponseForChat(
};
}
// Filter parameters based on the model being used to avoid sending unsupported parameters
function filterModelParameters(manager: ProxyReqManager) {
const req = manager.request;
const originalBody = req.body;
const modelName = originalBody?.model || "";
// Skip if no body or it's not an object
if (!originalBody || typeof originalBody !== 'object') return;
// Create a deep copy of the body to filter
const filteredBody = { ...originalBody };
// Define allowed parameters for each model
if (modelName.includes('dall-e-2')) {
// DALL-E 2 parameters
const allowedParams = [
'model', 'prompt', 'n', 'size', 'response_format', 'user'
];
// Remove any parameter not in the allowed list
Object.keys(filteredBody).forEach(key => {
if (!allowedParams.includes(key)) {
delete filteredBody[key];
}
});
req.log.info({ model: 'dall-e-2', params: Object.keys(filteredBody) }, "Filtered parameters for DALL-E 2");
} else if (modelName.includes('dall-e-3')) {
// DALL-E 3 parameters
const allowedParams = [
'model', 'prompt', 'n', 'quality', 'size', 'style', 'response_format', 'user'
];
// Remove any parameter not in the allowed list
Object.keys(filteredBody).forEach(key => {
if (!allowedParams.includes(key)) {
delete filteredBody[key];
}
});
req.log.info({ model: 'dall-e-3', params: Object.keys(filteredBody) }, "Filtered parameters for DALL-E 3");
} else if (modelName.includes('gpt-image')) {
// Define allowed parameters for gpt-image-1
const allowedParams = [
'model', 'prompt', 'background', 'moderation', 'n', 'output_compression',
'output_format', 'quality', 'size', 'user', 'image', 'mask'
];
// Remove any parameter not in the allowed list, especially 'style' which is only for DALL-E 3
Object.keys(filteredBody).forEach(key => {
if (!allowedParams.includes(key)) {
req.log.info({ model: 'gpt-image-1', removedParam: key }, "Removing unsupported parameter for GPT Image");
delete filteredBody[key];
}
});
req.log.info({ model: 'gpt-image-1', params: Object.keys(filteredBody) }, "Filtered parameters for GPT Image");
}
// Use the proper method to update the body
manager.setBody(filteredBody);
}
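For instance, with the filter above, a request that targets gpt-image-1 but still carries a DALL-E 3 field is trimmed before proxying (field values invented):

// In:  { model: "gpt-image-1", prompt: "a red fox", size: "1024x1024", quality: "high", style: "vivid" }
// Out: { model: "gpt-image-1", prompt: "a red fox", size: "1024x1024", quality: "high" }
//      ("style" is not in the gpt-image allow-list, so it is deleted before manager.setBody)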
function replacePath(manager: ProxyReqManager) {
const req = manager.request;
const pathname = req.url.split("?")[0];
req.log.debug({ pathname }, "OpenAI image path filter");
if (req.path.startsWith("/v1/chat/completions")) {
manager.setPath("/v1/images/generations");
}
}
const openaiImagesProxy = createQueuedProxyMiddleware({
target: "https://api.openai.com",
mutations: [replacePath, filterModelParameters, addKey, finalizeBody],
blockingResponseHandler: openaiImagesResponseHandler,
const openaiImagesProxy = createQueueMiddleware({
proxyMiddleware: createProxyMiddleware({
target: "https://api.openai.com",
changeOrigin: true,
selfHandleResponse: true,
logger,
pathRewrite: {
"^/v1/chat/completions": "/v1/images/generations",
},
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [addKey, finalizeBody] }),
proxyRes: createOnProxyResHandler([openaiImagesResponseHandler]),
error: handleProxyError,
},
}),
});
const openaiImagesRouter = Router();
@@ -198,17 +123,6 @@ openaiImagesRouter.post(
}),
openaiImagesProxy
);
// Add support for the /v1/images/edits endpoint (used by gpt-image-1 for image editing)
openaiImagesRouter.post(
"/v1/images/edits",
ipLimiter,
createPreprocessorMiddleware({
inApi: "openai-image",
outApi: "openai-image",
service: "openai",
}),
openaiImagesProxy
);
openaiImagesRouter.post(
"/v1/chat/completions",
ipLimiter,
+113 -367
View File
@@ -1,81 +1,113 @@
import { Request, RequestHandler, Router } from "express";
import { RequestHandler, Router } from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import { config } from "../config";
import { BadRequestError } from "../shared/errors";
import { AzureOpenAIKey, keyPool, OpenAIKey } from "../shared/key-management";
import { getOpenAIModelFamily } from "../shared/models";
import { keyPool, OpenAIKey } from "../shared/key-management";
import {
getOpenAIModelFamily,
ModelFamily,
OpenAIModelFamily,
} from "../shared/models";
import { logger } from "../logger";
import { createQueueMiddleware } from "./queue";
import { ipLimiter } from "./rate-limit";
import { handleProxyError } from "./middleware/common";
import {
addKey,
addKeyForEmbeddingsRequest,
createEmbeddingsPreprocessorMiddleware,
createOnProxyReqHandler,
createPreprocessorMiddleware,
finalizeBody,
forceModel,
RequestPreprocessor,
} from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import {
createOnProxyResHandler,
ProxyResHandlerWithBody,
} from "./middleware/response";
// https://platform.openai.com/docs/models/overview
export const KNOWN_OPENAI_MODELS = [
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4-turbo", // alias for latest gpt4-turbo stable
"gpt-4-turbo-2024-04-09", // gpt4-turbo stable, with vision
"gpt-4-turbo-preview", // alias for latest turbo preview
"gpt-4-0125-preview", // gpt4-turbo preview 2
"gpt-4-1106-preview", // gpt4-turbo preview 1
"gpt-4-vision-preview", // gpt4-turbo preview 1 with vision
"gpt-4",
"gpt-4-0613",
"gpt-4-0314", // EOL 2024-06-13
"gpt-4-32k",
"gpt-4-32k-0314", // EOL 2024-06-13
"gpt-4-32k-0613",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301", // EOL 2024-06-13
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-instruct",
"gpt-3.5-turbo-instruct-0914",
"text-embedding-ada-002",
];
let modelsCache: any = null;
let modelsCacheTime = 0;
export function generateModelList(service: "openai" | "azure") {
const keys = keyPool
.list()
.filter((k) => k.service === service && !k.isDisabled) as
| OpenAIKey[]
| AzureOpenAIKey[];
if (keys.length === 0) return [];
export function generateModelList(models = KNOWN_OPENAI_MODELS) {
// Get available families and snapshots
let availableFamilies = new Set<OpenAIModelFamily>();
const availableSnapshots = new Set<string>();
for (const key of keyPool.list()) {
if (key.isDisabled || key.service !== "openai") continue;
const asOpenAIKey = key as OpenAIKey;
asOpenAIKey.modelFamilies.forEach((f) => availableFamilies.add(f));
asOpenAIKey.modelSnapshots.forEach((s) => availableSnapshots.add(s));
}
const allowedModelFamilies = new Set(config.allowedModelFamilies);
const modelFamilies = new Set(
keys
.flatMap((k) => k.modelFamilies)
.filter((f) => allowedModelFamilies.has(f))
// Remove disabled families
const allowed = new Set<ModelFamily>(config.allowedModelFamilies);
availableFamilies = new Set(
[...availableFamilies].filter((x) => allowed.has(x))
);
const modelIds = new Set(
keys
.flatMap((k) => k.modelIds)
.filter((id) => {
const allowed = modelFamilies.has(getOpenAIModelFamily(id));
const known = ["gpt", "o", "dall-e", "chatgpt", "text-embedding", "codex"].some(
(prefix) => id.startsWith(prefix)
);
const isFinetune = id.includes("ft");
return allowed && known && !isFinetune;
})
);
return models
.map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: "openai",
permission: [
{
id: "modelperm-" + id,
object: "model_permission",
created: new Date().getTime(),
organization: "*",
group: null,
is_blocking: false,
},
],
root: id,
parent: null,
}))
.filter((model) => {
// First check if the family is available
const hasFamily = availableFamilies.has(getOpenAIModelFamily(model.id));
if (!hasFamily) return false;
return Array.from(modelIds).map((id) => ({
id,
object: "model",
created: new Date().getTime(),
owned_by: service,
permission: [
{
id: "modelperm-" + id,
object: "model_permission",
created: new Date().getTime(),
organization: "*",
group: null,
is_blocking: false,
},
],
root: id,
parent: null,
}));
// Then for snapshots, ensure the specific snapshot is available
const isSnapshot = model.id.match(/-\d{4}(-preview)?$/);
if (!isSnapshot) return true;
return availableSnapshots.has(model.id);
});
}
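For example, the snapshot check above distinguishes dated snapshots from undated aliases (ids taken from KNOWN_OPENAI_MODELS earlier in this file):

const snapshotRe = /-\d{4}(-preview)?$/;
["gpt-4-turbo", "gpt-4-0125-preview", "gpt-4-32k-0613"].map((id) => snapshotRe.test(id));
// -> [false, true, true]: dated snapshots are kept only when a key reports that exact
//    snapshot, while undated aliases only need their model family to be allowed.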
const handleModelRequest: RequestHandler = (_req, res) => {
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return res.status(200).json(modelsCache);
}
if (!config.openaiKey) return { object: "list", data: [] };
const result = generateModelList("openai");
const result = generateModelList();
modelsCache = { object: "list", data: result };
modelsCacheTime = new Date().getTime();
res.status(200).json(modelsCache);
@@ -110,26 +142,16 @@ const openaiResponseHandler: ProxyResHandlerWithBody = async (
throw new Error("Expected body to be an object");
}
const interval = (req as any)._keepAliveInterval
if (interval) {
clearInterval(interval);
res.write(JSON.stringify(body));
res.end();
return;
}
let newBody = body;
if (req.outboundApi === "openai-text" && req.inboundApi === "openai") {
req.log.info("Transforming Turbo-Instruct response to Chat format");
newBody = transformTurboInstructResponse(body);
} else if (req.outboundApi === "openai-responses" && req.inboundApi === "openai") {
req.log.info("Transforming Responses API response to Chat format");
newBody = transformResponsesApiResponse(body);
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
/** Only used for non-streaming responses. */
function transformTurboInstructResponse(
turboInstructBody: Record<string, any>
): Record<string, any> {
@@ -147,151 +169,31 @@ function transformTurboInstructResponse(
return transformed;
}
function transformResponsesApiResponse(
responsesBody: Record<string, any>
): Record<string, any> {
// If the response is already in chat completion format, return it as is
if (responsesBody.choices && responsesBody.choices[0]?.message) {
return responsesBody;
}
// Create a compatible format for clients expecting chat completions format
const transformed: Record<string, any> = {
id: responsesBody.id || `chatcmpl-${Date.now()}`,
object: "chat.completion",
created: responsesBody.created_at || Math.floor(Date.now() / 1000),
model: responsesBody.model || "o1-pro",
choices: [],
usage: responsesBody.usage || {
prompt_tokens: 0,
completion_tokens: 0,
total_tokens: 0
}
};
// Extract content from the Responses API format - multiple possible structures
// Structure 1: output array with message objects
if (responsesBody.output && Array.isArray(responsesBody.output)) {
// Look for a message type in the output array
let messageOutput = null;
for (const output of responsesBody.output) {
if (output.type === "message") {
messageOutput = output;
break;
}
}
if (messageOutput) {
if (messageOutput.content && Array.isArray(messageOutput.content) && messageOutput.content.length > 0) {
// Handle text content
let content = "";
const toolCalls: any[] = [];
for (const contentItem of messageOutput.content) {
if (contentItem.type === "output_text") {
content += contentItem.text;
} else if (contentItem.type === "tool_calls" && Array.isArray(contentItem.tool_calls)) {
toolCalls.push(...contentItem.tool_calls);
}
}
const message: Record<string, any> = {
role: messageOutput.role || "assistant",
content: content
};
if (toolCalls.length > 0) {
message.tool_calls = toolCalls;
}
transformed.choices.push({
index: 0,
message,
finish_reason: "stop"
});
} else if (typeof messageOutput.content === 'string') {
// Simple string content
transformed.choices.push({
index: 0,
message: {
role: messageOutput.role || "assistant",
content: messageOutput.content
},
finish_reason: "stop"
});
}
}
}
// Structure 2: response object with content
else if (responsesBody.response && responsesBody.response.content) {
transformed.choices.push({
index: 0,
message: {
role: "assistant",
content: typeof responsesBody.response.content === 'string'
? responsesBody.response.content
: JSON.stringify(responsesBody.response.content)
},
finish_reason: responsesBody.response.finish_reason || "stop"
});
}
// Structure 3: look for 'content' field directly
else if (responsesBody.content) {
transformed.choices.push({
index: 0,
message: {
role: "assistant",
content: typeof responsesBody.content === 'string'
? responsesBody.content
: JSON.stringify(responsesBody.content)
},
finish_reason: "stop"
});
}
// If we couldn't extract content, create a basic response
if (transformed.choices.length === 0) {
transformed.choices.push({
index: 0,
message: {
role: "assistant",
content: ""
},
finish_reason: "stop"
});
}
// Copy usage information if available
if (responsesBody.usage) {
transformed.usage = {
prompt_tokens: responsesBody.usage.input_tokens || 0,
completion_tokens: responsesBody.usage.output_tokens || 0,
total_tokens: responsesBody.usage.total_tokens || 0
};
}
return transformed;
}
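// Illustrative example of the transformation above (shapes inferred from the handling
// logic in this file, not taken from official API documentation):
//   in:  { id: "resp_123", model: "o1-pro",
//          output: [{ type: "message", role: "assistant",
//                     content: [{ type: "output_text", text: "Hello" }] }],
//          usage: { input_tokens: 10, output_tokens: 2, total_tokens: 12 } }
//   out: { id: "resp_123", object: "chat.completion", model: "o1-pro",
//          choices: [{ index: 0, message: { role: "assistant", content: "Hello" },
//                      finish_reason: "stop" }],
//          usage: { prompt_tokens: 10, completion_tokens: 2, total_tokens: 12 } }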
const openaiProxy = createQueuedProxyMiddleware({
mutations: [addKey, finalizeBody],
target: "https://api.openai.com",
blockingResponseHandler: openaiResponseHandler,
const openaiProxy = createQueueMiddleware({
proxyMiddleware: createProxyMiddleware({
target: "https://api.openai.com",
changeOrigin: true,
selfHandleResponse: true,
logger,
on: {
proxyReq: createOnProxyReqHandler({ pipeline: [addKey, finalizeBody] }),
proxyRes: createOnProxyResHandler([openaiResponseHandler]),
error: handleProxyError,
},
}),
});
const openaiEmbeddingsProxy = createQueuedProxyMiddleware({
mutations: [addKeyForEmbeddingsRequest, finalizeBody],
const openaiEmbeddingsProxy = createProxyMiddleware({
target: "https://api.openai.com",
});
// New proxy middleware for the Responses API
const openaiResponsesProxy = createQueuedProxyMiddleware({
mutations: [addKey, finalizeBody],
target: "https://api.openai.com",
blockingResponseHandler: openaiResponseHandler,
changeOrigin: true,
selfHandleResponse: false,
logger,
on: {
proxyReq: createOnProxyReqHandler({
pipeline: [addKeyForEmbeddingsRequest, finalizeBody],
}),
error: handleProxyError,
},
});
const openaiRouter = Router();
@@ -320,120 +222,17 @@ openaiRouter.post(
),
openaiProxy
);
const setupChunkedTransfer: RequestHandler = (req, res, next) => {
req.log.info("Setting chunked transfer for o1 to prevent Cloudflare timeouts")
// Check if user is trying to use streaming with codex-mini models
if (req.body.model?.startsWith("codex-mini") && req.body.stream === true) {
return res.status(400).json({
error: {
message: "The codex-mini models do not support streaming. Please set 'stream: false' in your request.",
type: "invalid_request_error",
param: "stream",
code: "streaming_not_supported"
}
});
}
// o1 itself does not support streaming, so fall back to a chunked keep-alive response
if (req.body.model === "o1" || req.body.model === "o1-2024-12-17") {
req.log.info("Setting chunked transfer for o1 to prevent Cloudflare timeouts");
req.isChunkedTransfer = true;
res.writeHead(200, {
'Content-Type': 'application/json',
'Transfer-Encoding': 'chunked'
});
// A sufficiently large padding payload is required; otherwise Cloudflare buffers the
// chunks instead of passing them through, and a response taking longer than ~100s is
// terminated anyway.
const keepAlive = setInterval(() => {
res.write(' '.repeat(4096));
}, 48_000);
(req as any)._keepAliveInterval = keepAlive;
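// The padding interval is cleared in openaiResponseHandler once the upstream response
// arrives; that handler then writes the JSON body and ends the chunked response.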
}
next();
};
// Functions to handle model-specific API routing
function shouldUseResponsesApi(model: string): boolean {
return model === "o1-pro" || model.startsWith("o1-pro-") ||
model === "o3-pro" || model.startsWith("o3-pro-") ||
model === "codex-mini-latest" || model.startsWith("codex-mini-");
}
// Preprocessor to redirect requests to the responses API
const routeToResponsesApi: RequestPreprocessor = (req) => {
if (shouldUseResponsesApi(req.body.model)) {
req.log.info(`Routing ${req.body.model} to OpenAI Responses API`);
req.url = "/v1/responses";
req.outboundApi = "openai-responses";
}
};
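// Note: only the URL and outbound API are switched here; the actual move of `messages`
// into `input` happens in the inline middleware on the chat completions route below.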
// General chat completion endpoint. Turbo-instruct is not supported here.
openaiRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "openai" },
{
afterTransform: [
fixupMaxTokens,
filterGPT5UnsupportedParams,
routeToResponsesApi
]
}
),
setupChunkedTransfer,
(req, _res, next) => {
// Route to the responses endpoint if needed
if (req.outboundApi === "openai-responses") {
// Ensure messages is moved to input properly
req.log.info("Final check for Responses API format in chat completions");
if (req.body.messages) {
req.log.info("Moving 'messages' to 'input' for Responses API");
req.body.input = req.body.messages;
delete req.body.messages;
} else if (req.body.input && req.body.input.messages) {
req.log.info("Reformatting input.messages for Responses API");
req.body.input = req.body.input.messages;
}
return openaiResponsesProxy(req, _res, next);
}
next();
},
createPreprocessorMiddleware({
inApi: "openai",
outApi: "openai",
service: "openai",
}),
openaiProxy
);
// New endpoint for OpenAI Responses API
openaiRouter.post(
"/v1/responses",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai-responses", service: "openai" },
{ afterTransform: [fixupMaxTokens, filterGPT5UnsupportedParams] }
),
// Add final check to ensure the body is in the correct format for Responses API
(req, _res, next) => {
req.log.info("Final check for Responses API format");
// Ensure messages is properly formatted for input
if (req.body.messages) {
req.log.info("Moving 'messages' to 'input' for Responses API");
req.body.input = req.body.messages;
delete req.body.messages;
} else if (req.body.input && req.body.input.messages) {
req.log.info("Reformatting input.messages for Responses API");
req.body.input = req.body.input.messages;
}
next();
},
openaiResponsesProxy
);
// Embeddings endpoint.
openaiRouter.post(
"/v1/embeddings",
@@ -442,57 +241,4 @@ openaiRouter.post(
openaiEmbeddingsProxy
);
function forceModel(model: string): RequestPreprocessor {
return (req: Request) => void (req.body.model = model);
}
function fixupMaxTokens(req: Request) {
// For Responses API, use max_output_tokens instead of max_completion_tokens
if (req.outboundApi === "openai-responses") {
if (!req.body.max_output_tokens) {
req.body.max_output_tokens = req.body.max_tokens || req.body.max_completion_tokens;
}
// Remove the other token params to avoid API errors
delete req.body.max_tokens;
delete req.body.max_completion_tokens;
// Remove other parameters not supported by Responses API
const unsupportedParams = ['frequency_penalty', 'presence_penalty'];
for (const param of unsupportedParams) {
if (req.body[param] !== undefined) {
req.log.info(`Removing unsupported parameter for Responses API: ${param}`);
delete req.body[param];
}
}
} else {
// Original behavior for other APIs
if (!req.body.max_completion_tokens) {
req.body.max_completion_tokens = req.body.max_tokens;
}
delete req.body.max_tokens;
}
}
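// Rough example of the mapping above: for a Responses API request,
// { max_tokens: 500, frequency_penalty: 0.2 } becomes { max_output_tokens: 500 };
// for other OpenAI requests it becomes { max_completion_tokens: 500, frequency_penalty: 0.2 }.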
// GPT-5, GPT-5-mini, and GPT-5-nano don't support certain parameters
// Remove them if present to prevent API errors
function filterGPT5UnsupportedParams(req: Request) {
const model = req.body.model;
// Only apply filtering to these specific models (gpt5-chat-latest supports all params)
const restrictedModels = /^gpt-5(-mini|-nano)?(-\d{4}-\d{2}-\d{2})?$/;
if (!restrictedModels.test(model)) {
return; // Not a restricted model, no filtering needed
}
// Remove unsupported parameters if they exist
const unsupportedParams = ['temperature', 'top_p', 'presence_penalty', 'frequency_penalty'];
for (const param of unsupportedParams) {
if (req.body[param] !== undefined) {
delete req.body[param];
}
}
}
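// The pattern above matches gpt-5, gpt-5-mini, and gpt-5-nano plus their dated snapshots
// (e.g. a hypothetical gpt-5-mini-2025-08-07); gpt-5-chat-latest does not match, so it
// keeps its sampling parameters.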
export const openai = openaiRouter;
+67 -55
View File
@@ -13,7 +13,6 @@
import crypto from "crypto";
import { Handler, Request } from "express";
import { config } from "../config";
import { BadRequestError, TooManyRequestsError } from "../shared/errors";
import { keyPool } from "../shared/key-management";
import {
@@ -23,25 +22,24 @@ import {
} from "../shared/models";
import { initializeSseStream } from "../shared/streaming";
import { logger } from "../logger";
import { getUniqueIps } from "./rate-limit";
import { ProxyReqMutator, RequestPreprocessor } from "./middleware/request";
import { getUniqueIps, SHARED_IP_ADDRESSES } from "./rate-limit";
import { RequestPreprocessor } from "./middleware/request";
import { handleProxyError } from "./middleware/common";
import { sendErrorToClient } from "./middleware/response/error-generator";
import { ProxyReqManager } from "./middleware/request/proxy-req-manager";
import { classifyErrorAndSend } from "./middleware/common";
const queue: Request[] = [];
const log = logger.child({ module: "request-queue" });
/** Maximum number of queue slots for Agnai.chat requests. */
const AGNAI_CONCURRENCY_LIMIT = 5;
/** Maximum number of queue slots for individual users. */
const USER_CONCURRENCY_LIMIT = parseInt(
process.env.USER_CONCURRENCY_LIMIT ?? "1"
);
const USER_CONCURRENCY_LIMIT = 1;
const MIN_HEARTBEAT_SIZE = parseInt(process.env.MIN_HEARTBEAT_SIZE_B ?? "512");
const MAX_HEARTBEAT_SIZE =
1024 * parseInt(process.env.MAX_HEARTBEAT_SIZE_KB ?? "1024");
const HEARTBEAT_INTERVAL =
1000 * parseInt(process.env.HEARTBEAT_INTERVAL_SEC ?? "5");
const LOAD_THRESHOLD = parseFloat(process.env.LOAD_THRESHOLD ?? "150");
const LOAD_THRESHOLD = parseFloat(process.env.LOAD_THRESHOLD ?? "50");
const PAYLOAD_SCALE_FACTOR = parseFloat(
process.env.PAYLOAD_SCALE_FACTOR ?? "6"
);
@@ -60,28 +58,39 @@ const QUEUE_JOIN_TIMEOUT = 5000;
function getIdentifier(req: Request) {
if (req.user) return req.user.token;
if (req.risuToken) return req.risuToken;
// if (isFromSharedIp(req)) return "shared-ip";
if (isFromSharedIp(req)) return "shared-ip";
return req.ip;
}
const sharesIdentifierWith = (incoming: Request) => (queued: Request) =>
getIdentifier(queued) === getIdentifier(incoming);
async function enqueue(req: Request) {
if (req.socket.destroyed || req.res?.writableEnded) {
// In rare cases, a request can be disconnected after it is dequeued for a
// retry, but before it is re-enqueued. In this case we may miss the abort
// and the request will loop in the queue forever.
req.log.warn("Attempt to enqueue aborted request.");
throw new Error("Attempt to enqueue aborted request.");
}
const isFromSharedIp = (req: Request) => SHARED_IP_ADDRESSES.has(req.ip);
async function enqueue(req: Request) {
const enqueuedRequestCount = queue.filter(sharesIdentifierWith(req)).length;
// Do not apply concurrency limit to "special" users
if (enqueuedRequestCount >= USER_CONCURRENCY_LIMIT && req.user?.type !== "special") {
throw new TooManyRequestsError(
"Your IP or user token already has another request in the queue."
);
let isGuest = req.user?.token === undefined;
// Requests from shared IP addresses such as Agnai.chat are exempt from IP-
// based rate limiting but can only occupy a certain number of slots in the
// queue. Authenticated users always get a single spot in the queue.
const isSharedIp = isFromSharedIp(req);
const maxConcurrentQueuedRequests =
isGuest && isSharedIp ? AGNAI_CONCURRENCY_LIMIT : USER_CONCURRENCY_LIMIT;
if (enqueuedRequestCount >= maxConcurrentQueuedRequests) {
if (isSharedIp) {
// Re-enqueued requests are not counted towards the limit since they
// already made it through the queue once.
if (req.retryCount === 0) {
throw new TooManyRequestsError(
"Too many agnai.chat requests are already queued"
);
}
} else {
throw new TooManyRequestsError(
"Your IP or user token already has another request in the queue."
);
}
}
// shitty hack to remove hpm's event listeners on retried requests
@@ -137,7 +146,19 @@ export async function reenqueueRequest(req: Request) {
}
function getQueueForPartition(partition: ModelFamily): Request[] {
return queue.filter((req) => getModelFamilyForRequest(req) === partition);
return queue
.filter((req) => getModelFamilyForRequest(req) === partition)
.sort((a, b) => {
// Certain requests are exempted from IP-based rate limiting because they
// come from a shared IP address. To prevent these requests from starving
// out other requests during periods of high traffic, we sort them to the
// end of the queue.
const aIsExempted = isFromSharedIp(a);
const bIsExempted = isFromSharedIp(b);
if (aIsExempted && !bIsExempted) return 1;
if (!aIsExempted && bIsExempted) return -1;
return 0;
});
}
export function dequeue(partition: ModelFamily): Request | undefined {
@@ -148,14 +169,7 @@ export function dequeue(partition: ModelFamily): Request | undefined {
}
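// Pick the request with the lowest effective start time; tokensPunishmentFactor pushes
// requests that have already consumed more prompt/output tokens toward the back of the line.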
const req = modelQueue.reduce((prev, curr) =>
prev.startTime +
config.tokensPunishmentFactor *
((prev.promptTokens ?? 0) + (prev.outputTokens ?? 0)) <
curr.startTime +
config.tokensPunishmentFactor *
((curr.promptTokens ?? 0) + (curr.outputTokens ?? 0))
? prev
: curr
prev.startTime < curr.startTime ? prev : curr
);
queue.splice(queue.indexOf(req), 1);
@@ -247,6 +261,7 @@ let waitTimes: {
partition: ModelFamily;
start: number;
end: number;
isDeprioritized: boolean;
}[] = [];
/** Adds a successful request to the list of wait times. */
@@ -255,6 +270,7 @@ export function trackWaitTime(req: Request) {
partition: getModelFamilyForRequest(req),
start: req.startTime!,
end: req.queueOutTime ?? Date.now(),
isDeprioritized: isFromSharedIp(req),
});
}
@@ -280,7 +296,8 @@ function calculateWaitTime(partition: ModelFamily) {
.filter((wait) => {
const isSamePartition = wait.partition === partition;
const isRecent = now - wait.end < 300 * 1000;
return isSamePartition && isRecent;
const isNormalPriority = !wait.isDeprioritized;
return isSamePartition && isRecent && isNormalPriority;
})
.map((wait) => wait.end - wait.start);
const recentAverage = recentWaits.length
@@ -294,7 +311,11 @@ function calculateWaitTime(partition: ModelFamily) {
);
const currentWaits = queue
.filter((req) => getModelFamilyForRequest(req) === partition)
.filter((req) => {
const isSamePartition = getModelFamilyForRequest(req) === partition;
const isNormalPriority = !isFromSharedIp(req);
return isSamePartition && isNormalPriority;
})
.map((req) => now - req.startTime!);
const longestCurrentWait = Math.max(...currentWaits, 0);
@@ -322,35 +343,26 @@ export function getQueueLength(partition: ModelFamily | "all" = "all") {
}
export function createQueueMiddleware({
mutations = [],
beforeProxy,
proxyMiddleware,
}: {
mutations?: ProxyReqMutator[];
beforeProxy?: RequestPreprocessor;
proxyMiddleware: Handler;
}): Handler {
return async (req, res, next) => {
req.proceed = async () => {
// Canonicalize the stream flag, which is set inconsistently in a few places.
req.isStreaming = req.isStreaming || String(req.body.stream) === "true";
req.body.stream = req.isStreaming;
try {
// Just before executing the proxyMiddleware, we will create a
// ProxyReqManager to track modifications to the request. This allows
// us to revert those changes if the proxied request fails with a
// retryable error. That happens in proxyMiddleware's onProxyRes
// handler.
const changeManager = new ProxyReqManager(req);
req.changeManager = changeManager;
for (const mutator of mutations) {
await mutator(changeManager);
if (beforeProxy) {
try {
// Hack to let us run asynchronous middleware before the
// http-proxy-middleware handler. This is used to sign AWS requests
// before they are proxied, as the signing is asynchronous.
// Unlike RequestPreprocessors, this runs every time the request is
// dequeued, not just the first time.
await beforeProxy(req);
} catch (err) {
return handleProxyError(err, req, res);
}
} catch (err) {
// Failure during request preparation is a fatal error.
return classifyErrorAndSend(err, req, res);
}
proxyMiddleware(req, res, next);
};
-361
View File
@@ -1,361 +0,0 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { QwenKey, keyPool } from "../shared/key-management";
import {
isQwenModel,
isQwenThinkingModel,
normalizeMessages,
isQwen3Model,
isThinkingVariant,
isNonThinkingVariant,
getBaseModelName
} from "../shared/api-schemas/qwen";
import { logger } from "../logger";
const log = logger.child({ module: "proxy", service: "qwen" });
let modelsCache: any = null;
let modelsCacheTime = 0;
const qwenResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
res.status(200).json({ ...body, proxy: body.proxy });
};
const getModelsResponse = async () => {
// Return cache if less than 1 minute old
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
try {
// Get a Qwen key directly
const modelToUse = "qwen-plus"; // Use any Qwen model here - just for key selection
const qwenKey = keyPool.get(modelToUse, "qwen") as QwenKey;
if (!qwenKey || !qwenKey.key) {
log.warn("No valid Qwen key available for model listing");
throw new Error("No valid Qwen API key available");
}
// Fetch models directly from Qwen API
const response = await axios.get("https://dashscope-intl.aliyuncs.com/compatible-mode/v1/models", {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${qwenKey.key}`
},
});
if (!response.data || !response.data.data) {
throw new Error("Unexpected response format from Qwen API");
}
// Extract models
const models = response.data;
// Ensure we have all known Qwen models in the list
const knownQwenModels = [
"qwen-max",
"qwen-max-latest",
"qwen-max-2025-01-25",
"qwen-plus",
"qwen-plus-latest",
"qwen-plus-2025-01-25",
"qwen-turbo",
"qwen-turbo-latest",
"qwen-turbo-2024-11-01",
"qwen3-235b-a22b",
"qwen3-32b",
"qwen3-30b-a3b"
];
// Add thinking capability flag to models that support it
if (models.data && Array.isArray(models.data)) {
// Create a set of existing model IDs for quick lookup
const existingModelIds = new Set(models.data.map((model: any) => model.id));
// Filter out base Qwen3 models since we'll add variants instead
models.data = models.data.filter((model: any) => {
return !isQwen3Model(model.id) || isThinkingVariant(model.id) || isNonThinkingVariant(model.id);
});
// Add any missing models from our known list
knownQwenModels.forEach(modelId => {
if (!existingModelIds.has(modelId)) {
models.data.push({
id: modelId,
object: "model",
created: Date.now(),
owned_by: "qwen",
capabilities: isQwenThinkingModel(modelId) ? { thinking: true } : {}
});
}
});
// Add thinking capability flag to existing models
const processedModelIds = new Set();
const originalModelsData = [...models.data];
models.data = originalModelsData.flatMap((model: any) => {
const modelId = model.id;
processedModelIds.add(modelId);
// Apply capabilities to all models
if (isQwenThinkingModel(modelId)) {
model.capabilities = model.capabilities || {};
model.capabilities.thinking = true;
}
// For Qwen3 models, add thinking and non-thinking variants, but not the original
if (isQwen3Model(modelId) &&
!isThinkingVariant(modelId) &&
!isNonThinkingVariant(modelId)) {
// Create thinking variant
const thinkingModel = {
id: `${modelId}-thinking`,
object: "model",
created: model.created || Date.now(),
owned_by: model.owned_by || "qwen",
capabilities: { thinking: true },
proxy_managed: true,
display_name: `${model.display_name || modelId} (Thinking Mode)`
};
// Create non-thinking variant
const nonThinkingModel = {
id: `${modelId}-nonthinking`,
object: "model",
created: model.created || Date.now(),
owned_by: model.owned_by || "qwen",
capabilities: { thinking: true },
proxy_managed: true,
display_name: `${model.display_name || modelId} (Standard Mode)`
};
// Only add variants, not the original model
return [thinkingModel, nonThinkingModel];
}
return [model];
});
} else {
// If the API response didn't include models, create our own list
models.data = knownQwenModels.flatMap(modelId => {
// For Qwen3 models, add only thinking and non-thinking variants (not the base model)
if (isQwen3Model(modelId) &&
!isThinkingVariant(modelId) &&
!isNonThinkingVariant(modelId)) {
return [
{
id: `${modelId}-thinking`,
object: "model",
created: Date.now(),
owned_by: "qwen",
capabilities: { thinking: true },
proxy_managed: true,
display_name: `${modelId} (Thinking Mode)`
},
{
id: `${modelId}-nonthinking`,
object: "model",
created: Date.now(),
owned_by: "qwen",
capabilities: { thinking: true },
proxy_managed: true,
display_name: `${modelId} (Standard Mode)`
}
];
}
// For non-Qwen3 models, return the base model
const baseModel = {
id: modelId,
object: "model",
created: Date.now(),
owned_by: "qwen",
capabilities: isQwenThinkingModel(modelId) ? { thinking: true } : {}
};
return [baseModel];
});
}
log.debug({ modelCount: models.data?.length }, "Retrieved models from Qwen API");
// Cache the response
modelsCache = models;
modelsCacheTime = new Date().getTime();
return models;
} catch (error) {
// Provide detailed logging for better troubleshooting
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error fetching Qwen models"
);
} else {
log.error({ error }, "Unknown error fetching Qwen models");
}
// Return empty list as fallback
return {
object: "list",
data: [],
};
}
};
const handleModelRequest: RequestHandler = async (_req, res) => {
try {
const models = await getModelsResponse();
res.status(200).json(models);
} catch (error) {
if (error instanceof Error) {
log.error(
{ errorMessage: error.message, stack: error.stack },
"Error handling model request"
);
} else {
log.error({ error }, "Unknown error handling model request");
}
res.status(500).json({ error: "Failed to fetch models" });
}
};
// Function to prepare messages for Qwen API
function prepareMessages(req: Request) {
if (req.body.messages && Array.isArray(req.body.messages)) {
req.body.messages = normalizeMessages(req.body.messages);
}
}
// Function to handle thinking capability for Qwen models
function handleThinkingCapability(req: Request) {
const model = req.body.model;
// Special handling for our proxy-managed variants
if (isThinkingVariant(model)) {
// Set the base model name without the suffix
req.body.model = getBaseModelName(model);
// Force enable thinking for the -thinking variant
req.body.enable_thinking = true;
// Log the transformation
log.debug(
{ originalModel: model, transformedModel: req.body.model, enableThinking: true },
"Transformed request for thinking variant"
);
return;
}
if (isNonThinkingVariant(model)) {
// Set the base model name without the suffix
req.body.model = getBaseModelName(model);
// Force disable thinking for the -nonthinking variant
req.body.enable_thinking = false;
// Log the transformation
log.debug(
{ originalModel: model, transformedModel: req.body.model, enableThinking: false },
"Transformed request for non-thinking variant"
);
return;
}
// For standard models with thinking capability
if (isQwenThinkingModel(model) && req.body.stream === true) {
// Only add enable_thinking if it's not already set
if (req.body.enable_thinking === undefined) {
req.body.enable_thinking = false; // Default to false, let users explicitly enable it
}
// If thinking_budget is provided but enable_thinking is false, enable thinking
if (req.body.thinking_budget !== undefined && req.body.enable_thinking === false) {
req.body.enable_thinking = true;
}
} else if (isQwenThinkingModel(model) && req.body.stream !== true) {
// For non-streaming requests with thinking-capable models, always disable thinking
req.body.enable_thinking = false;
}
}
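// Example (assuming getBaseModelName strips the proxy-managed suffix): a request for
// "qwen3-32b-thinking" is sent upstream as model "qwen3-32b" with enable_thinking: true,
// while "qwen3-32b-nonthinking" becomes "qwen3-32b" with enable_thinking: false.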
// Function to remove parameters not supported by Qwen models
function removeUnsupportedParameters(req: Request) {
// Remove parameters that Qwen doesn't support
if (req.body.logit_bias !== undefined) {
delete req.body.logit_bias;
}
if (req.body.top_logprobs !== undefined) {
delete req.body.top_logprobs;
}
// Logging for debugging
if (process.env.NODE_ENV !== 'production') {
log.debug({ body: req.body }, "Request after parameter cleanup");
}
}
// Set up count token functionality for Qwen models
function countQwenTokens(req: Request) {
const model = req.body.model;
if (isQwenModel(model)) {
// Count tokens using prompt tokens (simplified)
if (req.promptTokens) {
req.log.debug(
{ tokens: req.promptTokens },
"Estimated token count for Qwen prompt"
);
}
}
}
const qwenProxy = createQueuedProxyMiddleware({
mutations: [
addKey,
finalizeBody
],
target: "https://dashscope-intl.aliyuncs.com/compatible-mode",
blockingResponseHandler: qwenResponseHandler,
});
const qwenRouter = Router();
qwenRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "qwen" },
{ afterTransform: [ prepareMessages, handleThinkingCapability, removeUnsupportedParameters, countQwenTokens ] }
),
qwenProxy
);
qwenRouter.post(
"/v1/embeddings",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "qwen" },
{ afterTransform: [] }
),
qwenProxy
);
qwenRouter.get("/v1/models", handleModelRequest);
export const qwen = qwenRouter;
+32 -15
View File
@@ -1,6 +1,14 @@
import { Request, Response, NextFunction } from "express";
import { config } from "../config";
export const SHARED_IP_ADDRESSES = new Set([
// Agnai.chat
"157.230.249.32", // old
"157.245.148.56",
"174.138.29.50",
"209.97.162.44",
]);
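// Consulted both by ipLimiter below (to exempt these IPs from per-IP rate limiting) and by
// the request queue's isFromSharedIp (to cap and deprioritize requests from these IPs).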
const ONE_MINUTE_MS = 60 * 1000;
type Timestamp = number;
@@ -12,10 +20,7 @@ const exemptedRequests: Timestamp[] = [];
const isRecentAttempt = (now: Timestamp) => (attempt: Timestamp) =>
attempt > now - ONE_MINUTE_MS;
/**
* Returns duration in seconds to wait before retrying for Retry-After header.
*/
const getRetryAfter = (ip: string, type: "text" | "image") => {
const getTryAgainInMs = (ip: string, type: "text" | "image") => {
const now = Date.now();
const attempts = lastAttempts.get(ip) || [];
const validAttempts = attempts.filter(isRecentAttempt(now));
@@ -24,7 +29,7 @@ const getRetryAfter = (ip: string, type: "text" | "image") => {
type === "text" ? config.textModelRateLimit : config.imageModelRateLimit;
if (validAttempts.length >= limit) {
return (validAttempts[0] - now + ONE_MINUTE_MS) / 1000;
return validAttempts[0] - now + ONE_MINUTE_MS;
} else {
lastAttempts.set(ip, [...validAttempts, now]);
return 0;
@@ -91,11 +96,22 @@ export const ipLimiter = async (
if (!textLimit && !imageLimit) return next();
if (req.user?.type === "special") return next();
const path = req.baseUrl + req.path;
const type =
path.includes("openai-image") || path.includes("images/generations")
? "image"
: "text";
// Exempts Agnai.chat from IP-based rate limiting because its IPs are shared
// by many users. Instead, the request queue will limit the number of such
// requests that may wait in the queue at a time, and sorts them to the end to
// let individual users go first.
if (SHARED_IP_ADDRESSES.has(req.ip)) {
exemptedRequests.push(Date.now());
req.log.info(
{ ip: req.ip, recentExemptions: exemptedRequests.length },
"Exempting Agnai request from rate limiting."
);
return next();
}
const type = (req.baseUrl + req.path).includes("openai-image")
? "image"
: "text";
const limit = type === "image" ? imageLimit : textLimit;
// If user is authenticated, key rate limiting by their token. Otherwise, key
@@ -107,14 +123,15 @@ export const ipLimiter = async (
res.set("X-RateLimit-Remaining", remaining.toString());
res.set("X-RateLimit-Reset", reset.toString());
const retryAfterTime = getRetryAfter(rateLimitKey, type);
if (retryAfterTime > 0) {
const waitSec = Math.ceil(retryAfterTime).toString();
res.set("Retry-After", waitSec);
const tryAgainInMs = getTryAgainInMs(rateLimitKey, type);
if (tryAgainInMs > 0) {
res.set("Retry-After", tryAgainInMs.toString());
res.status(429).json({
error: {
type: "proxy_rate_limited",
message: `This model type is rate limited to ${limit} prompts per minute. Please try again in ${waitSec} seconds.`,
message: `This model type is rate limited to ${limit} prompts per minute. Please try again in ${Math.ceil(
tryAgainInMs / 1000
)} seconds.`,
},
});
} else {
+19 -35
View File
@@ -1,65 +1,42 @@
import express from "express";
import { addV1 } from "./add-v1";
import { anthropic } from "./anthropic";
import { aws } from "./aws";
import { azure } from "./azure";
import { checkRisuToken } from "./check-risu-token";
import express, { Request, Response, NextFunction } from "express";
import { gatekeeper } from "./gatekeeper";
import { gcp } from "./gcp";
import { googleAI } from "./google-ai";
import { mistralAI } from "./mistral-ai";
import { checkRisuToken } from "./check-risu-token";
import { openai } from "./openai";
import { openaiImage } from "./openai-image";
import { deepseek } from "./deepseek";
import { xai } from "./xai";
import { cohere } from "./cohere";
import { qwen } from "./qwen";
import { moonshot } from "./moonshot";
import { anthropic } from "./anthropic";
import { googleAI } from "./google-ai";
import { mistralAI } from "./mistral-ai";
import { aws } from "./aws";
import { azure } from "./azure";
import { sendErrorToClient } from "./middleware/response/error-generator";
const proxyRouter = express.Router();
// Remove `expect: 100-continue` header from requests due to incompatibility
// with node-http-proxy.
proxyRouter.use((req, _res, next) => {
if (req.headers.expect) {
// node-http-proxy does not like it when clients send `expect: 100-continue`
// and will stall. none of the upstream APIs use this header anyway.
delete req.headers.expect;
}
next();
});
// Apply body parsers.
proxyRouter.use(
express.json({ limit: "100mb" }),
express.urlencoded({ extended: true, limit: "100mb" })
);
// Apply auth/rate limits.
proxyRouter.use(gatekeeper);
proxyRouter.use(checkRisuToken);
// Initialize request queue metadata.
proxyRouter.use((req, _res, next) => {
req.startTime = Date.now();
req.retryCount = 0;
next();
});
// Proxy endpoints.
proxyRouter.use("/openai", addV1, openai);
proxyRouter.use("/openai-image", addV1, openaiImage);
proxyRouter.use("/anthropic", addV1, anthropic);
proxyRouter.use("/google-ai", addV1, googleAI);
proxyRouter.use("/mistral-ai", addV1, mistralAI);
proxyRouter.use("/aws", aws);
proxyRouter.use("/gcp/claude", addV1, gcp);
proxyRouter.use("/aws/claude", addV1, aws);
proxyRouter.use("/azure/openai", addV1, azure);
proxyRouter.use("/deepseek", addV1, deepseek);
proxyRouter.use("/xai", addV1, xai);
proxyRouter.use("/cohere", addV1, cohere);
proxyRouter.use("/qwen", addV1, qwen);
proxyRouter.use("/moonshot", addV1, moonshot);
// Redirect browser requests to the homepage.
proxyRouter.get("*", (req, res, next) => {
const isBrowser = req.headers["user-agent"]?.includes("Mozilla");
@@ -69,8 +46,7 @@ proxyRouter.get("*", (req, res, next) => {
next();
}
});
// Send a fake client error if user specifies an invalid proxy endpoint.
// Handle 404s.
proxyRouter.use((req, res) => {
sendErrorToClient({
req,
@@ -91,3 +67,11 @@ proxyRouter.use((req, res) => {
});
export { proxyRouter as proxyRouter };
function addV1(req: Request, res: Response, next: NextFunction) {
// Clients don't consistently use the /v1 prefix so we'll add it for them.
if (!req.path.startsWith("/v1/")) {
req.url = `/v1${req.url}`;
}
next();
}
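// Example: a client POST to <PROXY_ENDPOINT_ROUTE>/openai/chat/completions reaches this
// router as /chat/completions and is rewritten to /v1/chat/completions before it hits the
// OpenAI router.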
-394
View File
@@ -1,394 +0,0 @@
import { Request, RequestHandler, Router } from "express";
import { createPreprocessorMiddleware } from "./middleware/request";
import { ipLimiter } from "./rate-limit";
import { createQueuedProxyMiddleware } from "./middleware/request/proxy-middleware-factory";
import { addKey, finalizeBody } from "./middleware/request";
import { ProxyResHandlerWithBody } from "./middleware/response";
import axios from "axios";
import { XaiKey, keyPool } from "../shared/key-management";
import { isGrokVisionModel, isGrokImageGenModel, isGrokReasoningModel, isGrokReasoningEffortModel, isGrokReasoningContentModel } from "../shared/api-schemas/xai";
let modelsCache: any = null;
let modelsCacheTime = 0;
const xaiResponseHandler: ProxyResHandlerWithBody = async (
_proxyRes,
req,
res,
body
) => {
if (typeof body !== "object") {
throw new Error("Expected body to be an object");
}
// Preserve the original body (including potential reasoning_content) for grok-3-mini models
// which support the reasoning feature
let newBody = body;
// Check if this is an image generation response (data array with url or b64_json)
if (body.data && Array.isArray(body.data)) {
req.log.debug(
{ imageCount: body.data.length },
"Grok image generation response detected"
);
// Transform the image generation response into a chat completion format
// that SillyTavern can display
const images = body.data;
// Create a chat completion style response
newBody = {
id: `grok-image-${Date.now()}`,
object: "chat.completion",
created: Math.floor(Date.now() / 1000),
model: req.body.model,
choices: images.map((image, index) => {
// Create markdown image content for each generated image
let content = '';
// Add the image using data URL for b64_json
if (image.b64_json) {
// If it doesn't start with data:image/, add the prefix
const imgData = image.b64_json.startsWith('data:image/')
? image.b64_json
: `data:image/jpeg;base64,${image.b64_json}`;
content = `![Generated Image](${imgData})`;
}
// Fall back to URL if b64_json isn't available
else if (image.url) {
content = `![Generated Image](${image.url})`;
}
return {
index,
message: {
role: "assistant",
content
},
finish_reason: "stop"
};
}),
usage: body.usage || { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 }
};
req.log.debug("Transformed image generation response to chat format");
}
// Check if this is a chat completion response with choices
else if (body.choices && Array.isArray(body.choices) && body.choices.length > 0) {
// Make sure each choice's message is preserved, especially reasoning_content
// Only grok-3-mini models return reasoning_content
const model = req.body.model;
if (isGrokReasoningContentModel(model)) {
body.choices.forEach(choice => {
if (choice.message && choice.message.reasoning_content) {
req.log.debug(
{ reasoning_length: choice.message.reasoning_content.length },
"Grok reasoning content detected"
);
}
});
}
}
res.status(200).json({ ...newBody, proxy: body.proxy });
};
const getModelsResponse = async () => {
// Return cache if less than 1 minute old
if (new Date().getTime() - modelsCacheTime < 1000 * 60) {
return modelsCache;
}
try {
// Get an XAI key directly using keyPool.get()
const modelToUse = "grok-3"; // Use any XAI model here - just for key selection
const xaiKey = keyPool.get(modelToUse, "xai") as XaiKey;
if (!xaiKey || !xaiKey.key) {
throw new Error("Failed to get valid XAI key");
}
// Fetch models from XAI API with authorization
const response = await axios.get("https://api.x.ai/v1/models", {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${xaiKey.key}`
},
});
// If successful, update the cache
if (response.data && response.data.data) {
modelsCache = {
object: "list",
data: response.data.data.map((model: any) => ({
id: model.id,
object: "model",
owned_by: "xai",
})),
};
} else {
throw new Error("Unexpected response format from XAI API");
}
} catch (error) {
console.error("Error fetching XAI models:", error);
throw error; // No fallback - error will be passed to caller
}
modelsCacheTime = new Date().getTime();
return modelsCache;
};
const handleModelRequest: RequestHandler = async (_req, res) => {
try {
const modelsResponse = await getModelsResponse();
res.status(200).json(modelsResponse);
} catch (error) {
console.error("Error in handleModelRequest:", error);
res.status(500).json({ error: "Failed to fetch models" });
}
};
const xaiProxy = createQueuedProxyMiddleware({
mutations: [addKey, finalizeBody],
target: "https://api.x.ai",
blockingResponseHandler: xaiResponseHandler,
});
const xaiRouter = Router();
// Combines all trailing assistant messages into a single message and adds the beta
// 'prefix' option so that prefills work the same way they do for Claude.
function enablePrefill(req: Request) {
// Set the NO_XAI_PREFILL environment variable to disable this behavior.
if (process.env.NO_XAI_PREFILL) return;
// Skip if no messages (e.g., for image generation requests)
if (!req.body.messages || !Array.isArray(req.body.messages)) return;
const msgs = req.body.messages;
if (msgs.length === 0 || msgs.at(-1)?.role !== 'assistant') return;
let i = msgs.length - 1;
let content = '';
while (i >= 0 && msgs[i].role === 'assistant') {
// maybe we should also add a newline between messages? no for now.
content = msgs[i--].content + content;
}
msgs.splice(i + 1, msgs.length, { role: 'assistant', content, prefix: true });
}
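// Example: [..., { role: "assistant", content: "Sure," }, { role: "assistant", content: " here" }]
// collapses into [..., { role: "assistant", content: "Sure, here", prefix: true }].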
// Function to redirect image model requests to the image generations endpoint
function redirectImageRequests(req: Request) {
const model = req.body.model;
// If this is an image generation model but the endpoint is chat/completions,
// we need to transform the request to match the image generations endpoint format
if (isGrokImageGenModel(model) && req.path === "/v1/chat/completions") {
req.log.info(`Redirecting ${model} request to /v1/images/generations endpoint`);
// Save original URL and path for later
const originalUrl = req.url;
const originalPath = req.path;
// Change the request URL and path to the images endpoint
req.url = req.url.replace("/v1/chat/completions", "/v1/images/generations");
Object.defineProperty(req, 'path', { value: "/v1/images/generations" });
// Extract the prompt from the messages if present
if (req.body.messages && Array.isArray(req.body.messages)) {
// Find the last user message and use its content as the prompt
for (let i = req.body.messages.length - 1; i >= 0; i--) {
const msg = req.body.messages[i];
if (msg.role === 'user') {
// Extract text content
let prompt = "";
if (typeof msg.content === 'string') {
prompt = msg.content;
} else if (Array.isArray(msg.content)) {
// Collect all text content items
prompt = msg.content
.filter((item: any) => item.type === 'text')
.map((item: any) => item.text)
.join(" ");
}
if (prompt) {
// Create a new request body for image generation
req.body = {
model: model,
prompt: prompt,
n: req.body.n || 1,
response_format: "b64_json", // Always use b64_json for better client compatibility
user: req.body.user
};
req.log.debug({ newBody: req.body }, "Transformed request for image generation");
break;
}
}
}
}
// Log transformation
req.log.info(`Request transformed from ${originalUrl} to ${req.url}`);
}
}
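// Example (assuming isGrokImageGenModel recognizes "grok-2-image"): a /v1/chat/completions
// request for that model whose last user message is "a watercolor fox" is rewritten to
// POST /v1/images/generations with
// { model: "grok-2-image", prompt: "a watercolor fox", n: 1, response_format: "b64_json" }.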
// Function to remove parameters not supported by X.AI/Grok models and handle special cases
function removeUnsupportedParameters(req: Request) {
const model = req.body.model;
// Check if this is a reasoning model (grok-3-mini or grok-4-0709)
const isReasoningModel = isGrokReasoningModel(model);
const isReasoningEffortModel = isGrokReasoningEffortModel(model);
if (isReasoningModel) {
// List of parameters not supported by reasoning models
const unsupportedParams = [
'presence_penalty',
'frequency_penalty',
'stop' // stop parameter is not supported by reasoning models
];
for (const param of unsupportedParams) {
if (req.body[param] !== undefined) {
req.log.info(`Removing unsupported parameter for reasoning model ${model}: ${param}`);
delete req.body[param];
}
}
// Handle reasoning_effort parameter - only supported by grok-3-mini
if (isReasoningEffortModel) {
// This is grok-3-mini, handle reasoning_effort
if (req.body.reasoning_effort) {
// If reasoning_effort is already present in the request, validate it
if (!['low', 'medium', 'high'].includes(req.body.reasoning_effort)) {
req.log.warn(`Invalid reasoning_effort value: ${req.body.reasoning_effort}, removing it`);
delete req.body.reasoning_effort;
}
} else {
// Default to low reasoning effort if not specified
req.body.reasoning_effort = 'low';
req.log.debug(`Setting default reasoning_effort=low for Grok-3-mini model`);
}
} else {
// This is grok-4-0709 or other reasoning model that doesn't support reasoning_effort
if (req.body.reasoning_effort !== undefined) {
req.log.info(`Removing unsupported reasoning_effort parameter for model ${model}`);
delete req.body.reasoning_effort;
}
}
}
// Special handling for vision models
if (isGrokVisionModel(model)) {
req.log.debug(`Detected Grok vision model: ${model}`);
// Check that messages have proper format for vision models
if (req.body.messages && Array.isArray(req.body.messages)) {
req.body.messages.forEach((msg: { content: string | any[] }) => {
// If content is a string but the model is vision-capable,
// convert it to an array with a single text item for consistency
if (typeof msg.content === 'string') {
req.log.debug('Converting string content to array format for vision model');
msg.content = [{ type: 'text', text: msg.content }];
}
});
}
}
// Special handling for image generation models is handled by separate endpoint
}
// Handler for image generation requests
const handleImageGenerationRequest: RequestHandler = async (req, res) => {
try {
// Get an XAI key directly for image generation
const modelToUse = req.body.model || "grok-2-image"; // Default model
const xaiKey = keyPool.get(modelToUse, "xai") as XaiKey;
if (!xaiKey || !xaiKey.key) {
throw new Error("Failed to get valid XAI key for image generation");
}
// Forward the request to XAI API
const response = await axios.post("https://api.x.ai/v1/images/generations", req.body, {
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${xaiKey.key}`
},
});
// Return the response directly
res.status(200).json(response.data);
} catch (error) {
req.log.error({ error }, "Error in image generation request");
// Pass through the error response if available
if (error.response && error.response.data) {
res.status(error.response.status || 500).json(error.response.data);
} else {
res.status(500).json({ error: "Failed to generate image", message: error.message });
}
}
};
// Set up count token functionality for XAI models
function countXaiTokens(req: Request) {
const model = req.body.model;
// For vision models, estimate image token usage
if (isGrokVisionModel(model) && req.body.messages && Array.isArray(req.body.messages)) {
// Initialize image count
let imageCount = 0;
// Count images in the request
for (const msg of req.body.messages) {
if (Array.isArray(msg.content)) {
const imagesInMessage = msg.content.filter(
(item: any) => item.type === "image_url"
).length;
imageCount += imagesInMessage;
}
}
// Apply token estimations for images
// Each image is approximately 1500 tokens based on documentation
const TOKENS_PER_IMAGE = 1500;
const imageTokens = imageCount * TOKENS_PER_IMAGE;
if (imageTokens > 0) {
req.log.debug(
{ imageCount, tokenEstimate: imageTokens },
"Estimated token count for Grok vision images"
);
// Add the image tokens to the existing token count if available
if (req.promptTokens) {
req.promptTokens += imageTokens;
}
}
}
}
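// Example: a vision request containing two image_url content items adds roughly
// 2 * 1500 = 3000 tokens to req.promptTokens.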
xaiRouter.post(
"/v1/chat/completions",
ipLimiter,
createPreprocessorMiddleware(
{ inApi: "openai", outApi: "openai", service: "xai" },
{ afterTransform: [ redirectImageRequests, enablePrefill, removeUnsupportedParameters, countXaiTokens ] }
),
xaiProxy
);
// Add endpoint for image generation
xaiRouter.post(
"/v1/images/generations",
ipLimiter,
handleImageGenerationRequest
);
xaiRouter.get("/v1/models", handleModelRequest);
export const xai = xaiRouter;
+1 -18
View File
@@ -23,7 +23,6 @@ import { init as initTokenizers } from "./shared/tokenization";
import { checkOrigin } from "./proxy/check-origin";
import { sendErrorToClient } from "./proxy/middleware/response/error-generator";
import { initializeDatabase, getDatabase } from "./shared/database";
import { initializeFirebase } from "./shared/firebase";
const PORT = config.port;
const BIND_ADDRESS = config.bindAddress;
@@ -50,7 +49,6 @@ app.use(
// Don't log the prompt text on transform errors
"body.messages",
"body.prompt",
"body.contents",
],
censor: "********",
},
@@ -89,15 +87,6 @@ app.use(blacklist);
app.use(checkOrigin);
app.use("/admin", adminRouter);
app.use((req, _, next) => {
// For whatever reason SillyTavern just ignores the path a user provides
// when using Google AI with reverse proxy. We'll fix it here.
if (req.path.match(/^\/v1(alpha|beta)\/models(\/|$)/)) {
req.url = `${config.proxyEndpointRoute}/google-ai${req.url}`;
return next();
}
next();
});
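// Example of the rewrite above: /v1beta/models/gemini-pro:generateContent becomes
// `${config.proxyEndpointRoute}/google-ai/v1beta/models/gemini-pro:generateContent`.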
app.use(config.proxyEndpointRoute, proxyRouter);
app.use("/user", userRouter);
if (config.staticServiceInfo) {
@@ -138,12 +127,6 @@ async function start() {
logger.info("Checking configs and external dependencies...");
await assertConfigIsValid();
if (config.gatekeeperStore.startsWith("firebase")) {
logger.info("Testing Firebase connection...");
await initializeFirebase();
logger.info("Firebase connection successful.");
}
keyPool.init();
await initTokenizers();
@@ -173,7 +156,7 @@ async function start() {
app.listen(PORT, BIND_ADDRESS, () => {
logger.info(
{ port: PORT, interface: BIND_ADDRESS },
"Server ready to accept connections."
"Now listening for connections."
);
registerUncaughtExceptionHandler();
});
+141 -428
View File
@@ -2,20 +2,15 @@ import { config, listConfig } from "./config";
import {
AnthropicKey,
AwsBedrockKey,
DeepseekKey,
GcpKey,
AzureOpenAIKey,
GoogleAIKey,
keyPool,
OpenAIKey,
XaiKey,
CohereKey,
QwenKey,
MoonshotKey,
} from "./shared/key-management";
import {
AnthropicModelFamily,
assertIsKnownModelFamily,
AwsBedrockModelFamily,
GcpModelFamily,
AzureOpenAIModelFamily,
GoogleAIModelFamily,
LLM_SERVICES,
@@ -24,117 +19,27 @@ import {
MODEL_FAMILY_SERVICE,
ModelFamily,
OpenAIModelFamily,
DeepseekModelFamily,
XaiModelFamily,
CohereModelFamily,
QwenModelFamily,
MoonshotModelFamily,
} from "./shared/models";
import { getCostSuffix, getTokenCostUsd, prettyTokens } from "./shared/stats";
import { getUniqueIps } from "./proxy/rate-limit";
import { assertNever } from "./shared/utils";
import { getEstimatedWaitTime, getQueueLength } from "./proxy/queue";
import { MistralAIKey } from "./shared/key-management/mistral-ai/provider";
const CACHE_TTL = 2000;
// Define the preferred order for model families in the service info display
// This ensures logical grouping (GPT-4 models together, then GPT-4.1, then GPT-5, etc.)
const MODEL_FAMILY_ORDER: ModelFamily[] = [
// OpenAI models in logical order
"turbo",
"gpt4",
"gpt4-32k",
"gpt4-turbo",
"gpt4o",
"gpt41",
"gpt41-mini",
"gpt41-nano",
"gpt45",
"gpt5",
"gpt5-mini",
"gpt5-nano",
"gpt5-chat-latest",
"o1",
"o1-mini",
"o1-pro",
"o3",
"o3-mini",
"o3-pro",
"o4-mini",
"codex-mini",
"dall-e",
"gpt-image",
// Azure OpenAI models (same order as OpenAI)
"azure-turbo",
"azure-gpt4",
"azure-gpt4-32k",
"azure-gpt4-turbo",
"azure-gpt4o",
"azure-gpt41",
"azure-gpt41-mini",
"azure-gpt41-nano",
"azure-gpt45",
"azure-gpt5",
"azure-gpt5-mini",
"azure-gpt5-nano",
"azure-gpt5-chat-latest",
"azure-o1",
"azure-o1-mini",
"azure-o1-pro",
"azure-o3",
"azure-o3-mini",
"azure-o3-pro",
"azure-o4-mini",
"azure-codex-mini",
"azure-dall-e",
"azure-gpt-image",
// Anthropic models
"claude",
"claude-opus",
// Google AI models
"gemini-flash",
"gemini-pro",
"gemini-ultra",
// Mistral AI models
"mistral-tiny",
"mistral-small",
"mistral-medium",
"mistral-large",
// AWS Bedrock models
"aws-claude",
"aws-claude-opus",
"aws-mistral-tiny",
"aws-mistral-small",
"aws-mistral-medium",
"aws-mistral-large",
// GCP models
"gcp-claude",
"gcp-claude-opus",
// Other services
"deepseek",
"xai",
"cohere",
"qwen",
"moonshot"
];
type KeyPoolKey = ReturnType<typeof keyPool.list>[0];
const keyIsOpenAIKey = (k: KeyPoolKey): k is OpenAIKey =>
k.service === "openai";
const keyIsAzureKey = (k: KeyPoolKey): k is AzureOpenAIKey =>
k.service === "azure";
const keyIsAnthropicKey = (k: KeyPoolKey): k is AnthropicKey =>
k.service === "anthropic";
const keyIsGoogleAIKey = (k: KeyPoolKey): k is GoogleAIKey =>
k.service === "google-ai";
const keyIsMistralAIKey = (k: KeyPoolKey): k is MistralAIKey =>
k.service === "mistral-ai";
const keyIsAwsKey = (k: KeyPoolKey): k is AwsBedrockKey => k.service === "aws";
const keyIsGcpKey = (k: KeyPoolKey): k is GcpKey => k.service === "gcp";
const keyIsDeepseekKey = (k: KeyPoolKey): k is DeepseekKey =>
k.service === "deepseek";
const keyIsXaiKey = (k: KeyPoolKey): k is XaiKey =>
k.service === "xai";
const keyIsCohereKey = (k: KeyPoolKey): k is CohereKey =>
k.service === "cohere";
const keyIsQwenKey = (k: KeyPoolKey): k is QwenKey =>
k.service === "qwen";
const keyIsMoonshotKey = (k: KeyPoolKey): k is MoonshotKey =>
k.service === "moonshot";
/** Stats aggregated across all keys for a given service. */
type ServiceAggregate = "keys" | "uncheckedKeys" | "orgs";
@@ -146,31 +51,18 @@ type ModelAggregates = {
overQuota?: number;
pozzed?: number;
awsLogged?: number;
// needed to disambiguate aws-claude family's variants
awsClaude2?: number;
awsSonnet3?: number;
awsSonnet3_5?: number;
awsSonnet3_7?: number;
awsSonnet4?: number;
awsOpus3?: number;
awsOpus4?: number;
awsHaiku: number;
gcpSonnet?: number;
gcpSonnet35?: number;
gcpHaiku?: number;
awsSonnet?: number;
awsHaiku?: number;
queued: number;
inputTokens: number; // Changed from tokens
outputTokens: number; // Added
legacyTokens?: number; // Added for migrated totals
queueTime: string;
tokens: number;
};
/** All possible combinations of model family and aggregate type. */
type ModelAggregateKey = `${ModelFamily}__${keyof ModelAggregates}`;
type AllStats = {
proompts: number;
inputTokens: number; // Changed from tokens
outputTokens: number; // Added
legacyTokens?: number; // Added
tokens: number;
tokenCost: number;
} & { [modelFamily in ModelFamily]?: ModelAggregates } & {
[service in LLMService as `${service}__${ServiceAggregate}`]?: number;
@@ -194,10 +86,8 @@ type AnthropicInfo = BaseFamilyInfo & {
};
type AwsInfo = BaseFamilyInfo & {
privacy?: string;
enabledVariants?: string;
};
type GcpInfo = BaseFamilyInfo & {
enabledVariants?: string;
sonnetKeys?: number;
haikuKeys?: number;
};
// prettier-ignore
@@ -205,13 +95,12 @@ export type ServiceInfo = {
uptime: number;
endpoints: {
openai?: string;
deepseek?: string;
xai?: string;
openai2?: string;
anthropic?: string;
"anthropic-claude-3"?: string;
"google-ai"?: string;
"mistral-ai"?: string;
"aws"?: string;
gcp?: string;
aws?: string;
azure?: string;
"openai-image"?: string;
"azure-image"?: string;
@@ -225,15 +114,9 @@ export type ServiceInfo = {
} & { [f in OpenAIModelFamily]?: OpenAIInfo }
& { [f in AnthropicModelFamily]?: AnthropicInfo; }
& { [f in AwsBedrockModelFamily]?: AwsInfo }
& { [f in GcpModelFamily]?: GcpInfo }
& { [f in AzureOpenAIModelFamily]?: BaseFamilyInfo; }
& { [f in GoogleAIModelFamily]?: BaseFamilyInfo & { overQuotaKeys?: number } }
& { [f in MistralAIModelFamily]?: BaseFamilyInfo }
& { [f in DeepseekModelFamily]?: BaseFamilyInfo }
& { [f in XaiModelFamily]?: BaseFamilyInfo }
& { [f in CohereModelFamily]?: BaseFamilyInfo }
& { [f in QwenModelFamily]?: BaseFamilyInfo }
& { [f in MoonshotModelFamily]?: BaseFamilyInfo };
& { [f in GoogleAIModelFamily]?: BaseFamilyInfo }
& { [f in MistralAIModelFamily]?: BaseFamilyInfo };
// https://stackoverflow.com/a/66661477
// type DeepKeyOf<T> = (
@@ -253,6 +136,7 @@ export type ServiceInfo = {
const SERVICE_ENDPOINTS: { [s in LLMService]: Record<string, string> } = {
openai: {
openai: `%BASE%/openai`,
openai2: `%BASE%/openai/turbo-instruct`,
"openai-image": `%BASE%/openai-image`,
},
anthropic: {
@@ -265,34 +149,15 @@ const SERVICE_ENDPOINTS: { [s in LLMService]: Record<string, string> } = {
"mistral-ai": `%BASE%/mistral-ai`,
},
aws: {
"aws-claude": `%BASE%/aws/claude`,
"aws-mistral": `%BASE%/aws/mistral`,
},
gcp: {
gcp: `%BASE%/gcp/claude`,
aws: `%BASE%/aws/claude`,
},
azure: {
azure: `%BASE%/azure/openai`,
"azure-image": `%BASE%/azure/openai`,
},
deepseek: {
deepseek: `%BASE%/deepseek`,
},
xai: {
xai: `%BASE%/xai`,
},
cohere: {
cohere: `%BASE%/cohere`,
},
qwen: {
qwen: `%BASE%/qwen`,
},
moonshot: {
moonshot: `%BASE%/moonshot`,
},
};
const familyStats = new Map<ModelAggregateKey, number>();
const modelStats = new Map<ModelAggregateKey, number>();
const serviceStats = new Map<keyof AllStats, number>();
let cachedInfo: ServiceInfo | undefined;
@@ -309,7 +174,7 @@ export function buildInfo(baseUrl: string, forAdmin = false): ServiceInfo {
.concat("turbo")
);
familyStats.clear();
modelStats.clear();
serviceStats.clear();
keys.forEach(addKeyToAggregates);
@@ -381,14 +246,11 @@ function getEndpoints(baseUrl: string, accessibleFamilies: Set<ModelFamily>) {
type TrafficStats = Pick<ServiceInfo, "proompts" | "tookens" | "proomptersNow">;
function getTrafficStats(): TrafficStats {
const inputTokens = serviceStats.get("inputTokens") || 0;
const outputTokens = serviceStats.get("outputTokens") || 0;
// const legacyTokens = serviceStats.get("legacyTokens") || 0; // Optional: include in total if desired
const totalTokens = inputTokens + outputTokens; // + legacyTokens;
const tokens = serviceStats.get("tokens") || 0;
const tokenCost = serviceStats.get("tokenCost") || 0;
return {
proompts: serviceStats.get("proompts") || 0,
tookens: `${prettyTokens(totalTokens)}${getCostSuffix(tokenCost)}`, // Simplified to show aggregate and cost
tookens: `${prettyTokens(tokens)}${getCostSuffix(tokenCost)}`,
...(config.textModelRateLimit ? { proomptersNow: getUniqueIps() } : {}),
};
}
@@ -404,18 +266,16 @@ function getServiceModelStats(accessibleFamilies: Set<ModelFamily>) {
if (!hasKeys) continue;
serviceInfo[`${service}Keys`] = hasKeys;
accessibleFamilies.forEach((f) => {
if (MODEL_FAMILY_SERVICE[f] === service) {
modelFamilyInfo[f] = getInfoForFamily(f);
}
});
if (service === "openai" && config.checkKeys) {
serviceInfo.openaiOrgs = getUniqueOpenAIOrgs(keyPool.list());
}
}
// Build model family info in the defined order for logical grouping
for (const family of MODEL_FAMILY_ORDER) {
if (accessibleFamilies.has(family)) {
modelFamilyInfo[family] = getInfoForFamily(family);
}
}
return { serviceInfo, modelFamilyInfo };
}
@@ -433,229 +293,131 @@ function increment<T extends keyof AllStats | ModelAggregateKey>(
) {
map.set(key, (map.get(key) || 0) + delta);
}
const addToService = increment.bind(null, serviceStats);
const addToFamily = increment.bind(null, familyStats);
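// e.g. addToFamily("gpt4o__inputTokens", 1200) is shorthand for
// increment(familyStats, "gpt4o__inputTokens", 1200).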
function addKeyToAggregates(k: KeyPoolKey) {
addToService("proompts", k.promptCount);
addToService("openai__keys", k.service === "openai" ? 1 : 0);
addToService("anthropic__keys", k.service === "anthropic" ? 1 : 0);
addToService("google-ai__keys", k.service === "google-ai" ? 1 : 0);
addToService("mistral-ai__keys", k.service === "mistral-ai" ? 1 : 0);
addToService("aws__keys", k.service === "aws" ? 1 : 0);
addToService("gcp__keys", k.service === "gcp" ? 1 : 0);
addToService("azure__keys", k.service === "azure" ? 1 : 0);
addToService("deepseek__keys", k.service === "deepseek" ? 1 : 0);
addToService("xai__keys", k.service === "xai" ? 1 : 0);
addToService("cohere__keys", k.service === "cohere" ? 1 : 0);
addToService("qwen__keys", k.service === "qwen" ? 1 : 0);
addToService("moonshot__keys", k.service === "moonshot" ? 1 : 0);
increment(serviceStats, "proompts", k.promptCount);
increment(serviceStats, "openai__keys", k.service === "openai" ? 1 : 0);
increment(serviceStats, "anthropic__keys", k.service === "anthropic" ? 1 : 0);
increment(serviceStats, "google-ai__keys", k.service === "google-ai" ? 1 : 0);
increment(
serviceStats,
"mistral-ai__keys",
k.service === "mistral-ai" ? 1 : 0
);
increment(serviceStats, "aws__keys", k.service === "aws" ? 1 : 0);
increment(serviceStats, "azure__keys", k.service === "azure" ? 1 : 0);
let sumInputTokens = 0;
let sumOutputTokens = 0;
let sumLegacyTokens = 0; // Optional
let sumTokens = 0;
let sumCost = 0;
const incrementGenericFamilyStats = (f: ModelFamily) => {
const usage = k.tokenUsage?.[f];
let familyInputTokens = 0;
let familyOutputTokens = 0;
let familyLegacyTokens = 0;
if (usage) {
familyInputTokens = usage.input || 0;
familyOutputTokens = usage.output || 0;
if (usage.legacy_total && familyInputTokens === 0 && familyOutputTokens === 0) {
// This is a migrated key with no new usage, use legacy_total as input for cost
familyLegacyTokens = usage.legacy_total;
sumCost += getTokenCostUsd(f, usage.legacy_total, 0);
} else {
sumCost += getTokenCostUsd(f, familyInputTokens, familyOutputTokens);
}
}
// If no k.tokenUsage[f], tokens are 0, cost is 0.
sumInputTokens += familyInputTokens;
sumOutputTokens += familyOutputTokens;
sumLegacyTokens += familyLegacyTokens; // Optional
addToFamily(`${f}__inputTokens`, familyInputTokens);
addToFamily(`${f}__outputTokens`, familyOutputTokens);
if (familyLegacyTokens > 0) {
addToFamily(`${f}__legacyTokens`, familyLegacyTokens); // Optional
}
addToFamily(`${f}__revoked`, k.isRevoked ? 1 : 0);
addToFamily(`${f}__active`, k.isDisabled ? 0 : 1);
};
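// Example: a key whose tokenUsage.gpt4o is { input: 1200, output: 300 } adds 1200 input and
// 300 output tokens to the gpt4o family totals and getTokenCostUsd("gpt4o", 1200, 300) to sumCost.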
switch (k.service) {
case "openai":
if (!keyIsOpenAIKey(k)) throw new Error("Invalid key type");
addToService("openai__uncheckedKeys", Boolean(k.lastChecked) ? 0 : 1);
k.modelFamilies.forEach((f) => {
incrementGenericFamilyStats(f);
addToFamily(`${f}__trial`, k.isTrial ? 1 : 0);
addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
});
break;
case "anthropic":
if (!keyIsAnthropicKey(k)) throw new Error("Invalid key type");
addToService("anthropic__uncheckedKeys", Boolean(k.lastChecked) ? 0 : 1);
k.modelFamilies.forEach((f) => {
incrementGenericFamilyStats(f);
addToFamily(`${f}__trial`, k.tier === "free" ? 1 : 0);
addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
addToFamily(`${f}__pozzed`, k.isPozzed ? 1 : 0);
});
break;
increment(
serviceStats,
"openai__uncheckedKeys",
Boolean(k.lastChecked) ? 0 : 1
);
k.modelFamilies.forEach((f) => {
const tokens = k[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
increment(modelStats, `${f}__tokens`, tokens);
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${f}__trial`, k.isTrial ? 1 : 0);
increment(modelStats, `${f}__overQuota`, k.isOverQuota ? 1 : 0);
});
break;
case "azure":
if (!keyIsAzureKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
const tokens = k[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
increment(modelStats, `${f}__tokens`, tokens);
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
});
break;
case "anthropic": {
if (!keyIsAnthropicKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
const tokens = k[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
increment(modelStats, `${f}__tokens`, tokens);
increment(modelStats, `${f}__trial`, k.tier === "free" ? 1 : 0);
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${f}__overQuota`, k.isOverQuota ? 1 : 0);
increment(modelStats, `${f}__pozzed`, k.isPozzed ? 1 : 0);
});
increment(
serviceStats,
"anthropic__uncheckedKeys",
Boolean(k.lastChecked) ? 0 : 1
);
break;
}
case "google-ai": {
if (!keyIsGoogleAIKey(k)) throw new Error("Invalid key type");
const family = "gemini-pro";
sumTokens += k["gemini-proTokens"];
sumCost += getTokenCostUsd(family, k["gemini-proTokens"]);
increment(modelStats, `${family}__active`, k.isDisabled ? 0 : 1);
increment(modelStats, `${family}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${family}__tokens`, k["gemini-proTokens"]);
break;
}
case "mistral-ai": {
if (!keyIsMistralAIKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
const tokens = k[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
increment(modelStats, `${f}__tokens`, tokens);
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
});
break;
}
case "aws": {
if (!keyIsAwsKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach(incrementGenericFamilyStats);
if (!k.isDisabled) {
// Don't add revoked keys to available AWS variants
k.modelIds.forEach((id) => {
if (id.includes("claude-3-sonnet")) {
addToFamily(`aws-claude__awsSonnet3`, 1);
// not ideal but whatever
} else if (id.includes("claude-3-5-sonnet")) {
addToFamily(`aws-claude__awsSonnet3_5`, 1);
} else if (id.includes("claude-3-7-sonnet")) {
addToFamily(`aws-claude__awsSonnet3_7`, 1);
} else if (id.includes("claude-3-haiku")) {
addToFamily(`aws-claude__awsHaiku`, 1);
} else if (id.includes("sonnet-4")) {
addToFamily(`aws-claude__awsSonnet4`, 1);
} else if (id.includes("claude-3-opus")) {
addToFamily(`aws-claude__awsOpus3`, 1);
addToFamily(`aws-claude-opus__awsOpus3`, 1);
} else if (id.includes("opus-4")) {
addToFamily(`aws-claude__awsOpus4`, 1);
addToFamily(`aws-claude-opus__awsOpus4`, 1);
} else if (id.includes("claude-v2")) {
addToFamily(`aws-claude__awsClaude2`, 1);
}
});
}
k.modelFamilies.forEach((f) => {
const tokens = k[`${f}Tokens`];
sumTokens += tokens;
sumCost += getTokenCostUsd(f, tokens);
increment(modelStats, `${f}__tokens`, tokens);
increment(modelStats, `${f}__revoked`, k.isRevoked ? 1 : 0);
increment(modelStats, `${f}__active`, k.isDisabled ? 0 : 1);
});
increment(modelStats, `aws-claude__awsSonnet`, k.sonnetEnabled ? 1 : 0);
increment(modelStats, `aws-claude__awsHaiku`, k.haikuEnabled ? 1 : 0);
// Ignore revoked keys for aws logging stats, but include keys where the
// logging status is unknown.
const countAsLogged =
k.lastChecked && !k.isDisabled && k.awsLoggingStatus === "enabled";
addToFamily(`aws-claude__awsLogged`, countAsLogged ? 1 : 0);
increment(modelStats, `aws-claude__awsLogged`, countAsLogged ? 1 : 0);
break;
}
case "gcp":
if (!keyIsGcpKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach(incrementGenericFamilyStats);
// TODO: add modelIds to GcpKey
break;
case "deepseek":
if (!keyIsDeepseekKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
incrementGenericFamilyStats(f);
addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
});
break;
case "xai":
if (!keyIsXaiKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
incrementGenericFamilyStats(f);
if ('isOverQuota' in k) {
addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
}
});
break;
case "cohere":
if (!keyIsCohereKey(k)) throw new Error("Invalid key type");
k.modelFamilies.forEach((f) => {
incrementGenericFamilyStats(f);
if ('isOverQuota' in k) {
addToFamily(`${f}__overQuota`, k.isOverQuota ? 1 : 0);
}
});
break;
// These services don't have any additional stats to track.
case "azure":
case "mistral-ai":
k.modelFamilies.forEach(incrementGenericFamilyStats);
break;
case "google-ai":
// Cast to access the Google AI-specific overQuotaFamilies property
const googleKey = k as unknown as { overQuotaFamilies?: string[] };
// First handle general stats for all model families
k.modelFamilies.forEach((f) => {
incrementGenericFamilyStats(f);
});
// Create a set of model families that are over quota for this key
let overQuotaModelFamilies = new Set<string>();
// Add any model family that's listed in overQuotaFamilies
if (googleKey.overQuotaFamilies && Array.isArray(googleKey.overQuotaFamilies)) {
googleKey.overQuotaFamilies.forEach(family => {
overQuotaModelFamilies.add(family);
});
}
// If key is generally over quota and we don't have specific families, add all families
else if ('isOverQuota' in k && k.isOverQuota) {
k.modelFamilies.forEach(family => {
overQuotaModelFamilies.add(family);
});
}
// Now increment the over-quota counter for each affected family
// These model families are valid and already defined in the enum
overQuotaModelFamilies.forEach(family => {
if (family === 'gemini-pro' || family === 'gemini-flash' || family === 'gemini-ultra') {
addToFamily(`${family}__overQuota` as any, 1);
}
});
break;
case "qwen":
k.modelFamilies.forEach(incrementGenericFamilyStats);
break;
case "moonshot":
k.modelFamilies.forEach(incrementGenericFamilyStats);
break;
default:
assertNever(k.service);
}
addToService("inputTokens", sumInputTokens);
addToService("outputTokens", sumOutputTokens);
if (sumLegacyTokens > 0) { // Optional
addToService("legacyTokens", sumLegacyTokens);
}
addToService("tokenCost", sumCost);
increment(serviceStats, "tokens", sumTokens);
increment(serviceStats, "tokenCost", sumCost);
}
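// Illustration (not part of the diff): the legacy-token fallback in
// incrementGenericFamilyStats above costs a migrated family's legacy_total as
// if it were all input tokens, but only when no new input/output usage has
// been recorded for that family. A minimal standalone sketch with a stubbed
// cost function; the names below are illustrative, not the proxy's exports.
type SketchFamilyUsage = { input?: number; output?: number; legacy_total?: number };
function sketchFamilyCostUsd(
  usage: SketchFamilyUsage,
  costUsd: (input: number, output: number) => number
): number {
  const input = usage.input || 0;
  const output = usage.output || 0;
  if (usage.legacy_total && input === 0 && output === 0) {
    // Migrated key with no new usage: bill the legacy total as input tokens.
    return costUsd(usage.legacy_total, 0);
  }
  return costUsd(input, output);
}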
function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
const inputTokens = familyStats.get(`${family}__inputTokens`) || 0;
const outputTokens = familyStats.get(`${family}__outputTokens`) || 0;
const legacyTokens = familyStats.get(`${family}__legacyTokens`) || 0; // Optional
let cost = 0;
let displayTokens = 0;
let usageString = "";
if (inputTokens > 0 || outputTokens > 0) {
cost = getTokenCostUsd(family, inputTokens, outputTokens);
displayTokens = inputTokens + outputTokens;
usageString = `${prettyTokens(displayTokens)} (In: ${prettyTokens(inputTokens)}, Out: ${prettyTokens(outputTokens)})${getCostSuffix(cost)}`;
} else if (legacyTokens > 0) {
// Only show legacy if no new input/output has been recorded for this family aggregate
cost = getTokenCostUsd(family, legacyTokens, 0); // Cost legacy as all input
displayTokens = legacyTokens;
usageString = `${prettyTokens(displayTokens)} tokens (legacy total)${getCostSuffix(cost)}`;
} else {
usageString = `${prettyTokens(0)} tokens${getCostSuffix(0)}`;
}
let info: BaseFamilyInfo & OpenAIInfo & AnthropicInfo & AwsInfo & GcpInfo = {
usage: usageString,
activeKeys: familyStats.get(`${family}__active`) || 0,
revokedKeys: familyStats.get(`${family}__revoked`) || 0,
const tokens = modelStats.get(`${family}__tokens`) || 0;
const cost = getTokenCostUsd(family, tokens);
let info: BaseFamilyInfo & OpenAIInfo & AnthropicInfo & AwsInfo = {
usage: `${prettyTokens(tokens)} tokens${getCostSuffix(cost)}`,
activeKeys: modelStats.get(`${family}__active`) || 0,
revokedKeys: modelStats.get(`${family}__revoked`) || 0,
};
// Add service-specific stats to the info object.
@@ -663,8 +425,8 @@ function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
const service = MODEL_FAMILY_SERVICE[family];
switch (service) {
case "openai":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
info.trialKeys = familyStats.get(`${family}__trial`) || 0;
info.overQuotaKeys = modelStats.get(`${family}__overQuota`) || 0;
info.trialKeys = modelStats.get(`${family}__trial`) || 0;
// Delete trial/revoked keys for non-turbo families.
// Trials are turbo 99% of the time, and if a key is invalid we don't
@@ -675,70 +437,21 @@ function getInfoForFamily(family: ModelFamily): BaseFamilyInfo {
}
break;
case "anthropic":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
info.trialKeys = familyStats.get(`${family}__trial`) || 0;
info.prefilledKeys = familyStats.get(`${family}__pozzed`) || 0;
info.overQuotaKeys = modelStats.get(`${family}__overQuota`) || 0;
info.trialKeys = modelStats.get(`${family}__trial`) || 0;
info.prefilledKeys = modelStats.get(`${family}__pozzed`) || 0;
break;
case "aws":
if (family === "aws-claude") {
// Original behavior: get logged count from the same family
const logged = familyStats.get(`${family}__awsLogged`) || 0;
const variants = new Set<string>();
if (familyStats.get(`${family}__awsClaude2`) || 0) variants.add("claude2");
if (familyStats.get(`${family}__awsSonnet3`) || 0) variants.add("sonnet3");
if (familyStats.get(`${family}__awsSonnet3_5`) || 0) variants.add("sonnet3.5");
if (familyStats.get(`${family}__awsSonnet3_7`) || 0) variants.add("sonnet3.7");
if (familyStats.get(`${family}__awsHaiku`) || 0) variants.add("haiku");
if (familyStats.get(`${family}__awsSonnet4`) || 0) variants.add("sonnet4");
info.enabledVariants = variants.size ? Array.from(variants).join(",") : undefined;
info.sonnetKeys = modelStats.get(`${family}__awsSonnet`) || 0;
info.haikuKeys = modelStats.get(`${family}__awsHaiku`) || 0;
const logged = modelStats.get(`${family}__awsLogged`) || 0;
if (logged > 0) {
info.privacy = config.allowAwsLogging
? `AWS logging verification inactive. Prompts could be logged.`
: `${logged} active keys are potentially logged and can't be used. Set ALLOW_AWS_LOGGING=true to override.`;
}
} else if (family === "aws-claude-opus") {
// Get logging info from aws-claude family since that's where it's collected
const awsLogged = familyStats.get(`aws-claude__awsLogged`) || 0;
const variants = new Set<string>();
if (familyStats.get(`${family}__awsOpus3`) || 0) variants.add("opus3");
if (familyStats.get(`${family}__awsOpus4`) || 0) variants.add("opus4");
info.enabledVariants = variants.size ? Array.from(variants).join(",") : undefined;
// Show privacy warning for Opus if there are active Opus keys AND some AWS keys are logged
if (awsLogged > 0 && info.activeKeys > 0) {
info.privacy = config.allowAwsLogging
? `AWS logging verification inactive. Prompts could be logged.`
: `Some AWS keys are potentially logged. Set ALLOW_AWS_LOGGING=true to override.`;
}
}
// TODO: Consider if aws-mistral-* families need similar enabledVariant listings
break;
case "gcp":
if (family === "gcp-claude") {
// TODO: implement
info.enabledVariants = "not implemented";
}
break;
case "deepseek":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
break;
case "xai":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
break;
case "cohere":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
break;
case "google-ai":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
break;
case "qwen":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
break;
case "moonshot":
info.overQuotaKeys = familyStats.get(`${family}__overQuota`) || 0;
break;
}
}
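The aggregation code in this file leans on one small pattern: a generic increment over a Map, partially applied with bind to produce addToService and addToFamily. A minimal self-contained sketch of that pattern follows; the key names and the flat cost rate are illustrative stand-ins, not the proxy's real definitions.

// Map-backed counters with bound helpers, mirroring addKeyToAggregates.
type Counter = Map<string, number>;

function increment(map: Counter, key: string, delta: number): void {
  map.set(key, (map.get(key) || 0) + delta);
}

const serviceStats: Counter = new Map();
const familyStats: Counter = new Map();

// Partial application via bind, as in the diff above.
const addToService = increment.bind(null, serviceStats);
const addToFamily = increment.bind(null, familyStats);

// Hypothetical cost function; the placeholder rate is not real pricing.
const costUsd = (input: number, output: number) => (input * 3 + output * 15) / 1_000_000;

function recordUsage(family: string, input: number, output: number): void {
  addToFamily(`${family}__inputTokens`, input);
  addToFamily(`${family}__outputTokens`, output);
  addToService("inputTokens", input);
  addToService("outputTokens", output);
  addToService("tokenCost", costUsd(input, output));
}

recordUsage("claude", 1200, 300);
recordUsage("gpt4o", 800, 150);
console.log(serviceStats.get("tokenCost")); // aggregated cost across both calls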
+23 -72
@@ -19,13 +19,6 @@ const AnthropicV1BaseSchema = z
top_k: z.coerce.number().optional(),
top_p: z.coerce.number().optional(),
metadata: z.object({ user_id: z.string().optional() }).optional(),
tools: z.array(z.any()).optional(),
tool_choice: z.any().optional(),
service_tier: z.enum(["auto", "standard_only"]).optional(),
cache_control: z.object({
type: z.literal("ephemeral"),
ttl: z.enum(["5m", "1h"]).optional()
}).optional(),
})
.strip();
@@ -40,35 +33,16 @@ export const AnthropicV1TextSchema = AnthropicV1BaseSchema.merge(
})
);
const AnthropicV1BaseContentSchema = z.union([
z.object({ type: z.literal("text"), text: z.string() }),
z.object({
type: z.literal("image"),
source: z.object({
type: z.literal("base64"),
media_type: z.string().max(100),
data: z.string(),
}),
})
]);
const AnthropicV1MessageMultimodalContentSchema = z.array(
z.union([
AnthropicV1BaseContentSchema,
z.object({ type: z.literal("text"), text: z.string() }),
z.object({
type: z.literal("tool_use"),
id: z.string(),
name: z.string(),
input: z.object({}).passthrough(),
}),
z.object({
type: z.literal("tool_result"),
tool_use_id: z.string(),
is_error: z.boolean().optional(),
content: z.union([
z.string(),
z.array(AnthropicV1BaseContentSchema)
]).optional(),
type: z.literal("image"),
source: z.object({
type: z.literal("base64"),
media_type: z.string().max(100),
data: z.string(),
}),
}),
])
);
@@ -89,16 +63,7 @@ export const AnthropicV1MessagesSchema = AnthropicV1BaseSchema.merge(
.number()
.int()
.transform((v) => Math.min(v, CLAUDE_OUTPUT_MAX)),
system: z
.union([
z.string(),
z.array(z.object({ type: z.literal("text"), text: z.string() })),
])
.optional(),
thinking: z.object({
type: z.literal("enabled"),
budget_tokens: z.number().min(1024),
}).optional(),
system: z.string().optional(),
})
);
export type AnthropicChatMessage = z.infer<
@@ -112,7 +77,7 @@ function openAIMessagesToClaudeTextPrompt(messages: OpenAIChatMessage[]) {
let role: string = m.role;
if (role === "assistant") {
role = "Assistant";
} else if (role === "system" || role === "developer") {
} else if (role === "system") {
role = "System";
} else if (role === "user") {
role = "Human";
@@ -139,10 +104,8 @@ export const transformOpenAIToAnthropicChat: APIFormatTransformer<
);
throw result.error;
}
if (result.data.max_tokens > 8192) {
result.data.max_tokens = 4096;
}
req.headers["anthropic-version"] = "2023-06-01";
const { messages, ...rest } = result.data;
const { messages: newMessages, system } =
@@ -178,6 +141,8 @@ export const transformOpenAIToAnthropicText: APIFormatTransformer<
throw result.error;
}
req.headers["anthropic-version"] = "2023-06-01";
const { messages, ...rest } = result.data;
const prompt = openAIMessagesToClaudeTextPrompt(messages);
@@ -222,6 +187,8 @@ export const transformAnthropicTextToAnthropicChat: APIFormatTransformer<
throw result.error;
}
req.headers["anthropic-version"] = "2023-06-01";
const { model, max_tokens_to_sample, prompt, ...rest } = result.data;
validateAnthropicTextPrompt(prompt);
@@ -399,7 +366,7 @@ function openAIMessagesToClaudeChatPrompt(messages: OpenAIChatMessage[]): {
// Here we will lose the original name if it was a system message, but that
// is generally okay because the system message is usually a prompt and not
// a character in the chat.
const name = (msg.role === "system" || msg.role === "developer") ? "System" : msg.name?.trim();
const name = msg.role === "system" ? "System" : msg.name?.trim();
const content = convertOpenAIContent(msg.content);
// Prepend the display name to the first text content in the current message
@@ -429,8 +396,8 @@ function openAIMessagesToClaudeChatPrompt(messages: OpenAIChatMessage[]): {
function isSystemOpenAIRole(
role: OpenAIChatMessage["role"]
): role is "developer" | "system" | "function" | "tool" {
return ["developer", "system", "function", "tool"].includes(role);
): role is "system" | "function" | "tool" {
return ["system", "function", "tool"].includes(role);
}
function getFirstTextContent(content: OpenAIChatMessage["content"]) {
@@ -473,25 +440,9 @@ function convertOpenAIContent(
});
}
export function containsImageContent(messages: AnthropicChatMessage[]): boolean {
const isImage = (item: any) => item?.type === 'image';
return messages.some(msg => {
if (typeof msg.content === 'string') return false;
return msg.content.some(item => {
if (isImage(item)) return true;
if (item.type === 'tool_result') {
const content = item.content;
if (!content) return false;
if (typeof content === 'string') return false;
if (Array.isArray(content)) return content.some(isImage);
return isImage(content);
}
return false;
});
});
export function containsImageContent(messages: AnthropicChatMessage[]) {
return messages.some(
({ content }) =>
typeof content !== "string" && content.some((c) => c.type === "image")
);
}
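For context on the image-detection change above, here is a standalone sketch of the same traversal with simplified message shapes; it is an illustration, not the module's exported code. It returns true both for a direct image block and for an image nested inside a tool_result's content array.

// Simplified content/message types for illustration only.
type ContentBlock =
  | { type: "text"; text: string }
  | { type: "image"; source: { type: "base64"; media_type: string; data: string } }
  | { type: "tool_result"; tool_use_id: string; content?: string | ContentBlock[] };

type ChatMessage = { role: "user" | "assistant"; content: string | ContentBlock[] };

function sketchContainsImageContent(messages: ChatMessage[]): boolean {
  const isImage = (item: ContentBlock) => item.type === "image";
  return messages.some((msg) => {
    if (typeof msg.content === "string") return false;
    return msg.content.some((item) => {
      if (isImage(item)) return true;
      if (item.type === "tool_result" && Array.isArray(item.content)) {
        return item.content.some(isImage);
      }
      return false;
    });
  });
}

// An image returned by a tool is detected just like a user-attached image.
const example: ChatMessage[] = [
  { role: "user", content: "describe the screenshot" },
  {
    role: "user",
    content: [
      {
        type: "tool_result",
        tool_use_id: "toolu_123", // hypothetical id
        content: [
          { type: "image", source: { type: "base64", media_type: "image/png", data: "..." } },
        ],
      },
    ],
  },
];
console.log(sketchContainsImageContent(example)); // true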
+176 -64
@@ -1,69 +1,181 @@
import { z } from "zod";
import { OPENAI_OUTPUT_MAX } from "./openai";
import {
OPENAI_OUTPUT_MAX,
OpenAIV1ChatCompletionSchema,
flattenOpenAIMessageContent,
} from "./openai";
import { APIFormatTransformer } from ".";
/**
* Helper function to check if a model is from Cohere
*/
export function isCohereModel(model: string): boolean {
// Cohere's command model family
return model.includes("command") || model.includes("cohere");
// https://docs.cohere.com/reference/chat
export const CohereV1ChatSchema = z
.object({
message: z.string(),
model: z.string().default("command-r-plus"),
stream: z.boolean().default(false).optional(),
preamble: z.string().optional(),
chat_history: z
.array(
// Either a message from a chat participant, or a past tool call
z.union([
z.object({
role: z.enum(["CHATBOT", "SYSTEM", "USER"]),
message: z.string(),
tool_calls: z
.array(z.object({ name: z.string(), parameters: z.any() }))
.optional(),
}),
z.object({
role: z.enum(["TOOL"]),
tool_results: z.array(
z.object({
call: z.object({ name: z.string(), parameters: z.any() }),
outputs: z.array(z.any()),
})
),
}),
])
)
.optional(),
// Don't allow conversation_id as it causes calls to be stateful and we don't
// offer guarantees about which key a user's request will be routed to.
conversation_id: z.literal(undefined).optional(),
prompt_truncation: z
.enum(["AUTO", "AUTO_PRESERVE_ORDER", "OFF"])
.optional(),
/*
Supporting RAG is complex because documents can be of arbitrary size and
must have embeddings generated, which incurs a cost that is not trivial to
estimate. We don't support it for now.
Supporting RAG is complex because documents can be of arbitrary size and
must have embeddings generated, which incurs a cost that is not trivial to
connectors: z
.array(
z.object({
id: z.string(),
user_access_token: z.string().optional(),
continue_on_failure: z.boolean().default(false).optional(),
options: z.any().optional(),
})
)
.optional(),
search_queries_only: z.boolean().default(false).optional(),
documents: z
.array(
z.object({
id: z.string().optional(),
title: z.string().optional(),
text: z.string(),
_excludes: z.array(z.string()).optional(),
})
)
.optional(),
citation_quality: z.enum(["accurate", "fast"]).optional(),
*/
temperature: z.number().default(0.3).optional(),
max_tokens: z
.number()
.int()
.nullish()
.default(Math.min(OPENAI_OUTPUT_MAX, 4096))
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
max_input_tokens: z.number().int().optional(),
k: z.number().int().min(0).max(500).default(0).optional(),
p: z.number().min(0.01).max(0.99).default(0.75).optional(),
seed: z.number().int().optional(),
stop_sequences: z.array(z.string()).max(5).optional(),
frequency_penalty: z.number().min(0).max(1).default(0).optional(),
presence_penalty: z.number().min(0).max(1).default(0).optional(),
tools: z
.array(
z.object({
name: z.string(),
description: z.string(),
parameter_definitions: z.record(
z.object({
description: z.string().optional(),
type: z.string(),
required: z.boolean().optional().default(false),
})
),
})
)
.optional(),
tool_results: z
.array(
z.object({
call: z.object({
name: z.string(),
parameters: z.record(z.any()),
}),
outputs: z.array(z.record(z.any())),
})
)
.optional(),
// We always force single step to avoid stateful calls or expensive multi-step
// generations when tools are involved.
force_single_step: z.literal(true).default(true).optional(),
})
.strip();
export type CohereChatMessage = NonNullable<
z.infer<typeof CohereV1ChatSchema>["chat_history"]
>[number];
export function flattenCohereMessageContent(
message: CohereChatMessage
): string {
return message.role === "TOOL"
? message.tool_results.map((r) => r.outputs[0].text).join("\n")
: message.message;
}
// Basic chat message schema
const CohereChatMessageSchema = z.object({
role: z.enum(["user", "assistant", "system", "developer"]),
content: z.string().nullable(),
name: z.string().optional(),
});
const CohereMessagesSchema = z.array(CohereChatMessageSchema);
// Schema for Cohere chat completions
export const CohereV1ChatCompletionsSchema = z.object({
model: z.string(),
messages: CohereMessagesSchema,
temperature: z.number().optional().default(1),
top_p: z.number().optional().default(1),
max_tokens: z.coerce
.number()
.int()
.nullish()
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
stream: z.boolean().optional().default(false),
stop: z
.union([z.string(), z.array(z.string())])
.optional()
.default([])
.transform((v) => (Array.isArray(v) ? v : [v])),
seed: z.number().int().min(0).optional(),
response_format: z
.object({
type: z.enum(["text", "json_object"]),
schema: z.any().optional()
})
.optional(),
// Structured output with schema
tools: z.array(z.any()).optional(),
frequency_penalty: z.number().optional().default(0),
presence_penalty: z.number().optional().default(0),
});
// Schema for Cohere embeddings
export const CohereV1EmbeddingsSchema = z.object({
model: z.string(),
input: z.union([z.string(), z.array(z.string())]),
encoding_format: z.enum(["float", "base64"]).optional()
});
// Helper function to convert between different message formats if needed
export function normalizeMessages(messages: any[]): any[] {
// Per the Cohere documentation, the supported roles are: developer, user, assistant.
// The 'developer' role is equivalent to 'system' in the OpenAI API.
return messages.map((msg) => {
// Convert system role to developer role for Cohere compatibility
if (msg.role === "system") {
return { ...msg, role: "developer" };
}
return msg;
export const transformOpenAIToCohere: APIFormatTransformer<
typeof CohereV1ChatSchema
> = async (req) => {
const { body } = req;
const result = OpenAIV1ChatCompletionSchema.safeParse({
...body,
model: "gpt-3.5-turbo",
});
}
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid OpenAI-to-Cohere request"
);
throw result.error;
}
const { messages, ...rest } = result.data;
// Final OAI message becomes the `message` field in Cohere
const message = messages[messages.length - 1];
// If the first message has system role, use it as preamble.
const hasSystemPreamble = messages[0]?.role === "system";
const preamble = hasSystemPreamble
? flattenOpenAIMessageContent(messages[0].content)
: undefined;
const chatHistory = messages.slice(0, -1).map((m) => {
const role: Exclude<CohereChatMessage["role"], "TOOL"> =
m.role === "assistant"
? "CHATBOT"
: m.role === "system"
? "SYSTEM"
: "USER";
const content = flattenOpenAIMessageContent(m.content);
const message = m.name ? `${m.name}: ${content}` : content;
return { role, message };
});
return {
model: rest.model,
preamble,
chat_history: chatHistory,
message: flattenOpenAIMessageContent(message.content),
stop_sequences:
typeof rest.stop === "string" ? [rest.stop] : rest.stop ?? undefined,
max_tokens: rest.max_tokens,
temperature: rest.temperature,
p: rest.top_p,
frequency_penalty: rest.frequency_penalty,
presence_penalty: rest.presence_penalty,
seed: rest.seed,
stream: rest.stream,
};
};
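To make the mapping in transformOpenAIToCohere concrete, here is a hand-worked example of the message-related fields it produces for a small OpenAI-format conversation. Other fields (max_tokens, temperature, stop_sequences, model) are copied or defaulted by the schemas above and are omitted here; the values are illustrative.

// Illustrative OpenAI-format messages.
const openAIMessages = [
  { role: "system", content: "You are a terse assistant." },
  { role: "user", content: "Hi", name: "Anon" },
  { role: "assistant", content: "Hello." },
  { role: "user", content: "Summarize RFC 2119 in one line." },
];

// Roughly what the transformer returns for the message fields: the leading
// system message becomes `preamble`, the final message becomes `message`, and
// every earlier message lands in `chat_history` with USER / CHATBOT / SYSTEM
// roles (names are prepended to the message text when present).
const cohereFields = {
  preamble: "You are a terse assistant.",
  chat_history: [
    { role: "SYSTEM", message: "You are a terse assistant." },
    { role: "USER", message: "Anon: Hi" },
    { role: "CHATBOT", message: "Hello." },
  ],
  message: "Summarize RFC 2119 in one line.",
};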
+38 -117
@@ -5,92 +5,32 @@ import {
} from "./openai";
import { APIFormatTransformer } from "./index";
const TextPartSchema = z.object({
text: z.string(),
thought: z.boolean().optional()
});
const InlineDataPartSchema = z.object({
inlineData: z.object({
mimeType: z.string(),
data: z.string(),
}),
});
const PartSchema = z.union([TextPartSchema, InlineDataPartSchema]);
const GoogleAIV1ContentSchema = z.object({
parts: z
.union([PartSchema, z.array(PartSchema)])
.transform((val) => (Array.isArray(val) ? val : [val])),
role: z.enum(["user", "model"]).optional(),
});
const SafetySettingsSchema = z
.array(
z.object({
category: z.enum([
"HARM_CATEGORY_HARASSMENT",
"HARM_CATEGORY_HATE_SPEECH",
"HARM_CATEGORY_SEXUALLY_EXPLICIT",
"HARM_CATEGORY_DANGEROUS_CONTENT",
"HARM_CATEGORY_CIVIC_INTEGRITY",
]),
threshold: z.enum([
"OFF",
"BLOCK_NONE",
"BLOCK_ONLY_HIGH",
"BLOCK_MEDIUM_AND_ABOVE",
"BLOCK_LOW_AND_ABOVE",
"HARM_BLOCK_THRESHOLD_UNSPECIFIED",
]),
})
)
.optional();
const GoogleSearchToolSchema = z.object({
googleSearch: z.object({}),
});
// Corrected: Directly assign the schema since there's only one tool type for now
const ToolSchema = GoogleSearchToolSchema;
// https://developers.generativeai.google/api/rest/generativelanguage/models/generateContent
export const GoogleAIV1GenerateContentSchema = z
.object({
model: z.string().max(100),
stream: z.boolean().optional().default(false),
contents: z.array(GoogleAIV1ContentSchema),
tools: z.array(ToolSchema).optional(), // Uses the corrected ToolSchema
safetySettings: SafetySettingsSchema,
systemInstruction: GoogleAIV1ContentSchema.optional(),
system_instruction: GoogleAIV1ContentSchema.optional(),
generationConfig: z
.object({
temperature: z.number().min(0).max(2).optional(),
maxOutputTokens: z.coerce
.number()
.int()
.optional()
.default(16)
.transform((v) => Math.min(v, 65536)),
candidateCount: z.literal(1).optional(),
topP: z.number().min(0).max(1).optional(),
topK: z.number().min(0).max(500).optional(),
stopSequences: z.array(z.string().max(500)).max(5).optional(),
seed: z.number().int().optional(),
frequencyPenalty: z.number().optional().default(0),
presencePenalty: z.number().optional().default(0),
thinkingConfig: z.object({
includeThoughts: z.boolean().optional(),
thinkingBudget: z.union([
z.literal("auto"),
z.number().int()
]).optional()
}).optional(),
responseModalities: z.any().optional(), // responseModalities: z.array(z.enum(["TEXT"])).optional()
model: z.string().max(100), // actually specified in the path, but we need it for the router
stream: z.boolean().optional().default(false), // also used for router
contents: z.array(
z.object({
parts: z.array(z.object({ text: z.string() })),
role: z.enum(["user", "model"]),
})
.default({}),
),
tools: z.array(z.object({})).max(0).optional(),
safetySettings: z.array(z.object({})).max(0).optional(),
generationConfig: z.object({
temperature: z.number().optional(),
maxOutputTokens: z.coerce
.number()
.int()
.optional()
.default(16)
.transform((v) => Math.min(v, 1024)), // TODO: Add config
candidateCount: z.literal(1).optional(),
topP: z.number().optional(),
topK: z.number().optional(),
stopSequences: z.array(z.string().max(500)).max(5).optional(),
}),
})
.strip();
export type GoogleAIChatMessage = z.infer<
@@ -114,11 +54,15 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
}
const { messages, ...rest } = result.data;
const foundNames = new Set<string>();
const contents = messages
.map((m) => {
const role = m.role === "assistant" ? "model" : "user";
// Detects character names so we can set stop sequences for them as Gemini
// is prone to continuing as the next character.
// If names are not available, we'll still try to prefix the message
// with generic names so we can set stops for them but they don't work
// as well as real names.
const text = flattenOpenAIMessageContent(m.content);
const propName = m.name?.trim();
const textName =
@@ -128,6 +72,12 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
foundNames.add(name);
// Prefixing messages with their character name seems to help avoid
// Gemini trying to continue as the next character, or at the very least
// ensures it will hit the stop sequence. Otherwise it will start a new
// paragraph and switch perspectives.
// The response will be very likely to include this prefix so frontends
// will need to strip it out.
const textPrefix = textName ? "" : `${name}: `;
return {
parts: [{ text: textPrefix + text }],
@@ -136,7 +86,7 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
})
.reduce<GoogleAIChatMessage[]>((acc, msg) => {
const last = acc[acc.length - 1];
if (last?.role === msg.role && 'text' in last.parts[0] && 'text' in msg.parts[0]) {
if (last?.role === msg.role) {
last.parts[0].text += "\n\n" + msg.parts[0].text;
} else {
acc.push(msg);
@@ -152,52 +102,23 @@ export const transformOpenAIToGoogleAI: APIFormatTransformer<
stops.push(...Array.from(foundNames).map((name) => `\n${name}:`));
stops = [...new Set(stops)].slice(0, 5);
let tools: z.infer<typeof ToolSchema>[] | undefined = undefined;
let responseModalities: string[] | undefined = undefined;
if (req.body.use_google_search === true) {
req.log.info("Google Search tool requested.");
tools = [{ googleSearch: {} }];
responseModalities = ["TEXT"];
}
let thinkingConfig = undefined;
if (body.generationConfig?.thinkingConfig || body.thinkingConfig) {
thinkingConfig = body.generationConfig?.thinkingConfig || body.thinkingConfig;
}
return {
model: req.body.model,
model: "gemini-pro",
stream: rest.stream,
contents,
tools: tools,
tools: [],
generationConfig: {
maxOutputTokens: rest.max_tokens,
stopSequences: stops,
topP: rest.top_p,
topK: 40,
topK: 40, // openai schema doesn't have this, google ai defaults to 40
temperature: rest.temperature,
seed: rest.seed,
frequencyPenalty: rest.frequency_penalty,
presencePenalty: rest.presence_penalty,
responseModalities: responseModalities,
...(thinkingConfig ? { thinkingConfig } : {})
},
safetySettings: [
{ category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_HATE_SPEECH", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "BLOCK_NONE" },
{ category: "HARM_CATEGORY_CIVIC_INTEGRITY", threshold: "BLOCK_NONE" },
],
...(req.body.system_instruction && { system_instruction: req.body.system_instruction }),
...(req.body.systemInstruction && { systemInstruction: req.body.systemInstruction }),
};
};
export function containsImageContent(contents: GoogleAIChatMessage[]): boolean {
return contents.some(content => {
const parts = Array.isArray(content.parts) ? content.parts : [content.parts];
return parts.some(part => 'inlineData' in part);
});
}
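A standalone sketch of the two behaviors the Google AI transformer's comments describe: consecutive messages with the same role are merged into a single contents entry, and detected character names become stop sequences. Types are simplified and the helpers are illustrative, not the module's code.

type GeminiContent = { role: "user" | "model"; parts: [{ text: string }] };

// Merge consecutive messages with the same role, as in the reduce() above.
function mergeContents(items: GeminiContent[]): GeminiContent[] {
  return items.reduce<GeminiContent[]>((acc, msg) => {
    const last = acc[acc.length - 1];
    if (last && last.role === msg.role) {
      last.parts[0].text += "\n\n" + msg.parts[0].text;
    } else {
      acc.push(msg);
    }
    return acc;
  }, []);
}

// Build stop sequences from detected character names, capped at 5 entries to
// match the slice in the transformer.
function buildStops(foundNames: Set<string>, existing: string[] = []): string[] {
  const stops = [...existing, ...Array.from(foundNames).map((n) => `\n${n}:`)];
  return [...new Set(stops)].slice(0, 5);
}

const merged = mergeContents([
  { role: "user", parts: [{ text: "Alice: hello" }] },
  { role: "user", parts: [{ text: "Alice: are you there?" }] },
  { role: "model", parts: [{ text: "Bob: I am." }] },
]);
console.log(merged.length); // 2
console.log(buildStops(new Set(["Alice", "Bob"]))); // ["\nAlice:", "\nBob:"]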
+21 -13
@@ -17,19 +17,12 @@ import {
OpenAIV1ImagesGenerationSchema,
transformOpenAIToOpenAIImage,
} from "./openai-image";
import {
OpenAIV1ResponsesSchema,
transformOpenAIToOpenAIResponses,
} from "./openai-responses";
import {
GoogleAIV1GenerateContentSchema,
transformOpenAIToGoogleAI,
} from "./google-ai";
import {
MistralAIV1ChatCompletionsSchema,
MistralAIV1TextCompletionsSchema,
transformMistralChatToText,
} from "./mistral-ai";
import { MistralAIV1ChatCompletionsSchema } from "./mistral-ai";
import { CohereV1ChatSchema, transformOpenAIToCohere } from "./cohere";
export { OpenAIChatMessage } from "./openai";
export {
@@ -41,34 +34,49 @@ export {
export { GoogleAIChatMessage } from "./google-ai";
export { MistralAIChatMessage } from "./mistral-ai";
/** Represents a pair of API formats that can be transformed between. */
type APIPair = `${APIFormat}->${APIFormat}`;
/** Represents a map of API format pairs to transformer functions. */
type TransformerMap = {
[key in APIPair]?: APIFormatTransformer<any>;
};
/**
* Represents a transformer function that takes a Request and returns a Promise
* resolving to a value of the specified Zod schema type.
*
* @template Z The Zod schema type to transform the request into (from api-schemas).
* @param req The incoming Request to transform.
* @returns A Promise resolving to the transformed request body.
*/
export type APIFormatTransformer<Z extends z.ZodType<any, any>> = (
req: Request
) => Promise<z.infer<Z>>;
/**
* Specifies possible translations between API formats and the corresponding
* transformer functions to apply them.
*/
export const API_REQUEST_TRANSFORMERS: TransformerMap = {
"anthropic-text->anthropic-chat": transformAnthropicTextToAnthropicChat,
"openai->anthropic-chat": transformOpenAIToAnthropicChat,
"openai->anthropic-text": transformOpenAIToAnthropicText,
"openai->openai-text": transformOpenAIToOpenAIText,
"openai->openai-image": transformOpenAIToOpenAIImage,
"openai->openai-responses": transformOpenAIToOpenAIResponses,
"openai->google-ai": transformOpenAIToGoogleAI,
"mistral-ai->mistral-text": transformMistralChatToText,
"openai->cohere-chat": transformOpenAIToCohere,
};
/**
* Specifies the schema for each API format to validate incoming requests.
*/
export const API_REQUEST_VALIDATORS: Record<APIFormat, z.ZodSchema<any>> = {
"anthropic-chat": AnthropicV1MessagesSchema,
"anthropic-text": AnthropicV1TextSchema,
openai: OpenAIV1ChatCompletionSchema,
"openai-text": OpenAIV1TextCompletionSchema,
"openai-image": OpenAIV1ImagesGenerationSchema,
"openai-responses": OpenAIV1ResponsesSchema,
"google-ai": GoogleAIV1GenerateContentSchema,
"mistral-ai": MistralAIV1ChatCompletionsSchema,
"mistral-text": MistralAIV1TextCompletionsSchema,
"cohere-chat": CohereV1ChatSchema,
};
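The transformer map above is keyed by "from->to" strings. The lookup site lives elsewhere in the proxy and is not part of this diff, so the following is an assumption about usage: a minimal sketch of resolving a transformer for an inbound/outbound format pair, with a stand-in map and request type.

type Transformer = (req: { body: unknown }) => Promise<unknown>;

// Stand-in map with the same "from->to" keying as API_REQUEST_TRANSFORMERS.
const transformers: Record<string, Transformer | undefined> = {
  "openai->cohere-chat": async (req) => ({ translated: true, from: req.body }),
};

async function translateRequestBody(
  inbound: string,
  outbound: string,
  req: { body: unknown }
): Promise<unknown> {
  const transform = transformers[`${inbound}->${outbound}`];
  // No entry means the formats already match or no translation exists;
  // pass the body through unchanged.
  return transform ? transform(req) : req.body;
}

// translateRequestBody("openai", "cohere-chat", { body: { messages: [] } });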
+15 -234
@@ -1,86 +1,15 @@
import { z } from "zod";
import { OPENAI_OUTPUT_MAX } from "./openai";
import { Template } from "@huggingface/jinja";
import { APIFormatTransformer } from "./index";
import { logger } from "../../logger";
// Define the content types for multimodal messages
export const TextContentSchema = z.object({
type: z.literal("text"),
text: z.string()
});
export const ImageUrlContentSchema = z.object({
type: z.literal("image_url"),
image_url: z.union([
// URL format (https://...)
z.string().url(),
// Base64 format (data:image/jpeg;base64,...)
z.string().regex(/^data:image\/(jpeg|png|gif|webp);base64,/),
// Object format (might contain detail or url properties)
z.record(z.any()),
// Allow any string for maximum compatibility
z.string()
])
});
export const ContentItemSchema = z.union([TextContentSchema, ImageUrlContentSchema]);
// Export types for the content schemas
export type TextContent = z.infer<typeof TextContentSchema>;
export type ImageUrlContent = z.infer<typeof ImageUrlContentSchema>;
export type ContentItem = z.infer<typeof ContentItemSchema>;
// List of Mistral models with vision capabilities
export const MISTRAL_VISION_MODELS = [
"pixtral-12b-2409",
"pixtral-12b-latest",
"pixtral-large-2411",
"pixtral-large-latest",
"mistral-small-2503",
"mistral-small-latest",
"mistral-medium-latest",
"mistral-medium-2505"
];
// Helper function to check if a model supports vision
export function isMistralVisionModel(model: string): boolean {
return MISTRAL_VISION_MODELS.some(visionModel =>
model === visionModel ||
model.startsWith(`${visionModel}-`)
);
}
// Main Mistral chat message schema
const MistralChatMessageSchema = z.object({
role: z.enum(["system", "user", "assistant", "tool"]), // TODO: implement tools
// Support both string content (for backwards compatibility) and array of content items (for multimodal)
content: z.union([
z.string(),
z.array(ContentItemSchema)
]),
prefix: z.boolean().optional(),
});
const MistralMessagesSchema = z.array(MistralChatMessageSchema).refine(
(input) => {
const prefixIdx = input.findIndex((msg) => Boolean(msg.prefix));
if (prefixIdx === -1) return true; // no prefix messages
const lastIdx = input.length - 1;
const lastMsg = input[lastIdx];
return prefixIdx === lastIdx && lastMsg.role === "assistant";
},
{
message:
"`prefix` can only be set to `true` on the last message, and only for an assistant message.",
}
);
// https://docs.mistral.ai/api#operation/createChatCompletion
const BaseMistralAIV1CompletionsSchema = z.object({
export const MistralAIV1ChatCompletionsSchema = z.object({
model: z.string(),
messages: MistralMessagesSchema.optional(),
prompt: z.string().optional(),
messages: z.array(
z.object({
role: z.enum(["system", "user", "assistant"]),
content: z.string(),
})
),
temperature: z.number().optional().default(0.7),
top_p: z.number().optional().default(1),
max_tokens: z.coerce
@@ -89,50 +18,12 @@ const BaseMistralAIV1CompletionsSchema = z.object({
.nullish()
.transform((v) => Math.min(v ?? OPENAI_OUTPUT_MAX, OPENAI_OUTPUT_MAX)),
stream: z.boolean().optional().default(false),
// Mistral docs say that `stop` can be a string or array but AWS Mistral
// blows up if a string is passed. We must convert it to an array.
stop: z
.union([z.string(), z.array(z.string())])
.optional()
.default([])
.transform((v) => (Array.isArray(v) ? v : [v])),
random_seed: z.number().int().min(0).optional(),
response_format: z
.object({ type: z.enum(["text", "json_object"]) })
.optional(),
safe_prompt: z.boolean().optional().default(false),
random_seed: z.number().int().optional(),
});
export const MistralAIV1ChatCompletionsSchema =
BaseMistralAIV1CompletionsSchema.and(
z.object({ messages: MistralMessagesSchema })
);
export const MistralAIV1TextCompletionsSchema =
BaseMistralAIV1CompletionsSchema.and(z.object({ prompt: z.string() }));
/*
Slightly more strict version that only allows a subset of the parameters. AWS
Mistral helpfully returns no details if unsupported parameters are passed so
this list comes from trial and error as of 2024-08-12.
*/
const BaseAWSMistralAIV1CompletionsSchema =
BaseMistralAIV1CompletionsSchema.pick({
temperature: true,
top_p: true,
max_tokens: true,
stop: true,
random_seed: true,
// response_format: true,
// safe_prompt: true,
}).strip();
export const AWSMistralV1ChatCompletionsSchema =
BaseAWSMistralAIV1CompletionsSchema.and(
z.object({ messages: MistralMessagesSchema })
);
export const AWSMistralV1TextCompletionsSchema =
BaseAWSMistralAIV1CompletionsSchema.and(z.object({ prompt: z.string() }));
export type MistralAIChatMessage = z.infer<typeof MistralChatMessageSchema>;
export type MistralAIChatMessage = z.infer<
typeof MistralAIV1ChatCompletionsSchema
>["messages"][0];
export function fixMistralPrompt(
messages: MistralAIChatMessage[]
@@ -140,11 +31,12 @@ export function fixMistralPrompt(
// Mistral uses OpenAI format but has some additional requirements:
// - Only one system message per request, and it must be the first message if
// present.
// - Final message must be a user message, unless it has `prefix: true`.
// - Final message must be a user message.
// - Cannot have multiple messages from the same role in a row.
// While frontends should be able to handle this, we can fix it here in the
// meantime.
const fixed = messages.reduce<MistralAIChatMessage[]>((acc, msg) => {
return messages.reduce<MistralAIChatMessage[]>((acc, msg) => {
if (acc.length === 0) {
acc.push(msg);
return acc;
@@ -159,121 +51,10 @@ export function fixMistralPrompt(
// Consolidate multiple messages from the same role
const last = acc[acc.length - 1];
if (last.role === copy.role) {
// Handle different content types for consolidation
if (typeof last.content === "string" && typeof copy.content === "string") {
// Both are strings, concatenate them
last.content += "\n\n" + copy.content;
} else if (Array.isArray(last.content) && typeof copy.content === "string") {
// Add the string content as a new text content item
last.content.push({
type: "text",
text: copy.content
});
} else if (typeof last.content === "string" && Array.isArray(copy.content)) {
// Convert last.content to array and append copy.content items
last.content = [
{ type: "text", text: last.content },
...copy.content
];
} else if (Array.isArray(last.content) && Array.isArray(copy.content)) {
// Both are arrays, concatenate them
last.content = [...last.content, ...copy.content];
}
last.content += "\n\n" + copy.content;
} else {
acc.push(copy);
}
return acc;
}, []);
// If the last message is an assistant message, mark it as a prefix. An
// assistant message at the end of the conversation without `prefix: true`
// results in an error.
if (fixed[fixed.length - 1].role === "assistant") {
fixed[fixed.length - 1].prefix = true;
}
return fixed;
}
let jinjaTemplate: Template;
let renderTemplate: (messages: MistralAIChatMessage[]) => string;
// Helper function to convert multimodal content to string format for text-only models
function contentToString(content: string | any[]): string {
if (typeof content === "string") {
return content;
} else if (Array.isArray(content)) {
// For multimodal content, extract only the text parts
// Images are not supported in text-only templates
return content
.filter(item => item.type === "text")
.map(item => (item as any).text)
.join("\n\n");
}
return "";
}
function renderMistralPrompt(messages: MistralAIChatMessage[]) {
if (!jinjaTemplate) {
logger.warn("Lazy loading mistral chat template...");
const { chatTemplate, bosToken, eosToken } =
require("./templates/mistral-template").MISTRAL_TEMPLATE;
jinjaTemplate = new Template(chatTemplate);
renderTemplate = (messages) => {
// We need to convert any multimodal content to string format for the template
const textOnlyMessages = messages.map(msg => ({
...msg,
content: contentToString(msg.content)
}));
return jinjaTemplate.render({
messages: textOnlyMessages,
bos_token: bosToken,
eos_token: eosToken,
});
};
}
return renderTemplate(messages);
}
/**
* Attempts to convert a Mistral chat completions request to a text completions,
* using the official prompt template published by Mistral.
*
* Note: This transformation is only applicable for text-only models.
* Multimodal/vision models (Pixtral, etc.) cannot use this transformation.
*/
export const transformMistralChatToText: APIFormatTransformer<
typeof MistralAIV1TextCompletionsSchema
> = async (req) => {
const { body } = req;
const result = MistralAIV1ChatCompletionsSchema.safeParse(body);
if (!result.success) {
req.log.warn(
{ issues: result.error.issues, body },
"Invalid Mistral chat completions request"
);
throw result.error;
}
// Check if this is a vision request (contains any image_url content items)
const { messages, model, ...rest } = result.data;
const hasVisionContent = messages.some(msg =>
Array.isArray(msg.content) &&
msg.content.some(item => item.type === "image_url")
);
// Cannot transform vision requests to text completions
if (hasVisionContent) {
req.log.warn(
{ model },
"Cannot transform Mistral vision request to text completions format"
);
throw new Error(
"Vision requests (with image_url content) cannot be transformed to text completions format"
);
}
const prompt = renderMistralPrompt(messages);
return { ...rest, model, prompt, messages: undefined };
};
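As a concrete illustration of the rules fixMistralPrompt enforces above (consecutive same-role messages are consolidated, and a trailing assistant message is marked with `prefix: true` so it is treated as a partial completion), here is a hand-worked before/after. The values are illustrative; the multimodal merging branches shown in the diff apply when content is an array rather than a string.

// Illustrative input: two consecutive user messages and a trailing assistant message.
const before = [
  { role: "system", content: "Stay in character." },
  { role: "user", content: "Hello." },
  { role: "user", content: "Are you there?" },
  { role: "assistant", content: "I am" },
];

// Roughly what fixMistralPrompt returns: same-role messages merged with a
// blank line between them, and the final assistant message flagged as a prefix.
const after = [
  { role: "system", content: "Stay in character." },
  { role: "user", content: "Hello.\n\nAre you there?" },
  { role: "assistant", content: "I am", prefix: true },
];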

Some files were not shown because too many files have changed in this diff.