adds gpt4-turbo to user schema; updates docs

This commit is contained in:
nai-degen
2023-11-06 16:35:35 -06:00
parent 0d5dfeccf8
commit 5467136c1a
3 changed files with 10 additions and 12 deletions
+5 -2
View File
@@ -15,7 +15,7 @@
# MODEL_RATE_LIMIT=4 # MODEL_RATE_LIMIT=4
# Max number of output tokens a user can request at once. # Max number of output tokens a user can request at once.
# MAX_OUTPUT_TOKENS_OPENAI=300 # MAX_OUTPUT_TOKENS_OPENAI=400
# MAX_OUTPUT_TOKENS_ANTHROPIC=400 # MAX_OUTPUT_TOKENS_ANTHROPIC=400
# Whether to show the estimated cost of consumed tokens on the info page. # Whether to show the estimated cost of consumed tokens on the info page.
@@ -27,7 +27,10 @@
# CHECK_KEYS=true # CHECK_KEYS=true
# Which model types users are allowed to access. # Which model types users are allowed to access.
# ALLOWED_MODEL_FAMILIES=claude,turbo,gpt4,gpt4-32k # If you want to restrict access to certain models, uncomment the line below and list only the models you want to allow,
# separated by commas. By default, all models are allowed. The following model families are recognized:
# turbo | gpt4 | gpt4-32k | gpt4-turbo | claude | bison | aws-claude
# ALLOWED_MODEL_FAMILIES=turbo,gpt4-turbo,aws-claude
# URLs from which requests will be blocked. # URLs from which requests will be blocked.
# BLOCKED_ORIGINS=reddit.com,9gag.com # BLOCKED_ORIGINS=reddit.com,9gag.com
+4 -10
View File
@@ -287,18 +287,12 @@ function getOpenAIInfo() {
const tokens = modelStats.get(`${f}__tokens`) || 0; const tokens = modelStats.get(`${f}__tokens`) || 0;
const cost = getTokenCostUsd(f, tokens); const cost = getTokenCostUsd(f, tokens);
const active = modelStats.get(`${f}__active`) || 0;
const trial = modelStats.get(`${f}__trial`) || 0;
const revoked = modelStats.get(`${f}__revoked`) || 0;
const overQuota = modelStats.get(`${f}__overQuota`) || 0;
if (active + trial + revoked + overQuota === 0) return;
info[f] = { info[f] = {
usage: `${prettyTokens(tokens)} tokens${getCostString(cost)}`, usage: `${prettyTokens(tokens)} tokens${getCostString(cost)}`,
activeKeys: active, activeKeys: modelStats.get(`${f}__active`) || 0,
trialKeys: trial, trialKeys: modelStats.get(`${f}__trial`) || 0,
revokedKeys: revoked, revokedKeys: modelStats.get(`${f}__revoked`) || 0,
overQuotaKeys: overQuota, overQuotaKeys: modelStats.get(`${f}__overQuota`) || 0,
}; };
}); });
} else { } else {
+1
View File
@@ -6,6 +6,7 @@ export const tokenCountsSchema: ZodType<UserTokenCounts> = z.object({
turbo: z.number().optional().default(0), turbo: z.number().optional().default(0),
gpt4: z.number().optional().default(0), gpt4: z.number().optional().default(0),
"gpt4-32k": z.number().optional().default(0), "gpt4-32k": z.number().optional().default(0),
"gpt4-turbo": z.number().optional().default(0),
claude: z.number().optional().default(0), claude: z.number().optional().default(0),
bison: z.number().optional().default(0), bison: z.number().optional().default(0),
"aws-claude": z.number().optional().default(0), "aws-claude": z.number().optional().default(0),