adds TODOs so I don't forget

nai-degen
2023-04-10 20:02:48 -07:00
committed by nai-degen
parent f3a13129f9
commit 15ed8e7c25
4 changed files with 7 additions and 7 deletions
+1 -1
@@ -23,7 +23,7 @@ type Config = {
/** Logging threshold. */
logLevel?: "debug" | "info" | "warn" | "error";
/** Whether prompts and responses should be logged. */
- logPrompts?: boolean; // TODO
+ logPrompts?: boolean; // TODO: Implement prompt logging once we have persistent storage.
/** Whether to periodically check keys for usage and validity. */
checkKeys?: boolean;
};
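
The expanded logPrompts TODO points at gating prompt/response capture behind this flag once persistent storage exists. A minimal sketch of what that gate might look like, assuming a hypothetical promptStore helper for the storage piece (none of this is implemented in this commit):

import { config } from "../config";
// Hypothetical persistence helper; not part of this commit.
import { promptStore } from "./prompt-store";

export async function logPromptIfEnabled(prompt: string, response: string) {
  // Only record anything when the operator has opted in via config.logPrompts.
  if (!config.logPrompts) return;
  await promptStore.append({ prompt, response, loggedAt: new Date().toISOString() });
}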
+3
@@ -172,6 +172,9 @@ export class KeyChecker {
return data;
}
+ // TODO: This endpoint seems to be very delayed. I think we will need to track
+ // the time it last changed and estimate token usage ourselves in between
+ // changes by inspecting request payloads for prompt and completion tokens.
private async getUsage(key: Key) {
const querystring = KeyChecker.getUsageQuerystring(key.isTrial);
const url = `${GET_USAGE_URL}?${querystring}`;
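
The new KeyChecker TODO already describes the workaround fairly completely: keep a local running count of tokens seen in request payloads and only trust the usage endpoint when its value actually changes. A rough sketch of that bookkeeping, assuming hypothetical usage, estimatedUsage, and lastUsageChange fields on Key:

// Called per proxied request with token counts pulled from the payload.
function addLocalEstimate(key: Key, promptTokens: number, completionTokens: number) {
  key.estimatedUsage += promptTokens + completionTokens;
}

// Only trust the remote figure when it actually moves, since the endpoint lags.
function reconcileUsage(key: Key, remoteUsage: number) {
  if (remoteUsage !== key.usage) {
    key.usage = remoteUsage;
    key.estimatedUsage = 0;
    key.lastUsageChange = Date.now();
  }
}

// Best current guess: last confirmed usage plus whatever we've proxied since.
function currentUsage(key: Key) {
  return key.usage + key.estimatedUsage;
}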
+3 -5
@@ -6,12 +6,10 @@ import { config } from "../config";
import { logger } from "../logger";
import { KeyChecker } from "./key-checker";
- // I made too many assumptions about OpenAI being the only provider and now this
- // is a mess with the addition of Anthropic. Server will have to be restricted
- // to operating on one provider at a time until I can refactor this to use
- // some KeyProvider interface.
+ // TODO: Made too many assumptions about OpenAI being the only provider and now
+ // this doesn't really work for Anthropic. Create a Provider interface and
+ // implement Pool, Checker, and Models for each provider.
// TODO: Move this stuff somewhere else, it's not key management.
export type Model = OpenAIModel | AnthropicModel;
export type OpenAIModel =
| "gpt-3.5-turbo"
-1
@@ -70,7 +70,6 @@ const openaiProxy = createProxyMiddleware({
const openaiRouter = Router();
openaiRouter.get("/v1/models", openaiProxy);
// openaiRouter.post("/v1/completions", openaiProxy); // TODO: Implement Davinci
openaiRouter.post("/v1/chat/completions", ipLimiter, openaiProxy);
openaiRouter.use((req, res) => {
logger.warn(`Blocked openai proxy request: ${req.method} ${req.path}`);
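
If the Davinci TODO is ever picked up, the commented-out route would presumably be wired the same way as the chat endpoint, passing through the IP limiter before the proxy. Sketch only; completions support is not part of this commit:

// Hypothetical wiring once text completion support exists, mirroring the chat route.
openaiRouter.post("/v1/completions", ipLimiter, openaiProxy);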