refactor: simplify Google AI model fetching by using synthetic response from existing keys
This commit is contained in:
+18
-33
@@ -63,46 +63,31 @@ const getModelsResponse = () => {
|
||||
|
||||
// Function to fetch native models from Google AI API
|
||||
const getNativeModelsResponse = async () => {
|
||||
// Return cached value if it was refreshed in the last minute
|
||||
if (new Date().getTime() - nativeModelsCacheTime < 1000 * 60) {
|
||||
return nativeModelsCache;
|
||||
}
|
||||
|
||||
if (!config.googleAIKey) return { models: [] };
|
||||
/*
|
||||
 * The official Google API requires an API key. However, SillyTavern only needs
|
||||
* a list of model IDs and does not care about any other model metadata. We
|
||||
* can therefore generate a **synthetic** response from the keys already
|
||||
* loaded into the proxy (same source we use for the OpenAI-compatible
|
||||
* endpoint) and completely avoid the outbound request. This removes the
|
||||
* need for the frontend to supply the proxy password as an API key and
|
||||
* prevents 4xx/5xx errors when the real Google API is unreachable or the key
|
||||
* is missing.
|
||||
*/
|
||||
const openaiStyle = getModelsResponse();
|
||||
const models = (openaiStyle.data || []).map((m: any) => ({
|
||||
// Google AI Studio returns names in the format "models/<id>"
|
||||
name: `models/${m.id}`,
|
||||
supportedGenerationMethods: ["generateContent"],
|
||||
}));
|
||||
|
||||
const keys = keyPool
|
||||
.list()
|
||||
.filter((k) => k.service === "google-ai") as GoogleAIKey[];
|
||||
if (keys.length === 0) {
|
||||
nativeModelsCache = { models: [] };
|
||||
nativeModelsCache = { models };
|
||||
nativeModelsCacheTime = new Date().getTime();
|
||||
return nativeModelsCache;
|
||||
}
|
||||
|
||||
try {
|
||||
// Use the first available key to fetch models
|
||||
const key = keys[0];
|
||||
const apiVersion = "v1beta"; // Use the latest API version
|
||||
const url = `https://generativelanguage.googleapis.com/${apiVersion}/models`;
|
||||
|
||||
const response = await axios.get(url, {
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
params: {
|
||||
key: key.key,
|
||||
},
|
||||
});
|
||||
|
||||
// We'll update the model cache but won't attempt to update the keys
|
||||
// This avoids type issues while still keeping our models list up to date
|
||||
nativeModelsCache = response.data;
|
||||
nativeModelsCacheTime = new Date().getTime();
|
||||
return nativeModelsCache;
|
||||
} catch (error) {
|
||||
console.error("Error fetching Google AI models:", error);
|
||||
// Return empty model list on error
|
||||
return { models: [] };
|
||||
}
|
||||
};
|
||||
|
||||
const handleModelRequest: RequestHandler = (_req: Request, res: any) => {
|
||||
|
||||
+1
-1
@@ -92,7 +92,7 @@ app.use("/admin", adminRouter);
|
||||
app.use((req, _, next) => {
|
||||
  // For whatever reason, SillyTavern just ignores the path a user provides
|
||||
// when using Google AI with reverse proxy. We'll fix it here.
|
||||
if (req.path.match(/^\/v1(alpha|beta)\/models\//)) {
|
||||
if (req.path.match(/^\/v1(alpha|beta)\/models(\/|$)/)) {
|
||||
req.url = `${config.proxyEndpointRoute}/google-ai${req.url}`;
|
||||
return next();
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user