Improve error reporting (surface upstream errors instead of throwing)

This commit is contained in:
Enrico Ros
2023-07-10 21:58:03 -07:00
parent 6f251269cf
commit fcdc3266af
2 changed files with 12 additions and 8 deletions
+8 -5
View File
@@ -127,19 +127,22 @@ export default async function handler(req: NextRequest): Promise<Response> {
// inputs - reuse the tRPC schema
const { access, model, history } = chatGenerateSchema.parse(await req.json());
// prepare the API request data
const { headers, url } = openAIAccess(access, '/v1/chat/completions');
const body = openAIChatCompletionPayload(model, history, null, 1, true);
// begin event streaming from the OpenAI API
let upstreamResponse: Response;
try {
// prepare the API request data
const { headers, url } = openAIAccess(access, '/v1/chat/completions');
const body = openAIChatCompletionPayload(model, history, null, 1, true);
// POST to the API
upstreamResponse = await fetch(url, { headers, method: 'POST', body: JSON.stringify(body) });
await throwOpenAINotOkay(upstreamResponse);
} catch (error: any) {
const fetchOrVendorError = (error?.message || typeof error === 'string' ? error : JSON.stringify(error)) + (error?.cause ? ' · ' + error.cause : '');
console.log(`/api/llms/stream: fetch issue: ${fetchOrVendorError}`);
throw new Error('[OpenAI Issue] ' + fetchOrVendorError);
return new NextResponse('[OpenAI Issue] ' + fetchOrVendorError, { status: 500 });
}
/* The following code is heavily inspired by the Vercel AI SDK, but simplified to our needs and in full control.
+4 -3
View File
@@ -122,9 +122,10 @@ async function streamAssistantMessage(
signal: abortSignal,
});
if (!response.body) {
// noinspection ExceptionCaughtLocallyJS
throw new Error('No response body');
if (!response.ok || !response.body) {
const errorMessage = response.body ? await response.text() : 'No response from server';
editMessage(conversationId, assistantMessageId, { text: errorMessage, typing: false }, false);
return;
}
const responseReader = response.body.getReader();