diff --git a/src/apps/draw/create/PromptComposer.tsx b/src/apps/draw/create/PromptComposer.tsx
index 8b20e0dd7..6bf735b17 100644
--- a/src/apps/draw/create/PromptComposer.tsx
+++ b/src/apps/draw/create/PromptComposer.tsx
@@ -119,7 +119,7 @@ export function PromptComposer(props: {
   const handleSimpleEnhance = React.useCallback(async () => {
     if (nonEmptyPrompt?.trim()) {
       setIsSimpleEnhancing(true);
-      const improvedPrompt = await imaginePromptFromText(nonEmptyPrompt, null).catch(console.error);
+      const improvedPrompt = await imaginePromptFromText(nonEmptyPrompt, 'DEV').catch(console.error);
       if (improvedPrompt)
         setNextPrompt(improvedPrompt);
       setIsSimpleEnhancing(false);
diff --git a/src/modules/aifn/imagine/imaginePromptFromText.ts b/src/modules/aifn/imagine/imaginePromptFromText.ts
index 8fa370d2f..9a5d7e2bc 100644
--- a/src/modules/aifn/imagine/imaginePromptFromText.ts
+++ b/src/modules/aifn/imagine/imaginePromptFromText.ts
@@ -1,4 +1,4 @@
-import { llmChatGenerateOrThrow, VChatMessageIn } from '~/modules/llms/llm.client';
+import { aixChatGenerateTextNS_Simple } from '~/modules/aix/client/aix.client';
 
 import { getLLMIdOrThrow } from '~/common/stores/llms/store-llms';
 
@@ -13,7 +13,7 @@ Provide output a single image generation prompt and nothing else.`;
 /**
  * Creates a caption for a drawing or photo given some description - used to elevate the quality of the imaging
  */
-export async function imaginePromptFromText(messageText: string, contextRef: string | null): Promise<string | null> {
+export async function imaginePromptFromText(messageText: string, contextRef: string): Promise<string | null> {
 
   // we used the fast LLM, but let's just converge to the chat LLM here
   const llmId = getLLMIdOrThrow(['fast', 'chat'], false, false, 'imagine-prompt-from-text');
@@ -24,12 +24,12 @@ export async function imaginePromptFromText(messageText: string, contextRef: str
   if (!/[.!?]$/.test(messageText))
     messageText += '.';
 
   try {
-    const instructions: VChatMessageIn[] = [
-      { role: 'system', content: simpleImagineSystemPrompt },
-      { role: 'user', content: 'Write a minimum of 20-30 words prompt and up to the size of the input, based on the INPUT below.\n\nINPUT:\n' + messageText },
-    ];
-    const chatResponse = await llmChatGenerateOrThrow(llmId, instructions, 'draw-expand-prompt', contextRef, null, null);
-    return chatResponse.content?.trim() ?? null;
+    return (await aixChatGenerateTextNS_Simple(
+      llmId,
+      simpleImagineSystemPrompt,
+      'Write a minimum of 20-30 words prompt and up to the size of the input, based on the INPUT below.\n\nINPUT:\n' + messageText,
+      'draw-expand-prompt', contextRef,
+    )).trim();
   } catch (error: any) {
     console.error('imaginePromptFromText: fetch request error:', error);
     return null;
diff --git a/src/modules/aifn/summarize/summerize.ts b/src/modules/aifn/summarize/summerize.ts
index fb0457364..93996d341 100644
--- a/src/modules/aifn/summarize/summerize.ts
+++ b/src/modules/aifn/summarize/summerize.ts
@@ -1,4 +1,4 @@
-import { llmChatGenerateOrThrow, VChatMessageIn } from '~/modules/llms/llm.client';
+import { aixChatGenerateTextNS_Simple } from '~/modules/aix/client/aix.client';
 
 import type { DLLMId } from '~/common/stores/llms/llms.types';
 import { findLLMOrThrow } from '~/common/stores/llms/store-llms';
@@ -82,12 +82,12 @@ async function cleanUpContent(chunk: string, llmId: DLLMId, _ignored_was_targetW
   const autoResponseTokensSize = contextTokens ? Math.floor(contextTokens * outputTokenShare) : null;
 
   try {
-    const instructions: VChatMessageIn[] = [
-      { role: 'system', content: cleanupPrompt },
-      { role: 'user', content: chunk },
-    ];
-    const chatResponse = await llmChatGenerateOrThrow(llmId, instructions, 'chat-ai-summarize', null, null, null, autoResponseTokensSize ?? undefined);
-    return chatResponse?.content ?? '';
+    return (await aixChatGenerateTextNS_Simple(
+      llmId,
+      cleanupPrompt,
+      chunk,
+      'chat-ai-summarize', 'DEV',
+    )).trim();
   } catch (error: any) {
     return '';
   }