diff --git a/src/common/util/clientFetchers.ts b/src/common/util/clientFetchers.ts
new file mode 100644
index 000000000..ae75234a8
--- /dev/null
+++ b/src/common/util/clientFetchers.ts
@@ -0,0 +1 @@
+export const frontendSideFetch: typeof fetch = (...args) => fetch(...args);
diff --git a/src/modules/aifn/react/react.ts b/src/modules/aifn/react/react.ts
index 6720d8137..b03664e58 100644
--- a/src/modules/aifn/react/react.ts
+++ b/src/modules/aifn/react/react.ts
@@ -8,6 +8,8 @@ import { callApiSearchGoogle } from '~/modules/google/search.client';
 import { callBrowseFetchPage } from '~/modules/browse/browse.client';
 import { llmChatGenerateOrThrow, VChatMessageIn } from '~/modules/llms/llm.client';
 
+import { frontendSideFetch } from '~/common/util/clientFetchers';
+
 
 // prompt to implement the ReAct paradigm: https://arxiv.org/abs/2210.03629
 const reActPrompt = (enableBrowse: boolean): string =>
@@ -172,7 +174,7 @@ export class Agent {
 type ActionFunction = (input: string) => Promise<string>;
 
 async function wikipedia(q: string): Promise<string> {
-  const response = await fetch(
+  const response = await frontendSideFetch(
     `https://en.wikipedia.org/w/api.php?action=query&list=search&srsearch=${encodeURIComponent(q)}&format=json&origin=*`,
   );
   const data = await response.json();
diff --git a/src/modules/blocks/code/RenderCode.tsx b/src/modules/blocks/code/RenderCode.tsx
index 263db6342..820ad3837 100644
--- a/src/modules/blocks/code/RenderCode.tsx
+++ b/src/modules/blocks/code/RenderCode.tsx
@@ -10,6 +10,7 @@ import SchemaIcon from '@mui/icons-material/Schema';
 import ShapeLineOutlinedIcon from '@mui/icons-material/ShapeLineOutlined';
 
 import { copyToClipboard } from '~/common/util/clipboardUtils';
+import { frontendSideFetch } from '~/common/util/clientFetchers';
 
 import type { CodeBlock } from '../blocks';
 import { ButtonCodePen, isCodePenSupported } from './ButtonCodePen';
@@ -28,7 +29,7 @@ async function fetchPlantUmlSvg(plantUmlCode: string): Promise<string | null> {
   try {
     // retrieve and manually adapt the SVG, to remove the background
     const encodedPlantUML: string = plantUmlEncode(plantUmlCode);
-    const response = await fetch(`https://www.plantuml.com/plantuml/svg/${encodedPlantUML}`);
+    const response = await frontendSideFetch(`https://www.plantuml.com/plantuml/svg/${encodedPlantUML}`);
     text = await response.text();
   } catch (e) {
     return null;
diff --git a/src/modules/elevenlabs/elevenlabs.client.ts b/src/modules/elevenlabs/elevenlabs.client.ts
index 250f4f6f9..2fb5e83a0 100644
--- a/src/modules/elevenlabs/elevenlabs.client.ts
+++ b/src/modules/elevenlabs/elevenlabs.client.ts
@@ -2,6 +2,7 @@ import { backendCaps } from '~/modules/backend/state-backend';
 
 import { AudioLivePlayer } from '~/common/util/AudioLivePlayer';
 import { CapabilityElevenLabsSpeechSynthesis } from '~/common/components/useCapabilities';
+import { frontendSideFetch } from '~/common/util/clientFetchers';
 import { playSoundBuffer } from '~/common/util/audioUtils';
 import { useUIPreferencesStore } from '~/common/state/store-ui';
 
@@ -35,7 +36,7 @@ export async function speakText(text: string, voiceId?: string) {
   const nonEnglish = !(preferredLanguage?.toLowerCase()?.startsWith('en'));
 
   try {
-    const edgeResponse = await fetchApiElevenlabsSpeech(text, elevenLabsApiKey, voiceId || elevenLabsVoiceId, nonEnglish, false);
+    const edgeResponse = await frontendFetchAPIElevenLabsSpeech(text, elevenLabsApiKey, voiceId || elevenLabsVoiceId, nonEnglish, false);
     const audioBuffer = await edgeResponse.arrayBuffer();
     await playSoundBuffer(audioBuffer);
   } catch (error) {
@@ -55,7 +56,7 @@ export async function EXPERIMENTAL_speakTextStream(text: string, voiceId?: strin
   const nonEnglish = !(preferredLanguage?.toLowerCase()?.startsWith('en'));
 
   try {
-    const edgeResponse = await fetchApiElevenlabsSpeech(text, elevenLabsApiKey, voiceId || elevenLabsVoiceId, nonEnglish, true);
+    const edgeResponse = await frontendFetchAPIElevenLabsSpeech(text, elevenLabsApiKey, voiceId || elevenLabsVoiceId, nonEnglish, true);
 
     // if (!liveAudioPlayer)
     const liveAudioPlayer = new AudioLivePlayer();
@@ -72,7 +73,7 @@ export async function EXPERIMENTAL_speakTextStream(text: string, voiceId?: strin
 /**
  * Note: we have to use this client-side API instead of TRPC because of ArrayBuffers..
  */
-async function fetchApiElevenlabsSpeech(text: string, elevenLabsApiKey: string, elevenLabsVoiceId: string, nonEnglish: boolean, streaming: boolean): Promise<Response> {
+async function frontendFetchAPIElevenLabsSpeech(text: string, elevenLabsApiKey: string, elevenLabsVoiceId: string, nonEnglish: boolean, streaming: boolean): Promise<Response> {
   // NOTE: hardcoded 1000 as a failsafe, since the API will take very long and consume lots of credits for longer texts
   const speechInput: SpeechInputSchema = {
     elevenKey: elevenLabsApiKey,
@@ -82,7 +83,7 @@ async function fetchApiElevenlabsSpeech(text: string, elevenLabsApiKey: string,
     ...(streaming && { streaming: true, streamOptimization: 4 }),
   };
 
-  const response = await fetch('/api/elevenlabs/speech', {
+  const response = await frontendSideFetch('/api/elevenlabs/speech', {
     method: 'POST',
     headers: { 'Content-Type': 'application/json' },
     body: JSON.stringify(speechInput),
diff --git a/src/modules/llms/vendors/unifiedStreamingClient.ts b/src/modules/llms/vendors/unifiedStreamingClient.ts
index 259bf8eb9..a245fde91 100644
--- a/src/modules/llms/vendors/unifiedStreamingClient.ts
+++ b/src/modules/llms/vendors/unifiedStreamingClient.ts
@@ -1,4 +1,5 @@
 import { apiAsync } from '~/common/util/trpc.client';
+import { frontendSideFetch } from '~/common/util/clientFetchers';
 
 import type { ChatStreamingFirstOutputPacketSchema, ChatStreamingInputSchema } from '../server/llm.server.streaming';
 import type { DLLMId } from '../store-llms';
@@ -57,7 +58,7 @@ export async function unifiedStreamingClient