Aix: move code away from the client

This commit is contained in:
Enrico Ros
2024-09-19 21:37:09 -07:00
parent 13462b6b71
commit e407eba674
2 changed files with 87 additions and 89 deletions
+82
View File
@@ -0,0 +1,82 @@
// Future Testing Code
//
// const sampleFC: boolean = false; // aixModel.id.indexOf('models/gemini') === -1;
// const sampleCE: boolean = false; // aixModel.id.indexOf('models/gemini') !== -1;
// if (sampleFC) {
// aixChatGenerate.tools = [
// {
// type: 'function_call',
// function_call: {
// name: 'get_capybara_info_given_name_and_color',
// description: 'Get the info about capybaras. Call one each per name.',
// input_schema: {
// properties: {
// 'name': {
// type: 'string',
// description: 'The name of the capybara',
// enum: ['enrico', 'coolio'],
// },
// 'color': {
// type: 'string',
// description: 'The color of the capybara. Mandatory!!',
// },
// // 'story': {
// // type: 'string',
// // description: 'A fantastic story about the capybara. Please 10 characters maximum.',
// // },
// },
// required: ['name'],
// },
// },
// },
// ];
// }
// if (sampleCE) {
// aixChatGenerate.tools = [
// {
// type: 'code_execution',
// variant: 'gemini_auto_inline',
// },
// ];
// }
/**
* OpenAI-specific moderation check. This is a separate function, as it's not part of the
* streaming chat generation, but it's a pre-check before we even start the streaming.
*
* @returns null if the message is safe, or a string with the user message if it's not safe
*/
/* NOTE: NOT PORTED TO AIX YET, this was the former "LLMS" implementation
async function _openAIModerationCheck(access: OpenAIAccessSchema, lastMessage: VChatMessageIn | null): Promise<string | null> {
if (!lastMessage || lastMessage.role !== 'user')
return null;
try {
const moderationResult = await apiAsync.llmOpenAI.moderation.mutate({
access, text: lastMessage.content,
});
const issues = moderationResult.results.reduce((acc, result) => {
if (result.flagged) {
Object
.entries(result.categories)
.filter(([_, value]) => value)
.forEach(([key, _]) => acc.add(key));
}
return acc;
}, new Set<string>());
// if there's any perceived violation, we stop here
if (issues.size) {
const categoriesText = [...issues].map(c => `\`${c}\``).join(', ');
// do not proceed with the streaming request
      return `[Moderation] I am unable to provide a response to your query as it violated the following categories of the OpenAI usage policies: ${categoriesText}.\nFor further explanation please visit https://platform.openai.com/docs/guides/moderation/moderation`;
}
} catch (error: any) {
// as the moderation check was requested, we cannot proceed in case of error
return '[Issue] There was an error while checking for harmful content. ' + error?.toString();
}
// moderation check was successful
return null;
}
*/
+5 -89
View File
@@ -85,11 +85,11 @@ export async function aixChatGenerateContentStreaming(
try {
await aixLLMChatGenerateContent(llmId, aixChatContentGenerateRequest, aixCreateChatGenerateStreamContext(aixContextName, aixContextRef), true, abortSignal,
(update: AixLLMGenerateContentAccumulator, isDone: boolean) => {
({ fragments, generator }: AixLLMGenerateContentAccumulator, isDone: boolean) => {
// typesafe overwrite on all fields (Object.assign, but typesafe)
chatDMessageUpdate.fragments = update.fragments;
chatDMessageUpdate.generator = update.generator;
chatDMessageUpdate.fragments = fragments;
chatDMessageUpdate.generator = generator;
chatDMessageUpdate.pendingIncomplete = !isDone;
// throttle the update - and skip the last done message
@@ -131,8 +131,8 @@ export interface AixLLMGenerateContentAccumulator extends Pick<DMessage, 'fragme
}
/**
* Client side chat generation, with streaming.
* Inputs: llmId, and a well formatted chatGenerate request.
* Generation from an LLM Id,
*
* @throws Error if the LLM is not found or other misconfigurations, but handles most other errors internally.
*/
export async function aixLLMChatGenerateContent<TServiceSettings extends object = {}, TAccess extends AixAPI_Access = AixAPI_Access>(
@@ -309,87 +309,3 @@ async function _aix_LL_ChatGenerateContent(
// return the final accumulated message
return llAccumulator;
}
// Future Testing Code
//
// const sampleFC: boolean = false; // aixModel.id.indexOf('models/gemini') === -1;
// const sampleCE: boolean = false; // aixModel.id.indexOf('models/gemini') !== -1;
// if (sampleFC) {
// aixChatGenerate.tools = [
// {
// type: 'function_call',
// function_call: {
// name: 'get_capybara_info_given_name_and_color',
// description: 'Get the info about capybaras. Call one each per name.',
// input_schema: {
// properties: {
// 'name': {
// type: 'string',
// description: 'The name of the capybara',
// enum: ['enrico', 'coolio'],
// },
// 'color': {
// type: 'string',
// description: 'The color of the capybara. Mandatory!!',
// },
// // 'story': {
// // type: 'string',
// // description: 'A fantastic story about the capybara. Please 10 characters maximum.',
// // },
// },
// required: ['name'],
// },
// },
// },
// ];
// }
// if (sampleCE) {
// aixChatGenerate.tools = [
// {
// type: 'code_execution',
// variant: 'gemini_auto_inline',
// },
// ];
// }
/**
* OpenAI-specific moderation check. This is a separate function, as it's not part of the
* streaming chat generation, but it's a pre-check before we even start the streaming.
*
* @returns null if the message is safe, or a string with the user message if it's not safe
*/
/* NOTE: NOT PORTED TO AIX YET, this was the former "LLMS" implementation
async function _openAIModerationCheck(access: OpenAIAccessSchema, lastMessage: VChatMessageIn | null): Promise<string | null> {
if (!lastMessage || lastMessage.role !== 'user')
return null;
try {
const moderationResult = await apiAsync.llmOpenAI.moderation.mutate({
access, text: lastMessage.content,
});
const issues = moderationResult.results.reduce((acc, result) => {
if (result.flagged) {
Object
.entries(result.categories)
.filter(([_, value]) => value)
.forEach(([key, _]) => acc.add(key));
}
return acc;
}, new Set<string>());
// if there's any perceived violation, we stop here
if (issues.size) {
const categoriesText = [...issues].map(c => `\`${c}\``).join(', ');
// do not proceed with the streaming request
      return `[Moderation] I am unable to provide a response to your query as it violated the following categories of the OpenAI usage policies: ${categoriesText}.\nFor further explanation please visit https://platform.openai.com/docs/guides/moderation/moderation`;
}
} catch (error: any) {
// as the moderation check was requested, we cannot proceed in case of error
return '[Issue] There was an error while checking for harmful content. ' + error?.toString();
}
// moderation check was successful
return null;
}
*/