Compare commits
4 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 3a9a6b0273 | |||
| 3b51c39fc3 | |||
| 05293ba557 | |||
| d18d5323aa |
@@ -0,0 +1,33 @@
|
||||
# [Recommended for local deployments] Backend API key for OpenAI, so that users don't need one (UI > this > '')
|
||||
OPENAI_API_KEY=
|
||||
# [Not needed] Set the backend host for the OpenAI API, to enable platforms such as Helicone (UI > this > api.openai.com)
|
||||
OPENAI_API_HOST=
|
||||
# [Not needed] Sets the "OpenAI-Organization" header field to support organization users (UI > this > '')
|
||||
OPENAI_API_ORG_ID=
|
||||
|
||||
# [Optional] Enables ElevenLabs credentials on the server side - for optional text-to-speech
|
||||
ELEVENLABS_API_KEY=
|
||||
ELEVENLABS_API_HOST=
|
||||
ELEVENLABS_VOICE_ID=
|
||||
|
||||
# [Optional] Prodia credentials on the server side - for optional image generation
|
||||
PRODIA_API_KEY=
|
||||
|
||||
# [Optional, Search] Google Cloud API Key
|
||||
# https://console.cloud.google.com/apis/credentials -
|
||||
GOOGLE_CLOUD_API_KEY=
|
||||
# [Optional, Search] Google Custom/Programmable Search Engine ID
|
||||
# https://programmablesearchengine.google.com/
|
||||
GOOGLE_CSE_ID=
|
||||
|
||||
|
||||
# see docs/authentication.md to configure this section
|
||||
AUTH_TYPE=
|
||||
# [At least one required if AUTH_TYPE == credential] You may declare credentials for users from 0 to 99.
|
||||
AUTH_USER_0=
|
||||
AUTH_PASSWORD_0=
|
||||
|
||||
# [Required if AUTH_TYPE == basic and not in development mode] See: https://next-auth.js.org/configuration/options#nextauth_url
|
||||
NEXTAUTH_URL=
|
||||
# [Required if AUTH_TYPE == basic] See: https://next-auth.js.org/configuration/options#secret
|
||||
NEXTAUTH_SECRET=
|
||||
@@ -1,59 +1,3 @@
|
||||
{
|
||||
"parser": "@typescript-eslint/parser",
|
||||
"parserOptions": {
|
||||
"project": true
|
||||
},
|
||||
"plugins": [
|
||||
"@typescript-eslint"
|
||||
],
|
||||
"extends": [
|
||||
"next/core-web-vitals",
|
||||
"plugin:@typescript-eslint/recommended-type-checked",
|
||||
"plugin:@typescript-eslint/stylistic-type-checked"
|
||||
],
|
||||
"ignorePatterns": [
|
||||
"next.config.js",
|
||||
"node_modules/**/*",
|
||||
"out/**/*",
|
||||
".next/**/*",
|
||||
".vercel/**/*"
|
||||
],
|
||||
"rules": {
|
||||
"@typescript-eslint/no-explicit-any": "off",
|
||||
"@typescript-eslint/no-inferrable-types": "off",
|
||||
"@typescript-eslint/no-namespace": "off",
|
||||
"@typescript-eslint/no-redundant-type-constituents": "off",
|
||||
"@typescript-eslint/no-unsafe-argument": "off",
|
||||
"@typescript-eslint/no-unsafe-assignment": "off",
|
||||
"@typescript-eslint/no-unsafe-call": "off",
|
||||
"@typescript-eslint/no-unsafe-member-access": "off",
|
||||
"@typescript-eslint/no-unsafe-return": "off",
|
||||
"@typescript-eslint/prefer-nullish-coalescing": "off",
|
||||
"@typescript-eslint/unbound-method": "off",
|
||||
|
||||
"@typescript-eslint/array-type": "off",
|
||||
"@typescript-eslint/consistent-type-definitions": "off",
|
||||
"@typescript-eslint/consistent-type-imports": "off",
|
||||
/*"@typescript-eslint/consistent-type-imports": [
|
||||
"warn",
|
||||
{
|
||||
"prefer": "type-imports",
|
||||
"fixStyle": "separate-type-imports"
|
||||
}
|
||||
],*/
|
||||
"@typescript-eslint/no-unused-vars": [
|
||||
"warn",
|
||||
{
|
||||
"argsIgnorePattern": "^_"
|
||||
}
|
||||
],
|
||||
"@typescript-eslint/no-misused-promises": [
|
||||
2,
|
||||
{
|
||||
"checksVoidReturn": {
|
||||
"attributes": false
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
"extends": "next/core-web-vitals"
|
||||
}
|
||||
@@ -26,8 +26,7 @@ yarn-error.log*
|
||||
.pnpm-debug.log*
|
||||
|
||||
# local env files
|
||||
.env
|
||||
.env.*
|
||||
.env*.local
|
||||
|
||||
# vercel
|
||||
.vercel
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
{
|
||||
"singleAttributePerLine": false,
|
||||
"singleQuote": true,
|
||||
"trailingComma": "all",
|
||||
"endOfLine": "lf",
|
||||
"printWidth": 160
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ ENV PATH $PATH:/usr/src/app/node_modules/.bin
|
||||
|
||||
WORKDIR /usr/src/app
|
||||
|
||||
COPY package*.json prisma/ ./
|
||||
COPY package*.json ./
|
||||
|
||||
# CI and release builds should use npm ci to fully respect the lockfile.
|
||||
# Local development may use npm install for opportunistic package updates.
|
||||
@@ -34,7 +34,6 @@ WORKDIR /usr/src/app
|
||||
# Include only the release build and production packages.
|
||||
COPY --from=build-target /usr/src/app/node_modules node_modules
|
||||
COPY --from=build-target /usr/src/app/.next .next
|
||||
COPY --from=build-target /usr/src/app/public public
|
||||
|
||||
# Expose port 3000 for the application to listen on
|
||||
EXPOSE 3000
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
# `BIG-AGI` 🤖💬
|
||||
|
||||
Welcome to `big-AGI` 👋 your personal AGI application
|
||||
powered by OpenAI GPT-4 and beyond. Designed for smart humans and super-heroes,
|
||||
Welcome to `big-AGI`, FKA `nextjs-chatgpt-app`. 👋🎉
|
||||
Personal AGI App, powered by `OpenAI GPT-4` and beyond. Designed for smart humans and super-heroes,
|
||||
this responsive web app comes with Personas, Drawing, Code Execution, PDF imports, Voice support,
|
||||
data Rendering, AGI functions, chats and much more. Comes with plenty of `#big-AGI-energy` 🚀
|
||||
data Rendering, AGI functions, chats and more. Show your friends some `#big-AGI-energy` 🚀
|
||||
|
||||
[](https://big-agi.com)
|
||||
|
||||
@@ -11,26 +11,27 @@ Or fork & run on Vercel
|
||||
|
||||
[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-agi&env=OPENAI_API_KEY,OPENAI_API_HOST&envDescription=OpenAI%20KEY%20for%20your%20deployment.%20Set%20HOST%20only%20if%20non-default.)
|
||||
|
||||
## ✨ Key Features 👊
|
||||
## Useful 👊
|
||||
|
||||

|
||||
|
||||
- **AI Personas**
|
||||
- **Polished UI**: installable web app, mobile-friendly, token counters, etc.
|
||||
- **Fast UX**: Microphone, Camera OCR, Drag files, Voice Synthesis
|
||||
- **Models**: [OpenAI](https://platform.openai.com/overview), [Anthropic](https://www.anthropic.com/product), [Azure](https://oai.azure.com/), [OpenRouter](https://openrouter.ai/), [Local models](https://github.com/oobabooga/text-generation-webui), and more
|
||||
- **Private**: use your own API keys and self-host if you like
|
||||
- **Advanced**: PDF import & Summarization, code execution
|
||||
- **Integrations**: ElevenLabs, Helicone, Paste.gg, Prodia and more
|
||||
- Engaging AI Personas
|
||||
- Clean UX, w/ tokens counters
|
||||
- Privacy: user-owned API keys and localStorage
|
||||
- Human I/O: Advanced voice support (TTS, STT)
|
||||
- Machine I/O: PDF import & Summarization, code execution
|
||||
- Many more updates & integrations: ElevenLabs, Helicone, Paste.gg, Prodia
|
||||
- Coming up: automatic-AGI reasoning
|
||||
|
||||
## 💖 Support
|
||||
## Support 🙌
|
||||
|
||||
[//]: # ([](https://discord.gg/MkH4qj2Jp9))
|
||||
[](https://discord.gg/MkH4qj2Jp9)
|
||||
|
||||
* Enjoy the hosted open-source app on [big-AGI.com](https://big-agi.com)
|
||||
* [Chat with us](https://discord.gg/MkH4qj2Jp9)
|
||||
* Deploy your [fork](https://github.com/enricoros/big-agi/fork) for your friends and family
|
||||
* Enjoy the hosted open-source app on [big-AGI.com](https://get.big-agi.com)
|
||||
* [Chat with us](https://discord.gg/MkH4qj2Jp9). We just started!
|
||||
* Deploy your [fork](https://github.com/enricoros/big-agi/fork) and surprise your friends with big-GPT
|
||||
energy!
|
||||
* send PRs! ...
|
||||
🎭[Editing Personas](https://github.com/enricoros/big-agi/issues/35),
|
||||
🧩[Reasoning Systems](https://github.com/enricoros/big-agi/issues/36),
|
||||
@@ -39,84 +40,53 @@ Or fork & run on Vercel
|
||||
|
||||
<br/>
|
||||
|
||||
## 🧠 Latest Drops
|
||||
## Latest Drops 🚀
|
||||
|
||||
#### Next
|
||||
#### 🚨 May: mature #big-agi-energy
|
||||
|
||||
- **Cloudflare API Gateway** support
|
||||
- **Helicone for Anthropic** support
|
||||
- **Text Tools** - incl. highlight differences
|
||||
- 🎉 **Authentication** basic user authentication framework
|
||||
|
||||
#### 1.4.0: Sept/Oct: scale OUT
|
||||
#### April: #big-agi-energy grows
|
||||
|
||||
- **Expanded Model Support**: Azure and [OpenRouter](https://openrouter.ai/docs#models) models, including gpt-4-32k
|
||||
- **Share and clone** conversations with public links
|
||||
- Removed the 20 chats hard limit ([Ashesh3](https://github.com/enricoros/big-agi/pull/158))
|
||||
- Latex Rendering
|
||||
- Augmented Chat modes (Labs)
|
||||
|
||||
#### July/Aug: More Better Faster
|
||||
|
||||
- **Camera OCR** - real-world AI - take a picture of a text, and chat with it
|
||||
- **Anthropic models** support, e.g. Claude
|
||||
- **Backup/Restore** - save chats, and restore them later
|
||||
- **[Local model support with Oobabooga server](docs/config-local-oobabooga)** - run your own LLMs!
|
||||
- **Flatten conversations** - conversations summarizer with 4 modes
|
||||
- **Fork conversations** - create a new chat, to experiment with different endings
|
||||
- New commands: /s to add a System message, and /a for an Assistant message
|
||||
- New Chat modes: Write-only - just appends the message, without assistant response
|
||||
- Fix STOP generation - in sync with the Vercel team to fix a long-standing NextJS issue
|
||||
- Fixes on the HTML block - particularly useful to see error pages
|
||||
|
||||
#### June: scale UP
|
||||
|
||||
- **[New OpenAI Models](https://openai.com/blog/function-calling-and-other-api-updates) support** - 0613 models, including 16k and 32k
|
||||
- **Cleaner UI** - with rationalized Settings, Modals, and Configurators
|
||||
- **Dynamic Models Configurator** - easy connection with different model vendors
|
||||
- **Multiple Model Vendors Support** framework to support many LLM vendors
|
||||
- **Per-model Options** (temperature, tokens, etc.) for fine-tuning AI behavior to your needs
|
||||
- Support for GPT-4-32k
|
||||
- Improved Dialogs and Messages
|
||||
- Much Enhanced DX: TRPC integration, modularization, pluggable UI, etc
|
||||
|
||||
#### April / May: more #big-agi-energy
|
||||
|
||||
- **[Google Search](docs/pixels/feature_react_google.png)** active in ReAct - add your keys to Settings > Google
|
||||
Search
|
||||
- **[Reason+Act](docs/pixels/feature_react_turn_on.png)** preview feature - activate with 2-taps on the 'Chat' button
|
||||
- **[Image Generation](docs/pixels/feature_imagine_command.png)** using Prodia (BYO Keys) - /imagine - or menu option
|
||||
- **[Voice Synthesis](docs/pixels/feature_voice_1.png)** 📣 with ElevenLabs, including selection of custom voices
|
||||
- **[Precise Token Counter](docs/pixels/feature_token_counter.png)** 📈 extra-useful to pack the context window
|
||||
- **[Install Mobile APP](docs/pixels/feature_pwa.png)** 📲 looks like native (@harlanlewis)
|
||||
- **[UI language](docs/pixels/feature_language.png)** with auto-detect, and future app language! (@tbodyston)
|
||||
- **PDF Summarization** 🧩🤯 - ask questions to a PDF! (@fredliubojin)
|
||||
- **Code Execution: [Codepen](https://codepen.io/)/[Replit](https://replit.com/)** 💻 (@harlanlewis)
|
||||
- **[SVG Drawing](docs/pixels/feature_svg_drawing.png)** - draw with AI 🎨
|
||||
- Chats: multiple chats, AI titles, Import/Export, Selection mode
|
||||
- Rendering: Markdown, SVG, improved Code blocks
|
||||
- Integrations: OpenAI organization ID
|
||||
- [Cloudflare deployment instructions](docs/deploy-cloudflare.md),
|
||||
- 🎉 **[Google Search](docs/pixels/feature_react_google.png)** active in ReAct - add your keys to Settings > Google Search
|
||||
- 🎉 **[Reason+Act](docs/pixels/feature_react_turn_on.png)** preview feature - activate with 2-taps on the 'Chat' button
|
||||
- 🎉 **[Image Generation](docs/pixels/feature_imagine_command.png)** using Prodia (BYO Keys) - /imagine - or menu option
|
||||
- 🎉 **[Voice Synthesis](docs/pixels/feature_voice_1.png)** 📣 with ElevenLabs, including selection of custom voices
|
||||
- 🎉 **[Precise Token Counter](docs/pixels/feature_token_counter.png)** 📈 extra-useful to pack the context window
|
||||
- 🎉 **[Install Mobile APP](docs/pixels/feature_pwa.png)** 📲 looks like native (@harlanlewis)
|
||||
- 🎉 **[UI language](docs/pixels/feature_language.png)** with auto-detect, and future app language! (@tbodyston)
|
||||
- 🎉 **PDF Summarization** 🧩🤯 - ask questions to a PDF! (@fredliubojin)
|
||||
- 🎉 **Code Execution: [Codepen](https://codepen.io/)/[Replit](https://replit.com/)** 💻 (@harlanlewis)
|
||||
- 🎉 **[SVG Drawing](docs/pixels/feature_svg_drawing.png)** - draw with AI 🎨
|
||||
- 🎉 Chats: multiple chats, AI titles, Import/Export, Selection mode
|
||||
- 🎉 Rendering: Markdown, SVG, improved Code blocks
|
||||
- 🎉 Integrations: OpenAI organization ID
|
||||
- 🎉 [Cloudflare deployment instructions](docs/deploy-cloudflare.md),
|
||||
[awesome-agi](https://github.com/enricoros/awesome-agi)
|
||||
- [Typing Avatars](docs/pixels/gif_typing_040123.gif) ⌨️
|
||||
- 🎉 [Typing Avatars](docs/pixels/gif_typing_040123.gif) ⌨️
|
||||
<!-- p><a href="docs/pixels/gif_typing_040123.gif"><img src="docs/pixels/gif_typing_040123.gif" width='700' alt="New Typing Avatars"/></a></p -->
|
||||
|
||||
#### March: first release
|
||||
|
||||
- **[AI Personas](docs/pixels/feature_purpose_two.png)** - including Code, Science, Corporate, and Chat 🎭
|
||||
- **Privacy**: user-owned API keys 🔑 and localStorage 🛡️
|
||||
- **Context** - Attach or [Drag & Drop files](docs/pixels/feature_drop_target.png) to add them to the prompt 📁
|
||||
- **Syntax highlighting** - for multiple languages 🌈
|
||||
- **Code Execution: Sandpack** -
|
||||
[now on branch]((https://github.com/enricoros/big-agi/commit/f678a0d463d5e9cf0733f577e11bd612b7902d89)) `variant-code-execution`
|
||||
- Chat with GPT-4 and 3.5 Turbo 🧠💨
|
||||
- Real-time streaming of AI responses ⚡
|
||||
- **Voice Input** 🎙️ - works great on Chrome / Windows
|
||||
- Integration: **[Paste.gg](docs/pixels/feature_paste_gg.png)** integration for chat sharing 📥
|
||||
- Integration: **[Helicone](https://www.helicone.ai/)** integration for API observability 📊
|
||||
- 🎉 **[AI Personas](docs/pixels/feature_purpose_two.png)** - including Code, Science, Corporate, and Chat 🎭
|
||||
- 🎉 **Privacy**: user-owned API keys 🔑 and localStorage 🛡️
|
||||
- 🎉 **Context** - Attach or [Drag & Drop files](docs/pixels/feature_drop_target.png) to add them to the prompt 📁
|
||||
- 🎉 **Syntax highlighting** - for multiple languages 🌈
|
||||
- 🎉 **Code Execution: Sandpack
|
||||
** - [now on branch]((https://github.com/enricoros/big-agi/commit/f678a0d463d5e9cf0733f577e11bd612b7902d89)) `variant-code-execution`
|
||||
- 🎉 Chat with GPT-4 and 3.5 Turbo 🧠💨
|
||||
- 🎉 Real-time streaming of AI responses ⚡
|
||||
- 🎉 **Voice Input** 🎙️ - works great on Chrome / Windows
|
||||
- 🎉 Integration: **[Paste.gg](docs/pixels/feature_paste_gg.png)** integration for chat sharing 📥
|
||||
- 🎉 Integration: **[Helicone](https://www.helicone.ai/)** integration for API observability 📊
|
||||
- 🌙 Dark model - Wide mode ⛶
|
||||
|
||||
<br/>
|
||||
|
||||
### Basic Authentication for public deployments 🔐
|
||||
|
||||
To protect the web app owner from incurring unauthorized costs when deploying the app with a backend API key (`OPENAI_API_KEY`), you can [set up basic authentication.](/docs/authentication.md).
|
||||
|
||||
## Why this? 💡
|
||||
|
||||
Because the official Chat ___lacks important features___, is ___more limited than the api___, at times
|
||||
@@ -127,7 +97,7 @@ with features that matter to them.
|
||||
|
||||

|
||||
|
||||
## Develop 🧩
|
||||
## Code 🧩
|
||||
|
||||

|
||||

|
||||
@@ -147,46 +117,10 @@ Now the app should be running on `http://localhost:3000`
|
||||
### Integrations:
|
||||
|
||||
* [ElevenLabs](https://elevenlabs.io/) Voice Synthesis (bring your own voice too) - Settings > Text To Speech
|
||||
* [Helicone](https://www.helicone.ai/) LLM Observability Platform - Models > OpenAI > Advanced > API Host: 'oai.hconeai.com'
|
||||
* [Helicone](https://www.helicone.ai/) LLM Observability Platform - Settings > Advanced > API Host: 'oai.hconeai.com'
|
||||
* [Paste.gg](https://paste.gg/) Paste Sharing - Chat Menu > Share via paste.gg
|
||||
* [Prodia](https://prodia.com/) Image Generation - Settings > Image Generation > Api Key & Model
|
||||
|
||||
## Deploy with Docker 🐳
|
||||
|
||||
For more detailed information on deploying with Docker, please refer to the [docker deployment documentation](docs/deploy-docker.md).
|
||||
|
||||
### 🔧 Locally built image
|
||||
|
||||
> Firstly, write all your API keys and env vars to an `.env` file, and make sure the env file is using *both build and run*.
|
||||
> See [docs/environment-variables.md](docs/environment-variables.md) for a list of all environment variables.
|
||||
|
||||
```bash
|
||||
|
||||
```bash
|
||||
docker build -t big-agi .
|
||||
docker run --detach 'big-agi'
|
||||
```
|
||||
|
||||
### Pre-built image
|
||||
|
||||
> Warning: the UI will still be asking for keys, as the image was built without the API keys
|
||||
|
||||
```bash
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
## Deploy with Cloudflare Pages ☁️
|
||||
|
||||
Please refer to the [Cloudflare deployment documentation](docs/deploy-cloudflare.md).
|
||||
|
||||
## Deploy with Vercel 🚀
|
||||
|
||||
Create your GitHub fork, create a Vercel project over that fork, and deploy it. Or press the button below for convenience.
|
||||
|
||||
[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-agi&env=OPENAI_API_KEY,OPENAI_API_HOST&envDescription=OpenAI%20KEY%20for%20your%20deployment.%20Set%20HOST%20only%20if%20non-default.)
|
||||
|
||||
|
||||
|
||||
<br/>
|
||||
|
||||
This project is licensed under the MIT License.
|
||||
@@ -198,4 +132,4 @@ This project is licensed under the MIT License.
|
||||
|
||||
[//]: # ([](https://github.com/enricoros/big-agi/issues))
|
||||
|
||||
Made with 💙
|
||||
Made with 💙
|
||||
@@ -1,52 +0,0 @@
|
||||
import { createEmptyReadableStream, safeErrorString, serverFetchOrThrow } from '~/server/wire';
|
||||
|
||||
import { elevenlabsAccess, elevenlabsVoiceId, ElevenlabsWire, speechInputSchema } from '~/modules/elevenlabs/elevenlabs.router';
|
||||
|
||||
|
||||
/* NOTE: Why does this file even exist?
|
||||
|
||||
This file is a workaround for a limitation in tRPC; it does not support ArrayBuffer responses,
|
||||
and that would force us to use base64 encoding for the audio data, which would be a waste of
|
||||
bandwidth. So instead, we use this file to make the request to ElevenLabs, and then return the
|
||||
response as an ArrayBuffer. Unfortunately this means duplicating the code in the server-side
|
||||
and client-side vs. the tRPC implementation. So at lease we recycle the input structures.
|
||||
|
||||
*/
|
||||
const handler = async (req: Request) => {
|
||||
try {
|
||||
|
||||
// construct the upstream request
|
||||
const {
|
||||
elevenKey, text, voiceId, nonEnglish,
|
||||
streaming, streamOptimization,
|
||||
} = speechInputSchema.parse(await req.json());
|
||||
const path = `/v1/text-to-speech/${elevenlabsVoiceId(voiceId)}` + (streaming ? `/stream?optimize_streaming_latency=${streamOptimization || 1}` : '');
|
||||
const { headers, url } = elevenlabsAccess(elevenKey, path);
|
||||
const body: ElevenlabsWire.TTSRequest = {
|
||||
text: text,
|
||||
...(nonEnglish && { model_id: 'eleven_multilingual_v1' }),
|
||||
};
|
||||
|
||||
// elevenlabs POST
|
||||
const upstreamResponse: Response = await serverFetchOrThrow(url, 'POST', headers, body);
|
||||
|
||||
// NOTE: this is disabled, as we pass-through what we get upstream for speed, as it is not worthy
|
||||
// to wait for the entire audio to be downloaded before we send it to the client
|
||||
// if (!streaming) {
|
||||
// const audioArrayBuffer = await upstreamResponse.arrayBuffer();
|
||||
// return new NextResponse(audioArrayBuffer, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
|
||||
// }
|
||||
|
||||
// stream the data to the client
|
||||
const audioReadableStream = upstreamResponse.body || createEmptyReadableStream();
|
||||
return new Response(audioReadableStream, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
|
||||
|
||||
} catch (error: any) {
|
||||
const fetchOrVendorError = safeErrorString(error) + (error?.cause ? ' · ' + error.cause : '');
|
||||
console.log(`api/elevenlabs/speech: fetch issue: ${fetchOrVendorError}`);
|
||||
return new Response(`[Issue] elevenlabs: ${fetchOrVendorError}`, { status: 500 });
|
||||
}
|
||||
};
|
||||
|
||||
export const runtime = 'edge';
|
||||
export { handler as POST };
|
||||
@@ -1,2 +0,0 @@
|
||||
export const runtime = 'edge';
|
||||
export { openaiStreamingRelayHandler as POST } from '~/modules/llms/transports/server/openai/openai.streaming';
|
||||
@@ -1,19 +0,0 @@
|
||||
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
|
||||
|
||||
import { appRouterEdge } from '~/server/api/trpc.router';
|
||||
import { createTRPCFetchContext } from '~/server/api/trpc.server';
|
||||
|
||||
const handlerEdgeRoutes = (req: Request) =>
|
||||
fetchRequestHandler({
|
||||
router: appRouterEdge,
|
||||
endpoint: '/api/trpc-edge',
|
||||
req,
|
||||
createContext: createTRPCFetchContext,
|
||||
onError:
|
||||
process.env.NODE_ENV === 'development'
|
||||
? ({ path, error }) => console.error(`❌ tRPC-edge failed on ${path ?? '<no-path>'}:`, error)
|
||||
: undefined,
|
||||
});
|
||||
|
||||
export const runtime = 'edge';
|
||||
export { handlerEdgeRoutes as GET, handlerEdgeRoutes as POST };
|
||||
@@ -1,19 +0,0 @@
|
||||
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
|
||||
|
||||
import { appRouterNode } from '~/server/api/trpc.router';
|
||||
import { createTRPCFetchContext } from '~/server/api/trpc.server';
|
||||
|
||||
const handlerNodeRoutes = (req: Request) =>
|
||||
fetchRequestHandler({
|
||||
router: appRouterNode,
|
||||
endpoint: '/api/trpc-node',
|
||||
req,
|
||||
createContext: createTRPCFetchContext,
|
||||
onError:
|
||||
process.env.NODE_ENV === 'development'
|
||||
? ({ path, error }) => console.error(`❌ tRPC-node failed on ${path ?? '<no-path>'}:`, error)
|
||||
: undefined,
|
||||
});
|
||||
|
||||
export const runtime = 'nodejs';
|
||||
export { handlerNodeRoutes as GET, handlerNodeRoutes as POST };
|
||||
@@ -1,10 +0,0 @@
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
big-agi:
|
||||
image: ghcr.io/enricoros/big-agi:main
|
||||
ports:
|
||||
- "3000:3000"
|
||||
env_file:
|
||||
- .env
|
||||
command: [ "next", "start", "-p", "3000" ]
|
||||
@@ -0,0 +1,37 @@
|
||||
### Authentication with NextAuth.js 🔐
|
||||
|
||||
To protect the web app owner from incurring unauthorized costs when deploying the app with a backend API
|
||||
key (`OPENAI_API_KEY`), you can set up basic authentication using [NextAuth.js](https://next-auth.js.org/).
|
||||
|
||||
#### Configuration
|
||||
|
||||
Update your `.env` file or Environment Variables with the following variables:
|
||||
|
||||
```
|
||||
# [Optional] Set the authentication type to "credential" to enable basic username/password authentication
|
||||
AUTH_TYPE=credential
|
||||
|
||||
# [Required if AUTH_TYPE == credential] Define credentials for users - you can declare up to 100 users
|
||||
AUTH_USER_0=your_username
|
||||
AUTH_PASSWORD_0=your_password
|
||||
AUTH_USER_1=...
|
||||
AUTH_PASSWORD_1=...
|
||||
...
|
||||
|
||||
# [Required if AUTH_TYPE == credential and *not in development mode*] See: https://next-auth.js.org/configuration/options#nextauth_url
|
||||
NEXTAUTH_URL=https://example.com
|
||||
|
||||
# [Required if AUTH_TYPE == credential] See: https://next-auth.js.org/configuration/options#secret
|
||||
NEXTAUTH_SECRET=your_nextauth_secret
|
||||
```
|
||||
|
||||
You can add multiple users by incrementing the index, e.g., `AUTH_USER_1`, `AUTH_PASSWORD_1`, and so on. They do not
|
||||
need to be contiguous.
|
||||
|
||||
#### Usage
|
||||
|
||||
Once you have set up basic authentication, users will be prompted to enter their credentials when accessing the app.
|
||||
Only users with valid credentials will be able to use the app and make requests to the OpenAI API.
|
||||
|
||||
For more information on configuring and using NextAuth.js, refer to
|
||||
the [official documentation](https://next-auth.js.org/).
|
||||
@@ -1,87 +0,0 @@
|
||||
# Configuring Azure OpenAI Service with `big-AGI`
|
||||
|
||||
The entire procedure takes about 5 minutes and involves creating an Azure account,
|
||||
setting up the Azure OpenAI service, deploying models, and configuring `big-AGI`
|
||||
to access these models.
|
||||
|
||||
Please note that Azure operates on a 'pay-as-you-go' pricing model and requires
|
||||
credit card information tied to a 'subscription' to the Azure service.
|
||||
|
||||
## Configuring `big-AGI`
|
||||
|
||||
If you have an `API Endpoint` and `API Key`, you can configure big-AGI as follows:
|
||||
|
||||
1. Launch the `big-AGI` application
|
||||
2. Go to the **Models** settings
|
||||
3. Add a Vendor and select **Azure OpenAI**
|
||||
- Enter the Endpoint (e.g., 'https://your-openai-api-1234.openai.azure.com/')
|
||||
- Enter the API Key (e.g., 'fd5...........................ba')
|
||||
|
||||
The deployed models are now available in the application. If you don't have a configured
|
||||
Azure OpenAI service instance, continue with the next section.
|
||||
|
||||
## Setting Up Azure
|
||||
|
||||
### Step 1: Azure Account & Subscription
|
||||
|
||||
1. Create an account on [azure.microsoft.com](https://azure.microsoft.com/en-us/)
|
||||
2. Go to the [Azure Portal](https://portal.azure.com/)
|
||||
3. Click on **Create a resource** in the top left corner
|
||||
4. Search for **Subscription** and select **[Create Subscription](https://portal.azure.com/#create/Microsoft.Subscription)**
|
||||
- Fill in the required fields and click on **Create**
|
||||
- Note down the **Subscription ID** (e.g., `12345678-1234-1234-1234-123456789012`)
|
||||
|
||||
### Step 2: Apply for Azure OpenAI Service
|
||||
|
||||
We'll now be creating "OpenAI"-specific resources on Azure. This requires to 'apply',
|
||||
and acceptance should be quick (even as low as minutes).
|
||||
|
||||
1. Visit [Azure OpenAI Service](https://aka.ms/azure-openai)
|
||||
2. Click on **Apply for access**
|
||||
- Fill in the required fields (including the subscription ID) and click on **Apply**
|
||||
|
||||
Once your application is accepted, you can create OpenAI resources on Azure.
|
||||
|
||||
### Step 3: Create Azure OpenAI Resource
|
||||
|
||||
For more information, see [Azure: Create and deploy OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal)
|
||||
|
||||
1. Click on **Create a resource** in the top left corner
|
||||
2. Search for **OpenAI** and select **[Create OpenAI](https://portal.azure.com/#create/Microsoft.CognitiveServicesOpenAI)**
|
||||
3. Fill in the necessary fields on the **Create OpenAI** page
|
||||

|
||||
- Select the subscription
|
||||
- Select a resource group or create a new one
|
||||
- Select the region. Note that the region determines the available models.
|
||||
> For instance, **Canada East** offers GPT-4-32k models, For the full list, see [GPT-4 models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models)
|
||||
- Name the service (e.g., `your-openai-api-1234`)
|
||||
- Select a pricing tier (e.g., `S0` for standard)
|
||||
- Select: "All networks, including the internet, can access this resource."
|
||||
- Click on **Review + create** and then **Create**
|
||||
|
||||
After creating the resource, you can access the API Keys and Endpoints. At any point, you can go to
|
||||
the OpenAI Service instance page to get this information.
|
||||
|
||||
- Click on **Go to resource**
|
||||
- Click on **Develop**
|
||||
- Copy the `Endpoint`, called "Language API", e.g. 'https://your-openai-api-1234.openai.azure.com/'
|
||||
- Copy `KEY 1`
|
||||
|
||||
### Step 4: Deploy Models
|
||||
|
||||
By default, Azure OpenAI resource instances don't have models available. You need to deploy the models you want to use.
|
||||
|
||||
1. Click on **Model Deployments > Manage Deployments**
|
||||
2. Click on **+Create New Deployment**
|
||||

|
||||
- Select the model you want to deploy
|
||||
- Optionally select a version
|
||||
- name the model, e.g., `gpt4-32k-0613`
|
||||
|
||||
Repeat as necessary for each model you want to deploy.
|
||||
|
||||
## Resources
|
||||
|
||||
- [Azure OpenAI Service Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/)
|
||||
- [Guide: Create an Azure OpenAI Resource](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal)
|
||||
- [Azure OpenAI Models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models)
|
||||
@@ -1,34 +0,0 @@
|
||||
# Local LLM integration with `localai`
|
||||
|
||||
Integrate local Large Language Models (LLMs) with [LocalAI](https://localai.io).
|
||||
|
||||
_Last updated Nov 7, 2023_
|
||||
|
||||
## Instructions
|
||||
|
||||
### LocalAI installation and configuration
|
||||
|
||||
Follow the guide at: https://localai.io/basics/getting_started/
|
||||
|
||||
For instance with [Use luna-ai-llama2 with docker compose](https://localai.io/basics/getting_started/#example-use-luna-ai-llama2-model-with-docker-compose):
|
||||
|
||||
- clone LocalAI
|
||||
- get the model
|
||||
- copy the prompt template
|
||||
- start docker
|
||||
- -> the server will be listening on `localhost:8080`
|
||||
- verify it works by going to [http://localhost:8080/v1/models](http://localhost:8080/v1/models) on
|
||||
your browser and seeing listed the model you downloaded
|
||||
|
||||
### Integrating LocalAI with big-AGI
|
||||
|
||||
- Go to Models > Add a model source of type: **LocalAI**
|
||||
- Enter the address: `http://localhost:8080` (default)
|
||||
- If running remotely, replace localhost with the IP of the machine. Make sure to use the **IP:Port** format
|
||||
- Load the models
|
||||
- Select model & Chat
|
||||
|
||||
> NOTE: LocalAI does not list details about the mdoels. Every model is assumed to be
|
||||
> capable of chatting, and with a context window of 4096 tokens.
|
||||
> Please update the [src/modules/llms/transports/server/openai/models.data.ts](../src/modules/llms/transports/server/openai/models.data.ts)
|
||||
> file with the mapping information between LocalAI model IDs and names/descriptions/tokens, etc.
|
||||
@@ -1,54 +0,0 @@
|
||||
# Local LLM Integration with `text-web-ui` :llama:
|
||||
|
||||
Integrate local Large Language Models (LLMs) with
|
||||
[oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui),
|
||||
a specialized interface that includes a custom variant of the OpenAI API for a smooth integration process.
|
||||
|
||||
_Last updated on Nov 7, 2023_
|
||||
|
||||
### Components
|
||||
|
||||
The implementation of local LLMs involves the following components:
|
||||
|
||||
* **text-generation-webui**: A Python application with a Gradio web UI for operating Large Language Models.
|
||||
* **Local Large Language Models "LLMs"**: Use large language models on your personal computer with consumer-grade GPUs or CPUs.
|
||||
* **big-AGI**: An LLM UI that offers features such as Personas, OCR, Voice Support, Code Execution, AGI functions, and more.
|
||||
|
||||
## Instructions
|
||||
|
||||
This guide assumes that **big-AGI** is already installed on your system. Note that the text-generation-webui IP address must be accessible from the server running **big-AGI**.
|
||||
|
||||
### Text-web-ui Installation & Configuration:
|
||||
|
||||
1. Install [text-generation-webui](https://github.com/oobabooga/text-generation-webui#Installation).
|
||||
- Download the one-click installer, extract it, and double-click on "start" - ~10 minutes
|
||||
- Close it afterwards as we need to modify the startup flags
|
||||
2. Enable the **openai extension**
|
||||
- Edit `CMD_FLAGS.txt`
|
||||
- Make sure that `--listen --extensions openai` is present and uncommented
|
||||
3. Restart text-generation-webui
|
||||
- Double-click on "start"
|
||||
- You should see something like:
|
||||
```
|
||||
2023-11-07 21:24:26 INFO:Loading the extension "openai"...
|
||||
2023-11-07 21:24:27 INFO:OpenAI compatible API URL:
|
||||
|
||||
http://0.0.0.0:5000/v1
|
||||
```
|
||||
- The OpenAI API is now running on port 5000, on both localhost (127.0.0.1) and your network IP address
|
||||
4. Load your first model
|
||||
- Open the text-generation-webui at [127.0.0.1:7860](http://127.0.0.1:7860/)
|
||||
- Switch to the **Model** tab
|
||||
- Download, for instance, `TheBloke/Llama-2-7b-Chat-GPTQ:gptq-4bit-32g-actorder_True` - 4.3 GB
|
||||
- Select the model once it's loaded
|
||||
|
||||
### Integrating text-web-ui with big-AGI:
|
||||
1. Integrating Text-Generation-WebUI with big-AGI:
|
||||
- Go to Models > Add a model source of type: **Oobabooga**
|
||||
- Enter the address: `http://127.0.0.1:5000`
|
||||
- If running remotely, replace 127.0.0.1 with the IP of the machine. Make sure to use the **IP:Port** format
|
||||
- Load the models
|
||||
- The active model must be selected and LOADED on the text-generation-webui as it doesn't support model switching or parallel requests.
|
||||
- Select model & Chat
|
||||
|
||||
Enjoy the privacy and flexibility of local LLMs with `big-AGI` and `text-generation-webui`!
|
||||
@@ -1,81 +0,0 @@
|
||||
# `Ollama` x `big-AGI` :llama:
|
||||
|
||||
This guide helps you connect [Ollama](https://ollama.ai) [models](https://ollama.ai/library) to
|
||||
[big-AGI](https://big-agi.com) for a professional AI/AGI operation and a good UI/Conversational
|
||||
experience. The integration brings the popular big-AGI features to Ollama, including: voice chats,
|
||||
editing tools, models switching, personas, and more.
|
||||
|
||||

|
||||
|
||||
## Quick Integration Guide
|
||||
|
||||
1. **Ensure Ollama API Server is Running**: Before starting, make sure your Ollama API server is up and running.
|
||||
2. **Add Ollama as a Model Source**: In `big-AGI`, navigate to the **Models** section, select **Add a model source**, and choose **Ollama**.
|
||||
3. **Enter Ollama Host URL**: Provide the Ollama Host URL where the API server is accessible (e.g., `http://localhost:11434`).
|
||||
4. **Refresh Model List**: Once connected, refresh the list of available models to include the Ollama models.
|
||||
5. **Start Using AI Personas**: Select an Ollama model and begin interacting with AI personas tailored to your needs.
|
||||
|
||||
### Ollama: installation and Setup
|
||||
|
||||
For detailed instructions on setting up the Ollama API server, please refer to the
|
||||
[Ollama download page](https://ollama.ai/download) and [instructions for linux](https://github.com/jmorganca/ollama/blob/main/docs/linux.md).
|
||||
|
||||
### Visual Guide
|
||||
|
||||
* After adding the `Ollama` model vendor, entering the IP address of an Ollama server, and refreshing models:
|
||||
<img src="pixels/config-ollama-1-models.png" alt="config-local-ollama-1-models.png" style="max-width: 320px;">
|
||||
|
||||
* The `Ollama` admin panel, with the `Pull` button highlighted, after pulling the "Yi" model:
|
||||
<img src="pixels/config-ollama-2-admin-pull.png" alt="config-local-ollama-2-admin-pull.png" style="max-width: 320px;">
|
||||
|
||||
* You can now switch model/persona dynamically and text/voice chat with the models:
|
||||
<img src="pixels/config-ollama-3-chat.png" alt="config-local-ollama-3-chat.png" style="max-width: 320px;">
|
||||
|
||||
### Advanced: Model parameters
|
||||
|
||||
For users who wish to delve deeper into advanced settings, `big-AGI` offers additional configuration options, such
|
||||
as the model temperature, maximum tokens, etc.
|
||||
|
||||
### Advanced: Ollama under a reverse proxy
|
||||
|
||||
You can elegantly expose your Ollama server to the internet (and thus make it easier to use from your server-side
|
||||
big-AGI deployments) by exposing it on an http/https URL, such as: `https://yourdomain.com/ollama`
|
||||
|
||||
On Ubuntu Servers, you will need to install `nginx` and configure it to proxy requests to Ollama.
|
||||
|
||||
```bash
|
||||
sudo apt update
|
||||
sudo apt install nginx
|
||||
sudo apt install certbot python3-certbot-nginx
|
||||
sudo certbot --nginx -d yourdomain.com
|
||||
```
|
||||
|
||||
Then, edit the nginx configuration file `/etc/nginx/sites-enabled/default` and add the following block:
|
||||
|
||||
```nginx
|
||||
location /ollama/ {
|
||||
proxy_pass http://localhost:11434;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection 'upgrade';
|
||||
proxy_set_header Host $host;
|
||||
proxy_cache_bypass $http_upgrade;
|
||||
|
||||
# Disable buffering for the streaming responses
|
||||
proxy_buffering off;
|
||||
}
|
||||
```
|
||||
|
||||
Reach out to our community if you need help with this.
|
||||
|
||||
### Community and Support
|
||||
|
||||
Join our community to share your experiences, get help, and discuss best practices:
|
||||
|
||||
[](https://discord.gg/MkH4qj2Jp9)
|
||||
|
||||
|
||||
---
|
||||
|
||||
`big-AGI` is committed to providing a powerful, intuitive, and privacy-respecting AI experience.
|
||||
We are excited for you to explore the possibilities with Ollama models. Happy creating!
|
||||
@@ -1,31 +0,0 @@
|
||||
# OpenRouter Configuration
|
||||
|
||||
[OpenRouter](https://openrouter.ai) is a standalone, premium service
|
||||
that provides access to <Link href='https://openrouter.ai/docs#models' target='_blank'>exclusive AI models</Link>
|
||||
such as GPT-4 32k, Claude, and more. These models are typically not available to the public.
|
||||
This document details the process of integrating OpenRouter with big-AGI.
|
||||
|
||||
### 1. OpenRouter Account Setup and API Key Generation
|
||||
|
||||
1. Register for an OpenRouter account at [openrouter.ai](https://openrouter.ai) by clicking on Sign In > Continue with Google.
|
||||
2. Top up your account (minimum $5) by navigating to [openrouter.ai/account](https://openrouter.ai/account) > Add Credits > Pay with Stripe.
|
||||
3. Generate an API key at [openrouter.ai/keys](https://openrouter.ai/keys) > API Key > Generate API Key.
|
||||
- **Remember to copy and securely store your API key** - the key will not be displayed again and will be in the format `sk-or-v1-...`.
|
||||
- Keep the key confidential as it can be used to expend your credits.
|
||||
|
||||
### 2. Integrating OpenRouter with big-AGI
|
||||
|
||||
1. Launch big-AGI, and navigate to the AI **Models** settings.
|
||||
2. Add a Vendor, and select **OpenRouter**.
|
||||

|
||||
3. Input the API key into the **OpenRouter API Key** field, and load the Models.
|
||||

|
||||
4. OpenAI GPT4-32k and other models will now be accessible and selectable in the application.
|
||||
|
||||
### Pricing
|
||||
|
||||
OpenRouter independently manages its service and pricing and is not affiliated with big-AGI.
|
||||
For more detailed information, please visit [this page](https://openrouter.ai/docs#models).
|
||||
|
||||
Please note that running large models such as GPT-4 32k can be costly and may rapidly consume
|
||||
credits - a single prompt may cost $1 or more, at the time of writing.
|
||||
@@ -1,68 +1,55 @@
|
||||
# Deploying a Next.js App on Cloudflare Pages
|
||||
# Deploying Next.js App on Cloudflare Pages
|
||||
|
||||
> WARNING: Cloudflare Pages does not support traditional NodeJS runtimes, but only Edge Runtime functions.
|
||||
>
|
||||
> In this project we use Prisma connected to serverless Postgres, which at the moment cannot run on
|
||||
> edge functions, so we cannot deploy this project on Cloudflare Pages.
|
||||
>
|
||||
> Workaround: Step 3.4. has been added below, to DELETE the NodeJS traditional runtime - which means that some
|
||||
> parts of this application will not work.
|
||||
> - [Side effects](https://github.com/enricoros/big-agi/blob/main/src/apps/chat/trade/server/trade.router.ts#L19):
|
||||
> Sharing functionality to DB, import from ChatGPT share, and posting to Paste.GG will not work
|
||||
> - See [Issue 174](https://github.com/enricoros/big-agi/issues/174).
|
||||
>
|
||||
> Longer term: follow [prisma/prisma: Support Edge Function deployments](https://github.com/prisma/prisma/issues/21394)
|
||||
> and convert the Node runtime to Edge runtime once Prisma supports it.
|
||||
Follow these steps to deploy your Next.js app on Cloudflare Pages. This guide is based on
|
||||
the [official Cloudflare developer documentation](https://developers.cloudflare.com/pages/framework-guides/deploy-a-nextjs-site/),
|
||||
with a few additional steps.
|
||||
|
||||
This guide provides steps to deploy your Next.js app on Cloudflare Pages.
|
||||
It is based on the [official Cloudflare developer documentation](https://developers.cloudflare.com/pages/framework-guides/deploy-a-nextjs-site/),
|
||||
with some additional steps.
|
||||
## Step 1: Fork the Repository
|
||||
|
||||
## Step 1: Repository Forking
|
||||
Fork the repository to your own GitHub account.
|
||||
|
||||
Fork the repository to your personal GitHub account.
|
||||
## Step 2: Connect Cloudflare Pages to Your GitHub Account
|
||||
|
||||
## Step 2: Linking Cloudflare Pages to Your GitHub Account
|
||||
1. Go to the Cloudflare Pages section and click the `Create a project` button.
|
||||
2. Click `Connect To Git` and give Cloudflare Pages either All GitHub account Repo access or selected Repo access. We
|
||||
recommend using selected Repo access and selecting the forked repo from step 1.
|
||||
|
||||
1. Navigate to the Cloudflare Pages section and click on the `Create a project` button.
|
||||
2. Click `Connect To Git` and grant Cloudflare Pages access to either all GitHub account repositories or selected repositories.
|
||||
We recommend using selected Repo access and selecting the forked repository from step 1.
|
||||
## Step 3: Setup Build and Deployments
|
||||
|
||||
## Step 3: Configuring Build and Deployments
|
||||
1. Once you select the forked GitHub repo, click the `Begin Setup` button.
|
||||
2. On this page, set your `Project name`, `Production branch` (e.g., main), and your Build settings.
|
||||
3. Select `Next.js` from the `Framework preset` dropdown menu.
|
||||
4. Leave the preset filled Build command and Build output directory as preset defaults.
|
||||
5. Set `Environmental variables` (advanced) on this page to configure some variables as follows:
|
||||
|
||||
1. After selecting the forked GitHub repository, click the **Begin Setup** button
|
||||
2. On this page, set your **Project name**, **Production branch** (e.g., main), and your Build settings
|
||||
3. Choose `Next.js` from the **Framework preset** dropdown menu
|
||||
4. Set a custom **Build Command**:
|
||||
- `rm app/api/trpc-node/[trpc]/route.ts && npx @cloudflare/next-on-pages@1`
|
||||
- see the tradeoffs for this deletion on the notice at the top
|
||||
5. Keep the **Build output directory** as default
|
||||
6. Click the **Save and Deploy** button
|
||||
| Variable | Value |
|
||||
|---------------------------|---------|
|
||||
| `GO_VERSION` | `1.16` |
|
||||
| `NEXT_TELEMETRY_DISABLED` | `1` |
|
||||
| `NODE_VERSION` | `17` |
|
||||
| `PHP_VERSION` | `7.4` |
|
||||
| `PYTHON_VERSION` | `3.7` |
|
||||
| `RUBY_VERSION` | `2.7.1` |
|
||||
|
||||
## Step 4: Monitoring the Deployment Process
|
||||
6. Click the `Save and Deploy` button.
|
||||
|
||||
Observe the process as it initializes your build environment, clones the GitHub repository, builds the application, and deploys it
|
||||
to the Cloudflare Network. Once complete, proceed to the project you created.
|
||||
## Step 4: Monitor the Deployment Process
|
||||
|
||||
## Step 5: Required: Set the `nodejs_compat` compatibility flag
|
||||
Watch the process run to initialize your build environment, clone the GitHub repo, build the application, and deploy to
|
||||
the Cloudflare Network. Once that is done, proceed to the project you created.
|
||||
|
||||
1. Navigate to the [Settings > Functions](https://dash.cloudflare.com/?to=/:account/pages/view/:pages-project/settings/functions) page of your newly created project
|
||||
2. Scroll to `Compatibility flags` and enter "`nodejs_compat`" for both **Production** and **Preview** environments.
|
||||
It should look like this: 
|
||||
3. Re-deploy your project for the new flags to take effect
|
||||
|
||||
## Step 6: (Optional) Custom Domain Configuration
|
||||
## Step 5: Set up a Custom Domain
|
||||
|
||||
Use the `Custom domains` tab to set up your domain via CNAME.
|
||||
|
||||
## Step 7: (Optional) Access Policy and Web Analytics Configuration
|
||||
## Step 6: Configure Access Policy and Web Analytics
|
||||
|
||||
Navigate to the `Settings` page and enable the following settings:
|
||||
Go to the `Settings` page and enable the following settings:
|
||||
|
||||
1. Access Policy: Restrict [preview deployments](https://developers.cloudflare.com/pages/platform/preview-deployments/)
|
||||
to members of your Cloudflare account via one-time pin and restrict primary `*.YOURPROJECT.pages.dev` domain.
|
||||
Refer to [Cloudflare Pages known issues](https://developers.cloudflare.com/pages/platform/known-issues/#enabling-access-on-your-pagesdev-domain)
|
||||
for more details.
|
||||
See [Cloudflare Pages known issues](https://developers.cloudflare.com/pages/platform/known-issues/#enabling-access-on-your-pagesdev-domain)
|
||||
for more information.
|
||||
2. Enable Web Analytics.
|
||||
|
||||
Congratulations! You have successfully deployed your Next.js app on Cloudflare Pages.
|
||||
Now you have successfully deployed your Next.js app on Cloudflare Pages.
|
||||
@@ -1,47 +1,26 @@
|
||||
# Deploying `big-AGI` with Docker
|
||||
# Deploy `big-AGI` with Docker 🐳
|
||||
|
||||
Utilize Docker containers to deploy the big-AGI application for an efficient and automated deployment process.
|
||||
Docker ensures faster development cycles, easier collaboration, and seamless environment management.
|
||||
Deploy the big-AGI application using Docker containers for a consistent, efficient, and automated deployment process. Enjoy faster development cycles, easier collaboration, and seamless environment management. 🚀
|
||||
|
||||
## 🔧 Local Build & Deployment
|
||||
Docker is a platform for developing, packaging, and deploying applications as lightweight containers, ensuring consistent behavior across environments.
|
||||
|
||||
1. **Clone big-AGI**
|
||||
2. **Build the Docker Image**: Build a local docker image from the provided Dockerfile. The command is typically `docker build -t big-agi .`
|
||||
3. **Run the Docker Container**: Start a Docker container using the built image with the command `docker run -d -p 3000:3000 big-agi`
|
||||
## `big-AGI` Docker Components
|
||||
|
||||
> Note: If the Docker container is built without setting environment variables,
|
||||
> the frontend UI will be unaware of them, despite the backend being able to use them at runtime.
|
||||
> Therefore, ensure all necessary environment variables are set during the build process.
|
||||
The big-AGI repository includes a Dockerfile and a GitHub Actions workflow for building and publishing a Docker image of the application.
|
||||
|
||||
## Documentation
|
||||
### Dockerfile
|
||||
|
||||
The big-AGI repository includes a Dockerfile and a GitHub Actions workflow for building and publishing a
|
||||
Docker image of the application.
|
||||
The [`Dockerfile`](../Dockerfile) sets up a Node.js environment, installs dependencies, and creates a production-ready version of the application.
|
||||
|
||||
### Dockerfile: Containers
|
||||
### GitHub Actions Workflow
|
||||
|
||||
> A local build is recommended, as the 'ghcr' container is built without environment variables.
|
||||
The [`.github/workflows/docker-image.yml`](../.github/workflows/docker-image.yml) file automates building and publishing the Docker image when changes are pushed to the `main` branch.
|
||||
|
||||
The [`Dockerfile`](../Dockerfile) is used to create a Docker image. It establishes a Node.js environment,
|
||||
installs dependencies, and creates a production-ready version of the application as a local container.
|
||||
## Deploy Steps
|
||||
|
||||
### GitHub Actions workflow
|
||||
1. Clone the big-AGI repository
|
||||
2. Navigate to the project directory
|
||||
3. Build the Docker image using the provided Dockerfile
|
||||
4. Run the Docker container with the built image
|
||||
|
||||
The [`.github/workflows/docker-image.yml`](../.github/workflows/docker-image.yml) file automates the
|
||||
building and publishing of the Docker images to the GitHub Container Registry (ghcr) when changes are
|
||||
pushed to the `main` branch.
|
||||
|
||||
### Docker Compose
|
||||
|
||||
In addition, the repository also includes a `docker-compose.yaml` file, configured to run the pre-built
|
||||
'ghcr image'. This file is used to define the `big-agi` service, the ports to expose, and the command to run.
|
||||
|
||||
If you have Docker Compose installed, you can run the Docker container with `docker-compose up`
|
||||
to pull the Docker image (if it hasn't been pulled already) and start a Docker container. If you want to
|
||||
update the image to the latest version, you can run `docker-compose pull` before starting the service.
|
||||
|
||||
```bash
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
Leverage Docker's capabilities for a reliable and efficient big-AGI deployment.
|
||||
Embrace the benefits of Docker for a reliable and efficient big-AGI deployment. 🎉
|
||||
@@ -1,111 +0,0 @@
|
||||
# Environment Variables
|
||||
|
||||
This document provides an explanation of the environment variables used in the big-AGI application.
|
||||
|
||||
**All variables are optional**; and _UI options_ take precedence over _backend environment variables_,
|
||||
which take precedence over _defaults_. This file is kept in sync with [`../src/common/types/env.d.ts`](../src/common/types/env.d.ts).
|
||||
|
||||
### Setting Environment Variables
|
||||
|
||||
Environment variables can be set by creating a `.env` file in the root directory of the project.
|
||||
|
||||
> For Docker deployment, ensure all necessary environment variables are set **both during build and run**.
|
||||
> If the Docker container is built without setting environment variables, the frontend UI will be unaware
|
||||
> of them, despite the backend being able to use them at runtime.
|
||||
|
||||
The following is an example `.env` for copy-paste convenience:
|
||||
|
||||
```bash
|
||||
# Database
|
||||
POSTGRES_PRISMA_URL=
|
||||
POSTGRES_URL_NON_POOLING=
|
||||
|
||||
# LLMs
|
||||
OPENAI_API_KEY=
|
||||
OPENAI_API_HOST=
|
||||
OPENAI_API_ORG_ID=
|
||||
AZURE_OPENAI_API_ENDPOINT=
|
||||
AZURE_OPENAI_API_KEY=
|
||||
ANTHROPIC_API_KEY=
|
||||
ANTHROPIC_API_HOST=
|
||||
OLLAMA_API_HOST=
|
||||
OPENROUTER_API_KEY=
|
||||
|
||||
# Model Observability: Helicone
|
||||
HELICONE_API_KEY=
|
||||
|
||||
# Text-To-Speech
|
||||
ELEVENLABS_API_KEY=
|
||||
ELEVENLABS_API_HOST=
|
||||
ELEVENLABS_VOICE_ID=
|
||||
# Google Custom Search
|
||||
GOOGLE_CLOUD_API_KEY=
|
||||
GOOGLE_CSE_ID=
|
||||
# Text-To-Image
|
||||
PRODIA_API_KEY=
|
||||
```
|
||||
|
||||
## Variables Documentation
|
||||
|
||||
### Database
|
||||
|
||||
To enable features such as Chat Link Sharing, you need to connect the backend to a database. We require
|
||||
serverless Postgres, which is available on Vercel, Neon and more.
|
||||
|
||||
Also make sure that you run `npx prisma db push` to create the initial schema on the database for the
|
||||
first time (or update it on a later stage).
|
||||
|
||||
| Variable | Description |
|
||||
|----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `POSTGRES_PRISMA_URL` | The URL of the Postgres database used by Prisma - example: `postgres://USER:PASS@SOMEHOST.postgres.vercel-storage.com/SOMEDB?pgbouncer=true&connect_timeout=15` |
|
||||
| `POSTGRES_URL_NON_POOLING` | The URL of the Postgres database without pooling |
|
||||
|
||||
### LLMs
|
||||
|
||||
The following variables when set will enable the corresponding LLMs on the server-side, without
|
||||
requiring the user to enter an API key
|
||||
|
||||
| Variable | Description | Required |
|
||||
|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------|
|
||||
| `OPENAI_API_KEY` | API key for OpenAI | Recommended |
|
||||
| `OPENAI_API_HOST` | Changes the backend host for the OpenAI vendor, to enable platforms such as Helicone and CloudFlare AI Gateway | Optional |
|
||||
| `OPENAI_API_ORG_ID` | Sets the "OpenAI-Organization" header field to support organization users | Optional |
|
||||
| `AZURE_OPENAI_API_ENDPOINT` | Azure OpenAI endpoint - host only, without the path | Optional, but if set `AZURE_OPENAI_API_KEY` must also be set |
|
||||
| `AZURE_OPENAI_API_KEY` | Azure OpenAI API key, see [config-azure-openai.md](config-azure-openai.md) | Optional, but if set `AZURE_OPENAI_API_ENDPOINT` must also be set |
|
||||
| `ANTHROPIC_API_KEY` | The API key for Anthropic | Optional |
|
||||
| `ANTHROPIC_API_HOST` | Changes the backend host for the Anthropic vendor, to enable platforms such as [config-aws-bedrock.md](config-aws-bedrock.md) | Optional |
|
||||
| `OLLAMA_API_HOST` | Changes the backend host for the Ollama vendor. See [config-ollama.md](config-ollama.md) | |
|
||||
| `OPENROUTER_API_KEY` | The API key for OpenRouter | Optional |
|
||||
|
||||
### Model Observability: Helicone
|
||||
|
||||
Helicone provides observability to your LLM calls. It is a paid service, with a generous free tier.
|
||||
It is currently supported for:
|
||||
|
||||
- **Anthropic**: by setting the Helicone API key, Helicone is automatically activated
|
||||
- **OpenAI**: you also need to set `OPENAI_API_HOST` to `oai.hconeai.com`, to enable routing
|
||||
|
||||
| Variable | Description |
|
||||
|--------------------|--------------------------|
|
||||
| `HELICONE_API_KEY` | The API key for Helicone |
|
||||
|
||||
### Specials
|
||||
|
||||
Enable the app to Talk, Draw, and Google things up.
|
||||
|
||||
| Variable | Description |
|
||||
|:-------------------------|:------------------------------------------------------------------------------------------------------------------------|
|
||||
| **Text-To-Speech** | [ElevenLabs](https://elevenlabs.io/) is a high quality speech synthesis service |
|
||||
| `ELEVENLABS_API_KEY` | ElevenLabs API Key - used for calls, etc. |
|
||||
| `ELEVENLABS_API_HOST` | Custom host for ElevenLabs |
|
||||
| `ELEVENLABS_VOICE_ID` | Default voice ID for ElevenLabs |
|
||||
| **Google Custom Search** | [Google Programmable Search Engine](https://programmablesearchengine.google.com/about/) produces links to pages |
|
||||
| `GOOGLE_CLOUD_API_KEY` | Google Cloud API Key, used with the '/react' command - [Link to GCP](https://console.cloud.google.com/apis/credentials) |
|
||||
| `GOOGLE_CSE_ID` | Google Custom/Programmable Search Engine ID - [Link to PSE](https://programmablesearchengine.google.com/) |
|
||||
| **Text-To-Image** | [Prodia](https://prodia.com/) is a reliable image generation service |
|
||||
| `PRODIA_API_KEY` | Prodia API Key - used with '/imagine ...' |
|
||||
|
||||
---
|
||||
|
||||
|
||||
|
||||
|
Before Width: | Height: | Size: 279 KiB After Width: | Height: | Size: 283 KiB |
|
Before Width: | Height: | Size: 209 KiB After Width: | Height: | Size: 255 KiB |
|
Before Width: | Height: | Size: 618 KiB After Width: | Height: | Size: 626 KiB |
|
Before Width: | Height: | Size: 29 KiB |
|
Before Width: | Height: | Size: 42 KiB |
|
Before Width: | Height: | Size: 32 KiB |
|
Before Width: | Height: | Size: 370 KiB |
|
Before Width: | Height: | Size: 34 KiB |
|
Before Width: | Height: | Size: 37 KiB |
|
Before Width: | Height: | Size: 48 KiB |
|
Before Width: | Height: | Size: 38 KiB |
|
Before Width: | Height: | Size: 60 KiB |
|
Before Width: | Height: | Size: 5.8 KiB After Width: | Height: | Size: 8.2 KiB |
|
Before Width: | Height: | Size: 1.6 MiB After Width: | Height: | Size: 3.8 MiB |
|
Before Width: | Height: | Size: 13 KiB After Width: | Height: | Size: 20 KiB |
|
Before Width: | Height: | Size: 55 KiB After Width: | Height: | Size: 84 KiB |
|
Before Width: | Height: | Size: 11 KiB After Width: | Height: | Size: 16 KiB |
|
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 13 KiB |
|
Before Width: | Height: | Size: 54 KiB After Width: | Height: | Size: 79 KiB |
|
Before Width: | Height: | Size: 54 KiB After Width: | Height: | Size: 80 KiB |
|
Before Width: | Height: | Size: 2.6 KiB After Width: | Height: | Size: 3.7 KiB |
|
Before Width: | Height: | Size: 32 KiB After Width: | Height: | Size: 48 KiB |
|
Before Width: | Height: | Size: 5.6 KiB After Width: | Height: | Size: 8.6 KiB |
|
Before Width: | Height: | Size: 9.7 KiB After Width: | Height: | Size: 15 KiB |
|
Before Width: | Height: | Size: 60 KiB After Width: | Height: | Size: 94 KiB |
|
Before Width: | Height: | Size: 157 KiB After Width: | Height: | Size: 195 KiB |
|
Before Width: | Height: | Size: 156 KiB After Width: | Height: | Size: 192 KiB |
|
Before Width: | Height: | Size: 10 KiB After Width: | Height: | Size: 14 KiB |
|
Before Width: | Height: | Size: 22 KiB After Width: | Height: | Size: 23 KiB |
|
Before Width: | Height: | Size: 49 KiB After Width: | Height: | Size: 62 KiB |
@@ -0,0 +1,16 @@
|
||||
import { withAuth } from 'next-auth/middleware';
|
||||
|
||||
import { authType } from '@/modules/authentication/auth.server';
|
||||
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export const middleware = !authType ? () => null : withAuth({
|
||||
callbacks: {
|
||||
authorized({ req, token }) {
|
||||
// console.log('authorized', req, token);
|
||||
return !!token;
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
export const config = { matcher: ['/:path*'] };
|
||||
@@ -1,12 +1,16 @@
|
||||
/** @type {import('next').NextConfig} */
|
||||
let nextConfig = {
|
||||
const nextConfig = {
|
||||
reactStrictMode: true,
|
||||
modularizeImports: {
|
||||
'@mui/icons-material': {
|
||||
transform: '@mui/icons-material/{{member}}',
|
||||
},
|
||||
env: {
|
||||
// defaults to TRUE, unless API Keys are set at build time; this flag is used by the UI
|
||||
HAS_SERVER_KEY_OPENAI: !!process.env.OPENAI_API_KEY,
|
||||
HAS_SERVER_KEY_ELEVENLABS: !!process.env.ELEVENLABS_API_KEY,
|
||||
HAS_SERVER_KEY_PRODIA: !!process.env.PRODIA_API_KEY,
|
||||
HAS_SERVER_KEYS_GOOGLE_CSE: !!process.env.GOOGLE_CLOUD_API_KEY && !!process.env.GOOGLE_CSE_ID,
|
||||
// for auth only
|
||||
SERVER_AUTH_TYPE: process.env.AUTH_TYPE,
|
||||
},
|
||||
webpack: (config, _options) => {
|
||||
webpack(config, { isServer, dev }) {
|
||||
// @mui/joy: anything material gets redirected to Joy
|
||||
config.resolve.alias['@mui/material'] = '@mui/joy';
|
||||
|
||||
@@ -18,22 +22,6 @@ let nextConfig = {
|
||||
|
||||
return config;
|
||||
},
|
||||
// NOTE: the following shall be replaced by runtime config
|
||||
env: {
|
||||
HAS_SERVER_DB_PRISMA: !!process.env.POSTGRES_PRISMA_URL && !!process.env.POSTGRES_URL_NON_POOLING,
|
||||
HAS_SERVER_KEYS_GOOGLE_CSE: !!process.env.GOOGLE_CLOUD_API_KEY && !!process.env.GOOGLE_CSE_ID,
|
||||
HAS_SERVER_KEY_ANTHROPIC: !!process.env.ANTHROPIC_API_KEY,
|
||||
HAS_SERVER_KEY_AZURE_OPENAI: !!process.env.AZURE_OPENAI_API_KEY && !!process.env.AZURE_OPENAI_API_ENDPOINT,
|
||||
HAS_SERVER_KEY_ELEVENLABS: !!process.env.ELEVENLABS_API_KEY,
|
||||
HAS_SERVER_HOST_OLLAMA: !!process.env.OLLAMA_API_HOST,
|
||||
HAS_SERVER_KEY_OPENAI: !!process.env.OPENAI_API_KEY,
|
||||
HAS_SERVER_KEY_OPENROUTER: !!process.env.OPENROUTER_API_KEY,
|
||||
HAS_SERVER_KEY_PRODIA: !!process.env.PRODIA_API_KEY,
|
||||
},
|
||||
};
|
||||
|
||||
// conditionally enable the nextjs bundle analyzer
|
||||
if (process.env.ANALYZE_BUNDLE)
|
||||
nextConfig = require('@next/bundle-analyzer')()(nextConfig);
|
||||
|
||||
module.exports = nextConfig;
|
||||
|
||||
@@ -1,71 +1,46 @@
|
||||
{
|
||||
"name": "big-agi",
|
||||
"version": "1.4.0",
|
||||
"version": "0.9.1",
|
||||
"private": true,
|
||||
"engines": {
|
||||
"node": ">=18.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"dev": "next dev --turbo",
|
||||
"dev": "next dev",
|
||||
"build": "next build",
|
||||
"start": "next start",
|
||||
"lint": "next lint",
|
||||
"env:pull": "npx vercel env pull .env.development.local",
|
||||
"postinstall": "prisma generate",
|
||||
"db:push": "prisma db push",
|
||||
"db:studio": "prisma studio"
|
||||
"lint": "next lint"
|
||||
},
|
||||
"dependencies": {
|
||||
"@dqbd/tiktoken": "^1.0.7",
|
||||
"@emotion/cache": "^11.11.0",
|
||||
"@emotion/react": "^11.11.1",
|
||||
"@emotion/server": "^11.11.0",
|
||||
"@emotion/styled": "^11.11.0",
|
||||
"@mui/icons-material": "^5.14.16",
|
||||
"@mui/joy": "^5.0.0-beta.14",
|
||||
"@next/bundle-analyzer": "~14.0.2",
|
||||
"@prisma/client": "^5.5.2",
|
||||
"@sanity/diff-match-patch": "^3.1.1",
|
||||
"@tanstack/react-query": "^4.36.1",
|
||||
"@trpc/client": "^10.43.3",
|
||||
"@trpc/next": "^10.43.3",
|
||||
"@trpc/react-query": "^10.43.3",
|
||||
"@trpc/server": "^10.43.3",
|
||||
"@vercel/analytics": "^1.1.1",
|
||||
"browser-fs-access": "^0.35.0",
|
||||
"eventsource-parser": "^1.1.1",
|
||||
"idb-keyval": "^6.2.1",
|
||||
"next": "~14.0.2",
|
||||
"pdfjs-dist": "3.11.174",
|
||||
"plantuml-encoder": "^1.4.0",
|
||||
"@emotion/react": "^11.10.8",
|
||||
"@emotion/server": "^11.10.0",
|
||||
"@emotion/styled": "^11.10.8",
|
||||
"@mui/icons-material": "^5.11.16",
|
||||
"@mui/joy": "^5.0.0-alpha.77",
|
||||
"@tanstack/react-query": "^4.29.5",
|
||||
"@vercel/analytics": "^1.0.0",
|
||||
"eventsource-parser": "^1.0.0",
|
||||
"next": "^13.3.2",
|
||||
"pdfjs-dist": "^3.5.141",
|
||||
"next-auth": "^4.21.1",
|
||||
"prismjs": "^1.29.0",
|
||||
"react": "^18.2.0",
|
||||
"react-dom": "^18.2.0",
|
||||
"react-katex": "^3.0.1",
|
||||
"react-markdown": "^9.0.0",
|
||||
"react-timeago": "^7.2.0",
|
||||
"remark-gfm": "^4.0.0",
|
||||
"superjson": "^2.2.1",
|
||||
"tesseract.js": "^5.0.3",
|
||||
"uuid": "^9.0.1",
|
||||
"zod": "^3.22.4",
|
||||
"zustand": "~4.3.9"
|
||||
"react-markdown": "^8.0.7",
|
||||
"remark-gfm": "^3.0.1",
|
||||
"uuid": "^9.0.0",
|
||||
"zustand": "^4.3.7"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20.9.0",
|
||||
"@types/plantuml-encoder": "^1.4.2",
|
||||
"@types/prismjs": "^1.26.3",
|
||||
"@types/react": "^18.2.37",
|
||||
"@types/react-dom": "^18.2.15",
|
||||
"@types/react-katex": "^3.0.3",
|
||||
"@types/react-timeago": "^4.1.6",
|
||||
"@types/uuid": "^9.0.7",
|
||||
"@typescript-eslint/eslint-plugin": "^6.10.0",
|
||||
"@typescript-eslint/parser": "^6.10.0",
|
||||
"eslint": "^8.53.0",
|
||||
"eslint-config-next": "~14.0.2",
|
||||
"prettier": "^3.0.3",
|
||||
"prisma": "^5.5.2",
|
||||
"typescript": "^5.2.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": "^20.0.0 || ^18.0.0"
|
||||
"@types/node": "^18.16.3",
|
||||
"@types/prismjs": "^1.26.0",
|
||||
"@types/react": "^18.2.0",
|
||||
"@types/react-dom": "^18.2.1",
|
||||
"@types/uuid": "^9.0.1",
|
||||
"eslint": "^8.39.0",
|
||||
"eslint-config-next": "^13.3.2",
|
||||
"prettier": "^2.8.8",
|
||||
"typescript": "^5.0.4"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,14 +5,12 @@ import { AppProps } from 'next/app';
|
||||
import { CacheProvider, EmotionCache } from '@emotion/react';
|
||||
import { CssBaseline, CssVarsProvider } from '@mui/joy';
|
||||
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
|
||||
import { Session as NextAuthSession } from 'next-auth';
|
||||
import { SessionProvider } from 'next-auth/react';
|
||||
|
||||
import { apiQuery } from '~/common/util/trpc.client';
|
||||
|
||||
import 'katex/dist/katex.min.css';
|
||||
import '~/common/styles/CodePrism.css'
|
||||
import '~/common/styles/GithubMarkdown.css';
|
||||
import { Brand } from '~/common/brand';
|
||||
import { createEmotionCache, theme } from '~/common/theme';
|
||||
import '@/common/styles/GithubMarkdown.css';
|
||||
import { Brand } from '@/common/brand';
|
||||
import { createEmotionCache, theme } from '@/common/theme';
|
||||
|
||||
|
||||
// Client-side cache, shared for the whole session of the user in the browser.
|
||||
@@ -20,37 +18,30 @@ const clientSideEmotionCache = createEmotionCache();
|
||||
|
||||
export interface MyAppProps extends AppProps {
|
||||
emotionCache?: EmotionCache;
|
||||
session?: NextAuthSession;
|
||||
}
|
||||
|
||||
function MyApp({ Component, emotionCache = clientSideEmotionCache, pageProps }: MyAppProps) {
|
||||
const [queryClient] = React.useState(() => new QueryClient({
|
||||
defaultOptions: {
|
||||
queries: {
|
||||
retry: false,
|
||||
},
|
||||
mutations: {
|
||||
retry: false,
|
||||
},
|
||||
},
|
||||
}));
|
||||
export default function MyApp({ Component, emotionCache = clientSideEmotionCache, pageProps: { session, ...pageProps } }: MyAppProps) {
|
||||
const [queryClient] = React.useState(() => new QueryClient());
|
||||
return <>
|
||||
<CacheProvider value={emotionCache}>
|
||||
<Head>
|
||||
<title>{Brand.Title.Common}</title>
|
||||
<meta name='viewport' content='minimum-scale=1, initial-scale=1, width=device-width, shrink-to-fit=no' />
|
||||
</Head>
|
||||
{/* Rect-query provider */}
|
||||
<QueryClientProvider client={queryClient}>
|
||||
<CssVarsProvider defaultMode='light' theme={theme}>
|
||||
{/* CssBaseline kickstart an elegant, consistent, and simple baseline to build upon. */}
|
||||
<CssBaseline />
|
||||
<Component {...pageProps} />
|
||||
</CssVarsProvider>
|
||||
</QueryClientProvider>
|
||||
{/* Next-Auth provider */}
|
||||
<SessionProvider session={session}>
|
||||
{/* Rect-query provider */}
|
||||
<QueryClientProvider client={queryClient}>
|
||||
{/* JoyUI/Emotion */}
|
||||
<CssVarsProvider defaultMode='light' theme={theme}>
|
||||
{/* CssBaseline kickstart an elegant, consistent, and simple baseline to build upon. */}
|
||||
<CssBaseline />
|
||||
<Component {...pageProps} />
|
||||
</CssVarsProvider>
|
||||
</QueryClientProvider>
|
||||
</SessionProvider>
|
||||
</CacheProvider>
|
||||
<VercelAnalytics debug={false} />
|
||||
</>;
|
||||
}
|
||||
|
||||
// enables the react-query api invocation
|
||||
export default apiQuery.withTRPC(MyApp);
|
||||
}
|
||||
@@ -4,14 +4,13 @@ import { default as Document, DocumentContext, DocumentProps, Head, Html, Main,
|
||||
import createEmotionServer from '@emotion/server/create-instance';
|
||||
import { getInitColorSchemeScript } from '@mui/joy/styles';
|
||||
|
||||
import { Brand } from '~/common/brand';
|
||||
import { bodyFontClassName, createEmotionCache } from '~/common/theme';
|
||||
|
||||
import { Brand } from '@/common/brand';
|
||||
import { MyAppProps } from './_app';
|
||||
import { bodyFontClassName, createEmotionCache } from '@/common/theme';
|
||||
|
||||
|
||||
interface MyDocumentProps extends DocumentProps {
|
||||
emotionStyleTags: React.JSX.Element[];
|
||||
emotionStyleTags: JSX.Element[];
|
||||
}
|
||||
|
||||
export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
|
||||
@@ -20,6 +19,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
|
||||
<Head>
|
||||
{/* Meta (missing Title, set by the App or Page) */}
|
||||
<meta name='description' content={Brand.Meta.Description} />
|
||||
<meta name='keywords' content={Brand.Meta.Keywords} />
|
||||
<meta name='theme-color' content={Brand.Meta.ThemeColor} />
|
||||
|
||||
{/* Favicons & PWA */}
|
||||
@@ -32,7 +32,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
|
||||
<meta name='apple-mobile-web-app-status-bar-style' content='black' />
|
||||
|
||||
{/* Opengraph */}
|
||||
<meta property='og:title' content={Brand.Title.Common} />
|
||||
<meta property='og:title' content={Brand.Meta.Title} />
|
||||
<meta property='og:description' content={Brand.Meta.Description} />
|
||||
{Brand.URIs.CardImage && <meta property='og:image' content={Brand.URIs.CardImage} />}
|
||||
<meta property='og:url' content={Brand.URIs.Home} />
|
||||
@@ -42,7 +42,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
|
||||
{/* Twitter */}
|
||||
<meta property='twitter:card' content='summary_large_image' />
|
||||
<meta property='twitter:url' content={Brand.URIs.Home} />
|
||||
<meta property='twitter:title' content={Brand.Title.Common} />
|
||||
<meta property='twitter:title' content={Brand.Meta.Title} />
|
||||
<meta property='twitter:description' content={Brand.Meta.Description} />
|
||||
{Brand.URIs.CardImage && <meta property='twitter:image' content={Brand.URIs.CardImage} />}
|
||||
<meta name='twitter:site' content={Brand.Meta.TwitterSite} />
|
||||
|
||||
@@ -0,0 +1,20 @@
|
||||
import { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { default as NextAuth } from 'next-auth';
|
||||
|
||||
import { authBasicUsers, authCreateProviders, authType } from '@/modules/authentication/auth.server';
|
||||
|
||||
|
||||
const authOptions = {
|
||||
secret: process.env.NEXTAUTH_SECRET,
|
||||
providers: authCreateProviders(),
|
||||
};
|
||||
|
||||
export default function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
if (!authType)
|
||||
return res.status(200).send('Auth not enabled');
|
||||
|
||||
if (Object.keys(authBasicUsers).length <= 0)
|
||||
res.status(200).send('Auth enabled but no users have been set up');
|
||||
|
||||
return NextAuth(req, res, authOptions);
|
||||
}
|
||||
@@ -0,0 +1,77 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { ElevenLabs } from '@/modules/elevenlabs/elevenlabs.types';
|
||||
|
||||
|
||||
function parseApiParameters(apiKey?: string) {
|
||||
return {
|
||||
apiHost: (process.env.ELEVENLABS_API_HOST || 'api.elevenlabs.io').trim().replaceAll('https://', ''),
|
||||
apiHeaders: {
|
||||
'Content-Type': 'application/json',
|
||||
'xi-api-key': (apiKey || process.env.ELEVENLABS_API_KEY || '').trim(),
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function rethrowElevenLabsError(response: Response) {
|
||||
if (!response.ok) {
|
||||
let errorPayload: object | null = null;
|
||||
try {
|
||||
errorPayload = await response.json();
|
||||
} catch (e) {
|
||||
// ignore
|
||||
}
|
||||
console.error('Error in ElevenLabs API:', errorPayload);
|
||||
throw new Error('ElevenLabs error: ' + JSON.stringify(errorPayload));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
export async function getFromElevenLabs<TJson extends object>(apiKey: string, apiPath: string): Promise<TJson> {
|
||||
const { apiHost, apiHeaders } = parseApiParameters(apiKey);
|
||||
|
||||
const response = await fetch(`https://${apiHost}${apiPath}`, {
|
||||
method: 'GET',
|
||||
headers: apiHeaders,
|
||||
});
|
||||
|
||||
await rethrowElevenLabsError(response);
|
||||
return await response.json();
|
||||
}
|
||||
|
||||
export async function postToElevenLabs<TBody extends object>(apiKey: string, apiPath: string, body: TBody, signal?: AbortSignal): Promise<Response> {
|
||||
const { apiHost, apiHeaders } = parseApiParameters(apiKey);
|
||||
|
||||
const response = await fetch(`https://${apiHost}${apiPath}`, {
|
||||
method: 'POST',
|
||||
headers: apiHeaders,
|
||||
body: JSON.stringify(body),
|
||||
signal,
|
||||
});
|
||||
|
||||
await rethrowElevenLabsError(response);
|
||||
return response;
|
||||
}
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest) {
|
||||
try {
|
||||
const { apiKey = '', text, voiceId: userVoiceId, nonEnglish } = (await req.json()) as ElevenLabs.API.TextToSpeech.RequestBody;
|
||||
const voiceId = userVoiceId || process.env.ELEVENLABS_VOICE_ID || '21m00Tcm4TlvDq8ikWAM';
|
||||
const requestPayload: ElevenLabs.Wire.TextToSpeech.Request = {
|
||||
text: text,
|
||||
...(nonEnglish ? { model_id: 'eleven_multilingual_v1' } : {}),
|
||||
};
|
||||
const response = await postToElevenLabs<ElevenLabs.Wire.TextToSpeech.Request>(apiKey, `/v1/text-to-speech/${voiceId}`, requestPayload);
|
||||
const audioBuffer: ElevenLabs.API.TextToSpeech.Response = await response.arrayBuffer();
|
||||
return new NextResponse(audioBuffer, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
|
||||
} catch (error) {
|
||||
console.error('Error posting to ElevenLabs', error);
|
||||
return new NextResponse(JSON.stringify(`speechToText error: ${error?.toString() || 'Network issue'}`), { status: 500 });
|
||||
}
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export const config = {
|
||||
runtime: 'edge',
|
||||
};
|
||||
@@ -0,0 +1,48 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { ElevenLabs } from '@/modules/elevenlabs/elevenlabs.types';
|
||||
import { getFromElevenLabs } from './speech';
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest) {
|
||||
try {
|
||||
const { apiKey = '' } = (await req.json()) as ElevenLabs.API.Voices.RequestBody;
|
||||
|
||||
const voicesList = await getFromElevenLabs<ElevenLabs.Wire.Voices.List>(apiKey, '/v1/voices');
|
||||
|
||||
// bring category != 'premade to the top
|
||||
voicesList.voices.sort((a, b) => {
|
||||
if (a.category === 'premade' && b.category !== 'premade') return 1;
|
||||
if (a.category !== 'premade' && b.category === 'premade') return -1;
|
||||
return 0;
|
||||
});
|
||||
|
||||
// map to our own response format
|
||||
const response: ElevenLabs.API.Voices.Response = {
|
||||
voices: voicesList.voices.map((voice, idx) => ({
|
||||
id: voice.voice_id,
|
||||
name: voice.name,
|
||||
description: voice.description,
|
||||
previewUrl: voice.preview_url,
|
||||
category: voice.category,
|
||||
default: idx === 0,
|
||||
})),
|
||||
};
|
||||
|
||||
return new NextResponse(JSON.stringify(response), { status: 200, headers: { 'Content-Type': 'application/json' } });
|
||||
} catch (error) {
|
||||
console.error('Error fetching voices from ElevenLabs:', error);
|
||||
return new NextResponse(
|
||||
JSON.stringify({
|
||||
type: 'error',
|
||||
error: error?.toString() || error || 'Network issue',
|
||||
}),
|
||||
{ status: 500 },
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export const config = {
|
||||
runtime: 'edge',
|
||||
};
|
||||
@@ -0,0 +1,27 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { OpenAI } from '@/modules/openai/openai.types';
|
||||
import { openaiPost, toApiChatRequest, toWireCompletionRequest } from '@/modules/openai/openai.server';
|
||||
|
||||
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest) {
|
||||
try {
|
||||
const requestBodyJson = await req.json();
|
||||
const { api, ...rest } = await toApiChatRequest(requestBodyJson);
|
||||
const upstreamRequest: OpenAI.Wire.Chat.CompletionRequest = toWireCompletionRequest(rest, false);
|
||||
const upstreamResponse: OpenAI.Wire.Chat.CompletionResponse = await openaiPost(api, '/v1/chat/completions', upstreamRequest);
|
||||
return new NextResponse(JSON.stringify({
|
||||
message: upstreamResponse.choices[0].message,
|
||||
} satisfies OpenAI.API.Chat.Response));
|
||||
} catch (error: any) {
|
||||
console.error('Fetch request failed:', error);
|
||||
return new NextResponse(`[Issue] ${error}`, { status: 400 });
|
||||
}
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export const config = {
|
||||
runtime: 'edge',
|
||||
};
|
||||
@@ -0,0 +1,30 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { OpenAI } from '@/modules/openai/openai.types';
|
||||
import { openaiGet, toApiChatRequest } from '@/modules/openai/openai.server';
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest): Promise<NextResponse> {
|
||||
try {
|
||||
// FIXME: this is currently broken, the "extractOpenAIChatInputs" is expecting messages/modelId, which we don't have here
|
||||
// keep working on this
|
||||
const requestBodyJson = await req.json();
|
||||
const { api } = await toApiChatRequest(requestBodyJson);
|
||||
|
||||
const wireModels = await openaiGet<OpenAI.Wire.Models.Response>(api, '/v1/models');
|
||||
|
||||
// flatten IDs (most recent first)
|
||||
return new NextResponse(JSON.stringify({
|
||||
models: wireModels.data.map((model) => ({ id: model.id, created: model.created })),
|
||||
} satisfies OpenAI.API.Models.Response));
|
||||
|
||||
} catch (error: any) {
|
||||
console.error('Fetch request failed:', error);
|
||||
return new NextResponse(`[Issue] ${error}`, { status: 400 });
|
||||
}
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export const config = {
|
||||
runtime: 'edge',
|
||||
};
|
||||
@@ -0,0 +1,117 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
import { createParser } from 'eventsource-parser';
|
||||
|
||||
import { OpenAI } from '@/modules/openai/openai.types';
|
||||
import { openaiPostResponse, toApiChatRequest, toWireCompletionRequest } from '@/modules/openai/openai.server';
|
||||
|
||||
|
||||
async function chatStreamRepeater(input: OpenAI.API.Chat.Request, signal: AbortSignal): Promise<ReadableStream> {
|
||||
|
||||
// Handle the abort event when the connection is closed by the client
|
||||
signal.addEventListener('abort', () => {
|
||||
console.log('Client closed the connection.');
|
||||
});
|
||||
|
||||
// begin event streaming from the OpenAI API
|
||||
const encoder = new TextEncoder();
|
||||
|
||||
let upstreamResponse: Response;
|
||||
try {
|
||||
const request: OpenAI.Wire.Chat.CompletionRequest = toWireCompletionRequest(input, true);
|
||||
upstreamResponse = await openaiPostResponse(input.api, '/v1/chat/completions', request, signal);
|
||||
} catch (error: any) {
|
||||
console.log(error);
|
||||
const message = '[OpenAI Issue] ' + (error?.message || typeof error === 'string' ? error : JSON.stringify(error)) + (error?.cause ? ' · ' + error.cause : '');
|
||||
return new ReadableStream({
|
||||
start: controller => {
|
||||
controller.enqueue(encoder.encode(message));
|
||||
controller.close();
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// decoding and re-encoding loop
|
||||
|
||||
const onReadableStreamStart = async (controller: ReadableStreamDefaultController) => {
|
||||
|
||||
let hasBegun = false;
|
||||
|
||||
// stream response (SSE) from OpenAI is split into multiple chunks. this function
|
||||
// will parse the event into a text stream, and re-emit it to the client
|
||||
const upstreamParser = createParser(event => {
|
||||
|
||||
// ignore reconnect interval
|
||||
if (event.type !== 'event')
|
||||
return;
|
||||
|
||||
// https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream
|
||||
if (event.data === '[DONE]') {
|
||||
controller.close();
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const json: OpenAI.Wire.Chat.CompletionResponseChunked = JSON.parse(event.data);
|
||||
|
||||
// ignore any 'role' delta update
|
||||
if (json.choices[0].delta?.role)
|
||||
return;
|
||||
|
||||
// stringify and send the first packet as a JSON object
|
||||
if (!hasBegun) {
|
||||
hasBegun = true;
|
||||
const firstPacket: OpenAI.API.Chat.StreamingFirstResponse = {
|
||||
model: json.model,
|
||||
};
|
||||
controller.enqueue(encoder.encode(JSON.stringify(firstPacket)));
|
||||
}
|
||||
|
||||
// transmit the text stream
|
||||
const text = json.choices[0].delta?.content || '';
|
||||
controller.enqueue(encoder.encode(text));
|
||||
|
||||
} catch (error) {
|
||||
// maybe parse error
|
||||
console.error('Error parsing OpenAI response', error);
|
||||
controller.error(error);
|
||||
}
|
||||
});
|
||||
|
||||
// https://web.dev/streams/#asynchronous-iteration
|
||||
const decoder = new TextDecoder();
|
||||
for await (const upstreamChunk of upstreamResponse.body as any)
|
||||
upstreamParser.feed(decoder.decode(upstreamChunk, { stream: true }));
|
||||
|
||||
};
|
||||
|
||||
return new ReadableStream({
|
||||
start: onReadableStreamStart,
|
||||
cancel: (reason) => console.log('chatStreamRepeater cancelled', reason),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest): Promise<Response> {
|
||||
try {
|
||||
const requestBodyJson = await req.json();
|
||||
const chatRequest: OpenAI.API.Chat.Request = await toApiChatRequest(requestBodyJson);
|
||||
const chatResponseStream: ReadableStream = await chatStreamRepeater(chatRequest, req.signal);
|
||||
return new NextResponse(chatResponseStream);
|
||||
} catch (error: any) {
|
||||
if (error.name === 'AbortError') {
|
||||
console.log('Fetch request aborted in handler');
|
||||
return new Response('Request aborted by the user.', { status: 499 }); // Use 499 status code for client closed request
|
||||
} else if (error.code === 'ECONNRESET') {
|
||||
console.log('Connection reset by the client in handler');
|
||||
return new Response('Connection reset by the client.', { status: 499 }); // Use 499 status code for client closed request
|
||||
} else {
|
||||
console.error('Fetch request failed:', error);
|
||||
return new NextResponse(`[Issue] ${error}`, { status: 400 });
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
//noinspection JSUnusedGlobalSymbols
|
||||
export const config = {
|
||||
runtime: 'edge',
|
||||
};
|
||||
@@ -0,0 +1,88 @@
|
||||
// noinspection ExceptionCaughtLocallyJS
|
||||
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { Prodia } from '@/modules/prodia/prodia.types';
|
||||
|
||||
|
||||
export const prodiaHeaders = (apiKey: string): Record<string, string> => ({
|
||||
'X-Prodia-Key': (apiKey || process.env.PRODIA_API_KEY || '').trim(),
|
||||
});
|
||||
|
||||
|
||||
async function createGenerationJob(apiKey: string, jobRequest: Prodia.Wire.Imagine.JobRequest): Promise<Prodia.Wire.Imagine.JobResponse> {
|
||||
const response = await fetch('https://api.prodia.com/v1/job', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
...prodiaHeaders(apiKey),
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify(jobRequest),
|
||||
});
|
||||
if (response.status !== 200) {
|
||||
console.log('Bad Prodia Response:', await response.text());
|
||||
throw new Error(`Bad Prodia Response: ${response.status}`);
|
||||
}
|
||||
return await response.json();
|
||||
}
|
||||
|
||||
async function getJobStatus(apiKey: string, jobId: string): Promise<Prodia.Wire.Imagine.JobResponse> {
|
||||
const response = await fetch(`https://api.prodia.com/v1/job/${jobId}`, {
|
||||
headers: prodiaHeaders(apiKey),
|
||||
});
|
||||
if (response.status !== 200)
|
||||
throw new Error(`Bad Prodia Response: ${response.status}`);
|
||||
return await response.json();
|
||||
}
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest) {
|
||||
// timeout, in seconds
|
||||
const timeout = 15;
|
||||
const tStart = Date.now();
|
||||
|
||||
try {
|
||||
const { apiKey = '', prompt, prodiaModelId, negativePrompt, steps, cfgScale, seed } = (await req.json()) as Prodia.API.Imagine.RequestBody;
|
||||
|
||||
// crate the job, getting back a job ID
|
||||
const jobRequest: Prodia.Wire.Imagine.JobRequest = {
|
||||
model: prodiaModelId,
|
||||
prompt,
|
||||
...(!!cfgScale && { cfg_scale: cfgScale }),
|
||||
...(!!steps && { steps }),
|
||||
...(!!negativePrompt && { negative_prompt: negativePrompt }),
|
||||
...(!!seed && { seed }),
|
||||
};
|
||||
let job: Prodia.Wire.Imagine.JobResponse = await createGenerationJob(apiKey, jobRequest);
|
||||
|
||||
// poll the job status until it's done
|
||||
let sleepDelay = 2000;
|
||||
while (job.status !== 'succeeded' && job.status !== 'failed' && (Date.now() - tStart) < (timeout * 1000)) {
|
||||
await new Promise(resolve => setTimeout(resolve, sleepDelay));
|
||||
job = await getJobStatus(apiKey, job.job);
|
||||
if (sleepDelay > 250)
|
||||
sleepDelay /= 2;
|
||||
}
|
||||
|
||||
// check for success
|
||||
const elapsed = Math.round((Date.now() - tStart) / 100) / 10;
|
||||
if (job.status !== 'succeeded' || !job.imageUrl)
|
||||
throw new Error(`Prodia image generation failed within ${elapsed}s`);
|
||||
|
||||
// respond with the image URL
|
||||
const altText = `Prodia generated "${jobRequest.prompt}". Options: ${JSON.stringify({ seed: job.params })}.`;
|
||||
const response: Prodia.API.Imagine.Response = { status: 'success', imageUrl: job.imageUrl, altText, elapsed };
|
||||
return new NextResponse(JSON.stringify(response));
|
||||
|
||||
} catch (error) {
|
||||
console.error('Handler failed:', error);
|
||||
const elapsed = Math.round((Date.now() - tStart) / 100) / 10;
|
||||
const response: Prodia.API.Imagine.Response = { status: 'error', error: error?.toString() || 'Network issue', elapsed };
|
||||
return new NextResponse(JSON.stringify(response), { status: 500 });
|
||||
}
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export const config = {
|
||||
runtime: 'edge',
|
||||
};
|
||||
@@ -0,0 +1,48 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { Prodia } from '@/modules/prodia/prodia.types';
|
||||
|
||||
|
||||
// for lack of an API
|
||||
const HARDCODED_MODELS: Prodia.API.Models.Response = {
|
||||
models: [
|
||||
{ id: 'sdv1_4.ckpt [7460a6fa]', label: 'Stable Diffusion 1.4', priority: 8 },
|
||||
{ id: 'v1-5-pruned-emaonly.ckpt [81761151]', label: 'Stable Diffusion 1.5', priority: 9 },
|
||||
{ id: 'anythingv3_0-pruned.ckpt [2700c435]', label: 'Anything V3.0' },
|
||||
{ id: 'anything-v4.5-pruned.ckpt [65745d25]', label: 'Anything V4.5' },
|
||||
{ id: 'analog-diffusion-1.0.ckpt [9ca13f02]', label: 'Analog Diffusion' },
|
||||
{ id: 'theallys-mix-ii-churned.safetensors [5d9225a4]', label: `TheAlly's Mix II` },
|
||||
{ id: 'elldreths-vivid-mix.safetensors [342d9d26]', label: `Elldreth's Vivid Mix` },
|
||||
{ id: 'deliberate_v2.safetensors [10ec4b29]', label: 'Deliberate V2', priority: 5 },
|
||||
{ id: 'openjourney_V4.ckpt [ca2f377f]', label: 'Openjourney v4' },
|
||||
{ id: 'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]', label: 'Dreamlike Diffusion' },
|
||||
{ id: 'dreamlike-diffusion-2.0.safetensors [fdcf65e7]', label: 'Dreamlike Diffusion 2' },
|
||||
{ id: 'portrait+1.0.safetensors [1400e684]', label: 'Portrait' },
|
||||
{ id: 'riffusion-model-v1.ckpt [3aafa6fe]', label: 'Riffusion' },
|
||||
{ id: 'timeless-1.0.ckpt [7c4971d4]', label: 'Timeless' },
|
||||
{ id: 'dreamshaper_5BakedVae.safetensors [a3fbf318]', label: 'Dreamshaper 5' },
|
||||
{ id: 'revAnimated_v122.safetensors [3f4fefd9]', label: 'ReV Animated V1.2.2' },
|
||||
{ id: 'meinamix_meinaV9.safetensors [2ec66ab0]', label: 'MeinaMix Meina V9' },
|
||||
],
|
||||
};
|
||||
|
||||
// sort by priority
|
||||
HARDCODED_MODELS.models.sort((a, b) => (b.priority ?? 0) - (a.priority ?? 0));
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest): Promise<NextResponse> {
|
||||
try {
|
||||
// this is ignored for now, as there's not an API - but still we want to be able to use it in the future
|
||||
// noinspection JSUnusedLocalSymbols
|
||||
const { apiKey = '' } = (await req.json()) as Prodia.API.Models.RequestBody;
|
||||
return new NextResponse(JSON.stringify(HARDCODED_MODELS));
|
||||
} catch (error: any) {
|
||||
console.error('Handler failed:', error);
|
||||
return new NextResponse(`[Issue] ${error}`, { status: 400 });
|
||||
}
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export const config = {
|
||||
runtime: 'edge',
|
||||
};
|
||||
@@ -0,0 +1,50 @@
|
||||
// noinspection ExceptionCaughtLocallyJS
|
||||
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { PasteGG } from '@/modules/pastegg/pastegg.types';
|
||||
import { pasteGgPost } from '@/modules/pastegg/pastegg.server';
|
||||
|
||||
|
||||
/**
|
||||
* 'Proxy' that uploads a file to paste.gg.
|
||||
* Called by the UI to avoid CORS issues, as the browser cannot post directly to paste.gg.
|
||||
*/
|
||||
export default async function handler(req: NextRequest) {
|
||||
|
||||
try {
|
||||
|
||||
const { to, title, fileContent, fileName, origin }: PasteGG.API.Publish.RequestBody = await req.json();
|
||||
if (req.method !== 'POST' || to !== 'paste.gg' || !title || !fileContent || !fileName)
|
||||
throw new Error('Invalid options');
|
||||
|
||||
const paste = await pasteGgPost(title, fileName, fileContent, origin);
|
||||
console.log(`Posted to paste.gg`, paste);
|
||||
|
||||
if (paste?.status !== 'success')
|
||||
throw new Error(`${paste?.error || 'Unknown error'}: ${paste?.message || 'Paste.gg Error'}`);
|
||||
|
||||
return new NextResponse(JSON.stringify({
|
||||
type: 'success',
|
||||
url: `https://paste.gg/${paste.result.id}`,
|
||||
expires: paste.result.expires || 'never',
|
||||
deletionKey: paste.result.deletion_key || 'none',
|
||||
created: paste.result.created_at,
|
||||
} satisfies PasteGG.API.Publish.Response));
|
||||
|
||||
} catch (error) {
|
||||
|
||||
console.error('Error posting to paste.gg', error);
|
||||
return new NextResponse(JSON.stringify({
|
||||
type: 'error',
|
||||
error: error?.toString() || 'Network issue',
|
||||
} satisfies PasteGG.API.Publish.Response), { status: 500 });
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export const config = {
|
||||
runtime: 'edge',
|
||||
};
|
||||
@@ -0,0 +1,47 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { Search } from '@/modules/search/search.types';
|
||||
import { objectToQueryString } from '@/modules/search/search.client';
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest): Promise<NextResponse> {
|
||||
const { searchParams } = new URL(req.url);
|
||||
|
||||
const customSearchParams: Search.Wire.RequestParams = {
|
||||
q: searchParams.get('query') || '',
|
||||
cx: searchParams.get('cx') || process.env.GOOGLE_CSE_ID,
|
||||
key: searchParams.get('key') || process.env.GOOGLE_CLOUD_API_KEY,
|
||||
num: 5,
|
||||
};
|
||||
|
||||
try {
|
||||
if (!customSearchParams.key || !customSearchParams.cx) {
|
||||
// noinspection ExceptionCaughtLocallyJS
|
||||
throw new Error('Missing API Key or Custom Search Engine ID');
|
||||
}
|
||||
|
||||
const wireResponse = await fetch(`https://www.googleapis.com/customsearch/v1?${objectToQueryString(customSearchParams)}`);
|
||||
const data: Search.Wire.SearchResponse & { error?: { message?: string } } = await wireResponse.json();
|
||||
|
||||
if (data.error) {
|
||||
// noinspection ExceptionCaughtLocallyJS
|
||||
throw new Error(`Google Custom Search API error: ${data.error?.message}`);
|
||||
}
|
||||
|
||||
const apiResponse: Search.API.Response = data.items?.map((result): Search.API.BriefResult => ({
|
||||
title: result.title,
|
||||
link: result.link,
|
||||
snippet: result.snippet,
|
||||
})) || [];
|
||||
return new NextResponse(JSON.stringify(apiResponse));
|
||||
|
||||
} catch (error: any) {
|
||||
console.error('Handler failed:', error);
|
||||
return new NextResponse(`A search error occurred: ${error}`, { status: 500 });
|
||||
}
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export const config = {
|
||||
runtime: 'edge',
|
||||
};
|
||||
@@ -1,14 +0,0 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { AppCall } from '../src/apps/call/AppCall';
|
||||
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
|
||||
|
||||
export default function CallPage() {
|
||||
return (
|
||||
<AppLayout>
|
||||
<AppCall />
|
||||
</AppLayout>
|
||||
);
|
||||
}
|
||||
@@ -1,18 +1,53 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { AppChat } from '../src/apps/chat/AppChat';
|
||||
import { useShowNewsOnUpdate } from '../src/apps/news/news.hooks';
|
||||
import { Container, useTheme } from '@mui/joy';
|
||||
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
import { NoSSR } from '@/common/components/NoSSR';
|
||||
import { isValidOpenAIApiKey } from '@/modules/openai/openai.client';
|
||||
import { useSettingsStore } from '@/common/state/store-settings';
|
||||
|
||||
import { Chat } from '../src/apps/chat/Chat';
|
||||
import { SettingsModal } from '../src/apps/settings/SettingsModal';
|
||||
|
||||
|
||||
export default function ChatPage() {
|
||||
// show the News page on updates
|
||||
useShowNewsOnUpdate();
|
||||
export default function Home() {
|
||||
// state
|
||||
const [settingsShown, setSettingsShown] = React.useState(false);
|
||||
|
||||
// external state
|
||||
const theme = useTheme();
|
||||
const apiKey = useSettingsStore(state => state.apiKey);
|
||||
const centerMode = useSettingsStore(state => state.centerMode);
|
||||
|
||||
|
||||
// show the Settings Dialog at startup if the API key is required but not set
|
||||
React.useEffect(() => {
|
||||
if (!process.env.HAS_SERVER_KEY_OPENAI && !isValidOpenAIApiKey(apiKey))
|
||||
setSettingsShown(true);
|
||||
}, [apiKey]);
|
||||
|
||||
|
||||
return (
|
||||
<AppLayout>
|
||||
<AppChat />
|
||||
</AppLayout>
|
||||
/**
|
||||
* Note the global NoSSR wrapper
|
||||
* - Even the overall container could have hydration issues when using localStorage and non-default maxWidth
|
||||
*/
|
||||
<NoSSR>
|
||||
|
||||
<Container maxWidth={centerMode === 'full' ? false : centerMode === 'narrow' ? 'md' : 'xl'} disableGutters sx={{
|
||||
boxShadow: {
|
||||
xs: 'none',
|
||||
md: centerMode === 'narrow' ? theme.vars.shadow.md : 'none',
|
||||
xl: centerMode !== 'full' ? theme.vars.shadow.lg : 'none',
|
||||
},
|
||||
}}>
|
||||
|
||||
<Chat onShowSettings={() => setSettingsShown(true)} />
|
||||
|
||||
<SettingsModal open={settingsShown} onClose={() => setSettingsShown(false)} />
|
||||
|
||||
</Container>
|
||||
|
||||
</NoSSR>
|
||||
);
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { AppLabs } from '../src/apps/labs/AppLabs';
|
||||
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
|
||||
|
||||
export default function LabsPage() {
|
||||
return (
|
||||
<AppLayout suspendAutoModelsSetup>
|
||||
<AppLabs />
|
||||
</AppLayout>
|
||||
);
|
||||
}
|
||||
@@ -1,141 +0,0 @@
|
||||
import * as React from 'react';
|
||||
import { useRouter } from 'next/router';
|
||||
|
||||
import { Alert, Box, Button, Typography } from '@mui/joy';
|
||||
import ArrowBackIcon from '@mui/icons-material/ArrowBack';
|
||||
|
||||
import { setComposerStartupText } from '../src/apps/chat/components/composer/store-composer';
|
||||
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
import { LogoProgress } from '~/common/components/LogoProgress';
|
||||
import { asValidURL } from '~/common/util/urlUtils';
|
||||
|
||||
|
||||
/**
|
||||
* This page will be invoked on mobile when sharing Text/URLs/Files from other APPs
|
||||
* See the /public/manifest.json for how this is configured. Parameters:
|
||||
* - text: the text to share
|
||||
* - url: the URL to share
|
||||
* - if the URL is a valid URL, it will be downloaded and the content will be shared
|
||||
* - if the URL is not a valid URL, it will be shared as text
|
||||
* - title: the title of the shared content
|
||||
*/
|
||||
function AppShareTarget() {
|
||||
// state
|
||||
const [errorMessage, setErrorMessage] = React.useState<string | null>(null);
|
||||
const [intentText, setIntentText] = React.useState<string | null>(null);
|
||||
const [intentURL, setIntentURL] = React.useState<string | null>(null);
|
||||
const [isDownloading, setIsDownloading] = React.useState(false);
|
||||
|
||||
// external state
|
||||
const { query, push: routerPush, replace: routerReplace } = useRouter();
|
||||
|
||||
|
||||
const queueComposerTextAndLaunchApp = React.useCallback((text: string) => {
|
||||
setComposerStartupText(text);
|
||||
void routerReplace('/');
|
||||
}, [routerReplace]);
|
||||
|
||||
|
||||
// Detect the share Intent from the query
|
||||
React.useEffect(() => {
|
||||
// skip when query is not parsed yet
|
||||
if (!Object.keys(query).length)
|
||||
return;
|
||||
|
||||
// single item from the query
|
||||
let queryTextItem: string[] | string | null = query.url || query.text || null;
|
||||
if (Array.isArray(queryTextItem))
|
||||
queryTextItem = queryTextItem[0];
|
||||
|
||||
// check if the item is a URL
|
||||
const url = asValidURL(queryTextItem);
|
||||
if (url)
|
||||
setIntentURL(url);
|
||||
else if (queryTextItem)
|
||||
setIntentText(queryTextItem);
|
||||
else
|
||||
setErrorMessage('No text or url. Received: ' + JSON.stringify(query));
|
||||
|
||||
}, [query.url, query.text, query]);
|
||||
|
||||
|
||||
// Text -> Composer
|
||||
React.useEffect(() => {
|
||||
if (intentText)
|
||||
queueComposerTextAndLaunchApp(intentText);
|
||||
}, [intentText, queueComposerTextAndLaunchApp]);
|
||||
|
||||
|
||||
// URL -> download -> Composer
|
||||
React.useEffect(() => {
|
||||
if (intentURL) {
|
||||
setIsDownloading(true);
|
||||
// TEMP: until the Browse module is ready, just use the URL, verbatim
|
||||
queueComposerTextAndLaunchApp(intentURL);
|
||||
setIsDownloading(false);
|
||||
/*callBrowseFetchSinglePage(intentURL)
|
||||
.then(pageContent => {
|
||||
if (pageContent)
|
||||
queueComposerTextAndLaunchApp('\n\n```' + intentURL + '\n' + pageContent + '\n```\n');
|
||||
else
|
||||
setErrorMessage('Could not read any data');
|
||||
})
|
||||
.catch(error => setErrorMessage(error?.message || error || 'Unknown error'))
|
||||
.finally(() => setIsDownloading(false));*/
|
||||
}
|
||||
}, [intentURL, queueComposerTextAndLaunchApp]);
|
||||
|
||||
|
||||
return (
|
||||
|
||||
<Box sx={{
|
||||
backgroundColor: 'background.level2',
|
||||
display: 'flex', flexDirection: 'column', alignItems: 'center', justifyContent: 'center',
|
||||
flexGrow: 1,
|
||||
}}>
|
||||
|
||||
{/* Logo with Circular Progress */}
|
||||
<LogoProgress showProgress={isDownloading} />
|
||||
|
||||
{/* Title */}
|
||||
<Typography level='title-lg' sx={{ mt: 2, mb: 1 }}>
|
||||
{isDownloading ? 'Loading...' : errorMessage ? '' : intentURL ? 'Done' : 'Receiving...'}
|
||||
</Typography>
|
||||
|
||||
{/* Possible Error */}
|
||||
{errorMessage && <>
|
||||
<Alert variant='soft' color='danger' sx={{ my: 1 }}>
|
||||
<Typography>{errorMessage}</Typography>
|
||||
</Alert>
|
||||
<Button
|
||||
variant='solid' color='danger'
|
||||
onClick={() => routerPush('/')}
|
||||
endDecorator={<ArrowBackIcon />}
|
||||
sx={{ mt: 2 }}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
</>}
|
||||
|
||||
{/* URL under analysis */}
|
||||
<Typography level='body-xs'>
|
||||
{intentURL}
|
||||
</Typography>
|
||||
</Box>
|
||||
|
||||
);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* This page will be invoked on mobile when sharing Text/URLs/Files from other APPs
|
||||
* Example URL: https://get.big-agi.com/launch?title=This+Title&text=https%3A%2F%2Fexample.com%2Fapp%2Fpath
|
||||
*/
|
||||
export default function LaunchPage() {
|
||||
return (
|
||||
<AppLayout>
|
||||
<AppShareTarget />
|
||||
</AppLayout>
|
||||
);
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
import * as React from 'react';
|
||||
import { useRouter } from 'next/router';
|
||||
|
||||
import { AppChatLink } from '../../../src/apps/link/AppChatLink';
|
||||
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
|
||||
|
||||
export default function ChatLinkPage() {
|
||||
const { query } = useRouter();
|
||||
const chatLinkId = query?.chatLinkId as string ?? '';
|
||||
|
||||
return (
|
||||
<AppLayout suspendAutoModelsSetup>
|
||||
<AppChatLink linkId={chatLinkId} />
|
||||
</AppLayout>
|
||||
);
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { AppNews } from '../src/apps/news/AppNews';
|
||||
import { useMarkNewsAsSeen } from '../src/apps/news/news.hooks';
|
||||
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
|
||||
|
||||
export default function NewsPage() {
|
||||
// update the last seen news version
|
||||
useMarkNewsAsSeen();
|
||||
|
||||
return (
|
||||
<AppLayout suspendAutoModelsSetup>
|
||||
<AppNews />
|
||||
</AppLayout>
|
||||
);
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { AppPersonas } from '../src/apps/personas/AppPersonas';
|
||||
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
|
||||
|
||||
export default function PersonasPage() {
|
||||
return (
|
||||
<AppLayout>
|
||||
<AppPersonas />
|
||||
</AppLayout>
|
||||
);
|
||||
}
|
||||
@@ -1,63 +0,0 @@
|
||||
// Prisma is the ORM for server-side (API) access to the database
|
||||
//
|
||||
// This file defines the schema for the database.
|
||||
// - make sure to run 'prisma generate' after making changes to this file
|
||||
// - make sure to run 'prisma db push' to sync the remote database with the schema
|
||||
//
|
||||
// Database is optional: when the environment variables are not set, the database is not used at all,
|
||||
// and the storage of data in Big-AGI is limited to client-side (browser) storage.
|
||||
//
|
||||
// The database is used for:
|
||||
// - the 'sharing' function, to let users share the chats with each other
|
||||
|
||||
generator client {
|
||||
provider = "prisma-client-js"
|
||||
}
|
||||
|
||||
datasource db {
|
||||
provider = "postgresql"
|
||||
url = env("POSTGRES_PRISMA_URL") // uses connection pooling
|
||||
directUrl = env("POSTGRES_URL_NON_POOLING") // uses a direct connection
|
||||
}
|
||||
|
||||
//
|
||||
// Storage of Linked Data
|
||||
//
|
||||
model LinkStorage {
|
||||
id String @id @default(uuid())
|
||||
|
||||
ownerId String
|
||||
visibility LinkStorageVisibility
|
||||
|
||||
dataType LinkStorageDataType
|
||||
dataTitle String?
|
||||
dataSize Int
|
||||
data Json
|
||||
|
||||
upVotes Int @default(0)
|
||||
downVotes Int @default(0)
|
||||
flagsCount Int @default(0)
|
||||
readCount Int @default(0)
|
||||
writeCount Int @default(1)
|
||||
|
||||
// time-based expiration
|
||||
expiresAt DateTime?
|
||||
|
||||
// manual deletion
|
||||
deletionKey String
|
||||
isDeleted Boolean @default(false)
|
||||
deletedAt DateTime?
|
||||
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @updatedAt
|
||||
}
|
||||
|
||||
enum LinkStorageVisibility {
|
||||
PUBLIC
|
||||
UNLISTED
|
||||
PRIVATE
|
||||
}
|
||||
|
||||
enum LinkStorageDataType {
|
||||
CHAT_V1
|
||||
}
|
||||
@@ -1,8 +1,8 @@
|
||||
{
|
||||
"name": "big-AGI",
|
||||
"short_name": "big-AGI",
|
||||
"theme_color": "#32383E",
|
||||
"background_color": "#9FA6AD",
|
||||
"short_name": "AGI",
|
||||
"theme_color": "#434356",
|
||||
"background_color": "#B9B9C6",
|
||||
"description": "Personal AGI App",
|
||||
"display": "standalone",
|
||||
"start_url": "/",
|
||||
@@ -23,15 +23,5 @@
|
||||
"sizes": "1024x1024",
|
||||
"type": "image/png"
|
||||
}
|
||||
],
|
||||
"share_target": {
|
||||
"action": "/launch",
|
||||
"method": "GET",
|
||||
"enctype": "application/x-www-form-urlencoded",
|
||||
"params": {
|
||||
"title": "title",
|
||||
"text": "text",
|
||||
"url": "url"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
import * as React from 'react';
|
||||
import { useRouter } from 'next/router';
|
||||
|
||||
import { Container, Sheet } from '@mui/joy';
|
||||
|
||||
import { AppCallQueryParams } from '~/common/routes';
|
||||
import { InlineError } from '~/common/components/InlineError';
|
||||
|
||||
import { CallUI } from './CallUI';
|
||||
import { CallWizard } from './CallWizard';
|
||||
|
||||
|
||||
export const APP_CALL_ENABLED = false;
|
||||
|
||||
|
||||
export function AppCall() {
|
||||
// external state
|
||||
const { query } = useRouter();
|
||||
|
||||
// derived state
|
||||
const { conversationId, personaId } = query as any as AppCallQueryParams;
|
||||
const validInput = !!conversationId && !!personaId;
|
||||
|
||||
return (
|
||||
<Sheet variant='solid' color='neutral' invertedColors sx={{
|
||||
display: 'flex', flexDirection: 'column', justifyContent: 'center',
|
||||
flexGrow: 1,
|
||||
overflowY: 'auto',
|
||||
minHeight: 96,
|
||||
}}>
|
||||
|
||||
<Container maxWidth='sm' sx={{
|
||||
display: 'flex', flexDirection: 'column',
|
||||
alignItems: 'center',
|
||||
minHeight: '80dvh', justifyContent: 'space-evenly',
|
||||
gap: { xs: 2, md: 4 },
|
||||
}}>
|
||||
|
||||
{!validInput && <InlineError error={`Something went wrong. ${JSON.stringify(query)}`} />}
|
||||
|
||||
{validInput && (
|
||||
<CallWizard conversationId={conversationId}>
|
||||
<CallUI conversationId={conversationId} personaId={personaId} />
|
||||
</CallWizard>
|
||||
)}
|
||||
|
||||
</Container>
|
||||
|
||||
</Sheet>
|
||||
);
|
||||
}
|
||||
@@ -1,392 +0,0 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
import { useRouter } from 'next/router';
|
||||
|
||||
import { Box, Card, ListItemDecorator, MenuItem, Switch, Typography } from '@mui/joy';
|
||||
import ArrowBackIcon from '@mui/icons-material/ArrowBack';
|
||||
import CallEndIcon from '@mui/icons-material/CallEnd';
|
||||
import CallIcon from '@mui/icons-material/Call';
|
||||
import ChatOutlinedIcon from '@mui/icons-material/ChatOutlined';
|
||||
import MicIcon from '@mui/icons-material/Mic';
|
||||
import MicNoneIcon from '@mui/icons-material/MicNone';
|
||||
import RecordVoiceOverIcon from '@mui/icons-material/RecordVoiceOver';
|
||||
|
||||
import { useChatLLMDropdown } from '../chat/components/applayout/useLLMDropdown';
|
||||
|
||||
import { EXPERIMENTAL_speakTextStream } from '~/modules/elevenlabs/elevenlabs.client';
|
||||
import { SystemPurposeId, SystemPurposes } from '../../data';
|
||||
import { VChatMessageIn } from '~/modules/llms/transports/chatGenerate';
|
||||
import { streamChat } from '~/modules/llms/transports/streamChat';
|
||||
import { useVoiceDropdown } from '~/modules/elevenlabs/useVoiceDropdown';
|
||||
|
||||
import { Link } from '~/common/components/Link';
|
||||
import { SpeechResult, useSpeechRecognition } from '~/common/components/useSpeechRecognition';
|
||||
import { conversationTitle, createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
|
||||
import { playSoundUrl, usePlaySoundUrl } from '~/common/util/audioUtils';
|
||||
import { useLayoutPluggable } from '~/common/layout/store-applayout';
|
||||
|
||||
import { CallAvatar } from './components/CallAvatar';
|
||||
import { CallButton } from './components/CallButton';
|
||||
import { CallMessage } from './components/CallMessage';
|
||||
import { CallStatus } from './components/CallStatus';
|
||||
|
||||
|
||||
function CallMenuItems(props: {
|
||||
pushToTalk: boolean,
|
||||
setPushToTalk: (pushToTalk: boolean) => void,
|
||||
override: boolean,
|
||||
setOverride: (overridePersonaVoice: boolean) => void,
|
||||
}) {
|
||||
|
||||
// external state
|
||||
const { voicesDropdown } = useVoiceDropdown(false, !props.override);
|
||||
|
||||
const handlePushToTalkToggle = () => props.setPushToTalk(!props.pushToTalk);
|
||||
|
||||
const handleChangeVoiceToggle = () => props.setOverride(!props.override);
|
||||
|
||||
return <>
|
||||
|
||||
<MenuItem onClick={handlePushToTalkToggle}>
|
||||
<ListItemDecorator>{props.pushToTalk ? <MicNoneIcon /> : <MicIcon />}</ListItemDecorator>
|
||||
Push to talk
|
||||
<Switch checked={props.pushToTalk} onChange={handlePushToTalkToggle} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem onClick={handleChangeVoiceToggle}>
|
||||
<ListItemDecorator><RecordVoiceOverIcon /></ListItemDecorator>
|
||||
Change Voice
|
||||
<Switch checked={props.override} onChange={handleChangeVoiceToggle} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem>
|
||||
<ListItemDecorator>{' '}</ListItemDecorator>
|
||||
{voicesDropdown}
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem component={Link} href='https://github.com/enricoros/big-agi/issues/175' target='_blank'>
|
||||
<ListItemDecorator><ChatOutlinedIcon /></ListItemDecorator>
|
||||
Voice Calls Feedback
|
||||
</MenuItem>
|
||||
|
||||
</>;
|
||||
}
|
||||
|
||||
|
||||
export function CallUI(props: {
|
||||
conversationId: string,
|
||||
personaId: string,
|
||||
}) {
|
||||
|
||||
// state
|
||||
const [avatarClickCount, setAvatarClickCount] = React.useState<number>(0);// const [micMuted, setMicMuted] = React.useState(false);
|
||||
const [callElapsedTime, setCallElapsedTime] = React.useState<string>('00:00');
|
||||
const [callMessages, setCallMessages] = React.useState<DMessage[]>([]);
|
||||
const [overridePersonaVoice, setOverridePersonaVoice] = React.useState<boolean>(false);
|
||||
const [personaTextInterim, setPersonaTextInterim] = React.useState<string | null>(null);
|
||||
const [pushToTalk, setPushToTalk] = React.useState(true);
|
||||
const [stage, setStage] = React.useState<'ring' | 'declined' | 'connected' | 'ended'>('ring');
|
||||
const responseAbortController = React.useRef<AbortController | null>(null);
|
||||
|
||||
// external state
|
||||
const { push: routerPush } = useRouter();
|
||||
const { chatLLMId, chatLLMDropdown } = useChatLLMDropdown();
|
||||
const { chatTitle, messages } = useChatStore(state => {
|
||||
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
|
||||
return {
|
||||
chatTitle: conversation ? conversationTitle(conversation) : 'no conversation',
|
||||
messages: conversation ? conversation.messages : [],
|
||||
};
|
||||
}, shallow);
|
||||
const persona = SystemPurposes[props.personaId as SystemPurposeId] ?? undefined;
|
||||
const personaCallStarters = persona?.call?.starters ?? undefined;
|
||||
const personaVoiceId = overridePersonaVoice ? undefined : (persona?.voices?.elevenLabs?.voiceId ?? undefined);
|
||||
const personaSystemMessage = persona?.systemMessage ?? undefined;
|
||||
|
||||
// hooks and speech
|
||||
const [speechInterim, setSpeechInterim] = React.useState<SpeechResult | null>(null);
|
||||
const onSpeechResultCallback = React.useCallback((result: SpeechResult) => {
|
||||
setSpeechInterim(result.done ? null : { ...result });
|
||||
if (result.done) {
|
||||
const transcribed = result.transcript.trim();
|
||||
if (transcribed.length >= 1)
|
||||
setCallMessages(messages => [...messages, createDMessage('user', transcribed)]);
|
||||
}
|
||||
}, []);
|
||||
const { isSpeechEnabled, isRecording, isRecordingAudio, isRecordingSpeech, startRecording, stopRecording, toggleRecording } = useSpeechRecognition(onSpeechResultCallback, 1000);
|
||||
|
||||
// derived state
|
||||
const isRinging = stage === 'ring';
|
||||
const isConnected = stage === 'connected';
|
||||
const isDeclined = stage === 'declined';
|
||||
const isEnded = stage === 'ended';
|
||||
|
||||
|
||||
/// Sounds
|
||||
|
||||
// pickup / hangup
|
||||
React.useEffect(() => {
|
||||
!isRinging && playSoundUrl(isConnected ? '/sounds/chat-begin.mp3' : '/sounds/chat-end.mp3');
|
||||
}, [isRinging, isConnected]);
|
||||
|
||||
// ringtone
|
||||
usePlaySoundUrl(isRinging ? '/sounds/chat-ringtone.mp3' : null, 300, 2800 * 2);
|
||||
|
||||
|
||||
/// CONNECTED
|
||||
|
||||
const handleCallStop = () => {
|
||||
stopRecording();
|
||||
setStage('ended');
|
||||
};
|
||||
|
||||
// [E] pickup -> seed message and call timer
|
||||
// FIXME: Overriding the voice will reset the call - not a desired behavior
|
||||
React.useEffect(() => {
|
||||
if (!isConnected) return;
|
||||
|
||||
// show the call timer
|
||||
setCallElapsedTime('00:00');
|
||||
const start = Date.now();
|
||||
const interval = setInterval(() => {
|
||||
const elapsedSeconds = Math.floor((Date.now() - start) / 1000);
|
||||
const minutes = Math.floor(elapsedSeconds / 60);
|
||||
const seconds = elapsedSeconds % 60;
|
||||
setCallElapsedTime(`${minutes < 10 ? '0' : ''}${minutes}:${seconds < 10 ? '0' : ''}${seconds}`);
|
||||
}, 1000);
|
||||
|
||||
// seed the first message
|
||||
const phoneMessages = personaCallStarters || ['Hello?', 'Hey!'];
|
||||
const firstMessage = phoneMessages[Math.floor(Math.random() * phoneMessages.length)];
|
||||
|
||||
setCallMessages([createDMessage('assistant', firstMessage)]);
|
||||
// fire/forget
|
||||
void EXPERIMENTAL_speakTextStream(firstMessage, personaVoiceId);
|
||||
|
||||
return () => clearInterval(interval);
|
||||
}, [isConnected, personaCallStarters, personaVoiceId]);
|
||||
|
||||
// [E] persona streaming response - upon new user message
|
||||
React.useEffect(() => {
|
||||
// only act when we have a new user message
|
||||
if (!isConnected || callMessages.length < 1 || callMessages[callMessages.length - 1].role !== 'user')
|
||||
return;
|
||||
switch (callMessages[callMessages.length - 1].text) {
|
||||
// do not respond
|
||||
case 'Stop.':
|
||||
return;
|
||||
// command: close the call
|
||||
case 'Goodbye.':
|
||||
setStage('ended');
|
||||
setTimeout(() => {
|
||||
void routerPush('/');
|
||||
}, 2000);
|
||||
return;
|
||||
// command: regenerate answer
|
||||
case 'Retry.':
|
||||
case 'Try again.':
|
||||
setCallMessages(messages => messages.slice(0, messages.length - 2));
|
||||
return;
|
||||
// command: restart chat
|
||||
case 'Restart.':
|
||||
setCallMessages([]);
|
||||
return;
|
||||
}
|
||||
|
||||
// bail if no llm selected
|
||||
if (!chatLLMId) return;
|
||||
|
||||
// temp fix: when the chat has no messages, only assume a single system message
|
||||
const chatMessages: { role: VChatMessageIn['role'], text: string }[] = messages.length > 0
|
||||
? messages
|
||||
: personaSystemMessage
|
||||
? [{ role: 'system', text: personaSystemMessage }]
|
||||
: [];
|
||||
|
||||
// 'prompt' for a "telephone call"
|
||||
// FIXME: can easily run ouf of tokens - if this gets traction, we'll fix it
|
||||
const callPrompt: VChatMessageIn[] = [
|
||||
{ role: 'system', content: 'You are having a phone call. Your response style is brief and to the point, and according to your personality, defined below.' },
|
||||
...chatMessages.map(message => ({ role: message.role, content: message.text })),
|
||||
{ role: 'system', content: 'You are now on the phone call related to the chat above. Respect your personality and answer with short, friendly and accurate thoughtful lines.' },
|
||||
...callMessages.map(message => ({ role: message.role, content: message.text })),
|
||||
];
|
||||
|
||||
// perform completion
|
||||
responseAbortController.current = new AbortController();
|
||||
let finalText = '';
|
||||
let error: any | null = null;
|
||||
streamChat(chatLLMId, callPrompt, responseAbortController.current.signal, (updatedMessage: Partial<DMessage>) => {
|
||||
const text = updatedMessage.text?.trim();
|
||||
if (text) {
|
||||
finalText = text;
|
||||
setPersonaTextInterim(text);
|
||||
}
|
||||
}).catch((err: DOMException) => {
|
||||
if (err?.name !== 'AbortError')
|
||||
error = err;
|
||||
}).finally(() => {
|
||||
setPersonaTextInterim(null);
|
||||
setCallMessages(messages => [...messages, createDMessage('assistant', finalText + (error ? ` (ERROR: ${error.message || error.toString()})` : ''))]);
|
||||
// fire/forget
|
||||
void EXPERIMENTAL_speakTextStream(finalText, personaVoiceId);
|
||||
});
|
||||
|
||||
return () => {
|
||||
responseAbortController.current?.abort();
|
||||
responseAbortController.current = null;
|
||||
};
|
||||
}, [isConnected, callMessages, chatLLMId, messages, personaVoiceId, personaSystemMessage, routerPush]);
|
||||
|
||||
// [E] Message interrupter
|
||||
const abortTrigger = isConnected && isRecordingSpeech;
|
||||
React.useEffect(() => {
|
||||
if (abortTrigger && responseAbortController.current) {
|
||||
responseAbortController.current.abort();
|
||||
responseAbortController.current = null;
|
||||
}
|
||||
// TODO.. abort current speech
|
||||
}, [abortTrigger]);
|
||||
|
||||
|
||||
// [E] continuous speech recognition (reload)
|
||||
const shouldStartRecording = isConnected && !pushToTalk && speechInterim === null && !isRecordingAudio;
|
||||
React.useEffect(() => {
|
||||
if (shouldStartRecording)
|
||||
startRecording();
|
||||
}, [shouldStartRecording, startRecording]);
|
||||
|
||||
|
||||
// more derived state
|
||||
const personaName = persona?.title ?? 'Unknown';
|
||||
const isMicEnabled = isSpeechEnabled;
|
||||
const isTTSEnabled = true;
|
||||
const isEnabled = isMicEnabled && isTTSEnabled;
|
||||
|
||||
|
||||
// pluggable UI
|
||||
|
||||
const menuItems = React.useMemo(() =>
|
||||
<CallMenuItems
|
||||
pushToTalk={pushToTalk} setPushToTalk={setPushToTalk}
|
||||
override={overridePersonaVoice} setOverride={setOverridePersonaVoice} />
|
||||
, [overridePersonaVoice, pushToTalk],
|
||||
);
|
||||
|
||||
useLayoutPluggable(chatLLMDropdown, null, menuItems);
|
||||
|
||||
|
||||
return <>
|
||||
|
||||
<Typography
|
||||
level='h1'
|
||||
sx={{
|
||||
fontSize: { xs: '2.5rem', md: '3rem' },
|
||||
textAlign: 'center',
|
||||
mx: 2,
|
||||
}}
|
||||
>
|
||||
{isConnected ? personaName : 'Hello'}
|
||||
</Typography>
|
||||
|
||||
<CallAvatar
|
||||
symbol={persona?.symbol || '?'}
|
||||
imageUrl={persona?.imageUri}
|
||||
isRinging={isRinging}
|
||||
onClick={() => setAvatarClickCount(avatarClickCount + 1)}
|
||||
/>
|
||||
|
||||
<CallStatus
|
||||
callerName={isConnected ? undefined : personaName}
|
||||
statusText={isRinging ? 'is calling you' : isDeclined ? 'call declined' : isEnded ? 'call ended' : callElapsedTime}
|
||||
regardingText={chatTitle}
|
||||
micError={!isMicEnabled} speakError={!isTTSEnabled}
|
||||
/>
|
||||
|
||||
{/* Live Transcript, w/ streaming messages, audio indication, etc. */}
|
||||
{(isConnected || isEnded) && (
|
||||
<Card variant='soft' sx={{
|
||||
flexGrow: 1,
|
||||
minHeight: '15dvh', maxHeight: '24dvh',
|
||||
overflow: 'auto',
|
||||
width: '100%',
|
||||
borderRadius: 'lg',
|
||||
flexDirection: 'column-reverse',
|
||||
}}>
|
||||
|
||||
{/* Messages in reverse order, for auto-scroll from the bottom */}
|
||||
<Box sx={{ display: 'flex', flexDirection: 'column-reverse', gap: 1 }}>
|
||||
|
||||
{/* Listening... */}
|
||||
{isRecording && (
|
||||
<CallMessage
|
||||
text={<>{speechInterim?.transcript ? speechInterim.transcript + ' ' : ''}<i>{speechInterim?.interimTranscript}</i></>}
|
||||
variant={isRecordingSpeech ? 'solid' : 'outlined'}
|
||||
role='user'
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Persona streaming text... */}
|
||||
{!!personaTextInterim && (
|
||||
<CallMessage
|
||||
text={personaTextInterim}
|
||||
variant='solid' color='neutral'
|
||||
role='assistant'
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Messages (last 6 messages, in reverse order) */}
|
||||
{callMessages.slice(-6).reverse().map((message) =>
|
||||
<CallMessage
|
||||
key={message.id}
|
||||
text={message.text}
|
||||
variant={message.role === 'assistant' ? 'solid' : 'soft'} color='neutral'
|
||||
role={message.role} />,
|
||||
)}
|
||||
</Box>
|
||||
</Card>
|
||||
)}
|
||||
|
||||
{/* Call Buttons */}
|
||||
<Box sx={{ width: '100%', display: 'flex', justifyContent: 'space-evenly' }}>
|
||||
|
||||
{/* [ringing] Decline / Accept */}
|
||||
{isRinging && <CallButton Icon={CallEndIcon} text='Decline' color='danger' onClick={() => setStage('declined')} />}
|
||||
{isRinging && isEnabled && <CallButton Icon={CallIcon} text='Accept' color='success' variant='soft' onClick={() => setStage('connected')} />}
|
||||
|
||||
{/* [Calling] Hang / PTT (mute not enabled yet) */}
|
||||
{isConnected && <CallButton Icon={CallEndIcon} text='Hang up' color='danger' onClick={handleCallStop} />}
|
||||
{isConnected && (pushToTalk
|
||||
? <CallButton Icon={MicIcon} onClick={toggleRecording}
|
||||
text={isRecordingSpeech ? 'Listening...' : isRecording ? 'Listening' : 'Push To Talk'}
|
||||
variant={isRecordingSpeech ? 'solid' : isRecording ? 'soft' : 'outlined'} />
|
||||
: null
|
||||
// <CallButton disabled={true} Icon={MicOffIcon} onClick={() => setMicMuted(muted => !muted)}
|
||||
// text={micMuted ? 'Muted' : 'Mute'}
|
||||
// color={micMuted ? 'warning' : undefined} variant={micMuted ? 'solid' : 'outlined'} />
|
||||
)}
|
||||
|
||||
{/* [ended] Back / Call Again */}
|
||||
{(isEnded || isDeclined) && <Link noLinkStyle href='/'><CallButton Icon={ArrowBackIcon} text='Back' variant='soft' /></Link>}
|
||||
{(isEnded || isDeclined) && <CallButton Icon={CallIcon} text='Call Again' color='success' variant='soft' onClick={() => setStage('connected')} />}
|
||||
|
||||
</Box>
|
||||
|
||||
{/* DEBUG state */}
|
||||
{avatarClickCount > 10 && (avatarClickCount % 2 === 0) && (
|
||||
<Card variant='outlined' sx={{ maxHeight: '25dvh', overflow: 'auto', whiteSpace: 'pre', py: 0, width: '100%' }}>
|
||||
Special commands: Stop, Retry, Try Again, Restart, Goodbye.
|
||||
{JSON.stringify({ isSpeechEnabled, isRecordingAudio, speechInterim }, null, 2)}
|
||||
</Card>
|
||||
)}
|
||||
|
||||
{/*{isEnded && <Card variant='solid' size='lg' color='primary'>*/}
|
||||
{/* <CardContent>*/}
|
||||
{/* <Typography>*/}
|
||||
{/* Please rate the call quality, 1 to 5 - Just a Joke*/}
|
||||
{/* </Typography>*/}
|
||||
{/* </CardContent>*/}
|
||||
{/*</Card>}*/}
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -1,211 +0,0 @@
|
||||
import * as React from 'react';
|
||||
import { keyframes } from '@emotion/react';
|
||||
|
||||
import { Box, Button, Card, CardContent, IconButton, ListItemDecorator, Typography } from '@mui/joy';
|
||||
import ArrowForwardIcon from '@mui/icons-material/ArrowForward';
|
||||
import ChatIcon from '@mui/icons-material/Chat';
|
||||
import CheckIcon from '@mui/icons-material/Check';
|
||||
import CloseIcon from '@mui/icons-material/Close';
|
||||
import MicIcon from '@mui/icons-material/Mic';
|
||||
import RecordVoiceOverIcon from '@mui/icons-material/RecordVoiceOver';
|
||||
import WarningIcon from '@mui/icons-material/Warning';
|
||||
|
||||
import { navigateBack } from '~/common/routes';
|
||||
import { openLayoutPreferences } from '~/common/layout/store-applayout';
|
||||
import { useCapabilityBrowserSpeechRecognition, useCapabilityElevenLabs } from '~/common/components/useCapabilities';
|
||||
import { useChatStore } from '~/common/state/store-chats';
|
||||
import { useUICounter } from '~/common/state/store-ui';
|
||||
|
||||
|
||||
const cssRainbowBackgroundKeyframes = keyframes`
|
||||
100%, 0% {
|
||||
background-color: rgb(128, 0, 0);
|
||||
}
|
||||
8% {
|
||||
background-color: rgb(102, 51, 0);
|
||||
}
|
||||
16% {
|
||||
background-color: rgb(64, 64, 0);
|
||||
}
|
||||
25% {
|
||||
background-color: rgb(38, 76, 0);
|
||||
}
|
||||
33% {
|
||||
background-color: rgb(0, 89, 0);
|
||||
}
|
||||
41% {
|
||||
background-color: rgb(0, 76, 41);
|
||||
}
|
||||
50% {
|
||||
background-color: rgb(0, 64, 64);
|
||||
}
|
||||
58% {
|
||||
background-color: rgb(0, 51, 102);
|
||||
}
|
||||
66% {
|
||||
background-color: rgb(0, 0, 128);
|
||||
}
|
||||
75% {
|
||||
background-color: rgb(63, 0, 128);
|
||||
}
|
||||
83% {
|
||||
background-color: rgb(76, 0, 76);
|
||||
}
|
||||
91% {
|
||||
background-color: rgb(102, 0, 51);
|
||||
}`;
|
||||
|
||||
function StatusCard(props: { icon: React.JSX.Element, hasIssue: boolean, text: string, button?: React.JSX.Element }) {
|
||||
return (
|
||||
<Card sx={{ width: '100%' }}>
|
||||
<CardContent sx={{ flexDirection: 'row' }}>
|
||||
<ListItemDecorator>
|
||||
{props.icon}
|
||||
</ListItemDecorator>
|
||||
<Typography level='title-md' color={props.hasIssue ? 'warning' : undefined} sx={{ flexGrow: 1 }}>
|
||||
{props.text}
|
||||
{props.button}
|
||||
</Typography>
|
||||
<ListItemDecorator>
|
||||
{props.hasIssue ? <WarningIcon color='warning' /> : <CheckIcon color='success' />}
|
||||
</ListItemDecorator>
|
||||
</CardContent>
|
||||
</Card>
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
export function CallWizard(props: { strict?: boolean, conversationId: string, children: React.ReactNode }) {
|
||||
// state
|
||||
const [chatEmptyOverride, setChatEmptyOverride] = React.useState(false);
|
||||
const [recognitionOverride, setRecognitionOverride] = React.useState(false);
|
||||
|
||||
// external state
|
||||
const recognition = useCapabilityBrowserSpeechRecognition();
|
||||
const synthesis = useCapabilityElevenLabs();
|
||||
const chatIsEmpty = useChatStore(state => {
|
||||
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
|
||||
return !(conversation?.messages?.length);
|
||||
});
|
||||
const { novel, touch } = useUICounter('call-wizard');
|
||||
|
||||
// derived state
|
||||
const overriddenEmptyChat = chatEmptyOverride || !chatIsEmpty;
|
||||
const overriddenRecognition = recognitionOverride || recognition.mayWork;
|
||||
const allGood = overriddenEmptyChat && overriddenRecognition && synthesis.mayWork;
|
||||
const fatalGood = overriddenRecognition && synthesis.mayWork;
|
||||
|
||||
if (!novel && fatalGood)
|
||||
return props.children;
|
||||
|
||||
const handleOverrideChatEmpty = () => setChatEmptyOverride(true);
|
||||
|
||||
const handleOverrideRecognition = () => setRecognitionOverride(true);
|
||||
|
||||
const handleConfigureElevenLabs = () => {
|
||||
openLayoutPreferences(3);
|
||||
};
|
||||
|
||||
const handleFinishButton = () => {
|
||||
if (!allGood)
|
||||
return navigateBack();
|
||||
touch();
|
||||
};
|
||||
|
||||
|
||||
return <>
|
||||
|
||||
<Box sx={{ flexGrow: 0.5 }} />
|
||||
|
||||
<Typography level='title-lg' sx={{ fontSize: '3rem', fontWeight: 200, lineHeight: '1.5em', textAlign: 'center' }}>
|
||||
Welcome to<br />
|
||||
<Typography
|
||||
component='span'
|
||||
sx={{
|
||||
backgroundColor: 'primary.solidActiveBg', mx: -0.5, px: 0.5,
|
||||
animation: `${cssRainbowBackgroundKeyframes} 15s linear infinite`,
|
||||
}}>
|
||||
your first call
|
||||
</Typography>
|
||||
</Typography>
|
||||
|
||||
<Box sx={{ flexGrow: 0.5 }} />
|
||||
|
||||
<Typography level='body-lg'>
|
||||
{/*Before you receive your first call, */}
|
||||
Let's get you all set up.
|
||||
</Typography>
|
||||
|
||||
{/* Chat Empty status */}
|
||||
<StatusCard
|
||||
icon={<ChatIcon />}
|
||||
hasIssue={!overriddenEmptyChat}
|
||||
text={overriddenEmptyChat ? 'Great! Your chat has messages.' : 'The chat is empty. Calls are effective when the caller has context.'}
|
||||
button={overriddenEmptyChat ? undefined : (
|
||||
<Button variant='outlined' onClick={handleOverrideChatEmpty} sx={{ mx: 1 }}>
|
||||
Ignore
|
||||
</Button>
|
||||
)}
|
||||
/>
|
||||
|
||||
{/* Add the speech to text feature status */}
|
||||
<StatusCard
|
||||
icon={<MicIcon />}
|
||||
text={
|
||||
((overriddenRecognition && !recognition.warnings.length) ? 'Speech recognition should be good to go.' : 'There might be a speech recognition issue.')
|
||||
+ (recognition.isApiAvailable ? '' : ' Your browser does not support the speech recognition API.')
|
||||
+ (recognition.isDeviceNotSupported ? ' Your device does not provide this feature.' : '')
|
||||
+ (recognition.warnings.length ? ' ⚠️ ' + recognition.warnings.join(' · ') : '')
|
||||
}
|
||||
button={overriddenRecognition ? undefined : (
|
||||
<Button variant='outlined' onClick={handleOverrideRecognition} sx={{ mx: 1 }}>
|
||||
Ignore
|
||||
</Button>
|
||||
)}
|
||||
hasIssue={!overriddenRecognition}
|
||||
/>
|
||||
|
||||
{/* Text to Speech status */}
|
||||
<StatusCard
|
||||
icon={<RecordVoiceOverIcon />}
|
||||
text={
|
||||
(synthesis.mayWork ? 'Voice synthesis should be ready.' : 'There might be an issue with ElevenLabs voice synthesis.')
|
||||
+ (synthesis.isConfiguredServerSide ? '' : (synthesis.isConfiguredClientSide ? '' : ' Please add your API key in the settings.'))
|
||||
}
|
||||
button={synthesis.mayWork ? undefined : (
|
||||
<Button variant='outlined' onClick={handleConfigureElevenLabs} sx={{ mx: 1 }}>
|
||||
Configure
|
||||
</Button>
|
||||
)}
|
||||
hasIssue={!synthesis.mayWork}
|
||||
/>
|
||||
|
||||
{/*<Typography>*/}
|
||||
{/* 1. To start a call, click the "Accept" button when you receive an incoming call.*/}
|
||||
{/* 2. If your mic is enabled, you'll see a "Push to Talk" button. Press and hold it to speak, then release it to stop speaking.*/}
|
||||
{/* 3. If your mic is disabled, you can still type your messages in the chat and the assistant will respond.*/}
|
||||
{/* 4. During the call, you can control the voice synthesis settings from the menu in the top right corner.*/}
|
||||
{/* 5. To end the call, click the "Hang up" button.*/}
|
||||
{/*</Typography>*/}
|
||||
|
||||
<Box sx={{ flexGrow: 2 }} />
|
||||
|
||||
{/* bottom: text & button */}
|
||||
<Box sx={{ display: 'flex', justifyContent: 'space-around', alignItems: 'center', width: '100%', gap: 2, px: 0.5 }}>
|
||||
|
||||
<Typography level='body-lg'>
|
||||
{allGood ? 'Ready, Set, Call' : 'Please resolve the issues above before proceeding with the call'}
|
||||
</Typography>
|
||||
|
||||
<IconButton
|
||||
size='lg' variant={allGood ? 'soft' : 'solid'} color={allGood ? 'success' : 'danger'}
|
||||
onClick={handleFinishButton} sx={{ borderRadius: '50px' }}
|
||||
>
|
||||
{allGood ? <ArrowForwardIcon sx={{ fontSize: '1.5em' }} /> : <CloseIcon sx={{ fontSize: '1.5em' }} />}
|
||||
</IconButton>
|
||||
</Box>
|
||||
|
||||
<Box sx={{ flexGrow: 0.5 }} />
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
import * as React from 'react';
|
||||
import { keyframes } from '@emotion/react';
|
||||
|
||||
import { Avatar, Box } from '@mui/joy';
|
||||
|
||||
|
||||
const cssScaleKeyframes = keyframes`
|
||||
0% {
|
||||
transform: scale(1);
|
||||
}
|
||||
50% {
|
||||
transform: scale(1.2);
|
||||
}
|
||||
100% {
|
||||
transform: scale(1);
|
||||
}`;
|
||||
|
||||
|
||||
export function CallAvatar(props: { symbol: string, imageUrl?: string, isRinging: boolean, onClick: () => void }) {
|
||||
return (
|
||||
<Avatar
|
||||
variant='soft' color='neutral'
|
||||
onClick={props.onClick}
|
||||
src={props.imageUrl}
|
||||
sx={{
|
||||
'--Avatar-size': { xs: '160px', md: '200px' },
|
||||
'--variant-borderWidth': '4px',
|
||||
boxShadow: !props.imageUrl ? 'md' : null,
|
||||
fontSize: { xs: '100px', md: '120px' },
|
||||
}}
|
||||
>
|
||||
|
||||
{/* As fallback, show the large Persona Symbol */}
|
||||
{!props.imageUrl && (
|
||||
<Box
|
||||
sx={{
|
||||
...(props.isRinging
|
||||
? { animation: `${cssScaleKeyframes} 1.4s ease-in-out infinite` }
|
||||
: {}),
|
||||
}}
|
||||
>
|
||||
{props.symbol}
|
||||
</Box>
|
||||
)}
|
||||
|
||||
</Avatar>
|
||||
);
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, ColorPaletteProp, IconButton, Typography, VariantProp } from '@mui/joy';
|
||||
|
||||
|
||||
/**
|
||||
* Large button to operate the call, e.g.
|
||||
* --------
|
||||
* | 🎤 |
|
||||
* | Mute |
|
||||
* --------
|
||||
*/
|
||||
export function CallButton(props: {
|
||||
Icon: React.FC, text: string,
|
||||
variant?: VariantProp, color?: ColorPaletteProp, disabled?: boolean,
|
||||
onClick?: () => void,
|
||||
}) {
|
||||
return (
|
||||
<Box
|
||||
onClick={() => !props.disabled && props.onClick?.()}
|
||||
sx={{
|
||||
display: 'flex', flexDirection: 'column', alignItems: 'center',
|
||||
gap: { xs: 1, md: 2 },
|
||||
}}
|
||||
>
|
||||
|
||||
<IconButton
|
||||
disabled={props.disabled} variant={props.variant || 'solid'} color={props.color}
|
||||
sx={{
|
||||
'--IconButton-size': { xs: '4.2rem', md: '5rem' },
|
||||
borderRadius: '50%',
|
||||
// boxShadow: 'lg',
|
||||
}}>
|
||||
<props.Icon />
|
||||
</IconButton>
|
||||
|
||||
<Typography level='title-md' variant={props.disabled ? 'soft' : undefined}>
|
||||
{props.text}
|
||||
</Typography>
|
||||
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Chip, ColorPaletteProp, VariantProp } from '@mui/joy';
|
||||
import { SxProps } from '@mui/system';
|
||||
|
||||
import { VChatMessageIn } from '~/modules/llms/transports/chatGenerate';
|
||||
|
||||
|
||||
export function CallMessage(props: {
|
||||
text?: string | React.JSX.Element,
|
||||
variant?: VariantProp, color?: ColorPaletteProp,
|
||||
role: VChatMessageIn['role'],
|
||||
sx?: SxProps,
|
||||
}) {
|
||||
return (
|
||||
<Chip
|
||||
color={props.color} variant={props.variant}
|
||||
sx={{
|
||||
alignSelf: props.role === 'user' ? 'end' : 'start',
|
||||
whiteSpace: 'break-spaces',
|
||||
borderRadius: 'lg',
|
||||
mt: 'auto',
|
||||
// boxShadow: 'md',
|
||||
py: 1,
|
||||
...(props.sx || {}),
|
||||
}}
|
||||
>
|
||||
|
||||
{props.text}
|
||||
|
||||
</Chip>
|
||||
);
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Typography } from '@mui/joy';
|
||||
|
||||
import { InlineError } from '~/common/components/InlineError';
|
||||
|
||||
|
||||
/**
|
||||
* A status message for the call, such as:
|
||||
*
|
||||
* $Name
|
||||
* "Connecting..." or "Call ended",
|
||||
* re: $Regarding
|
||||
*/
|
||||
export function CallStatus(props: {
|
||||
callerName?: string,
|
||||
statusText: string,
|
||||
regardingText?: string,
|
||||
micError: boolean, speakError: boolean,
|
||||
// llmComponent?: React.JSX.Element,
|
||||
}) {
|
||||
return (
|
||||
<Box sx={{ display: 'flex', flexDirection: 'column' }}>
|
||||
|
||||
{!!props.callerName && <Typography level='h3' sx={{ textAlign: 'center' }}>
|
||||
<b>{props.callerName}</b>
|
||||
</Typography>}
|
||||
|
||||
{/*{props.llmComponent}*/}
|
||||
|
||||
<Typography level='body-md' sx={{ textAlign: 'center' }}>
|
||||
{props.statusText}
|
||||
</Typography>
|
||||
|
||||
{!!props.regardingText && <Typography level='body-md' sx={{ textAlign: 'center', mt: 0 }}>
|
||||
re: {props.regardingText}
|
||||
</Typography>}
|
||||
|
||||
{props.micError && <InlineError
|
||||
severity='danger' error='But this browser does not support speech recognition... 🤦♀️ - Try Chrome on Windows?' />}
|
||||
|
||||
{props.speakError && <InlineError
|
||||
severity='danger' error='And text-to-speech is not configured... 🤦♀️ - Configure it in Settings?' />}
|
||||
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -1,272 +0,0 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
|
||||
import { CmdRunProdia } from '~/modules/prodia/prodia.client';
|
||||
import { CmdRunReact } from '~/modules/aifn/react/react';
|
||||
import { FlattenerModal } from '~/modules/aifn/flatten/FlattenerModal';
|
||||
import { imaginePromptFromText } from '~/modules/aifn/imagine/imaginePromptFromText';
|
||||
import { useModelsStore } from '~/modules/llms/store-llms';
|
||||
|
||||
import { ConfirmationModal } from '~/common/components/ConfirmationModal';
|
||||
import { createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
|
||||
import { useGlobalShortcut } from '~/common/components/useGlobalShortcut';
|
||||
import { useLayoutPluggable } from '~/common/layout/store-applayout';
|
||||
|
||||
import { ChatDrawerItems } from './components/applayout/ChatDrawerItems';
|
||||
import { ChatDropdowns } from './components/applayout/ChatDropdowns';
|
||||
import { ChatMenuItems } from './components/applayout/ChatMenuItems';
|
||||
import { ChatMessageList } from './components/ChatMessageList';
|
||||
import { ChatModeId } from './components/composer/store-composer';
|
||||
import { CmdAddRoleMessage, extractCommands } from './commands';
|
||||
import { Composer } from './components/composer/Composer';
|
||||
import { Ephemerals } from './components/Ephemerals';
|
||||
|
||||
import { TradeConfig, TradeModal } from './trade/TradeModal';
|
||||
import { runAssistantUpdatingState } from './editors/chat-stream';
|
||||
import { runImageGenerationUpdatingState } from './editors/image-generate';
|
||||
import { runReActUpdatingState } from './editors/react-tangent';
|
||||
|
||||
|
||||
const SPECIAL_ID_ALL_CHATS = 'all-chats';
|
||||
|
||||
|
||||
export function AppChat() {
|
||||
|
||||
// state
|
||||
const [isMessageSelectionMode, setIsMessageSelectionMode] = React.useState(false);
|
||||
const [tradeConfig, setTradeConfig] = React.useState<TradeConfig | null>(null);
|
||||
const [clearConfirmationId, setClearConfirmationId] = React.useState<string | null>(null);
|
||||
const [deleteConfirmationId, setDeleteConfirmationId] = React.useState<string | null>(null);
|
||||
const [flattenConversationId, setFlattenConversationId] = React.useState<string | null>(null);
|
||||
|
||||
// external state
|
||||
const { activeConversationId, isConversationEmpty, hasAnyContent, duplicateConversation, deleteAllConversations, setMessages, systemPurposeId, setAutoTitle } = useChatStore(state => {
|
||||
const conversation = state.conversations.find(conversation => conversation.id === state.activeConversationId);
|
||||
const isConversationEmpty = conversation ? !conversation.messages.length : true;
|
||||
const hasAnyContent = state.conversations.length > 1 || !isConversationEmpty;
|
||||
return {
|
||||
activeConversationId: state.activeConversationId,
|
||||
isConversationEmpty,
|
||||
hasAnyContent,
|
||||
duplicateConversation: state.duplicateConversation,
|
||||
deleteAllConversations: state.deleteAllConversations,
|
||||
setMessages: state.setMessages,
|
||||
systemPurposeId: conversation?.systemPurposeId ?? null,
|
||||
setAutoTitle: state.setAutoTitle,
|
||||
};
|
||||
}, shallow);
|
||||
|
||||
|
||||
const handleExecuteConversation = async (chatModeId: ChatModeId, conversationId: string, history: DMessage[]) => {
|
||||
const { chatLLMId } = useModelsStore.getState();
|
||||
if (!chatModeId || !conversationId || !chatLLMId) return;
|
||||
|
||||
// "/command ...": overrides the chat mode
|
||||
const lastMessage = history.length > 0 ? history[history.length - 1] : null;
|
||||
if (lastMessage?.role === 'user') {
|
||||
const pieces = extractCommands(lastMessage.text);
|
||||
if (pieces.length == 2 && pieces[0].type === 'cmd' && pieces[1].type === 'text') {
|
||||
const [command, prompt] = [pieces[0].value, pieces[1].value];
|
||||
if (CmdRunProdia.includes(command)) {
|
||||
setMessages(conversationId, history);
|
||||
return await runImageGenerationUpdatingState(conversationId, prompt);
|
||||
}
|
||||
if (CmdRunReact.includes(command) && chatLLMId) {
|
||||
setMessages(conversationId, history);
|
||||
return await runReActUpdatingState(conversationId, prompt, chatLLMId);
|
||||
}
|
||||
if (CmdAddRoleMessage.includes(command)) {
|
||||
lastMessage.role = command.startsWith('/s') ? 'system' : command.startsWith('/a') ? 'assistant' : 'user';
|
||||
lastMessage.sender = 'Bot';
|
||||
lastMessage.text = prompt;
|
||||
return setMessages(conversationId, history);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// synchronous long-duration tasks, which update the state as they go
|
||||
if (chatLLMId && systemPurposeId) {
|
||||
switch (chatModeId) {
|
||||
case 'immediate':
|
||||
case 'immediate-follow-up':
|
||||
return await runAssistantUpdatingState(conversationId, history, chatLLMId, systemPurposeId, true, chatModeId === 'immediate-follow-up');
|
||||
case 'write-user':
|
||||
return setMessages(conversationId, history);
|
||||
case 'react':
|
||||
if (!lastMessage?.text)
|
||||
break;
|
||||
setMessages(conversationId, history);
|
||||
return await runReActUpdatingState(conversationId, lastMessage.text, chatLLMId);
|
||||
case 'draw-imagine':
|
||||
case 'draw-imagine-plus':
|
||||
if (!lastMessage?.text)
|
||||
break;
|
||||
const imagePrompt = chatModeId == 'draw-imagine-plus'
|
||||
? await imaginePromptFromText(lastMessage.text) || 'An error sign.'
|
||||
: lastMessage.text;
|
||||
setMessages(conversationId, history.map(message => message.id !== lastMessage.id ? message : {
|
||||
...message,
|
||||
text: `${CmdRunProdia[0]} ${imagePrompt}`,
|
||||
}));
|
||||
return await runImageGenerationUpdatingState(conversationId, imagePrompt);
|
||||
}
|
||||
}
|
||||
|
||||
// ISSUE: if we're here, it means we couldn't do the job, at least sync the history
|
||||
console.log('handleExecuteConversation: issue running', chatModeId, conversationId, lastMessage);
|
||||
setMessages(conversationId, history);
|
||||
};
|
||||
|
||||
const _findConversation = (conversationId: string) =>
|
||||
conversationId ? useChatStore.getState().conversations.find(c => c.id === conversationId) ?? null : null;
|
||||
|
||||
const handleExecuteChatHistory = async (conversationId: string, history: DMessage[]) =>
|
||||
await handleExecuteConversation('immediate', conversationId, history);
|
||||
|
||||
const handleImagineFromText = async (conversationId: string, messageText: string) => {
|
||||
const conversation = _findConversation(conversationId);
|
||||
if (conversation)
|
||||
return await handleExecuteConversation('draw-imagine-plus', conversationId, [...conversation.messages, createDMessage('user', messageText)]);
|
||||
};
|
||||
|
||||
const handleComposerNewMessage = async (chatModeId: ChatModeId, conversationId: string, userText: string) => {
|
||||
const conversation = _findConversation(conversationId);
|
||||
if (conversation)
|
||||
return await handleExecuteConversation(chatModeId, conversationId, [...conversation.messages, createDMessage('user', userText)]);
|
||||
};
|
||||
|
||||
const handleRegenerateAssistant = async () => {
|
||||
const conversation = activeConversationId ? _findConversation(activeConversationId) : null;
|
||||
if (conversation?.messages?.length) {
|
||||
const lastMessage = conversation.messages[conversation.messages.length - 1];
|
||||
if (lastMessage.role === 'assistant') {
|
||||
const newMessages = [...conversation.messages];
|
||||
newMessages.pop();
|
||||
return await handleExecuteConversation('immediate', conversation.id, newMessages);
|
||||
}
|
||||
}
|
||||
};
|
||||
useGlobalShortcut('r', true, true, handleRegenerateAssistant);
|
||||
|
||||
|
||||
const handleClearConversation = (conversationId: string) => setClearConfirmationId(conversationId);
|
||||
|
||||
const handleConfirmedClearConversation = () => {
|
||||
if (clearConfirmationId) {
|
||||
setMessages(clearConfirmationId, []);
|
||||
setAutoTitle(clearConfirmationId, '');
|
||||
setClearConfirmationId(null);
|
||||
}
|
||||
};
|
||||
|
||||
const handleDeleteAllConversations = () => setDeleteConfirmationId(SPECIAL_ID_ALL_CHATS);
|
||||
|
||||
const handleConfirmedDeleteConversation = () => {
|
||||
if (deleteConfirmationId) {
|
||||
if (deleteConfirmationId === SPECIAL_ID_ALL_CHATS) {
|
||||
deleteAllConversations();
|
||||
}// else
|
||||
// deleteConversation(deleteConfirmationId);
|
||||
setDeleteConfirmationId(null);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
const handleImportConversation = () => setTradeConfig({ dir: 'import' });
|
||||
|
||||
const handleExportConversation = (conversationId: string | null) => setTradeConfig({ dir: 'export', conversationId });
|
||||
|
||||
const handleFlattenConversation = (conversationId: string) => setFlattenConversationId(conversationId);
|
||||
|
||||
|
||||
// Pluggable ApplicationBar components
|
||||
|
||||
const centerItems = React.useMemo(() =>
|
||||
<ChatDropdowns conversationId={activeConversationId} />,
|
||||
[activeConversationId],
|
||||
);
|
||||
|
||||
const drawerItems = React.useMemo(() =>
|
||||
<ChatDrawerItems
|
||||
conversationId={activeConversationId}
|
||||
onImportConversation={handleImportConversation}
|
||||
onDeleteAllConversations={handleDeleteAllConversations}
|
||||
/>,
|
||||
[activeConversationId],
|
||||
);
|
||||
|
||||
const menuItems = React.useMemo(() =>
|
||||
<ChatMenuItems
|
||||
conversationId={activeConversationId} isConversationEmpty={isConversationEmpty} hasConversations={hasAnyContent}
|
||||
isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
|
||||
onClearConversation={handleClearConversation}
|
||||
onDuplicateConversation={duplicateConversation}
|
||||
onExportConversation={handleExportConversation}
|
||||
onFlattenConversation={handleFlattenConversation}
|
||||
/>,
|
||||
[activeConversationId, duplicateConversation, hasAnyContent, isConversationEmpty, isMessageSelectionMode],
|
||||
);
|
||||
|
||||
useLayoutPluggable(centerItems, drawerItems, menuItems);
|
||||
|
||||
return <>
|
||||
|
||||
<ChatMessageList
|
||||
conversationId={activeConversationId}
|
||||
isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
|
||||
onExecuteChatHistory={handleExecuteChatHistory}
|
||||
onImagineFromText={handleImagineFromText}
|
||||
sx={{
|
||||
flexGrow: 1,
|
||||
backgroundColor: 'background.level1',
|
||||
overflowY: 'auto', // overflowY: 'hidden'
|
||||
minHeight: 96,
|
||||
}} />
|
||||
|
||||
<Ephemerals
|
||||
conversationId={activeConversationId}
|
||||
sx={{
|
||||
// flexGrow: 0.1,
|
||||
flexShrink: 0.5,
|
||||
overflowY: 'auto',
|
||||
minHeight: 64,
|
||||
}} />
|
||||
|
||||
<Composer
|
||||
conversationId={activeConversationId} messageId={null}
|
||||
isDeveloperMode={systemPurposeId === 'Developer'}
|
||||
onNewMessage={handleComposerNewMessage}
|
||||
sx={{
|
||||
zIndex: 21, // position: 'sticky', bottom: 0,
|
||||
backgroundColor: 'background.surface',
|
||||
borderTop: `1px solid`,
|
||||
borderTopColor: 'divider',
|
||||
p: { xs: 1, md: 2 },
|
||||
}} />
|
||||
|
||||
|
||||
{/* Import / Export */}
|
||||
{!!tradeConfig && <TradeModal config={tradeConfig} onClose={() => setTradeConfig(null)} />}
|
||||
|
||||
{/* Flatten */}
|
||||
{!!flattenConversationId && <FlattenerModal conversationId={flattenConversationId} onClose={() => setFlattenConversationId(null)} />}
|
||||
|
||||
{/* [confirmation] Reset Conversation */}
|
||||
{!!clearConfirmationId && <ConfirmationModal
|
||||
open onClose={() => setClearConfirmationId(null)} onPositive={handleConfirmedClearConversation}
|
||||
confirmationText={'Are you sure you want to discard all the messages?'} positiveActionText={'Clear conversation'}
|
||||
/>}
|
||||
|
||||
{/* [confirmation] Delete All */}
|
||||
{!!deleteConfirmationId && <ConfirmationModal
|
||||
open onClose={() => setDeleteConfirmationId(null)} onPositive={handleConfirmedDeleteConversation}
|
||||
confirmationText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
|
||||
? 'Are you absolutely sure you want to delete ALL conversations? This action cannot be undone.'
|
||||
: 'Are you sure you want to delete this conversation?'}
|
||||
positiveActionText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
|
||||
? 'Yes, delete all'
|
||||
: 'Delete conversation'}
|
||||
/>}
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -0,0 +1,193 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
|
||||
import { Box, useTheme } from '@mui/joy';
|
||||
import { SxProps } from '@mui/joy/styles/types';
|
||||
|
||||
import { CmdRunProdia } from '@/modules/prodia/prodia.client';
|
||||
import { CmdRunReact } from '@/modules/search/search.client';
|
||||
import { PasteGG } from '@/modules/pastegg/pastegg.types';
|
||||
import { PublishedModal } from '@/modules/pastegg/PublishedModal';
|
||||
import { callPublish } from '@/modules/pastegg/pastegg.client';
|
||||
|
||||
import { ConfirmationModal } from '@/common/components/ConfirmationModal';
|
||||
import { Link } from '@/common/components/Link';
|
||||
import { conversationToMarkdown } from '@/common/util/conversationToMarkdown';
|
||||
import { createDMessage, DMessage, useChatStore } from '@/common/state/store-chats';
|
||||
import { extractCommands } from '@/common/util/extractCommands';
|
||||
import { useComposerStore } from '@/common/state/store-composer';
|
||||
import { useSettingsStore } from '@/common/state/store-settings';
|
||||
|
||||
import { ApplicationBar } from './components/appbar/ApplicationBar';
|
||||
import { ChatMessageList } from './components/ChatMessageList';
|
||||
import { Composer } from './components/composer/Composer';
|
||||
import { Ephemerals } from './components/ephemerals/Ephemerals';
|
||||
import { imaginePromptFromText } from './util/ai-functions';
|
||||
import { runAssistantUpdatingState } from './util/agi-immediate';
|
||||
import { runImageGenerationUpdatingState } from './util/imagine';
|
||||
import { runReActUpdatingState } from './util/agi-react';
|
||||
|
||||
|
||||
export function Chat(props: { onShowSettings: () => void, sx?: SxProps }) {
|
||||
// state
|
||||
const [isMessageSelectionMode, setIsMessageSelectionMode] = React.useState(false);
|
||||
const [publishConversationId, setPublishConversationId] = React.useState<string | null>(null);
|
||||
const [publishResponse, setPublishResponse] = React.useState<PasteGG.API.Publish.Response | null>(null);
|
||||
|
||||
// external state
|
||||
const theme = useTheme();
|
||||
const { sendModeId } = useComposerStore(state => ({ sendModeId: state.sendModeId }), shallow);
|
||||
const { activeConversationId, setMessages, chatModelId, systemPurposeId } = useChatStore(state => {
|
||||
const conversation = state.conversations.find(conversation => conversation.id === state.activeConversationId);
|
||||
return {
|
||||
activeConversationId: state.activeConversationId,
|
||||
setMessages: state.setMessages,
|
||||
chatModelId: conversation?.chatModelId ?? null,
|
||||
systemPurposeId: conversation?.systemPurposeId ?? null,
|
||||
};
|
||||
}, shallow);
|
||||
|
||||
|
||||
const handleExecuteConversation = async (conversationId: string, history: DMessage[]) => {
|
||||
if (!conversationId) return;
|
||||
|
||||
// Command - last user message is a cmd
|
||||
const lastMessage = history.length > 0 ? history[history.length - 1] : null;
|
||||
if (lastMessage?.role === 'user') {
|
||||
const pieces = extractCommands(lastMessage.text);
|
||||
if (pieces.length == 2 && pieces[0].type === 'cmd' && pieces[1].type === 'text') {
|
||||
const command = pieces[0].value;
|
||||
const prompt = pieces[1].value;
|
||||
if (CmdRunProdia.includes(command)) {
|
||||
setMessages(conversationId, history);
|
||||
return await runImageGenerationUpdatingState(conversationId, prompt);
|
||||
}
|
||||
if (CmdRunReact.includes(command) && chatModelId) {
|
||||
setMessages(conversationId, history);
|
||||
return await runReActUpdatingState(conversationId, prompt, chatModelId);
|
||||
}
|
||||
// if (CmdRunSearch.includes(command))
|
||||
// return await run...
|
||||
}
|
||||
}
|
||||
|
||||
// synchronous long-duration tasks, which update the state as they go
|
||||
if (sendModeId && chatModelId && systemPurposeId) {
|
||||
switch (sendModeId) {
|
||||
case 'immediate':
|
||||
return await runAssistantUpdatingState(conversationId, history, chatModelId, systemPurposeId);
|
||||
case 'react':
|
||||
if (lastMessage?.text) {
|
||||
setMessages(conversationId, history);
|
||||
return await runReActUpdatingState(conversationId, lastMessage.text, chatModelId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ISSUE: if we're here, it means we couldn't do the job, at least sync the history
|
||||
setMessages(conversationId, history);
|
||||
};
|
||||
|
||||
const _findConversation = (conversationId: string) =>
|
||||
conversationId ? useChatStore.getState().conversations.find(c => c.id === conversationId) ?? null : null;
|
||||
|
||||
const handleSendUserMessage = async (conversationId: string, userText: string) => {
|
||||
const conversation = _findConversation(conversationId);
|
||||
if (conversation)
|
||||
return await handleExecuteConversation(conversationId, [...conversation.messages, createDMessage('user', userText)]);
|
||||
};
|
||||
|
||||
const handleImagineFromText = async (conversationId: string, messageText: string) => {
|
||||
const conversation = _findConversation(conversationId);
|
||||
if (conversation && chatModelId) {
|
||||
const prompt = await imaginePromptFromText(messageText, chatModelId);
|
||||
if (prompt)
|
||||
return await handleExecuteConversation(conversationId, [...conversation.messages, createDMessage('user', `${CmdRunProdia[0]} ${prompt}`)]);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
const handlePublishConversation = (conversationId: string) => setPublishConversationId(conversationId);
|
||||
|
||||
const handleConfirmedPublishConversation = async () => {
|
||||
if (publishConversationId) {
|
||||
const conversation = _findConversation(publishConversationId);
|
||||
setPublishConversationId(null);
|
||||
if (conversation) {
|
||||
const markdownContent = conversationToMarkdown(conversation, !useSettingsStore.getState().showSystemMessages);
|
||||
const publishResponse = await callPublish('paste.gg', markdownContent);
|
||||
setPublishResponse(publishResponse);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
return (
|
||||
|
||||
<Box
|
||||
sx={{
|
||||
display: 'flex', flexDirection: 'column', height: '100vh',
|
||||
...(props.sx || {}),
|
||||
}}>
|
||||
|
||||
<ApplicationBar
|
||||
conversationId={activeConversationId}
|
||||
isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
|
||||
onPublishConversation={handlePublishConversation}
|
||||
onShowSettings={props.onShowSettings}
|
||||
sx={{
|
||||
zIndex: 20, // position: 'sticky', top: 0,
|
||||
// ...(process.env.NODE_ENV === 'development' ? { background: theme.vars.palette.danger.solidBg } : {}),
|
||||
}} />
|
||||
|
||||
<ChatMessageList
|
||||
conversationId={activeConversationId}
|
||||
isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
|
||||
onExecuteConversation={handleExecuteConversation}
|
||||
onImagineFromText={handleImagineFromText}
|
||||
sx={{
|
||||
flexGrow: 1,
|
||||
background: theme.vars.palette.background.level2,
|
||||
overflowY: 'auto', // overflowY: 'hidden'
|
||||
minHeight: 96,
|
||||
}} />
|
||||
|
||||
<Ephemerals
|
||||
conversationId={activeConversationId}
|
||||
sx={{
|
||||
// flexGrow: 0.1,
|
||||
flexShrink: 0.5,
|
||||
overflowY: 'auto',
|
||||
minHeight: 64,
|
||||
}} />
|
||||
|
||||
<Composer
|
||||
conversationId={activeConversationId} messageId={null}
|
||||
isDeveloperMode={systemPurposeId === 'Developer'}
|
||||
onSendMessage={handleSendUserMessage}
|
||||
sx={{
|
||||
zIndex: 21, // position: 'sticky', bottom: 0,
|
||||
background: theme.vars.palette.background.surface,
|
||||
borderTop: `1px solid ${theme.vars.palette.divider}`,
|
||||
p: { xs: 1, md: 2 },
|
||||
}} />
|
||||
|
||||
{/* Confirmation for Publishing */}
|
||||
<ConfirmationModal
|
||||
open={!!publishConversationId} onClose={() => setPublishConversationId(null)} onPositive={handleConfirmedPublishConversation}
|
||||
confirmationText={<>
|
||||
Share your conversation anonymously on <Link href='https://paste.gg' target='_blank'>paste.gg</Link>?
|
||||
It will be unlisted and available to share and read for 30 days. Keep in mind, deletion may not be possible.
|
||||
Are you sure you want to proceed?
|
||||
</>} positiveActionText={'Understood, upload to paste.gg'}
|
||||
/>
|
||||
|
||||
{/* Show the Published details */}
|
||||
{!!publishResponse && (
|
||||
<PublishedModal open onClose={() => setPublishResponse(null)} response={publishResponse} />
|
||||
)}
|
||||
|
||||
</Box>
|
||||
|
||||
);
|
||||
}
|
||||
@@ -1,76 +1,32 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
|
||||
import { Box, List, Sheet, Switch, Tooltip, Typography } from '@mui/joy';
|
||||
import { Box, List } from '@mui/joy';
|
||||
import { SxProps } from '@mui/joy/styles/types';
|
||||
|
||||
import { useChatLLM } from '~/modules/llms/store-llms';
|
||||
|
||||
import { createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
|
||||
import { useUIPreferencesStore } from '~/common/state/store-ui';
|
||||
import { createDMessage, DMessage, useChatStore } from '@/common/state/store-chats';
|
||||
import { useSettingsStore } from '@/common/state/store-settings';
|
||||
|
||||
import { ChatMessage } from './message/ChatMessage';
|
||||
import { CleanerMessage, MessagesSelectionHeader } from './message/CleanerMessage';
|
||||
import { PersonaSelector } from './persona-selector/PersonaSelector';
|
||||
|
||||
|
||||
/**
|
||||
* [Experimental] A panel with tools for the chat
|
||||
*/
|
||||
function ToolsPanel(props: { showDiff: boolean, setShowDiff: (showDiff: boolean) => void }) {
|
||||
return (
|
||||
<Sheet
|
||||
variant='outlined' invertedColors
|
||||
sx={{
|
||||
position: 'fixed', top: 64, left: 8, zIndex: 101,
|
||||
boxShadow: 'md', borderRadius: '100px',
|
||||
p: 2,
|
||||
display: 'flex', flexFlow: 'row wrap', alignItems: 'center', justifyContent: 'space-between', gap: 2,
|
||||
}}
|
||||
>
|
||||
<Typography level='title-md'>
|
||||
🪛
|
||||
</Typography>
|
||||
<Tooltip title='Highlight differences'>
|
||||
<Switch
|
||||
checked={props.showDiff} onChange={() => props.setShowDiff(!props.showDiff)}
|
||||
startDecorator={<Typography level='title-md'>Diff</Typography>}
|
||||
/>
|
||||
</Tooltip>
|
||||
</Sheet>
|
||||
);
|
||||
}
|
||||
import { ChatMessageSelectable, MessagesSelectionHeader } from './message/ChatMessageSelectable';
|
||||
import { PurposeSelector } from './PurposeSelector';
|
||||
|
||||
|
||||
/**
|
||||
* A list of ChatMessages
|
||||
*/
|
||||
export function ChatMessageList(props: {
|
||||
conversationId: string | null,
|
||||
showTools?: boolean,
|
||||
isMessageSelectionMode: boolean, setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void,
|
||||
onExecuteChatHistory: (conversationId: string, history: DMessage[]) => void,
|
||||
onImagineFromText: (conversationId: string, userText: string) => Promise<any>,
|
||||
sx?: SxProps
|
||||
}) {
|
||||
export function ChatMessageList(props: { conversationId: string | null, isMessageSelectionMode: boolean, setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void, onExecuteConversation: (conversationId: string, history: DMessage[]) => void, onImagineFromText: (conversationId: string, userText: string) => void, sx?: SxProps }) {
|
||||
// state
|
||||
const [diffing, setDiffing] = React.useState<boolean>(false);
|
||||
const [selectedMessages, setSelectedMessages] = React.useState<Set<string>>(new Set());
|
||||
|
||||
// external state
|
||||
const { experimentalLabs, showSystemMessages } = useUIPreferencesStore(state => ({
|
||||
experimentalLabs: state.experimentalLabs,
|
||||
showSystemMessages: state.showSystemMessages,
|
||||
}));
|
||||
const { messages, editMessage, deleteMessage, historyTokenCount } = useChatStore(state => {
|
||||
const showSystemMessages = useSettingsStore(state => state.showSystemMessages);
|
||||
const { editMessage, deleteMessage } = useChatStore(state => ({ editMessage: state.editMessage, deleteMessage: state.deleteMessage }), shallow);
|
||||
const messages = useChatStore(state => {
|
||||
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
|
||||
return {
|
||||
messages: conversation ? conversation.messages : [],
|
||||
editMessage: state.editMessage, deleteMessage: state.deleteMessage,
|
||||
historyTokenCount: conversation ? conversation.tokenCount : 0,
|
||||
};
|
||||
return conversation ? conversation.messages : [];
|
||||
}, shallow);
|
||||
const { chatLLM } = useChatLLM();
|
||||
|
||||
|
||||
const handleMessageDelete = (messageId: string) =>
|
||||
props.conversationId && deleteMessage(props.conversationId, messageId);
|
||||
@@ -78,20 +34,16 @@ export function ChatMessageList(props: {
|
||||
const handleMessageEdit = (messageId: string, newText: string) =>
|
||||
props.conversationId && editMessage(props.conversationId, messageId, { text: newText }, true);
|
||||
|
||||
const handleImagineFromText = (messageText: string): Promise<any> => {
|
||||
if (props.conversationId)
|
||||
return props.onImagineFromText(props.conversationId, messageText);
|
||||
else
|
||||
return Promise.reject('No conversation');
|
||||
};
|
||||
const handleImagineFromText = (messageText: string) =>
|
||||
props.conversationId && props.onImagineFromText(props.conversationId, messageText);
|
||||
|
||||
const handleRestartFromMessage = (messageId: string, offset: number) => {
|
||||
const truncatedHistory = messages.slice(0, messages.findIndex(m => m.id === messageId) + offset + 1);
|
||||
props.conversationId && props.onExecuteChatHistory(props.conversationId, truncatedHistory);
|
||||
props.conversationId && props.onExecuteConversation(props.conversationId, truncatedHistory);
|
||||
};
|
||||
|
||||
const handleRunExample = (text: string) =>
|
||||
props.conversationId && props.onExecuteChatHistory(props.conversationId, [...messages, createDMessage('user', text)]);
|
||||
props.conversationId && props.onExecuteConversation(props.conversationId, [...messages, createDMessage('user', text)]);
|
||||
|
||||
|
||||
// hide system messages if the user chooses so
|
||||
@@ -102,7 +54,7 @@ export function ChatMessageList(props: {
|
||||
if (!filteredMessages.length)
|
||||
return props.conversationId ? (
|
||||
<Box sx={props.sx || {}}>
|
||||
<PersonaSelector conversationId={props.conversationId} runExample={handleRunExample} />
|
||||
<PurposeSelector conversationId={props.conversationId} runExample={handleRunExample} />
|
||||
</Box>
|
||||
) : null;
|
||||
|
||||
@@ -116,14 +68,14 @@ export function ChatMessageList(props: {
|
||||
const handleSelectAllMessages = (selected: boolean) => {
|
||||
const newSelected = new Set<string>();
|
||||
if (selected)
|
||||
for (const message of messages)
|
||||
for (let message of messages)
|
||||
newSelected.add(message.id);
|
||||
setSelectedMessages(newSelected);
|
||||
};
|
||||
|
||||
const handleDeleteSelectedMessages = () => {
|
||||
if (props.conversationId)
|
||||
for (const selectedMessage of selectedMessages)
|
||||
for (let selectedMessage of selectedMessages)
|
||||
deleteMessage(props.conversationId, selectedMessage);
|
||||
setSelectedMessages(new Set());
|
||||
};
|
||||
@@ -134,36 +86,18 @@ export function ChatMessageList(props: {
|
||||
// '&::-webkit-scrollbar': {
|
||||
// md: {
|
||||
// width: 8,
|
||||
// background: theme.palette.neutral.plainHoverBg,
|
||||
// background: theme.vars.palette.neutral.plainHoverBg,
|
||||
// },
|
||||
// },
|
||||
// '&::-webkit-scrollbar-thumb': {
|
||||
// background: theme.palette.neutral.solidBg,
|
||||
// background: theme.vars.palette.neutral.solidBg,
|
||||
// borderRadius: 6,
|
||||
// },
|
||||
// '&::-webkit-scrollbar-thumb:hover': {
|
||||
// background: theme.palette.neutral.solidHoverBg,
|
||||
// background: theme.vars.palette.neutral.solidHoverBg,
|
||||
// },
|
||||
// };
|
||||
|
||||
|
||||
// pass the diff text to most recent assistant message, once done
|
||||
const showTextTools = !!props.showTools || experimentalLabs;
|
||||
let diffMessage: DMessage | undefined;
|
||||
let diffText: string | undefined;
|
||||
if (diffing && showTextTools) {
|
||||
const [msgB, msgA] = filteredMessages.filter(m => m.role === 'assistant');
|
||||
if (!msgB.typing && msgB?.text && msgA?.text) {
|
||||
const textA = msgA.text, textB = msgB.text;
|
||||
const lenA = textA.length, lenB = textB.length;
|
||||
if (lenA > 80 && lenB > 80 && lenA > lenB / 2 && lenB > lenA / 2) {
|
||||
diffMessage = msgB;
|
||||
diffText = textA;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return (
|
||||
<List sx={{
|
||||
p: 0, ...(props.sx || {}),
|
||||
@@ -175,35 +109,27 @@ export function ChatMessageList(props: {
|
||||
|
||||
{filteredMessages.map((message, idx) =>
|
||||
props.isMessageSelectionMode ? (
|
||||
|
||||
<CleanerMessage
|
||||
<ChatMessageSelectable
|
||||
key={'sel-' + message.id} message={message}
|
||||
isBottom={idx === 0} remainingTokens={(chatLLM ? chatLLM.contextTokens : 0) - historyTokenCount}
|
||||
isBottom={idx === 0}
|
||||
selected={selectedMessages.has(message.id)} onToggleSelected={handleToggleSelected}
|
||||
/>
|
||||
|
||||
) : (
|
||||
|
||||
<ChatMessage
|
||||
key={'msg-' + message.id} message={message} diffText={message === diffMessage ? diffText : undefined}
|
||||
key={'msg-' + message.id} message={message}
|
||||
isBottom={idx === 0}
|
||||
onMessageDelete={() => handleMessageDelete(message.id)}
|
||||
onMessageEdit={newText => handleMessageEdit(message.id, newText)}
|
||||
onMessageRunFrom={(offset: number) => handleRestartFromMessage(message.id, offset)}
|
||||
onImagine={handleImagineFromText}
|
||||
/>
|
||||
|
||||
onImagine={handleImagineFromText} />
|
||||
),
|
||||
)}
|
||||
|
||||
{showTextTools && <ToolsPanel showDiff={diffing} setShowDiff={setDiffing} />}
|
||||
|
||||
{/* Header at the bottom because of 'row-reverse' */}
|
||||
{props.isMessageSelectionMode && (
|
||||
<MessagesSelectionHeader
|
||||
hasSelected={selectedMessages.size > 0}
|
||||
isBottom={filteredMessages.length === 0}
|
||||
sumTokens={historyTokenCount}
|
||||
onClose={() => props.setIsMessageSelectionMode(false)}
|
||||
onSelectAll={handleSelectAllMessages}
|
||||
onDeleteMessages={handleDeleteSelectedMessages}
|
||||
|
||||
@@ -1,18 +1,14 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
|
||||
import { Box, Button, Checkbox, Grid, IconButton, Input, Stack, Textarea, Typography } from '@mui/joy';
|
||||
import { Box, Button, Checkbox, Grid, IconButton, Input, Stack, Textarea, Typography, useTheme } from '@mui/joy';
|
||||
import ClearIcon from '@mui/icons-material/Clear';
|
||||
import ScienceIcon from '@mui/icons-material/Science';
|
||||
import SearchIcon from '@mui/icons-material/Search';
|
||||
import TelegramIcon from '@mui/icons-material/Telegram';
|
||||
|
||||
import { Link } from '~/common/components/Link';
|
||||
import { useChatStore } from '~/common/state/store-chats';
|
||||
import { useUIPreferencesStore } from '~/common/state/store-ui';
|
||||
|
||||
import { SystemPurposeId, SystemPurposes } from '../../../../data';
|
||||
import { usePurposeStore } from './store-purposes';
|
||||
import { SystemPurposeId, SystemPurposes } from '../../../data';
|
||||
import { useChatStore } from '@/common/state/store-chats';
|
||||
import { usePurposeStore } from '@/common/state/store-purposes';
|
||||
import { useSettingsStore } from '@/common/state/store-settings';
|
||||
|
||||
|
||||
// Constants for tile sizes / grid width - breakpoints need to be computed here to work around
|
||||
@@ -27,25 +23,26 @@ const bpMaxWidth = Object.entries(bpTileSize).reduce((acc, [key, value], index)
|
||||
acc[key] = tileCols[index] * (value + 8 * tileSpacing) - 8 * tileSpacing;
|
||||
return acc;
|
||||
}, {} as Record<string, number>);
|
||||
const bpTileGap = { xs: 0.5, md: 1 };
|
||||
const bpTileGap = { xs: 2, md: 3 };
|
||||
|
||||
|
||||
// Add this utility function to get a random array element
|
||||
const getRandomElement = <T, >(array: T[]): T | undefined =>
|
||||
const getRandomElement = <T extends any>(array: T[]): T | undefined =>
|
||||
array.length > 0 ? array[Math.floor(Math.random() * array.length)] : undefined;
|
||||
|
||||
|
||||
/**
|
||||
* Purpose selector for the current chat. Clicking on any item activates it for the current chat.
|
||||
*/
|
||||
export function PersonaSelector(props: { conversationId: string, runExample: (example: string) => void }) {
|
||||
export function PurposeSelector(props: { conversationId: string, runExample: (example: string) => void }) {
|
||||
// state
|
||||
const [searchQuery, setSearchQuery] = React.useState('');
|
||||
const [filteredIDs, setFilteredIDs] = React.useState<SystemPurposeId[] | null>(null);
|
||||
const [editMode, setEditMode] = React.useState(false);
|
||||
|
||||
// external state
|
||||
const showFinder = useUIPreferencesStore(state => state.showPurposeFinder);
|
||||
const theme = useTheme();
|
||||
const showPurposeFinder = useSettingsStore(state => state.showPurposeFinder);
|
||||
const { systemPurposeId, setSystemPurposeId } = useChatStore(state => {
|
||||
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
|
||||
return {
|
||||
@@ -108,7 +105,7 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
|
||||
|
||||
|
||||
// we show them all if the filter is clear (null)
|
||||
const unfilteredPurposeIDs = (filteredIDs && showFinder) ? filteredIDs : Object.keys(SystemPurposes);
|
||||
const unfilteredPurposeIDs = (filteredIDs && showPurposeFinder) ? filteredIDs : Object.keys(SystemPurposes);
|
||||
const purposeIDs = editMode ? unfilteredPurposeIDs : unfilteredPurposeIDs.filter(id => !hiddenPurposeIDs.includes(id));
|
||||
|
||||
const selectedPurpose = purposeIDs.length ? (SystemPurposes[systemPurposeId] ?? null) : null;
|
||||
@@ -116,7 +113,7 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
|
||||
|
||||
return <>
|
||||
|
||||
{showFinder && <Box sx={{ p: 2 * tileSpacing }}>
|
||||
{showPurposeFinder && <Box sx={{ p: 2 * tileSpacing }}>
|
||||
<Input
|
||||
fullWidth
|
||||
variant='outlined' color='neutral'
|
||||
@@ -130,7 +127,7 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
|
||||
</IconButton>
|
||||
)}
|
||||
sx={{
|
||||
boxShadow: 'sm',
|
||||
boxShadow: theme.vars.shadow.sm,
|
||||
}}
|
||||
/>
|
||||
</Box>}
|
||||
@@ -140,8 +137,8 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
|
||||
<Box sx={{ maxWidth: bpMaxWidth }}>
|
||||
|
||||
<Box sx={{ display: 'flex', flexDirection: 'row', alignItems: 'baseline', justifyContent: 'space-between', gap: 2, mb: 1 }}>
|
||||
<Typography level='title-sm'>
|
||||
AI Persona
|
||||
<Typography level='body2' color='neutral'>
|
||||
Select an AI purpose
|
||||
</Typography>
|
||||
<Button variant='plain' color='neutral' size='sm' onClick={toggleEditMode}>
|
||||
{editMode ? 'Done' : 'Edit'}
|
||||
@@ -162,14 +159,14 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
|
||||
height: bpTileSize,
|
||||
width: bpTileSize,
|
||||
...((editMode || systemPurposeId !== spId) ? {
|
||||
boxShadow: 'md',
|
||||
...(SystemPurposes[spId as SystemPurposeId]?.highlighted ? {} : { backgroundColor: 'background.surface' }),
|
||||
boxShadow: theme.vars.shadow.md,
|
||||
...(SystemPurposes[spId as SystemPurposeId]?.highlighted ? {} : { background: theme.vars.palette.background.level1 }),
|
||||
} : {}),
|
||||
}}
|
||||
>
|
||||
{editMode && (
|
||||
<Checkbox
|
||||
label={<Typography level='body-sm'>show</Typography>}
|
||||
label={<Typography level='body2'>show</Typography>}
|
||||
checked={!hiddenPurposeIDs.includes(spId)} onChange={() => toggleHiddenPurposeId(spId)}
|
||||
sx={{ alignSelf: 'flex-start' }}
|
||||
/>
|
||||
@@ -183,35 +180,10 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
|
||||
</Button>
|
||||
</Grid>
|
||||
))}
|
||||
{/* Button to start the YouTube persona creator */}
|
||||
<Grid>
|
||||
<Button
|
||||
variant='soft' color='neutral'
|
||||
component={Link} noLinkStyle href='/personas'
|
||||
sx={{
|
||||
'--Icon-fontSize': '2rem',
|
||||
flexDirection: 'column',
|
||||
fontWeight: 500,
|
||||
// gap: bpTileGap,
|
||||
height: bpTileSize,
|
||||
width: bpTileSize,
|
||||
border: `1px dashed`,
|
||||
boxShadow: 'md',
|
||||
backgroundColor: 'background.surface',
|
||||
}}
|
||||
>
|
||||
<div>
|
||||
<ScienceIcon />
|
||||
</div>
|
||||
<div>
|
||||
YouTube persona creator
|
||||
</div>
|
||||
</Button>
|
||||
</Grid>
|
||||
</Grid>
|
||||
|
||||
<Typography
|
||||
level='body-sm'
|
||||
level='body2'
|
||||
sx={{
|
||||
mt: selectedExample ? 1 : 3,
|
||||
display: 'flex', alignItems: 'center', gap: 1,
|
||||
@@ -219,16 +191,16 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
|
||||
'&:hover > button': { opacity: 1 },
|
||||
}}>
|
||||
{!selectedPurpose
|
||||
? 'Oops! No AI persona found for your search.'
|
||||
? 'Oops! No AI purposes found for your search.'
|
||||
: (selectedExample
|
||||
? <>
|
||||
Example: {selectedExample}
|
||||
<i>{selectedExample}</i>
|
||||
<IconButton
|
||||
variant='plain' color='primary' size='md'
|
||||
variant='plain' color='neutral' size='md'
|
||||
onClick={() => props.runExample(selectedExample)}
|
||||
sx={{ opacity: 0, transition: 'opacity 0.3s' }}
|
||||
>
|
||||
<TelegramIcon />
|
||||
💬
|
||||
</IconButton>
|
||||
</>
|
||||
: selectedPurpose.description
|
||||
@@ -241,10 +213,7 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
|
||||
minRows={3}
|
||||
defaultValue={SystemPurposes['Custom']?.systemMessage} onChange={handleCustomSystemMessageChange}
|
||||
sx={{
|
||||
backgroundColor: 'background.level1',
|
||||
'&:focus-within': {
|
||||
backgroundColor: 'background.popup',
|
||||
},
|
||||
background: theme.vars.palette.background.level1,
|
||||
lineHeight: 1.75,
|
||||
mt: 1,
|
||||
}} />
|
||||
@@ -0,0 +1,47 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Option, Select } from '@mui/joy';
|
||||
import { SxProps } from '@mui/joy/styles/types';
|
||||
import KeyboardArrowDownIcon from '@mui/icons-material/KeyboardArrowDown';
|
||||
|
||||
|
||||
/**
|
||||
* A Select component that blends-in nicely (cleaner, easier to the eyes)
|
||||
*/
|
||||
export const AppBarDropdown = <TValue extends string>(props: { value: TValue, items: Record<string, { title: string }>, onChange: (event: any, value: TValue | null) => void, sx?: SxProps }) =>
|
||||
<Select
|
||||
variant='solid' color='neutral' size='md'
|
||||
value={props.value} onChange={props.onChange}
|
||||
indicator={<KeyboardArrowDownIcon />}
|
||||
slotProps={{
|
||||
root: {
|
||||
sx: {
|
||||
backgroundColor: 'transparent',
|
||||
},
|
||||
},
|
||||
listbox: {
|
||||
variant: 'plain', color: 'neutral', size: 'lg',
|
||||
disablePortal: false,
|
||||
sx: {
|
||||
minWidth: 160,
|
||||
},
|
||||
},
|
||||
indicator: {
|
||||
sx: {
|
||||
opacity: 0.5,
|
||||
},
|
||||
},
|
||||
}}
|
||||
sx={{
|
||||
mx: 0,
|
||||
/*fontFamily: theme.vars.fontFamily.code,*/
|
||||
fontWeight: 500,
|
||||
...(props.sx || {}),
|
||||
}}
|
||||
>
|
||||
{Object.keys(props.items).map((key: string) => (
|
||||
<Option key={key} value={key}>
|
||||
{props.items[key].title}
|
||||
</Option>
|
||||
))}
|
||||
</Select>;
|
||||
@@ -0,0 +1,31 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { AppBarDropdown } from './AppBarDropdown';
|
||||
import { SxProps } from '@mui/joy/styles/types';
|
||||
|
||||
|
||||
/**
|
||||
* Wrapper for AppBarDropdown that adds a symbol in front of the title
|
||||
*/
|
||||
type Props<TValue extends string> = {
|
||||
value: TValue;
|
||||
items: Record<string, { title: string, symbol: string }>;
|
||||
onChange: (event: any, value: TValue | null) => void;
|
||||
sx?: SxProps;
|
||||
};
|
||||
|
||||
export const AppBarDropdownWithSymbol = <TValue extends string>({ value, items, onChange, sx }: Props<TValue>) => {
|
||||
const itemsWithSymbol = Object.keys(items).map((key: string) => ({
|
||||
key,
|
||||
value: (!!items[key].symbol ? items[key].symbol + ' ' : '') + items[key].title,
|
||||
}));
|
||||
|
||||
return (
|
||||
<AppBarDropdown
|
||||
value={value}
|
||||
items={Object.fromEntries(itemsWithSymbol.map(({ key, value }) => [key, { title: value }]))}
|
||||
onChange={onChange}
|
||||
sx={sx}
|
||||
/>
|
||||
);
|
||||
};
|
||||
@@ -0,0 +1,356 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
import { signIn, signOut, useSession } from 'next-auth/react';
|
||||
|
||||
import { Badge, Box, Button, IconButton, ListDivider, ListItem, ListItemDecorator, Menu, MenuItem, Sheet, Stack, SvgIcon, Switch, Typography, useColorScheme, useTheme } from '@mui/joy';
|
||||
import { SxProps } from '@mui/joy/styles/types';
|
||||
import CheckBoxOutlineBlankOutlinedIcon from '@mui/icons-material/CheckBoxOutlineBlankOutlined';
|
||||
import CheckBoxOutlinedIcon from '@mui/icons-material/CheckBoxOutlined';
|
||||
import ClearIcon from '@mui/icons-material/Clear';
|
||||
import DarkModeIcon from '@mui/icons-material/DarkMode';
|
||||
import ExitToAppIcon from '@mui/icons-material/ExitToApp';
|
||||
import FileDownloadIcon from '@mui/icons-material/FileDownload';
|
||||
import GitHubIcon from '@mui/icons-material/GitHub';
|
||||
import LoginIcon from '@mui/icons-material/Login';
|
||||
import LogoutIcon from '@mui/icons-material/Logout';
|
||||
import MenuIcon from '@mui/icons-material/Menu';
|
||||
import MoreVertIcon from '@mui/icons-material/MoreVert';
|
||||
import SettingsOutlinedIcon from '@mui/icons-material/SettingsOutlined';
|
||||
import SettingsSuggestIcon from '@mui/icons-material/SettingsSuggest';
|
||||
|
||||
import { buildTimeAuthEnabled } from '@/modules/authentication/auth.client';
|
||||
|
||||
import { Brand } from '@/common/brand';
|
||||
import { ChatModelId, ChatModels, SystemPurposeId, SystemPurposes } from '../../../../data';
|
||||
import { ConfirmationModal } from '@/common/components/ConfirmationModal';
|
||||
import { Link } from '@/common/components/Link';
|
||||
import { cssRainbowColorKeyframes } from '@/common/theme';
|
||||
import { downloadConversationJson, restoreConversationFromJson, useChatStore } from '@/common/state/store-chats';
|
||||
import { useSettingsStore } from '@/common/state/store-settings';
|
||||
|
||||
import { AppBarDropdown } from './AppBarDropdown';
|
||||
import { AppBarDropdownWithSymbol } from './AppBarDropdownWithSymbol';
|
||||
import { ImportedModal, ImportedOutcome } from './ImportedModal';
|
||||
import { PagesMenu } from './PagesMenu';
|
||||
|
||||
|
||||
// missing from MUI, using Tabler for Discord
|
||||
function DiscordIcon(props: { sx?: SxProps }) {
|
||||
return <SvgIcon viewBox='0 0 24 24' width='24' height='24' stroke='currentColor' fill='none' stroke-linecap='round' stroke-linejoin='round' {...props}>
|
||||
<path stroke='none' d='M0 0h24v24H0z' fill='none'></path>
|
||||
<path d='M14.983 3l.123 .006c2.014 .214 3.527 .672 4.966 1.673a1 1 0 0 1 .371 .488c1.876 5.315 2.373 9.987 1.451 12.28c-1.003 2.005 -2.606 3.553 -4.394 3.553c-.94 0 -2.257 -1.596 -2.777 -2.969l-.02 .005c.838 -.131 1.69 -.323 2.572 -.574a1 1 0 1 0 -.55 -1.924c-3.32 .95 -6.13 .95 -9.45 0a1 1 0 0 0 -.55 1.924c.725 .207 1.431 .373 2.126 .499l.444 .074c-.477 1.37 -1.695 2.965 -2.627 2.965c-1.743 0 -3.276 -1.555 -4.267 -3.644c-.841 -2.206 -.369 -6.868 1.414 -12.174a1 1 0 0 1 .358 -.49c1.392 -1.016 2.807 -1.475 4.717 -1.685a1 1 0 0 1 .938 .435l.063 .107l.652 1.288l.16 -.019c.877 -.09 1.718 -.09 2.595 0l.158 .019l.65 -1.287a1 1 0 0 1 .754 -.54l.123 -.01zm-5.983 6a2 2 0 0 0 -1.977 1.697l-.018 .154l-.005 .149l.005 .15a2 2 0 1 0 1.995 -2.15zm6 0a2 2 0 0 0 -1.977 1.697l-.018 .154l-.005 .149l.005 .15a2 2 0 1 0 1.995 -2.15z' strokeWidth='0' fill='currentColor'></path>
|
||||
</SvgIcon>;
|
||||
}
|
||||
|
||||
function BringTheLove(props: { text: string, link: string, icon: JSX.Element }) {
|
||||
const [loved, setLoved] = React.useState(false);
|
||||
const icon = loved ? '❤️' : props.icon; // '❤️' : '🤍';
|
||||
return <Button
|
||||
color='neutral'
|
||||
component={Link} noLinkStyle href={props.link} target='_blank'
|
||||
onClick={() => setLoved(true)}
|
||||
endDecorator={icon}
|
||||
sx={{
|
||||
background: 'transparent',
|
||||
// '&:hover': { background: props.theme.palette.neutral.solidBg },
|
||||
'&:hover': { animation: `${cssRainbowColorKeyframes} 5s linear infinite` },
|
||||
}}>
|
||||
{props.text}
|
||||
</Button>;
|
||||
}
|
||||
|
||||
function SupportItem() {
|
||||
const theme = useTheme();
|
||||
const fadedColor = theme.palette.neutral.plainDisabledColor;
|
||||
const iconColor = '';
|
||||
return (
|
||||
<ListItem
|
||||
variant='solid' color='neutral'
|
||||
sx={{
|
||||
mb: -1, // absorb the bottom margin of the list
|
||||
mt: 1,
|
||||
// background: theme.palette.neutral.solidActiveBg,
|
||||
display: 'flex', flexDirection: 'row', gap: 1,
|
||||
justifyContent: 'space-between',
|
||||
}}>
|
||||
<Box
|
||||
sx={{
|
||||
mx: { xs: 1, sm: 2 },
|
||||
fontWeight: 600,
|
||||
color: fadedColor,
|
||||
}}>
|
||||
{Brand.Meta.SiteName}
|
||||
</Box>
|
||||
<BringTheLove text='Discord' icon={<DiscordIcon sx={{ color: iconColor }} />} link={Brand.URIs.SupportInvite} />
|
||||
<BringTheLove text='GitHub' icon={<GitHubIcon sx={{ color: iconColor }} />} link={Brand.URIs.OpenRepo} />
|
||||
</ListItem>
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* The top bar of the application, with the model and purpose selection, and menu/settings icons
|
||||
*/
|
||||
export function ApplicationBar(props: {
|
||||
conversationId: string | null;
|
||||
isMessageSelectionMode: boolean; setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void;
|
||||
onPublishConversation: (conversationId: string) => void;
|
||||
onShowSettings: () => void;
|
||||
sx?: SxProps
|
||||
}) {
|
||||
|
||||
// state
|
||||
const [actionsMenuAnchor, setActionsMenuAnchor] = React.useState<HTMLElement | null>(null);
|
||||
const [pagesMenuAnchor, setPagesMenuAnchor] = React.useState<HTMLElement | null>(null);
|
||||
const [clearConfirmationId, setClearConfirmationId] = React.useState<string | null>(null);
|
||||
const [conversationImportOutcome, setConversationImportOutcome] = React.useState<ImportedOutcome | null>(null);
|
||||
const conversationFileInputRef = React.useRef<HTMLInputElement>(null);
|
||||
|
||||
|
||||
// center buttons
|
||||
|
||||
const { data: authSession } = useSession();
|
||||
|
||||
const handleChatModelChange = (event: any, value: ChatModelId | null) =>
|
||||
value && props.conversationId && setChatModelId(props.conversationId, value);
|
||||
|
||||
const handleSystemPurposeChange = (event: any, value: SystemPurposeId | null) =>
|
||||
value && props.conversationId && setSystemPurposeId(props.conversationId, value);
|
||||
|
||||
|
||||
// quick actions
|
||||
|
||||
const closeActionsMenu = () => setActionsMenuAnchor(null);
|
||||
|
||||
const { mode: colorMode, setMode: setColorMode } = useColorScheme();
|
||||
|
||||
const { showSystemMessages, setShowSystemMessages, zenMode } = useSettingsStore(state => ({
|
||||
showSystemMessages: state.showSystemMessages, setShowSystemMessages: state.setShowSystemMessages,
|
||||
zenMode: state.zenMode,
|
||||
}), shallow);
|
||||
|
||||
const handleDarkModeToggle = () => setColorMode(colorMode === 'dark' ? 'light' : 'dark');
|
||||
|
||||
const handleSystemMessagesToggle = () => setShowSystemMessages(!showSystemMessages);
|
||||
|
||||
const handleActionShowSettings = (e: React.MouseEvent) => {
|
||||
e.stopPropagation();
|
||||
props.onShowSettings();
|
||||
closeActionsMenu();
|
||||
};
|
||||
|
||||
// conversation actions
|
||||
|
||||
const { conversationsCount, isConversationEmpty, chatModelId, systemPurposeId, setMessages, setChatModelId, setSystemPurposeId, setAutoTitle, importConversation } = useChatStore(state => {
|
||||
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
|
||||
return {
|
||||
conversationsCount: state.conversations.length,
|
||||
isConversationEmpty: conversation ? !conversation.messages.length : true,
|
||||
chatModelId: conversation ? conversation.chatModelId : null,
|
||||
systemPurposeId: conversation ? conversation.systemPurposeId : null,
|
||||
setMessages: state.setMessages,
|
||||
setChatModelId: state.setChatModelId,
|
||||
setSystemPurposeId: state.setSystemPurposeId,
|
||||
setAutoTitle: state.setAutoTitle,
|
||||
importConversation: state.importConversation,
|
||||
};
|
||||
}, shallow);
|
||||
|
||||
const handleConversationPublish = (e: React.MouseEvent<HTMLDivElement>) => {
|
||||
e.stopPropagation();
|
||||
props.conversationId && props.onPublishConversation(props.conversationId);
|
||||
};
|
||||
|
||||
const handleConversationDownload = (e: React.MouseEvent<HTMLDivElement>) => {
|
||||
e.stopPropagation();
|
||||
const conversation = useChatStore.getState().conversations.find(conversation => conversation.id === props.conversationId);
|
||||
if (conversation)
|
||||
downloadConversationJson(conversation);
|
||||
};
|
||||
|
||||
const handleToggleMessageSelectionMode = (e: React.MouseEvent) => {
|
||||
e.stopPropagation();
|
||||
closeActionsMenu();
|
||||
props.setIsMessageSelectionMode(!props.isMessageSelectionMode);
|
||||
};
|
||||
|
||||
const handleConversationClear = (e: React.MouseEvent<HTMLDivElement>) => {
|
||||
e.stopPropagation();
|
||||
setClearConfirmationId(props.conversationId);
|
||||
};
|
||||
|
||||
const handleConfirmedClearConversation = () => {
|
||||
if (clearConfirmationId) {
|
||||
setMessages(clearConfirmationId, []);
|
||||
setAutoTitle(clearConfirmationId, '');
|
||||
setClearConfirmationId(null);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// pages actions
|
||||
|
||||
const closePagesMenu = () => setPagesMenuAnchor(null);
|
||||
|
||||
const handleConversationUpload = () => conversationFileInputRef.current?.click();
|
||||
|
||||
const handleLoadConversations = async (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const files = e.target?.files;
|
||||
if (!files || files.length < 1)
|
||||
return;
|
||||
|
||||
// try to restore conversations from the selected files
|
||||
const outcomes: ImportedOutcome = { conversations: [] };
|
||||
for (const file of files) {
|
||||
const fileName = file.name || 'unknown file';
|
||||
try {
|
||||
const conversation = restoreConversationFromJson(await file.text());
|
||||
if (conversation) {
|
||||
importConversation(conversation);
|
||||
outcomes.conversations.push({ fileName, success: true, conversationId: conversation.id });
|
||||
} else {
|
||||
const fileDesc = `(${file.type}) ${file.size.toLocaleString()} bytes`;
|
||||
outcomes.conversations.push({ fileName, success: false, error: `Invalid file: ${fileDesc}` });
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
outcomes.conversations.push({ fileName, success: false, error: (error as any)?.message || error?.toString() || 'unknown error' });
|
||||
}
|
||||
}
|
||||
|
||||
// show the outcome of the import
|
||||
setConversationImportOutcome(outcomes);
|
||||
|
||||
// this is needed to allow the same file to be selected again
|
||||
e.target.value = '';
|
||||
};
|
||||
|
||||
|
||||
return <>
|
||||
|
||||
{/* Top Bar with 2 icons and Model/Purpose selectors */}
|
||||
<Sheet
|
||||
variant='solid' color='neutral' invertedColors
|
||||
sx={{
|
||||
p: 1,
|
||||
display: 'flex', flexDirection: 'row', justifyContent: 'space-between',
|
||||
...(props.sx || {}),
|
||||
}}>
|
||||
|
||||
<IconButton variant='plain' onClick={event => setPagesMenuAnchor(event.currentTarget)}>
|
||||
<Badge variant='solid' size='sm' badgeContent={conversationsCount < 2 ? 0 : conversationsCount}>
|
||||
<MenuIcon />
|
||||
</Badge>
|
||||
</IconButton>
|
||||
|
||||
<Stack direction='row' sx={{ my: 'auto' }}>
|
||||
|
||||
{chatModelId && <AppBarDropdown items={ChatModels} value={chatModelId} onChange={handleChatModelChange} />}
|
||||
|
||||
{systemPurposeId && (zenMode === 'cleaner'
|
||||
? <AppBarDropdown items={SystemPurposes} value={systemPurposeId} onChange={handleSystemPurposeChange} />
|
||||
: <AppBarDropdownWithSymbol items={SystemPurposes} value={systemPurposeId} onChange={handleSystemPurposeChange} />
|
||||
)}
|
||||
|
||||
</Stack>
|
||||
<Stack direction='row'>
|
||||
{buildTimeAuthEnabled && (
|
||||
authSession?.user ? (
|
||||
<IconButton onClick={() => signOut()}>
|
||||
<LogoutIcon style={{ marginRight: '0.33em' }} />
|
||||
<Typography level='body3'>Sign out {authSession.user?.name ?? ''}</Typography>
|
||||
</IconButton>
|
||||
) : (
|
||||
<IconButton onClick={() => signIn()}>
|
||||
<LoginIcon style={{ marginRight: '0.33em' }} />
|
||||
<Typography>Sign in </Typography>
|
||||
</IconButton>
|
||||
)
|
||||
)}
|
||||
<IconButton variant='plain' onClick={event => setActionsMenuAnchor(event.currentTarget)}>
|
||||
<MoreVertIcon />
|
||||
</IconButton>
|
||||
</Stack>
|
||||
</Sheet>
|
||||
|
||||
|
||||
{/* Left menu content */}
|
||||
<PagesMenu
|
||||
conversationId={props.conversationId}
|
||||
pagesMenuAnchor={pagesMenuAnchor}
|
||||
onClose={closePagesMenu}
|
||||
onImportConversation={handleConversationUpload}
|
||||
/>
|
||||
|
||||
{/* Right menu content */}
|
||||
<Menu
|
||||
variant='plain' color='neutral' size='lg' placement='bottom-end' sx={{ minWidth: 280 }}
|
||||
open={!!actionsMenuAnchor} anchorEl={actionsMenuAnchor} onClose={closeActionsMenu}
|
||||
disablePortal={false}>
|
||||
|
||||
<MenuItem onClick={handleDarkModeToggle}>
|
||||
<ListItemDecorator><DarkModeIcon /></ListItemDecorator>
|
||||
Dark
|
||||
<Switch checked={colorMode === 'dark'} onChange={handleDarkModeToggle} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem onClick={handleSystemMessagesToggle}>
|
||||
<ListItemDecorator><SettingsSuggestIcon /></ListItemDecorator>
|
||||
System text
|
||||
<Switch checked={showSystemMessages} onChange={handleSystemMessagesToggle} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem onClick={handleActionShowSettings}>
|
||||
<ListItemDecorator><SettingsOutlinedIcon /></ListItemDecorator>
|
||||
Settings
|
||||
</MenuItem>
|
||||
|
||||
<ListDivider />
|
||||
|
||||
<MenuItem disabled={!props.conversationId || isConversationEmpty} onClick={handleConversationPublish}>
|
||||
<ListItemDecorator>
|
||||
{/*<Badge size='sm' color='primary'>*/}
|
||||
<ExitToAppIcon />
|
||||
{/*</Badge>*/}
|
||||
</ListItemDecorator>
|
||||
Share via paste.gg
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem disabled={!props.conversationId || isConversationEmpty} onClick={handleConversationDownload}>
|
||||
<ListItemDecorator>
|
||||
<FileDownloadIcon />
|
||||
</ListItemDecorator>
|
||||
Export conversation
|
||||
</MenuItem>
|
||||
|
||||
<ListDivider />
|
||||
|
||||
<MenuItem disabled={!props.conversationId || isConversationEmpty} onClick={handleToggleMessageSelectionMode}>
|
||||
<ListItemDecorator>{props.isMessageSelectionMode ? <CheckBoxOutlinedIcon /> : <CheckBoxOutlineBlankOutlinedIcon />}</ListItemDecorator>
|
||||
Cleanup ...
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem disabled={!props.conversationId || isConversationEmpty} onClick={handleConversationClear}>
|
||||
<ListItemDecorator><ClearIcon /></ListItemDecorator>
|
||||
Clear conversation
|
||||
</MenuItem>
|
||||
|
||||
<SupportItem />
|
||||
</Menu>
|
||||
|
||||
|
||||
{/* Modals */}
|
||||
<ConfirmationModal
|
||||
open={!!clearConfirmationId} onClose={() => setClearConfirmationId(null)} onPositive={handleConfirmedClearConversation}
|
||||
confirmationText={'Are you sure you want to discard all the messages?'} positiveActionText={'Clear conversation'}
|
||||
/>
|
||||
|
||||
{!!conversationImportOutcome && (
|
||||
<ImportedModal open outcome={conversationImportOutcome} onClose={() => setConversationImportOutcome(null)} />
|
||||
)}
|
||||
|
||||
{/* Files */}
|
||||
<input type='file' multiple hidden accept='.json' ref={conversationFileInputRef} onChange={handleLoadConversations} />
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -0,0 +1,68 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, Divider, List, ListItem, Modal, ModalDialog, Typography } from '@mui/joy';
|
||||
|
||||
|
||||
export interface ImportedOutcome {
|
||||
conversations: {
|
||||
fileName: string;
|
||||
success: boolean;
|
||||
conversationId?: string;
|
||||
error?: string;
|
||||
}[];
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Displays the result of an import operation as a modal dialog.
|
||||
*
|
||||
* Import operations supported:
|
||||
* - JSON Chat
|
||||
*/
|
||||
export function ImportedModal(props: { open: boolean, outcome: ImportedOutcome, onClose: () => void, }) {
|
||||
const { conversations } = props.outcome;
|
||||
|
||||
const successes = conversations.filter(c => c.success);
|
||||
const failures = conversations.filter(c => !c.success);
|
||||
const hasAnyResults = successes.length > 0 || failures.length > 0;
|
||||
const hasAnyFailures = failures.length > 0;
|
||||
|
||||
return (
|
||||
<Modal open={props.open} onClose={props.onClose}>
|
||||
<ModalDialog variant='outlined' color='neutral' sx={{ maxWidth: '100vw' }}>
|
||||
|
||||
<Typography level='h5'>
|
||||
{hasAnyResults ? hasAnyFailures ? 'Import issues' : 'Import successful' : 'Import failed'}
|
||||
</Typography>
|
||||
|
||||
<Divider sx={{ my: 2 }} />
|
||||
|
||||
{successes.length >= 1 && <>
|
||||
<Typography>
|
||||
Imported {successes.length} conversation{successes.length === 1 ? '' : 's'}.
|
||||
</Typography>
|
||||
<Typography>
|
||||
{successes.length === 1 ? 'It' : 'They'} can be found in the Pages menu. Opening {successes.length === 1 ? 'it' : 'the last one'}.
|
||||
</Typography>
|
||||
</>}
|
||||
|
||||
{failures.length >= 1 && <>
|
||||
<Typography variant='soft' color='danger'>
|
||||
Issues importing {failures.length} conversation{failures.length === 1 ? '' : 's'}:
|
||||
</Typography>
|
||||
<List>
|
||||
{failures.map((f, idx) =>
|
||||
<ListItem color='warning' key={'fail-' + idx}>{f.fileName}: {f.error}</ListItem>,
|
||||
)}
|
||||
</List>
|
||||
</>}
|
||||
|
||||
<Box sx={{ display: 'flex', gap: 1, justifyContent: 'flex-end', mt: 2 }}>
|
||||
<Button variant='soft' color='neutral' onClick={props.onClose}>
|
||||
Close
|
||||
</Button>
|
||||
</Box>
|
||||
</ModalDialog>
|
||||
</Modal>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,162 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
|
||||
import { Box, ListDivider, ListItemDecorator, Menu, MenuItem, Tooltip, Typography } from '@mui/joy';
|
||||
import AddIcon from '@mui/icons-material/Add';
|
||||
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
|
||||
import FileUploadIcon from '@mui/icons-material/FileUpload';
|
||||
|
||||
import { ConfirmationModal } from '@/common/components/ConfirmationModal';
|
||||
import { MAX_CONVERSATIONS, useChatStore } from '@/common/state/store-chats';
|
||||
import { useSettingsStore } from '@/common/state/store-settings';
|
||||
|
||||
import { PagesMenuItem } from './PagesMenuItem';
|
||||
|
||||
|
||||
const SPECIAL_ID_ALL_CHATS = 'all-chats';
|
||||
|
||||
|
||||
/**
|
||||
* FIXME: use a proper Pages drawer instead of this menu
|
||||
*/
|
||||
export function PagesMenu(props: { conversationId: string | null, pagesMenuAnchor: HTMLElement | null, onClose: () => void, onImportConversation: () => void }) {
|
||||
// state
|
||||
const [deleteConfirmationId, setDeleteConfirmationId] = React.useState<string | null>(null);
|
||||
|
||||
// external state
|
||||
const conversationIDs = useChatStore(state => state.conversations.map(conversation => conversation.id), shallow);
|
||||
const { setActiveConversationId, createConversation, deleteConversation, newConversationId } = useChatStore(state => ({
|
||||
setActiveConversationId: state.setActiveConversationId,
|
||||
createConversation: state.createConversation,
|
||||
deleteConversation: state.deleteConversation,
|
||||
newConversationId: state.conversations.length ? state.conversations[0].messages.length === 0 ? state.conversations[0].id : null : null,
|
||||
}), shallow);
|
||||
const showSymbols = useSettingsStore(state => state.zenMode) !== 'cleaner';
|
||||
|
||||
|
||||
const hasChats = conversationIDs.length > 0;
|
||||
const singleChat = conversationIDs.length === 1;
|
||||
const maxReached = conversationIDs.length >= MAX_CONVERSATIONS;
|
||||
|
||||
|
||||
const handleNew = () => {
|
||||
// if the first in the stack is a new conversation, just activate it
|
||||
if (newConversationId)
|
||||
setActiveConversationId(newConversationId);
|
||||
else
|
||||
createConversation();
|
||||
props.onClose();
|
||||
};
|
||||
|
||||
const handleConversationActivate = (conversationId: string) => setActiveConversationId(conversationId);
|
||||
|
||||
const handleConversationDelete = (e: React.MouseEvent, conversationId: string) => {
|
||||
if (!singleChat) {
|
||||
e.stopPropagation();
|
||||
// NOTE: the old behavior was good, keeping it for reference - now we'll only ask for confirmation when deleting all chats
|
||||
// // if the chat is empty, just delete it
|
||||
// if (conversationId === newConversationId)
|
||||
// deleteConversation(conversationId);
|
||||
// // otherwise, ask for confirmation
|
||||
// else {
|
||||
// setActiveConversationId(conversationId);
|
||||
// setDeleteConfirmationId(conversationId);
|
||||
// }
|
||||
if (conversationId)
|
||||
deleteConversation(conversationId);
|
||||
}
|
||||
};
|
||||
|
||||
const handleConfirmedDeleteConversation = () => {
|
||||
if (hasChats && deleteConfirmationId) {
|
||||
if (deleteConfirmationId === SPECIAL_ID_ALL_CHATS) {
|
||||
createConversation();
|
||||
conversationIDs.forEach(conversationId => deleteConversation(conversationId));
|
||||
} else
|
||||
deleteConversation(deleteConfirmationId);
|
||||
setDeleteConfirmationId(null);
|
||||
}
|
||||
};
|
||||
|
||||
const handleDeleteAll = (e: React.MouseEvent) => {
|
||||
e.stopPropagation();
|
||||
setDeleteConfirmationId(SPECIAL_ID_ALL_CHATS);
|
||||
};
|
||||
|
||||
|
||||
const NewPrefix = maxReached && <Tooltip title={`Maximum limit: ${MAX_CONVERSATIONS} chats. Proceeding will remove the oldest chat.`}><Box sx={{ mr: 2 }}>⚠️</Box></Tooltip>;
|
||||
|
||||
return <>
|
||||
|
||||
<Menu
|
||||
variant='plain' color='neutral' size='lg' placement='bottom-start' sx={{ minWidth: 320 }}
|
||||
open={!!props.pagesMenuAnchor} anchorEl={props.pagesMenuAnchor} onClose={props.onClose}
|
||||
disablePortal={false}>
|
||||
|
||||
{/*<ListItem>*/}
|
||||
{/* <Typography level='body2'>*/}
|
||||
{/* Active chats*/}
|
||||
{/* </Typography>*/}
|
||||
{/*</ListItem>*/}
|
||||
|
||||
<MenuItem onClick={handleNew} disabled={!!newConversationId && newConversationId === props.conversationId}>
|
||||
<ListItemDecorator><AddIcon /></ListItemDecorator>
|
||||
{NewPrefix}New
|
||||
</MenuItem>
|
||||
|
||||
<ListDivider />
|
||||
|
||||
{conversationIDs.map(conversationId =>
|
||||
<PagesMenuItem
|
||||
key={'c-id-' + conversationId}
|
||||
conversationId={conversationId}
|
||||
isActive={conversationId === props.conversationId}
|
||||
isSingle={singleChat}
|
||||
showSymbols={showSymbols}
|
||||
conversationActivate={handleConversationActivate}
|
||||
conversationDelete={handleConversationDelete}
|
||||
/>)}
|
||||
|
||||
<ListDivider />
|
||||
|
||||
<MenuItem onClick={props.onImportConversation}>
|
||||
<ListItemDecorator>
|
||||
<FileUploadIcon />
|
||||
</ListItemDecorator>
|
||||
Import conversation
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem disabled={!hasChats} onClick={handleDeleteAll}>
|
||||
<ListItemDecorator><DeleteOutlineIcon /></ListItemDecorator>
|
||||
<Typography>
|
||||
Delete all
|
||||
</Typography>
|
||||
</MenuItem>
|
||||
|
||||
{/*<ListItem>*/}
|
||||
{/* <Typography level='body2'>*/}
|
||||
{/* Scratchpad*/}
|
||||
{/* </Typography>*/}
|
||||
{/*</ListItem>*/}
|
||||
{/*<MenuItem>*/}
|
||||
{/* <ListItemDecorator />*/}
|
||||
{/* <Typography sx={{ opacity: 0.5 }}>*/}
|
||||
{/* Feature <Link href={`${Brand.URIs.OpenRepo}/issues/17`} target='_blank'>#17</Link>*/}
|
||||
{/* </Typography>*/}
|
||||
{/*</MenuItem>*/}
|
||||
|
||||
</Menu>
|
||||
|
||||
{/* Confirmations */}
|
||||
<ConfirmationModal
|
||||
open={!!deleteConfirmationId} onClose={() => setDeleteConfirmationId(null)} onPositive={handleConfirmedDeleteConversation}
|
||||
confirmationText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
|
||||
? 'Are you absolutely sure you want to delete ALL conversations? This action cannot be undone.'
|
||||
: 'Are you sure you want to delete this conversation?'}
|
||||
positiveActionText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
|
||||
? 'Yes, delete all'
|
||||
: 'Delete conversation'}
|
||||
/>
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -6,54 +6,45 @@ import { SxProps } from '@mui/joy/styles/types';
|
||||
import CloseIcon from '@mui/icons-material/Close';
|
||||
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
|
||||
|
||||
import { InlineTextarea } from '@/common/components/InlineTextarea';
|
||||
import { SystemPurposes } from '../../../../data';
|
||||
|
||||
import { InlineTextarea } from '~/common/components/InlineTextarea';
|
||||
import { conversationTitle, useChatStore } from '~/common/state/store-chats';
|
||||
import { useUIPreferencesStore } from '~/common/state/store-ui';
|
||||
import { conversationTitle, useChatStore } from '@/common/state/store-chats';
|
||||
|
||||
|
||||
const DEBUG_CONVERSATION_IDs = false;
|
||||
|
||||
|
||||
export function ConversationItem(props: {
|
||||
export function PagesMenuItem(props: {
|
||||
conversationId: string,
|
||||
isActive: boolean, isSingle: boolean, showSymbols: boolean, maxChatMessages: number,
|
||||
conversationActivate: (conversationId: string, closeMenu: boolean) => void,
|
||||
conversationDelete: (conversationId: string) => void,
|
||||
isActive: boolean, isSingle: boolean, showSymbols: boolean,
|
||||
conversationActivate: (conversationId: string) => void,
|
||||
conversationDelete: (e: React.MouseEvent, conversationId: string) => void,
|
||||
}) {
|
||||
|
||||
// state
|
||||
const [isEditingTitle, setIsEditingTitle] = React.useState(false);
|
||||
const [deleteArmed, setDeleteArmed] = React.useState(false);
|
||||
const doubleClickToEdit = useUIPreferencesStore(state => state.doubleClickToEdit);
|
||||
|
||||
// bind to conversation
|
||||
const cState = useChatStore(state => {
|
||||
const conversation = useChatStore(state => {
|
||||
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
|
||||
return conversation && {
|
||||
isNew: conversation.messages.length === 0,
|
||||
messageCount: conversation.messages.length,
|
||||
assistantTyping: !!conversation.abortController,
|
||||
systemPurposeId: conversation.systemPurposeId,
|
||||
title: conversationTitle(conversation, 'new conversation'),
|
||||
title: conversationTitle(conversation),
|
||||
setUserTitle: state.setUserTitle,
|
||||
};
|
||||
}, shallow);
|
||||
|
||||
// auto-close the arming menu when clicking away
|
||||
// NOTE: there currently is a bug (race condition) where the menu closes on a new item right after opening
|
||||
// because the isActive prop is not yet updated
|
||||
// auto-close the menu when clicking away
|
||||
React.useEffect(() => {
|
||||
if (deleteArmed && !props.isActive)
|
||||
setDeleteArmed(false);
|
||||
}, [deleteArmed, props.isActive]);
|
||||
|
||||
// sanity check: shouldn't happen, but just in case
|
||||
if (!cState) return null;
|
||||
const { isNew, messageCount, assistantTyping, setUserTitle, systemPurposeId, title } = cState;
|
||||
|
||||
const handleActivate = () => props.conversationActivate(props.conversationId, true);
|
||||
if (!conversation) return null;
|
||||
|
||||
const handleEditBegin = () => setIsEditingTitle(true);
|
||||
|
||||
@@ -62,50 +53,33 @@ export function ConversationItem(props: {
|
||||
setUserTitle(props.conversationId, text);
|
||||
};
|
||||
|
||||
const handleDeleteBegin = (e: React.MouseEvent) => {
|
||||
e.stopPropagation();
|
||||
if (!props.isActive)
|
||||
props.conversationActivate(props.conversationId, false);
|
||||
else
|
||||
setDeleteArmed(true);
|
||||
};
|
||||
const handleDeleteBegin = () => setDeleteArmed(true);
|
||||
|
||||
const handleDeleteConfirm = (e: React.MouseEvent) => {
|
||||
if (deleteArmed) {
|
||||
setDeleteArmed(false);
|
||||
e.stopPropagation();
|
||||
props.conversationDelete(props.conversationId);
|
||||
props.conversationDelete(e, props.conversationId);
|
||||
}
|
||||
};
|
||||
|
||||
const handleDeleteCancel = () => setDeleteArmed(false);
|
||||
|
||||
|
||||
const { assistantTyping, setUserTitle, systemPurposeId, title } = conversation;
|
||||
const textSymbol = SystemPurposes[systemPurposeId]?.symbol || '❓';
|
||||
const buttonSx: SxProps = { ml: 1, ...(props.isActive ? { color: 'white' } : {}) };
|
||||
|
||||
const progress = props.maxChatMessages ? 100 * messageCount / props.maxChatMessages : 0;
|
||||
|
||||
return (
|
||||
<MenuItem
|
||||
variant={props.isActive ? 'solid' : 'plain'} color='neutral'
|
||||
selected={props.isActive}
|
||||
onClick={handleActivate}
|
||||
onClick={() => props.conversationActivate(props.conversationId)}
|
||||
sx={{
|
||||
// py: 0,
|
||||
position: 'relative',
|
||||
border: 'none', // note, there's a default border of 1px and invisible.. hmm
|
||||
'&:hover > button': { opacity: 1 },
|
||||
}}
|
||||
>
|
||||
|
||||
{/* Optional prgoress bar */}
|
||||
{progress > 0 && (
|
||||
<Box sx={{
|
||||
backgroundColor: 'neutral.softActiveBg',
|
||||
position: 'absolute', left: 0, bottom: 0, width: progress + '%', height: 4,
|
||||
}} />
|
||||
)}
|
||||
|
||||
{/* Icon */}
|
||||
{props.showSymbols && <ListItemDecorator>
|
||||
{assistantTyping
|
||||
@@ -116,12 +90,12 @@ export function ConversationItem(props: {
|
||||
sx={{
|
||||
width: 24,
|
||||
height: 24,
|
||||
borderRadius: 'var(--joy-radius-sm)',
|
||||
borderRadius: 8,
|
||||
}}
|
||||
/>
|
||||
) : (
|
||||
<Typography sx={{ fontSize: '18px' }}>
|
||||
{isNew ? '' : textSymbol}
|
||||
{conversation.isNew ? '' : textSymbol}
|
||||
</Typography>
|
||||
)}
|
||||
</ListItemDecorator>}
|
||||
@@ -129,7 +103,7 @@ export function ConversationItem(props: {
|
||||
{/* Text */}
|
||||
{!isEditingTitle ? (
|
||||
|
||||
<Box onDoubleClick={() => doubleClickToEdit ? handleEditBegin() : null} sx={{ flexGrow: 1 }}>
|
||||
<Box onDoubleClick={handleEditBegin} sx={{ flexGrow: 1 }}>
|
||||
{DEBUG_CONVERSATION_IDs ? props.conversationId.slice(0, 10) : title}{assistantTyping && '...'}
|
||||
</Box>
|
||||
|
||||
@@ -139,7 +113,6 @@ export function ConversationItem(props: {
|
||||
|
||||
)}
|
||||
|
||||
{/* // TODO: Commented code */}
|
||||
{/* Edit */}
|
||||
{/*<IconButton*/}
|
||||
{/* variant='plain' color='neutral'*/}
|
||||
@@ -153,7 +126,7 @@ export function ConversationItem(props: {
|
||||
{/* Delete Arming */}
|
||||
{!props.isSingle && !deleteArmed && (
|
||||
<IconButton
|
||||
variant={props.isActive ? 'solid' : 'outlined'} color='neutral'
|
||||
variant='outlined' color='neutral'
|
||||
size='sm' sx={{ opacity: { xs: 1, sm: 0 }, transition: 'opacity 0.3s', ...buttonSx }}
|
||||
onClick={handleDeleteBegin}>
|
||||
<DeleteOutlineIcon />
|
||||
@@ -165,11 +138,10 @@ export function ConversationItem(props: {
|
||||
<IconButton size='sm' variant='solid' color='danger' sx={buttonSx} onClick={handleDeleteConfirm}>
|
||||
<DeleteOutlineIcon />
|
||||
</IconButton>
|
||||
<IconButton size='sm' variant='solid' color='neutral' sx={buttonSx} onClick={handleDeleteCancel}>
|
||||
<IconButton size='sm' variant='plain' color='neutral' sx={buttonSx} onClick={handleDeleteCancel}>
|
||||
<CloseIcon />
|
||||
</IconButton>
|
||||
</>}
|
||||
</MenuItem>
|
||||
|
||||
);
|
||||
}
|
||||
@@ -1,161 +0,0 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
|
||||
import { Box, ListDivider, ListItemDecorator, MenuItem, Typography } from '@mui/joy';
|
||||
import AddIcon from '@mui/icons-material/Add';
|
||||
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
|
||||
import FileUploadIcon from '@mui/icons-material/FileUpload';
|
||||
|
||||
import { OpenAIIcon } from '~/common/components/icons/OpenAIIcon';
|
||||
import { closeLayoutDrawer } from '~/common/layout/store-applayout';
|
||||
import { useChatStore } from '~/common/state/store-chats';
|
||||
import { useUIPreferencesStore } from '~/common/state/store-ui';
|
||||
|
||||
import { ConversationItem } from './ConversationItem';
|
||||
|
||||
|
||||
type ListGrouping = 'off' | 'persona';
|
||||
|
||||
export function ChatDrawerItems(props: {
|
||||
conversationId: string | null
|
||||
onDeleteAllConversations: () => void,
|
||||
onImportConversation: () => void,
|
||||
}) {
|
||||
|
||||
// local state
|
||||
const [grouping] = React.useState<ListGrouping>('off');
|
||||
|
||||
// external state
|
||||
const { conversationIDs, topNewConversationId, maxChatMessages, setActiveConversationId, createConversation, deleteConversation } = useChatStore(state => ({
|
||||
conversationIDs: state.conversations.map(conversation => conversation.id),
|
||||
topNewConversationId: state.conversations.length ? state.conversations[0].messages.length === 0 ? state.conversations[0].id : null : null,
|
||||
maxChatMessages: state.conversations.reduce((longest, conversation) => Math.max(longest, conversation.messages.length), 0),
|
||||
setActiveConversationId: state.setActiveConversationId,
|
||||
createConversation: state.createConversation,
|
||||
deleteConversation: state.deleteConversation,
|
||||
}), shallow);
|
||||
const { experimentalLabs, showSymbols } = useUIPreferencesStore(state => ({
|
||||
experimentalLabs: state.experimentalLabs,
|
||||
showSymbols: state.zenMode !== 'cleaner',
|
||||
}), shallow);
|
||||
|
||||
|
||||
const totalConversations = conversationIDs.length;
|
||||
const hasChats = totalConversations > 0;
|
||||
const singleChat = totalConversations === 1;
|
||||
const softMaxReached = totalConversations >= 50;
|
||||
|
||||
const handleNew = () => {
|
||||
// if the first in the stack is a new conversation, just activate it
|
||||
if (topNewConversationId)
|
||||
setActiveConversationId(topNewConversationId);
|
||||
else
|
||||
createConversation();
|
||||
closeLayoutDrawer();
|
||||
};
|
||||
|
||||
const handleConversationActivate = React.useCallback((conversationId: string, closeMenu: boolean) => {
|
||||
setActiveConversationId(conversationId);
|
||||
if (closeMenu)
|
||||
closeLayoutDrawer();
|
||||
}, [setActiveConversationId]);
|
||||
|
||||
const handleConversationDelete = React.useCallback((conversationId: string) => {
|
||||
if (!singleChat && conversationId)
|
||||
deleteConversation(conversationId);
|
||||
}, [deleteConversation, singleChat]);
|
||||
|
||||
// grouping
|
||||
let sortedIds = conversationIDs;
|
||||
if (grouping === 'persona') {
|
||||
const conversations = useChatStore.getState().conversations;
|
||||
|
||||
// group conversations by persona
|
||||
const groupedConversations: { [personaId: string]: string[] } = {};
|
||||
conversations.forEach(conversation => {
|
||||
const persona = conversation.systemPurposeId;
|
||||
if (persona) {
|
||||
if (!groupedConversations[persona])
|
||||
groupedConversations[persona] = [];
|
||||
groupedConversations[persona].push(conversation.id);
|
||||
}
|
||||
});
|
||||
|
||||
// flatten grouped conversations
|
||||
sortedIds = Object.values(groupedConversations).flat();
|
||||
}
|
||||
|
||||
return <>
|
||||
|
||||
{/*<ListItem>*/}
|
||||
{/* <Typography level='body-sm'>*/}
|
||||
{/* Active chats*/}
|
||||
{/* </Typography>*/}
|
||||
{/*</ListItem>*/}
|
||||
|
||||
<MenuItem disabled={!!topNewConversationId && topNewConversationId === props.conversationId} onClick={handleNew}>
|
||||
<ListItemDecorator><AddIcon /></ListItemDecorator>
|
||||
New
|
||||
</MenuItem>
|
||||
|
||||
<ListDivider sx={{ mb: 0 }} />
|
||||
|
||||
<Box sx={{ flex: 1, overflowY: 'auto' }}>
|
||||
{/*<ListItem sticky sx={{ justifyContent: 'space-between', boxShadow: 'sm' }}>*/}
|
||||
{/* <Typography level='body-sm'>*/}
|
||||
{/* Conversations*/}
|
||||
{/* </Typography>*/}
|
||||
{/* <ToggleButtonGroup variant='soft' size='sm' value={grouping} onChange={(_event, newValue) => newValue && setGrouping(newValue)}>*/}
|
||||
{/* <IconButton value='off'>*/}
|
||||
{/* <AccessTimeIcon />*/}
|
||||
{/* </IconButton>*/}
|
||||
{/* <IconButton value='persona'>*/}
|
||||
{/* <PersonIcon />*/}
|
||||
{/* </IconButton>*/}
|
||||
{/* </ToggleButtonGroup>*/}
|
||||
{/*</ListItem>*/}
|
||||
|
||||
{sortedIds.map(conversationId =>
|
||||
<ConversationItem
|
||||
key={'c-id-' + conversationId}
|
||||
conversationId={conversationId}
|
||||
isActive={conversationId === props.conversationId}
|
||||
isSingle={singleChat}
|
||||
showSymbols={showSymbols}
|
||||
maxChatMessages={(experimentalLabs || softMaxReached) ? maxChatMessages : 0}
|
||||
conversationActivate={handleConversationActivate}
|
||||
conversationDelete={handleConversationDelete}
|
||||
/>)}
|
||||
</Box>
|
||||
|
||||
<ListDivider sx={{ mt: 0 }} />
|
||||
|
||||
<MenuItem onClick={props.onImportConversation}>
|
||||
<ListItemDecorator>
|
||||
<FileUploadIcon />
|
||||
</ListItemDecorator>
|
||||
Import chats
|
||||
<OpenAIIcon sx={{ fontSize: 'xl', ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem disabled={!hasChats} onClick={props.onDeleteAllConversations}>
|
||||
<ListItemDecorator><DeleteOutlineIcon /></ListItemDecorator>
|
||||
<Typography>
|
||||
Delete {totalConversations >= 2 ? `all ${totalConversations} chats` : 'chat'}
|
||||
</Typography>
|
||||
</MenuItem>
|
||||
|
||||
{/*<ListItem>*/}
|
||||
{/* <Typography level='body-sm'>*/}
|
||||
{/* Scratchpad*/}
|
||||
{/* </Typography>*/}
|
||||
{/*</ListItem>*/}
|
||||
{/*<MenuItem>*/}
|
||||
{/* <ListItemDecorator />*/}
|
||||
{/* <Typography sx={{ opacity: 0.5 }}>*/}
|
||||
{/* Feature <Link href={`${Brand.URIs.OpenRepo}/issues/17`} target='_blank'>#17</Link>*/}
|
||||
{/* </Typography>*/}
|
||||
{/*</MenuItem>*/}
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { useChatLLMDropdown } from './useLLMDropdown';
|
||||
import { usePersonaIdDropdown } from './usePersonaDropdown';
|
||||
|
||||
|
||||
export function ChatDropdowns(props: {
|
||||
conversationId: string | null
|
||||
}) {
|
||||
|
||||
// state
|
||||
const { chatLLMDropdown } = useChatLLMDropdown();
|
||||
const { personaDropdown } = usePersonaIdDropdown(props.conversationId);
|
||||
|
||||
return <>
|
||||
|
||||
{/* Model selector */}
|
||||
{chatLLMDropdown}
|
||||
|
||||
{/* Persona selector */}
|
||||
{personaDropdown}
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -1,125 +0,0 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
|
||||
import { Badge, ListDivider, ListItemDecorator, MenuItem, Switch } from '@mui/joy';
|
||||
import CheckBoxOutlineBlankOutlinedIcon from '@mui/icons-material/CheckBoxOutlineBlankOutlined';
|
||||
import CheckBoxOutlinedIcon from '@mui/icons-material/CheckBoxOutlined';
|
||||
import ClearIcon from '@mui/icons-material/Clear';
|
||||
import CompressIcon from '@mui/icons-material/Compress';
|
||||
import FileDownloadIcon from '@mui/icons-material/FileDownload';
|
||||
import ForkRightIcon from '@mui/icons-material/ForkRight';
|
||||
import SettingsSuggestIcon from '@mui/icons-material/SettingsSuggest';
|
||||
|
||||
import { closeLayoutMenu } from '~/common/layout/store-applayout';
|
||||
import { useUICounter, useUIPreferencesStore } from '~/common/state/store-ui';
|
||||
|
||||
|
||||
export function ChatMenuItems(props: {
|
||||
conversationId: string | null, isConversationEmpty: boolean, hasConversations: boolean,
|
||||
isMessageSelectionMode: boolean, setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void,
|
||||
onClearConversation: (conversationId: string) => void,
|
||||
onDuplicateConversation: (conversationId: string) => void,
|
||||
onExportConversation: (conversationId: string | null) => void,
|
||||
onFlattenConversation: (conversationId: string) => void,
|
||||
}) {
|
||||
|
||||
// external state
|
||||
const { novel: shareBadge, touch: shareTouch } = useUICounter('export-share');
|
||||
const { showSystemMessages, setShowSystemMessages } = useUIPreferencesStore(state => ({
|
||||
showSystemMessages: state.showSystemMessages, setShowSystemMessages: state.setShowSystemMessages,
|
||||
}), shallow);
|
||||
|
||||
// derived state
|
||||
const disabled = !props.conversationId || props.isConversationEmpty;
|
||||
|
||||
const handleSystemMessagesToggle = () => setShowSystemMessages(!showSystemMessages);
|
||||
|
||||
const handleConversationExport = (e: React.MouseEvent<HTMLDivElement>) => {
|
||||
e.stopPropagation();
|
||||
closeLayoutMenu();
|
||||
props.onExportConversation(!disabled ? props.conversationId : null);
|
||||
shareTouch();
|
||||
};
|
||||
|
||||
const handleConversationDuplicate = (e: React.MouseEvent<HTMLDivElement>) => {
|
||||
e.stopPropagation();
|
||||
closeLayoutMenu();
|
||||
props.conversationId && props.onDuplicateConversation(props.conversationId);
|
||||
};
|
||||
|
||||
const handleConversationFlatten = (e: React.MouseEvent<HTMLDivElement>) => {
|
||||
e.stopPropagation();
|
||||
closeLayoutMenu();
|
||||
props.conversationId && props.onFlattenConversation(props.conversationId);
|
||||
};
|
||||
|
||||
const handleToggleMessageSelectionMode = (e: React.MouseEvent) => {
|
||||
e.stopPropagation();
|
||||
closeLayoutMenu();
|
||||
props.setIsMessageSelectionMode(!props.isMessageSelectionMode);
|
||||
};
|
||||
|
||||
const handleConversationClear = (e: React.MouseEvent<HTMLDivElement>) => {
|
||||
e.stopPropagation();
|
||||
props.conversationId && props.onClearConversation(props.conversationId);
|
||||
};
|
||||
|
||||
return <>
|
||||
|
||||
{/*<ListItem>*/}
|
||||
{/* <Typography level='body-sm'>*/}
|
||||
{/* Conversation*/}
|
||||
{/* </Typography>*/}
|
||||
{/*</ListItem>*/}
|
||||
|
||||
<MenuItem onClick={handleSystemMessagesToggle}>
|
||||
<ListItemDecorator><SettingsSuggestIcon /></ListItemDecorator>
|
||||
System message
|
||||
<Switch checked={showSystemMessages} onChange={handleSystemMessagesToggle} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<ListDivider inset='startContent' />
|
||||
|
||||
<MenuItem disabled={disabled} onClick={handleConversationDuplicate}>
|
||||
<ListItemDecorator>
|
||||
{/*<Badge size='sm' color='success'>*/}
|
||||
<ForkRightIcon color='success' />
|
||||
{/*</Badge>*/}
|
||||
</ListItemDecorator>
|
||||
Duplicate
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem disabled={disabled} onClick={handleConversationFlatten}>
|
||||
<ListItemDecorator>
|
||||
{/*<Badge size='sm' color='success'>*/}
|
||||
<CompressIcon color='success' />
|
||||
{/*</Badge>*/}
|
||||
</ListItemDecorator>
|
||||
Flatten
|
||||
</MenuItem>
|
||||
|
||||
<ListDivider inset='startContent' />
|
||||
|
||||
<MenuItem disabled={disabled} onClick={handleToggleMessageSelectionMode}>
|
||||
<ListItemDecorator>{props.isMessageSelectionMode ? <CheckBoxOutlinedIcon /> : <CheckBoxOutlineBlankOutlinedIcon />}</ListItemDecorator>
|
||||
<span style={props.isMessageSelectionMode ? { fontWeight: 800 } : {}}>
|
||||
Cleanup ...
|
||||
</span>
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem disabled={!props.hasConversations} onClick={handleConversationExport}>
|
||||
<ListItemDecorator>
|
||||
<Badge color='danger' invisible={!shareBadge || !props.hasConversations}>
|
||||
<FileDownloadIcon />
|
||||
</Badge>
|
||||
</ListItemDecorator>
|
||||
Share / Export ...
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem disabled={disabled} onClick={handleConversationClear}>
|
||||
<ListItemDecorator><ClearIcon /></ListItemDecorator>
|
||||
Reset
|
||||
</MenuItem>
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
|
||||
import { Box, ListItemButton, ListItemDecorator } from '@mui/joy';
|
||||
import BuildCircleIcon from '@mui/icons-material/BuildCircle';
|
||||
import SettingsIcon from '@mui/icons-material/Settings';
|
||||
|
||||
import { DLLM, DLLMId, DModelSourceId, useModelsStore } from '~/modules/llms/store-llms';
|
||||
|
||||
import { AppBarDropdown, DropdownItems } from '~/common/layout/AppBarDropdown';
|
||||
import { KeyStroke } from '~/common/components/KeyStroke';
|
||||
import { hideOnMobile } from '~/common/theme';
|
||||
import { openLayoutLLMOptions, openLayoutModelsSetup } from '~/common/layout/store-applayout';
|
||||
|
||||
|
||||
function AppBarLLMDropdown(props: {
|
||||
llms: DLLM[],
|
||||
llmId: DLLMId | null,
|
||||
setLlmId: (llmId: DLLMId | null) => void,
|
||||
placeholder?: string,
|
||||
}) {
|
||||
|
||||
// build model menu items, filtering-out hidden models, and add Source separators
|
||||
const llmItems: DropdownItems = {};
|
||||
let prevSourceId: DModelSourceId | null = null;
|
||||
for (const llm of props.llms) {
|
||||
if (!llm.hidden || llm.id === props.llmId) {
|
||||
if (!prevSourceId || llm.sId !== prevSourceId) {
|
||||
if (prevSourceId)
|
||||
llmItems[`sep-${llm.id}`] = { type: 'separator', title: llm.sId };
|
||||
prevSourceId = llm.sId;
|
||||
}
|
||||
llmItems[llm.id] = { title: llm.label };
|
||||
}
|
||||
}
|
||||
|
||||
const handleChatLLMChange = (_event: any, value: DLLMId | null) => value && props.setLlmId(value);
|
||||
|
||||
const handleOpenLLMOptions = () => props.llmId && openLayoutLLMOptions(props.llmId);
|
||||
|
||||
|
||||
return (
|
||||
<AppBarDropdown
|
||||
items={llmItems}
|
||||
value={props.llmId} onChange={handleChatLLMChange}
|
||||
placeholder={props.placeholder || 'Models …'}
|
||||
appendOption={<>
|
||||
|
||||
{props.llmId && (
|
||||
<ListItemButton key='menu-opt' onClick={handleOpenLLMOptions}>
|
||||
<ListItemDecorator><SettingsIcon color='success' /></ListItemDecorator>
|
||||
Options
|
||||
</ListItemButton>
|
||||
)}
|
||||
|
||||
<ListItemButton key='menu-llms' onClick={openLayoutModelsSetup}>
|
||||
<ListItemDecorator><BuildCircleIcon color='success' /></ListItemDecorator>
|
||||
<Box sx={{ flexGrow: 1, display: 'flex', justifyContent: 'space-between', gap: 1 }}>
|
||||
Models
|
||||
<KeyStroke light combo='Ctrl + Shift + M' sx={hideOnMobile} />
|
||||
</Box>
|
||||
</ListItemButton>
|
||||
|
||||
</>}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
export function useChatLLMDropdown() {
|
||||
// external state
|
||||
const { llms, chatLLMId, setChatLLMId } = useModelsStore(state => ({
|
||||
llms: state.llms,
|
||||
chatLLMId: state.chatLLMId,
|
||||
setChatLLMId: state.setChatLLMId,
|
||||
}), shallow);
|
||||
|
||||
const chatLLMDropdown = React.useMemo(
|
||||
() => <AppBarLLMDropdown llms={llms} llmId={chatLLMId} setLlmId={setChatLLMId} />,
|
||||
[llms, chatLLMId, setChatLLMId],
|
||||
);
|
||||
|
||||
return { chatLLMId, chatLLMDropdown };
|
||||
}
|
||||
|
||||
/*export function useTempLLMDropdown(props: { initialLlmId: DLLMId | null }) {
|
||||
// local state
|
||||
const [llmId, setLlmId] = React.useState<DLLMId | null>(props.initialLlmId);
|
||||
|
||||
// external state
|
||||
const llms = useModelsStore(state => state.llms, shallow);
|
||||
|
||||
const chatLLMDropdown = React.useMemo(
|
||||
() => <AppBarLLMDropdown llms={llms} llmId={llmId} setLlmId={setLlmId} />,
|
||||
[llms, llmId, setLlmId],
|
||||
);
|
||||
|
||||
return { llmId, chatLLMDropdown };
|
||||
}*/
|
||||