Compare commits

..

4 Commits

Author SHA1 Message Date
Enrico Ros 3a9a6b0273 Merge branch 'jondwillis-feature/auth' into jondwillis-feature/auth-merged 2023-05-01 23:44:07 -07:00
Enrico Ros 3b51c39fc3 Small bits 2023-05-01 22:13:34 -07:00
Enrico Ros 05293ba557 Merge branch 'feature/auth' of https://github.com/jondwillis/nextjs-chatgpt-app into jondwillis-feature/auth 2023-05-01 22:13:02 -07:00
jon d18d5323aa auth squash and rebase 2023-04-11 13:25:39 -07:00
210 changed files with 6175 additions and 12551 deletions
+15 -10
View File
@@ -1,16 +1,9 @@
# [Recommended for local deployments] Backend API key for OpenAI, so that users don't need one (UI > this > '')
OPENAI_API_KEY=
# [Optional] Sets the "OpenAI-Organization" header field to support organization users (UI > this > '')
OPENAI_API_ORG_ID=
# [Optional] Set the backend host for the OpenAI API, to enable platforms such as Helicone (UI > this > api.openai.com)
# [Not needed] Set the backend host for the OpenAI API, to enable platforms such as Helicone (UI > this > api.openai.com)
OPENAI_API_HOST=
# [Optional, Helicone] Helicone API key: https://www.helicone.ai/keys
HELICONE_API_KEY=
# [Optional] Anthropic credentials for the server-side
ANTHROPIC_API_KEY=
ANTHROPIC_API_HOST=
# [Not needed] Sets the "OpenAI-Organization" header field to support organization users (UI > this > '')
OPENAI_API_ORG_ID=
# [Optional] Enables ElevenLabs credentials on the server side - for optional text-to-speech
ELEVENLABS_API_KEY=
@@ -26,3 +19,15 @@ GOOGLE_CLOUD_API_KEY=
# [Optional, Search] Google Custom/Programmable Search Engine ID
# https://programmablesearchengine.google.com/
GOOGLE_CSE_ID=
# see docs/authentication.md to configure this section
AUTH_TYPE=
# [At least one required if AUTH_TYPE == credential] You may declare credentials for users from 0 to 99.
AUTH_USER_0=
AUTH_PASSWORD_0=
# [Required if AUTH_TYPE == basic and not in development mode] See: https://next-auth.js.org/configuration/options#nextauth_url
NEXTAUTH_URL=
# [Required if AUTH_TYPE == basic] See: https://next-auth.js.org/configuration/options#secret
NEXTAUTH_SECRET=
+1 -1
View File
@@ -26,7 +26,7 @@ yarn-error.log*
.pnpm-debug.log*
# local env files
.env
.env*.local
# vercel
.vercel
+1 -2
View File
@@ -1,7 +1,6 @@
{
"singleAttributePerLine": false,
"singleQuote": true,
"trailingComma": "all",
"endOfLine": "lf",
"printWidth": 160
}
}
-1
View File
@@ -34,7 +34,6 @@ WORKDIR /usr/src/app
# Include only the release build and production packages.
COPY --from=build-target /usr/src/app/node_modules node_modules
COPY --from=build-target /usr/src/app/.next .next
COPY --from=build-target /usr/src/app/public public
# Expose port 3000 for the application to listen on
EXPOSE 3000
+22 -58
View File
@@ -1,9 +1,9 @@
# `BIG-AGI` 🤖💬
Welcome to `big-AGI` 👋 your personal AGI application
powered by OpenAI GPT-4 and beyond. Designed for smart humans and super-heroes,
Welcome to `big-AGI`, FKA `nextjs-chatgpt-app`. 👋🎉
Personal AGI App, powered by `OpenAI GPT-4` and beyond. Designed for smart humans and super-heroes,
this responsive web app comes with Personas, Drawing, Code Execution, PDF imports, Voice support,
data Rendering, AGI functions, chats and much more. Comes with plenty of `#big-AGI-energy` 🚀
data Rendering, AGI functions, chats and more. Show your friends some `#big-AGI-energy` 🚀
[![Official Website](https://img.shields.io/badge/BIG--AGI.com-%23096bde?style=for-the-badge&logo=vercel&label=demo)](https://big-agi.com)
@@ -17,20 +17,21 @@ Or fork & run on Vercel
- Engaging AI Personas
- Clean UX, w/ tokens counters
- Private: user-owned API keys and localStorage, self-hostable if you like
- Privacy: user-owned API keys and localStorage
- Human I/O: Advanced voice support (TTS, STT)
- Machine I/O: PDF import & Summarization, code execution
- Many more updates & integrations: ElevenLabs, Helicone, Paste.gg, Prodia
- Coming up: automatic-AGI reasoning (Reason+Act) and more
- Coming up: automatic-AGI reasoning
## Support 🙌
[//]: # ([![Official Discord](https://img.shields.io/discord/1098796266906980422?label=discord&logo=discord&logoColor=%23fff&style=for-the-badge)](https://discord.gg/MkH4qj2Jp9))
[![Official Discord](https://discordapp.com/api/guilds/1098796266906980422/widget.png?style=banner2)](https://discord.gg/MkH4qj2Jp9)
* Enjoy the hosted open-source app on [big-AGI.com](https://big-agi.com)
* [Chat with us](https://discord.gg/MkH4qj2Jp9)
* Deploy your [fork](https://github.com/enricoros/big-agi/fork) for your friends and family
* Enjoy the hosted open-source app on [big-AGI.com](https://get.big-agi.com)
* [Chat with us](https://discord.gg/MkH4qj2Jp9). We just started!
* Deploy your [fork](https://github.com/enricoros/big-agi/fork) and surprise your friends with big-GPT
energy!
* send PRs! ...
🎭[Editing Personas](https://github.com/enricoros/big-agi/issues/35),
🧩[Reasoning Systems](https://github.com/enricoros/big-agi/issues/36),
@@ -39,35 +40,15 @@ Or fork & run on Vercel
<br/>
## Latest Drops 💧🎁
## Latest Drops 🚀
#### 🚨 July/Aug: Back with the Cool features 🧠
#### 🚨 May: mature #big-agi-energy
- 🎉 **Camera OCR** - real-world AI - take a picture of a text, and chat with it
- 🎉 **Backup/Restore** - save chats, and restore them later
- 🎉 **[Local model support with Oobabooga server](docs/local-llm-text-web-ui.md)** - run your own LLMs!
- 🎉 **Flatten conversations** - conversations summarizer with 4 modes
- 🎉 **Fork conversations** - create a new chat, to experiment with different endings
- 🎉 New commands: /s to add a System message, and /a for an Assistant message
- 🎉 New Chat modes: Write-only - just appends the message, without assistant response
- 🎉 Fix STOP generation - in sync with the Vercel team to fix a long-standing NextJS issue
- 🎉 Fixes on the HTML block - particularly useful to see error pages
- 🎉 **Authentication** basic user authentication framework
#### June: scale UP 🚀
#### April: #big-agi-energy grows
- 🎉 **[New OpenAI Models](https://openai.com/blog/function-calling-and-other-api-updates) support** - 0613 models, including 16k and 32k
- 🎉 **Cleaner UI** - with rationalized Settings, Modals, and Configurators
- 🎉 **Dynamic Models Configurator** - easy connection with different model vendors
- 🎉 **Multiple Model Vendors Support** framework to support many LLM vendors
- 🎉 **Per-model Options** (temperature, tokens, etc.) for fine-tuning AI behavior to your needs
- 🎉 Support for GPT-4-32k
- 🎉 Improved Dialogs and Messages
- 🎉 Much Enhanced DX: TRPC integration, modularization, pluggable UI, etc
#### April / May: more #big-agi-energy
- 🎉 **[Google Search](docs/pixels/feature_react_google.png)** active in ReAct - add your keys to Settings > Google
Search
- 🎉 **[Google Search](docs/pixels/feature_react_google.png)** active in ReAct - add your keys to Settings > Google Search
- 🎉 **[Reason+Act](docs/pixels/feature_react_turn_on.png)** preview feature - activate with 2-taps on the 'Chat' button
- 🎉 **[Image Generation](docs/pixels/feature_imagine_command.png)** using Prodia (BYO Keys) - /imagine - or menu option
- 🎉 **[Voice Synthesis](docs/pixels/feature_voice_1.png)** 📣 with ElevenLabs, including selection of custom voices
@@ -91,8 +72,8 @@ Or fork & run on Vercel
- 🎉 **Privacy**: user-owned API keys 🔑 and localStorage 🛡️
- 🎉 **Context** - Attach or [Drag & Drop files](docs/pixels/feature_drop_target.png) to add them to the prompt 📁
- 🎉 **Syntax highlighting** - for multiple languages 🌈
- 🎉 **Code Execution: Sandpack** -
[now on branch](https://github.com/enricoros/big-agi/commit/f678a0d463d5e9cf0733f577e11bd612b7902d89) `variant-code-execution`
- 🎉 **Code Execution: Sandpack
** - [now on branch](https://github.com/enricoros/big-agi/commit/f678a0d463d5e9cf0733f577e11bd612b7902d89) `variant-code-execution`
- 🎉 Chat with GPT-4 and 3.5 Turbo 🧠💨
- 🎉 Real-time streaming of AI responses ⚡
- 🎉 **Voice Input** 🎙️ - works great on Chrome / Windows
@@ -102,6 +83,10 @@ Or fork & run on Vercel
<br/>
### Basic Authentication for public deployments 🔐
To protect the web app owner from incurring unauthorized costs when deploying the app with a backend API key (`OPENAI_API_KEY`), you can [set up basic authentication](/docs/authentication.md).
## Why this? 💡
Because the official Chat ___lacks important features___, is ___more limited than the api___, at times
@@ -112,7 +97,7 @@ with features that matter to them.
![Much features, so fun](docs/pixels/big-AGI-compo2b.png)
## Develop 🧩
## Code 🧩
![TypeScript](https://img.shields.io/badge/TypeScript-007ACC?style=&logo=typescript&logoColor=white)
![React](https://img.shields.io/badge/React-61DAFB?style=&logo=react&logoColor=black)
@@ -136,27 +121,6 @@ Now the app should be running on `http://localhost:3000`
* [Paste.gg](https://paste.gg/) Paste Sharing - Chat Menu > Share via paste.gg
* [Prodia](https://prodia.com/) Image Generation - Settings > Image Generation > Api Key & Model
## Deploy with Docker 🐳
Specific docker information on [docs/deploy-docker.md](docs/deploy-docker.md). In short:
#### Pre-built image
Add your OpenAI API key to the `.env` file, then in a terminal run:
```bash
docker-compose up
```
#### Locally built image
If you wish to build the image yourself, run
```bash
docker build -t big-agi .
docker run --detach 'big-agi'
```
<br/>
This project is licensed under the MIT License.
@@ -168,4 +132,4 @@ This project is licensed under the MIT License.
[//]: # ([![GitHub issues]&#40;https://img.shields.io/github/issues/enricoros/big-agi&#41;]&#40;https://github.com/enricoros/big-agi/issues&#41;)
Made with 💙
Made with 💙
-10
View File
@@ -1,10 +0,0 @@
version: '3.9'
services:
big-agi:
image: ghcr.io/enricoros/big-agi:main
ports:
- "3000:3000"
env_file:
- .env
command: [ "next", "start", "-p", "3000" ]
+37
View File
@@ -0,0 +1,37 @@
### Authentication with NextAuth.js 🔐
To protect the web app owner from incurring unauthorized costs when deploying the app with a backend API
key (`OPENAI_API_KEY`), you can set up basic authentication using [NextAuth.js](https://next-auth.js.org/).
#### Configuration
Update your `.env` file or Environment Variables with the following variables:
```
# [Optional] Set the authentication type to "credential" to enable basic username/password authentication
AUTH_TYPE=credential
# [Required if AUTH_TYPE == credential] Define credentials for users - you can declare up to 100 users
AUTH_USER_0=your_username
AUTH_PASSWORD_0=your_password
AUTH_USER_1=...
AUTH_PASSWORD_1=...
...
# [Required if AUTH_TYPE == credential and *not in development mode*] See: https://next-auth.js.org/configuration/options#nextauth_url
NEXTAUTH_URL=https://example.com
# [Required if AUTH_TYPE == credential] See: https://next-auth.js.org/configuration/options#secret
NEXTAUTH_SECRET=your_nextauth_secret
```
You can add multiple users by incrementing the index, e.g., `AUTH_USER_1`, `AUTH_PASSWORD_1`, and so on. They do not
need to be contiguous.
#### Usage
Once you have set up basic authentication, users will be prompted to enter their credentials when accessing the app.
Only users with valid credentials will be able to use the app and make requests to the OpenAI API.
For more information on configuring and using NextAuth.js, refer to
the [official documentation](https://next-auth.js.org/).
-45
View File
@@ -1,45 +0,0 @@
# Local LLM Integration with `text-web-ui` :llama:
Integrate local Large Language Models (LLMs) using
[oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui),
a specialized interface that incorporates a custom variant of the OpenAI API for a seamless integration experience.
_Last changed on Aug 8, 2023, using the CMD_FLAGS.txt file_
### Components
Implementation of local LLMs requires the following components:
* **text-generation-webui**: a python application with Gradio web UI for running Large Language Models
* **local Large Language Models "LLMs"**: use large language models on your own computer and with consumer GPUs or CPUs
* **big-AGI**: LLM UI, offering features such as Personas, OCR, Voice Support, Code Execution, AGI functions, and more
## Instructions
This guide presumes that **big-AGI** is already installed on your system - note that the text-generation-webui IP
address must be accessible from the Server running **big-AGI**.
1. Install [text-generation-webui](https://github.com/oobabooga/text-generation-webui#Installation)
- Download the one-click installer, extract it, and double-click on "start" - 10 min
- Then close it, as we need to change the startup flags
2. Enable the **openai extension**
- Edit `CMD_FLAGS.txt`
- Update the contents from `--chat` to: `--chat --listen --extensions openai`
3. Restart text-generation-webui
- Double-click on "start"
- You will see something like: `OpenAI compatible API ready at: OPENAI_API_BASE=http://0.0.0.0:5001/v1`
- The OpenAI API is now running on port 5001, on both localhost (127.0.0.1) and your local IP address
4. Load your first model
- Open the text-generation-webui at [127.0.0.1:7860](http://127.0.0.1:7860/)
- Switch to the **Model** tab
- Download for instance `TheBloke/Llama-2-7b-Chat-GPTQ:gptq-4bit-32g-actorder_True` - 4.3 GB
- Select the model once loaded
5. Configure big-AGI:
- Models > Add a model source of type: **Oobabooga**
- Enter the address: `http://127.0.0.1:5001`
- replace 127.0.0.1 with the IP of the machine if running remotely - make sure to use the **IP:Port** format
- Load the models
- the active model must be selected on the text-generation-webui, as it doesn't support model switching or parallel requests
- Select model & Chat
Experience the privacy and flexibility of local LLMs with `big-AGI` and `text-generation-webui`! :tada:
-10
View File
@@ -1,10 +0,0 @@
# Scratchpad
Nobody will see this, right?
## Modules
### LLMs
- [ ] How to show server-side-configured OpenAI? - shall it be an auto-conf'd source that can be added?
- Would we allow people to add the key? ideally that conf would be immutable
+16
View File
@@ -0,0 +1,16 @@
import { withAuth } from 'next-auth/middleware';
import { authType } from '@/modules/authentication/auth.server';
// noinspection JSUnusedGlobalSymbols
export const middleware = !authType ? () => null : withAuth({
callbacks: {
authorized({ req, token }) {
// console.log('authorized', req, token);
return !!token;
},
},
});
export const config = { matcher: ['/:path*'] };
+5 -8
View File
@@ -1,13 +1,14 @@
/** @type {import('next').NextConfig} */
let nextConfig = {
const nextConfig = {
reactStrictMode: true,
env: {
// defaults to TRUE, unless API Keys are set at build time; this flag is used by the UI
HAS_SERVER_KEYS_GOOGLE_CSE: !!process.env.GOOGLE_CLOUD_API_KEY && !!process.env.GOOGLE_CSE_ID,
HAS_SERVER_KEY_ANTHROPIC: !!process.env.ANTHROPIC_API_KEY,
HAS_SERVER_KEY_ELEVENLABS: !!process.env.ELEVENLABS_API_KEY,
HAS_SERVER_KEY_OPENAI: !!process.env.OPENAI_API_KEY,
HAS_SERVER_KEY_ELEVENLABS: !!process.env.ELEVENLABS_API_KEY,
HAS_SERVER_KEY_PRODIA: !!process.env.PRODIA_API_KEY,
HAS_SERVER_KEYS_GOOGLE_CSE: !!process.env.GOOGLE_CLOUD_API_KEY && !!process.env.GOOGLE_CSE_ID,
// for auth only
SERVER_AUTH_TYPE: process.env.AUTH_TYPE,
},
webpack(config, { isServer, dev }) {
// @mui/joy: anything material gets redirected to Joy
@@ -23,8 +24,4 @@ let nextConfig = {
},
};
// conditionally enable the nextjs bundle analyzer
if (process.env.ANALYZE_BUNDLE)
nextConfig = require('@next/bundle-analyzer')()(nextConfig);
module.exports = nextConfig;
+1091 -1176
View File
File diff suppressed because it is too large Load Diff
+21 -32
View File
@@ -1,9 +1,9 @@
{
"name": "big-agi",
"version": "1.3.5",
"version": "0.9.1",
"private": true,
"engines": {
"node": "^18.0.0"
"node": ">=18.0.0"
},
"scripts": {
"dev": "next dev",
@@ -13,45 +13,34 @@
},
"dependencies": {
"@dqbd/tiktoken": "^1.0.7",
"@emotion/cache": "^11.11.0",
"@emotion/react": "^11.11.1",
"@emotion/server": "^11.11.0",
"@emotion/styled": "^11.11.0",
"@mui/icons-material": "^5.14.3",
"@mui/joy": "^5.0.0-beta.2",
"@next/bundle-analyzer": "^13.4.16",
"@tanstack/react-query": "4.32.6",
"@trpc/client": "^10.37.1",
"@trpc/next": "^10.37.1",
"@trpc/react-query": "^10.37.1",
"@trpc/server": "^10.37.1",
"@vercel/analytics": "^1.0.2",
"browser-fs-access": "^0.34.1",
"@emotion/react": "^11.10.8",
"@emotion/server": "^11.10.0",
"@emotion/styled": "^11.10.8",
"@mui/icons-material": "^5.11.16",
"@mui/joy": "^5.0.0-alpha.77",
"@tanstack/react-query": "^4.29.5",
"@vercel/analytics": "^1.0.0",
"eventsource-parser": "^1.0.0",
"next": "^13.4.16",
"pdfjs-dist": "3.9.179",
"plantuml-encoder": "^1.4.0",
"next": "^13.3.2",
"pdfjs-dist": "^3.5.141",
"next-auth": "^4.21.1",
"prismjs": "^1.29.0",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-markdown": "^8.0.7",
"remark-gfm": "^3.0.1",
"superjson": "^1.13.1",
"tesseract.js": "^4.1.1",
"uuid": "^9.0.0",
"zod": "3.21.4",
"zustand": "4.3.9"
"zustand": "^4.3.7"
},
"devDependencies": {
"@types/node": "^20.4.10",
"@types/plantuml-encoder": "^1.4.0",
"@types/node": "^18.16.3",
"@types/prismjs": "^1.26.0",
"@types/react": "^18.2.20",
"@types/react-dom": "^18.2.7",
"@types/uuid": "^9.0.2",
"eslint": "^8.47.0",
"eslint-config-next": "^13.4.16",
"prettier": "^3.0.1",
"typescript": "^5.1.6"
"@types/react": "^18.2.0",
"@types/react-dom": "^18.2.1",
"@types/uuid": "^9.0.1",
"eslint": "^8.39.0",
"eslint-config-next": "^13.3.2",
"prettier": "^2.8.8",
"typescript": "^5.0.4"
}
}
+21 -29
View File
@@ -5,13 +5,12 @@ import { AppProps } from 'next/app';
import { CacheProvider, EmotionCache } from '@emotion/react';
import { CssBaseline, CssVarsProvider } from '@mui/joy';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import { Session as NextAuthSession } from 'next-auth';
import { SessionProvider } from 'next-auth/react';
import { apiQuery } from '~/modules/trpc/trpc.client';
import '~/common/styles/CodePrism.css'
import '~/common/styles/GithubMarkdown.css';
import { Brand } from '~/common/brand';
import { createEmotionCache, theme } from '~/common/theme';
import '@/common/styles/GithubMarkdown.css';
import { Brand } from '@/common/brand';
import { createEmotionCache, theme } from '@/common/theme';
// Client-side cache, shared for the whole session of the user in the browser.
@@ -19,37 +18,30 @@ const clientSideEmotionCache = createEmotionCache();
export interface MyAppProps extends AppProps {
emotionCache?: EmotionCache;
session?: NextAuthSession;
}
function MyApp({ Component, emotionCache = clientSideEmotionCache, pageProps }: MyAppProps) {
const [queryClient] = React.useState(() => new QueryClient({
defaultOptions: {
queries: {
retry: false,
},
mutations: {
retry: false,
},
},
}));
export default function MyApp({ Component, emotionCache = clientSideEmotionCache, pageProps: { session, ...pageProps } }: MyAppProps) {
const [queryClient] = React.useState(() => new QueryClient());
return <>
<CacheProvider value={emotionCache}>
<Head>
<title>{Brand.Title.Common}</title>
<meta name='viewport' content='minimum-scale=1, initial-scale=1, width=device-width, shrink-to-fit=no' />
</Head>
{/* Rect-query provider */}
<QueryClientProvider client={queryClient}>
<CssVarsProvider defaultMode='light' theme={theme}>
{/* CssBaseline kickstart an elegant, consistent, and simple baseline to build upon. */}
<CssBaseline />
<Component {...pageProps} />
</CssVarsProvider>
</QueryClientProvider>
{/* Next-Auth provider */}
<SessionProvider session={session}>
{/* Rect-query provider */}
<QueryClientProvider client={queryClient}>
{/* JoyUI/Emotion */}
<CssVarsProvider defaultMode='light' theme={theme}>
{/* CssBaseline kickstart an elegant, consistent, and simple baseline to build upon. */}
<CssBaseline />
<Component {...pageProps} />
</CssVarsProvider>
</QueryClientProvider>
</SessionProvider>
</CacheProvider>
<VercelAnalytics debug={false} />
</>;
}
// enables the react-query api invocation
export default apiQuery.withTRPC(MyApp);
}
+6 -6
View File
@@ -4,14 +4,13 @@ import { default as Document, DocumentContext, DocumentProps, Head, Html, Main,
import createEmotionServer from '@emotion/server/create-instance';
import { getInitColorSchemeScript } from '@mui/joy/styles';
import { Brand } from '~/common/brand';
import { bodyFontClassName, createEmotionCache } from '~/common/theme';
import { Brand } from '@/common/brand';
import { MyAppProps } from './_app';
import { bodyFontClassName, createEmotionCache } from '@/common/theme';
interface MyDocumentProps extends DocumentProps {
emotionStyleTags: React.JSX.Element[];
emotionStyleTags: JSX.Element[];
}
export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
@@ -20,6 +19,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
<Head>
{/* Meta (missing Title, set by the App or Page) */}
<meta name='description' content={Brand.Meta.Description} />
<meta name='keywords' content={Brand.Meta.Keywords} />
<meta name='theme-color' content={Brand.Meta.ThemeColor} />
{/* Favicons & PWA */}
@@ -32,7 +32,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
<meta name='apple-mobile-web-app-status-bar-style' content='black' />
{/* Opengraph */}
<meta property='og:title' content={Brand.Title.Common} />
<meta property='og:title' content={Brand.Meta.Title} />
<meta property='og:description' content={Brand.Meta.Description} />
{Brand.URIs.CardImage && <meta property='og:image' content={Brand.URIs.CardImage} />}
<meta property='og:url' content={Brand.URIs.Home} />
@@ -42,7 +42,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
{/* Twitter */}
<meta property='twitter:card' content='summary_large_image' />
<meta property='twitter:url' content={Brand.URIs.Home} />
<meta property='twitter:title' content={Brand.Title.Common} />
<meta property='twitter:title' content={Brand.Meta.Title} />
<meta property='twitter:description' content={Brand.Meta.Description} />
{Brand.URIs.CardImage && <meta property='twitter:image' content={Brand.URIs.CardImage} />}
<meta name='twitter:site' content={Brand.Meta.TwitterSite} />
+20
View File
@@ -0,0 +1,20 @@
import { NextApiRequest, NextApiResponse } from 'next';
import { default as NextAuth } from 'next-auth';
import { authBasicUsers, authCreateProviders, authType } from '@/modules/authentication/auth.server';
const authOptions = {
secret: process.env.NEXTAUTH_SECRET,
providers: authCreateProviders(),
};
export default function handler(req: NextApiRequest, res: NextApiResponse) {
if (!authType)
return res.status(200).send('Auth not enabled');
if (Object.keys(authBasicUsers).length <= 0)
res.status(200).send('Auth enabled but no users have been set up');
return NextAuth(req, res, authOptions);
}
+61 -23
View File
@@ -1,39 +1,77 @@
import { NextRequest, NextResponse } from 'next/server';
import { elevenlabsAccess, elevenlabsVoiceId, ElevenlabsWire, speechInputSchema } from '~/modules/elevenlabs/elevenlabs.router';
import { ElevenLabs } from '@/modules/elevenlabs/elevenlabs.types';
/* NOTE: Why does this file even exist?
function parseApiParameters(apiKey?: string) {
return {
apiHost: (process.env.ELEVENLABS_API_HOST || 'api.elevenlabs.io').trim().replaceAll('https://', ''),
apiHeaders: {
'Content-Type': 'application/json',
'xi-api-key': (apiKey || process.env.ELEVENLABS_API_KEY || '').trim(),
},
};
}
This file is a workaround for a limitation in tRPC; it does not support ArrayBuffer responses,
and that would force us to use base64 encoding for the audio data, which would be a waste of
bandwidth. So instead, we use this file to make the request to ElevenLabs, and then return the
response as an ArrayBuffer. Unfortunately this means duplicating the code in the server-side
and client-side vs. the TRPC implementation. So at least we recycle the input structures.
async function rethrowElevenLabsError(response: Response) {
if (!response.ok) {
let errorPayload: object | null = null;
try {
errorPayload = await response.json();
} catch (e) {
// ignore
}
console.error('Error in ElevenLabs API:', errorPayload);
throw new Error('ElevenLabs error: ' + JSON.stringify(errorPayload));
}
}
export async function getFromElevenLabs<TJson extends object>(apiKey: string, apiPath: string): Promise<TJson> {
const { apiHost, apiHeaders } = parseApiParameters(apiKey);
const response = await fetch(`https://${apiHost}${apiPath}`, {
method: 'GET',
headers: apiHeaders,
});
await rethrowElevenLabsError(response);
return await response.json();
}
export async function postToElevenLabs<TBody extends object>(apiKey: string, apiPath: string, body: TBody, signal?: AbortSignal): Promise<Response> {
const { apiHost, apiHeaders } = parseApiParameters(apiKey);
const response = await fetch(`https://${apiHost}${apiPath}`, {
method: 'POST',
headers: apiHeaders,
body: JSON.stringify(body),
signal,
});
await rethrowElevenLabsError(response);
return response;
}
*/
export default async function handler(req: NextRequest) {
try {
// construct the upstream request
const { elevenKey, text, voiceId, nonEnglish } = speechInputSchema.parse(await req.json());
const { headers, url } = elevenlabsAccess(elevenKey, `/v1/text-to-speech/${elevenlabsVoiceId(voiceId)}`);
const body: ElevenlabsWire.TTSRequest = {
const { apiKey = '', text, voiceId: userVoiceId, nonEnglish } = (await req.json()) as ElevenLabs.API.TextToSpeech.RequestBody;
const voiceId = userVoiceId || process.env.ELEVENLABS_VOICE_ID || '21m00Tcm4TlvDq8ikWAM';
const requestPayload: ElevenLabs.Wire.TextToSpeech.Request = {
text: text,
...(nonEnglish && { model_id: 'eleven_multilingual_v1' }),
...(nonEnglish ? { model_id: 'eleven_multilingual_v1' } : {}),
};
// elevenlabs POST
const response = await fetch(url, { headers, method: 'POST', body: JSON.stringify(body) });
const audioArrayBuffer = await response.arrayBuffer();
// return the audio
return new NextResponse(audioArrayBuffer, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
const response = await postToElevenLabs<ElevenLabs.Wire.TextToSpeech.Request>(apiKey, `/v1/text-to-speech/${voiceId}`, requestPayload);
const audioBuffer: ElevenLabs.API.TextToSpeech.Response = await response.arrayBuffer();
return new NextResponse(audioBuffer, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
} catch (error) {
console.error('api/elevenlabs/speech error:', error);
return new NextResponse(JSON.stringify(`textToSpeech error: ${error?.toString() || 'Network issue'}`), { status: 500 });
console.error('Error posting to ElevenLabs', error);
return new NextResponse(JSON.stringify(`speechToText error: ${error?.toString() || 'Network issue'}`), { status: 500 });
}
}
// noinspection JSUnusedGlobalSymbols
export const runtime = 'edge';
export const config = {
runtime: 'edge',
};
+48
View File
@@ -0,0 +1,48 @@
import { NextRequest, NextResponse } from 'next/server';
import { ElevenLabs } from '@/modules/elevenlabs/elevenlabs.types';
import { getFromElevenLabs } from './speech';
export default async function handler(req: NextRequest) {
try {
const { apiKey = '' } = (await req.json()) as ElevenLabs.API.Voices.RequestBody;
const voicesList = await getFromElevenLabs<ElevenLabs.Wire.Voices.List>(apiKey, '/v1/voices');
// bring category != 'premade to the top
voicesList.voices.sort((a, b) => {
if (a.category === 'premade' && b.category !== 'premade') return 1;
if (a.category !== 'premade' && b.category === 'premade') return -1;
return 0;
});
// map to our own response format
const response: ElevenLabs.API.Voices.Response = {
voices: voicesList.voices.map((voice, idx) => ({
id: voice.voice_id,
name: voice.name,
description: voice.description,
previewUrl: voice.preview_url,
category: voice.category,
default: idx === 0,
})),
};
return new NextResponse(JSON.stringify(response), { status: 200, headers: { 'Content-Type': 'application/json' } });
} catch (error) {
console.error('Error fetching voices from ElevenLabs:', error);
return new NextResponse(
JSON.stringify({
type: 'error',
error: error?.toString() || error || 'Network issue',
}),
{ status: 500 },
);
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
-207
View File
@@ -1,207 +0,0 @@
import { NextRequest, NextResponse } from 'next/server';
import { createParser as createEventsourceParser, EventSourceParser, ParsedEvent, ReconnectInterval } from 'eventsource-parser';
import { AnthropicWire } from '~/modules/llms/anthropic/anthropic.types';
import { OpenAI } from '~/modules/llms/openai/openai.types';
import { anthropicAccess, anthropicCompletionRequest } from '~/modules/llms/anthropic/anthropic.router';
import { chatStreamSchema, openAIAccess, openAIChatCompletionPayload } from '~/modules/llms/openai/openai.router';
/**
* Vendor stream parsers
* - The vendor can decide to terminate the connection (close: true), transmitting anything in 'text' before doing so
* - The vendor can also throw from this function, which will error and terminate the connection
*/
type AIStreamParser = (data: string) => { text: string, close: boolean };
// The peculiarity of our parser is the injection of a JSON structure at the beginning of the stream, to
// communicate parameters before the text starts flowing to the client.
function parseOpenAIStream(): AIStreamParser {
let hasBegun = false;
let hasWarned = false;
return data => {
const json: OpenAI.Wire.ChatCompletion.ResponseStreamingChunk = JSON.parse(data);
// an upstream error will be handled gracefully and transmitted as text (throw to transmit as 'error')
if (json.error)
return { text: `[OpenAI Issue] ${json.error.message || json.error}`, close: true };
if (json.choices.length !== 1)
throw new Error(`[OpenAI Issue] Expected 1 completion, got ${json.choices.length}`);
const index = json.choices[0].index;
if (index !== 0 && index !== undefined /* LocalAI hack/workaround until https://github.com/go-skynet/LocalAI/issues/788 */)
throw new Error(`[OpenAI Issue] Expected completion index 0, got ${index}`);
let text = json.choices[0].delta?.content /*|| json.choices[0]?.text*/ || '';
// hack: prepend the model name to the first packet
if (!hasBegun) {
hasBegun = true;
const firstPacket: OpenAI.API.Chat.StreamingFirstResponse = {
model: json.model,
};
text = JSON.stringify(firstPacket) + text;
}
// if there's a warning, log it once
if (json.warning && !hasWarned) {
hasWarned = true;
console.log('/api/llms/stream: OpenAI stream warning:', json.warning);
}
// workaround: LocalAI doesn't send the [DONE] event, but similarly to OpenAI, it sends a "finish_reason" delta update
const close = !!json.choices[0].finish_reason;
return { text, close };
};
}
// Anthropic event stream parser
function parseAnthropicStream(): AIStreamParser {
let hasBegun = false;
return data => {
const json: AnthropicWire.Complete.Response = JSON.parse(data);
let text = json.completion;
// hack: prepend the model name to the first packet
if (!hasBegun) {
hasBegun = true;
const firstPacket: OpenAI.API.Chat.StreamingFirstResponse = {
model: json.model,
};
text = JSON.stringify(firstPacket) + text;
}
return { text, close: false };
};
}
/**
 * Creates a TransformStream that parses Server-Sent Events from an upstream byte
 * stream and re-emits the vendor-parsed text as UTF-8 bytes.
 *
 * Incoming chunks are decoded stream-aware (multi-byte characters split across
 * chunk boundaries are handled), fed to an EventSource parser, and each event's
 * data payload is handed to the vendor-specific parser. The parser's text output
 * is re-encoded and enqueued. The stream terminates on the '[DONE]' sentinel, on
 * a vendor 'close' signal, or on a parse error (after enqueueing a
 * '[Stream Issue]' message so the client sees why it stopped).
 *
 * @param vendorTextParser vendor-specific parser mapping one event's data to { text, close }
 * @returns {TransformStream<Uint8Array, Uint8Array>} TransformStream parsing events (bytes in, bytes out)
 */
export function createEventStreamTransformer(vendorTextParser: AIStreamParser): TransformStream<Uint8Array, Uint8Array> {
  const textDecoder = new TextDecoder();
  const textEncoder = new TextEncoder();
  let eventSourceParser: EventSourceParser;
  return new TransformStream({
    start: (controller): void => {
      eventSourceParser = createEventsourceParser(
        (event: ParsedEvent | ReconnectInterval) => {
          // ignore 'reconnect-interval' and events with no data
          if (event.type !== 'event' || !('data' in event))
            return;
          // event stream termination, close our transformed stream
          if (event.data === '[DONE]') {
            controller.terminate();
            return;
          }
          try {
            const { text, close } = vendorTextParser(event.data);
            if (text)
              controller.enqueue(textEncoder.encode(text));
            if (close)
              controller.terminate();
          } catch (error: any) {
            // surface the parse issue to the client, then stop the stream
            controller.enqueue(textEncoder.encode(`[Stream Issue] ${error?.message || error}`));
            controller.terminate();
          }
        },
      );
    },
    // stream:true because a chunk boundary may fall inside a multi-byte character
    transform: (chunk: Uint8Array) => {
      eventSourceParser.feed(textDecoder.decode(chunk, { stream: true }));
    },
  });
}
// Throws a descriptive Error when the upstream HTTP response is not 2xx; no-op otherwise
async function throwResponseNotOk(response: Response) {
  if (response.ok)
    return;
  // best-effort: include the upstream JSON error body when one is provided
  const errorPayload: object | null = await response.json().catch(() => null);
  const payloadSuffix = errorPayload ? ' · ' + JSON.stringify(errorPayload) : '';
  throw new Error(`${response.status} · ${response.statusText}${payloadSuffix}`);
}
// Returns a ReadableStream that is already closed: a stand-in when upstream has no body
function createEmptyReadableStream(): ReadableStream {
  return new ReadableStream({
    start(controller) {
      controller.close();
    },
  });
}
/**
 * Edge handler: streams a chat completion from an upstream LLM vendor
 * (OpenAI or Anthropic) back to the client as a byte stream.
 *
 * Flow: validate input with the shared tRPC schema → build vendor-specific
 * headers/URL/body and pick the matching stream parser → POST upstream →
 * pipe the SSE body through the vendor transform, with backpressure.
 */
export default async function handler(req: NextRequest): Promise<Response> {
  // inputs - reuse the tRPC schema
  const { vendorId, access, model, history } = chatStreamSchema.parse(await req.json());
  // begin event streaming from the OpenAI API
  let upstreamResponse: Response;
  let vendorStreamParser: AIStreamParser;
  try {
    // prepare the API request data
    let headersUrl: { headers: HeadersInit, url: string };
    let body: object;
    // the switch is exhaustive over the schema's vendorId values, so the three
    // locals above are definitely assigned when we fall through
    switch (vendorId) {
      case 'anthropic':
        headersUrl = anthropicAccess(access as any, '/v1/complete');
        body = anthropicCompletionRequest(model, history, true);
        vendorStreamParser = parseAnthropicStream();
        break;
      case 'openai':
        headersUrl = openAIAccess(access as any, '/v1/chat/completions');
        body = openAIChatCompletionPayload(model, history, null, 1, true);
        vendorStreamParser = parseOpenAIStream();
        break;
    }
    // POST to our API route
    upstreamResponse = await fetch(headersUrl.url, {
      method: 'POST',
      headers: headersUrl.headers,
      body: JSON.stringify(body),
    });
    await throwResponseNotOk(upstreamResponse);
  } catch (error: any) {
    // fold message and optional cause into a single diagnostic string
    const fetchOrVendorError = (error?.message || typeof error === 'string' ? error : JSON.stringify(error)) + (error?.cause ? ' · ' + error.cause : '');
    console.log(`/api/llms/stream: fetch issue: ${fetchOrVendorError}`);
    // NOTE(review): the '[OpenAI Issue]' prefix is used even for the anthropic vendor — confirm intended
    return new NextResponse('[OpenAI Issue] ' + fetchOrVendorError, { status: 500 });
  }
  /* The following code is heavily inspired by the Vercel AI SDK, but simplified to our needs and in full control.
   * This replaces the former (custom) implementation that used to return a ReadableStream directly, and upon start,
   * it was blindly fetching the upstream response and piping it to the client.
   *
   * We now use backpressure, as explained on: https://sdk.vercel.ai/docs/concepts/backpressure-and-cancellation
   *
   * NOTE: we have not benchmarked to see if there is performance impact by using this approach - we do want to have
   * a 'healthy' level of inventory (i.e., pre-buffering) on the pipe to the client.
   */
  const chatResponseStream = (upstreamResponse.body || createEmptyReadableStream())
    .pipeThrough(createEventStreamTransformer(vendorStreamParser));
  return new NextResponse(chatResponseStream, {
    status: 200,
    headers: {
      'Content-Type': 'text/event-stream; charset=utf-8',
    },
  });
}
// noinspection JSUnusedGlobalSymbols
// Next.js route config: run this handler on the Edge runtime (required for response streaming)
export const runtime = 'edge';
+27
View File
@@ -0,0 +1,27 @@
import { NextRequest, NextResponse } from 'next/server';
import { OpenAI } from '@/modules/openai/openai.types';
import { openaiPost, toApiChatRequest, toWireCompletionRequest } from '@/modules/openai/openai.server';
/**
 * Edge handler: performs a single (non-streaming) chat completion against the
 * OpenAI API and returns just the assistant message to the client.
 */
export default async function handler(req: NextRequest) {
  try {
    const { api, ...chatInput } = await toApiChatRequest(await req.json());
    const wireRequest: OpenAI.Wire.Chat.CompletionRequest = toWireCompletionRequest(chatInput, false);
    const wireResponse: OpenAI.Wire.Chat.CompletionResponse = await openaiPost(api, '/v1/chat/completions', wireRequest);
    const apiResponse = {
      message: wireResponse.choices[0].message,
    } satisfies OpenAI.API.Chat.Response;
    return new NextResponse(JSON.stringify(apiResponse));
  } catch (error: any) {
    console.error('Fetch request failed:', error);
    return new NextResponse(`[Issue] ${error}`, { status: 400 });
  }
}
// noinspection JSUnusedGlobalSymbols
// Next.js API route config: deploy this handler on the Edge runtime
export const config = {
  runtime: 'edge',
};
+30
View File
@@ -0,0 +1,30 @@
import { NextRequest, NextResponse } from 'next/server';
import { OpenAI } from '@/modules/openai/openai.types';
import { openaiGet, toApiChatRequest } from '@/modules/openai/openai.server';
/**
 * Edge handler: lists the models available to the given OpenAI credentials,
 * projected down to id/created pairs.
 */
export default async function handler(req: NextRequest): Promise<NextResponse> {
  try {
    // FIXME: this is currently broken, the "extractOpenAIChatInputs" is expecting messages/modelId, which we don't have here
    // keep working on this
    const { api } = await toApiChatRequest(await req.json());
    const wireModels = await openaiGet<OpenAI.Wire.Models.Response>(api, '/v1/models');
    // flatten IDs (most recent first)
    const apiResponse = {
      models: wireModels.data.map(({ id, created }) => ({ id, created })),
    } satisfies OpenAI.API.Models.Response;
    return new NextResponse(JSON.stringify(apiResponse));
  } catch (error: any) {
    console.error('Fetch request failed:', error);
    return new NextResponse(`[Issue] ${error}`, { status: 400 });
  }
}
// noinspection JSUnusedGlobalSymbols
// Next.js API route config: deploy this handler on the Edge runtime
export const config = {
  runtime: 'edge',
};
+117
View File
@@ -0,0 +1,117 @@
import { NextRequest, NextResponse } from 'next/server';
import { createParser } from 'eventsource-parser';
import { OpenAI } from '@/modules/openai/openai.types';
import { openaiPostResponse, toApiChatRequest, toWireCompletionRequest } from '@/modules/openai/openai.server';
/**
 * Streams a chat completion from the OpenAI API and re-emits it to the client.
 *
 * The returned stream first carries a JSON 'first packet' with the model name,
 * then the raw text deltas. On upstream failure it instead returns a stream that
 * emits a single '[OpenAI Issue] …' message and closes.
 *
 * @param input  validated chat request (model, messages, api credentials)
 * @param signal client abort signal, forwarded to the upstream fetch
 */
async function chatStreamRepeater(input: OpenAI.API.Chat.Request, signal: AbortSignal): Promise<ReadableStream> {
  // Handle the abort event when the connection is closed by the client
  signal.addEventListener('abort', () => {
    console.log('Client closed the connection.');
  });
  // begin event streaming from the OpenAI API
  const encoder = new TextEncoder();
  let upstreamResponse: Response;
  try {
    const request: OpenAI.Wire.Chat.CompletionRequest = toWireCompletionRequest(input, true);
    upstreamResponse = await openaiPostResponse(input.api, '/v1/chat/completions', request, signal);
  } catch (error: any) {
    console.log(error);
    // fold message and optional cause into a single diagnostic string
    const message = '[OpenAI Issue] ' + (error?.message || typeof error === 'string' ? error : JSON.stringify(error)) + (error?.cause ? ' · ' + error.cause : '');
    // degrade gracefully: a one-shot stream carrying the error text
    return new ReadableStream({
      start: controller => {
        controller.enqueue(encoder.encode(message));
        controller.close();
      },
    });
  }
  // decoding and re-encoding loop
  const onReadableStreamStart = async (controller: ReadableStreamDefaultController) => {
    let hasBegun = false;
    // stream response (SSE) from OpenAI is split into multiple chunks. this function
    // will parse the event into a text stream, and re-emit it to the client
    const upstreamParser = createParser(event => {
      // ignore reconnect interval
      if (event.type !== 'event')
        return;
      // https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream
      if (event.data === '[DONE]') {
        controller.close();
        return;
      }
      try {
        const json: OpenAI.Wire.Chat.CompletionResponseChunked = JSON.parse(event.data);
        // ignore any 'role' delta update
        if (json.choices[0].delta?.role)
          return;
        // stringify and send the first packet as a JSON object
        if (!hasBegun) {
          hasBegun = true;
          const firstPacket: OpenAI.API.Chat.StreamingFirstResponse = {
            model: json.model,
          };
          controller.enqueue(encoder.encode(JSON.stringify(firstPacket)));
        }
        // transmit the text stream
        const text = json.choices[0].delta?.content || '';
        controller.enqueue(encoder.encode(text));
      } catch (error) {
        // maybe parse error
        console.error('Error parsing OpenAI response', error);
        controller.error(error);
      }
    });
    // https://web.dev/streams/#asynchronous-iteration
    // stream:true on the decoder handles multi-byte characters split across chunks
    const decoder = new TextDecoder();
    for await (const upstreamChunk of upstreamResponse.body as any)
      upstreamParser.feed(decoder.decode(upstreamChunk, { stream: true }));
  };
  return new ReadableStream({
    start: onReadableStreamStart,
    cancel: (reason) => console.log('chatStreamRepeater cancelled', reason),
  });
}
/**
 * Edge handler: validates the chat request and streams the completion back to
 * the client. Client disconnections map to the (nginx-convention) 499 status.
 */
export default async function handler(req: NextRequest): Promise<Response> {
  try {
    const chatRequest: OpenAI.API.Chat.Request = await toApiChatRequest(await req.json());
    const chatResponseStream: ReadableStream = await chatStreamRepeater(chatRequest, req.signal);
    return new NextResponse(chatResponseStream);
  } catch (error: any) {
    if (error.name === 'AbortError') {
      console.log('Fetch request aborted in handler');
      return new Response('Request aborted by the user.', { status: 499 }); // Use 499 status code for client closed request
    }
    if (error.code === 'ECONNRESET') {
      console.log('Connection reset by the client in handler');
      return new Response('Connection reset by the client.', { status: 499 }); // Use 499 status code for client closed request
    }
    console.error('Fetch request failed:', error);
    return new NextResponse(`[Issue] ${error}`, { status: 400 });
  }
}
//noinspection JSUnusedGlobalSymbols
// Next.js API route config: deploy this handler on the Edge runtime
export const config = {
  runtime: 'edge',
};
+88
View File
@@ -0,0 +1,88 @@
// noinspection ExceptionCaughtLocallyJS
import { NextRequest, NextResponse } from 'next/server';
import { Prodia } from '@/modules/prodia/prodia.types';
// Builds the auth header for Prodia API calls; falls back to the server-side env key
export const prodiaHeaders = (apiKey: string): Record<string, string> => {
  const key = apiKey || process.env.PRODIA_API_KEY || '';
  return { 'X-Prodia-Key': key.trim() };
};
// Submits a new image generation job to Prodia; resolves to the created job descriptor
async function createGenerationJob(apiKey: string, jobRequest: Prodia.Wire.Imagine.JobRequest): Promise<Prodia.Wire.Imagine.JobResponse> {
  const headers = {
    ...prodiaHeaders(apiKey),
    'Content-Type': 'application/json',
  };
  const response = await fetch('https://api.prodia.com/v1/job', {
    method: 'POST',
    headers,
    body: JSON.stringify(jobRequest),
  });
  if (response.status === 200)
    return await response.json();
  // log the body for diagnosis, but only surface the status code to the caller
  console.log('Bad Prodia Response:', await response.text());
  throw new Error(`Bad Prodia Response: ${response.status}`);
}
// Polls Prodia for the current state of a previously created job
async function getJobStatus(apiKey: string, jobId: string): Promise<Prodia.Wire.Imagine.JobResponse> {
  const response = await fetch(`https://api.prodia.com/v1/job/${jobId}`, { headers: prodiaHeaders(apiKey) });
  if (response.status === 200)
    return await response.json();
  throw new Error(`Bad Prodia Response: ${response.status}`);
}
/**
 * Edge handler: creates a Prodia image generation job and polls until it
 * succeeds, fails, or a 15s budget is exhausted. Responds with the image URL
 * on success, or a 500 with an error payload otherwise.
 */
export default async function handler(req: NextRequest) {
  // timeout, in seconds
  const timeout = 15;
  const tStart = Date.now();
  try {
    const { apiKey = '', prompt, prodiaModelId, negativePrompt, steps, cfgScale, seed } = (await req.json()) as Prodia.API.Imagine.RequestBody;
    // create the job, getting back a job ID
    // optional fields are only included when set (Prodia applies its own defaults)
    const jobRequest: Prodia.Wire.Imagine.JobRequest = {
      model: prodiaModelId,
      prompt,
      ...(!!cfgScale && { cfg_scale: cfgScale }),
      ...(!!steps && { steps }),
      ...(!!negativePrompt && { negative_prompt: negativePrompt }),
      ...(!!seed && { seed }),
    };
    let job: Prodia.Wire.Imagine.JobResponse = await createGenerationJob(apiKey, jobRequest);
    // poll the job status until it's done
    // the delay halves each round (2000 → 1000 → 500 → 250ms floor)
    let sleepDelay = 2000;
    while (job.status !== 'succeeded' && job.status !== 'failed' && (Date.now() - tStart) < (timeout * 1000)) {
      await new Promise(resolve => setTimeout(resolve, sleepDelay));
      job = await getJobStatus(apiKey, job.job);
      if (sleepDelay > 250)
        sleepDelay /= 2;
    }
    // check for success
    // elapsed seconds, rounded to one decimal
    const elapsed = Math.round((Date.now() - tStart) / 100) / 10;
    if (job.status !== 'succeeded' || !job.imageUrl)
      throw new Error(`Prodia image generation failed within ${elapsed}s`);
    // respond with the image URL
    const altText = `Prodia generated "${jobRequest.prompt}". Options: ${JSON.stringify({ seed: job.params })}.`;
    const response: Prodia.API.Imagine.Response = { status: 'success', imageUrl: job.imageUrl, altText, elapsed };
    return new NextResponse(JSON.stringify(response));
  } catch (error) {
    console.error('Handler failed:', error);
    const elapsed = Math.round((Date.now() - tStart) / 100) / 10;
    const response: Prodia.API.Imagine.Response = { status: 'error', error: error?.toString() || 'Network issue', elapsed };
    return new NextResponse(JSON.stringify(response), { status: 500 });
  }
}
// noinspection JSUnusedGlobalSymbols
// Next.js API route config: deploy this handler on the Edge runtime
export const config = {
  runtime: 'edge',
};
+48
View File
@@ -0,0 +1,48 @@
import { NextRequest, NextResponse } from 'next/server';
import { Prodia } from '@/modules/prodia/prodia.types';
// for lack of an API
// hardcoded catalog of Prodia models; 'priority' (higher first) drives the sort below
const HARDCODED_MODELS: Prodia.API.Models.Response = {
  models: [
    { id: 'sdv1_4.ckpt [7460a6fa]', label: 'Stable Diffusion 1.4', priority: 8 },
    { id: 'v1-5-pruned-emaonly.ckpt [81761151]', label: 'Stable Diffusion 1.5', priority: 9 },
    { id: 'anythingv3_0-pruned.ckpt [2700c435]', label: 'Anything V3.0' },
    { id: 'anything-v4.5-pruned.ckpt [65745d25]', label: 'Anything V4.5' },
    { id: 'analog-diffusion-1.0.ckpt [9ca13f02]', label: 'Analog Diffusion' },
    { id: 'theallys-mix-ii-churned.safetensors [5d9225a4]', label: `TheAlly's Mix II` },
    { id: 'elldreths-vivid-mix.safetensors [342d9d26]', label: `Elldreth's Vivid Mix` },
    { id: 'deliberate_v2.safetensors [10ec4b29]', label: 'Deliberate V2', priority: 5 },
    { id: 'openjourney_V4.ckpt [ca2f377f]', label: 'Openjourney v4' },
    { id: 'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]', label: 'Dreamlike Diffusion' },
    { id: 'dreamlike-diffusion-2.0.safetensors [fdcf65e7]', label: 'Dreamlike Diffusion 2' },
    { id: 'portrait+1.0.safetensors [1400e684]', label: 'Portrait' },
    { id: 'riffusion-model-v1.ckpt [3aafa6fe]', label: 'Riffusion' },
    { id: 'timeless-1.0.ckpt [7c4971d4]', label: 'Timeless' },
    { id: 'dreamshaper_5BakedVae.safetensors [a3fbf318]', label: 'Dreamshaper 5' },
    { id: 'revAnimated_v122.safetensors [3f4fefd9]', label: 'ReV Animated V1.2.2' },
    { id: 'meinamix_meinaV9.safetensors [2ec66ab0]', label: 'MeinaMix Meina V9' },
  ],
};
// sort by priority
// in-place at module load, descending; entries without a priority are treated as 0
HARDCODED_MODELS.models.sort((a, b) => (b.priority ?? 0) - (a.priority ?? 0));
/**
 * Edge handler: returns the (hardcoded) catalog of Prodia models.
 */
export default async function handler(req: NextRequest): Promise<NextResponse> {
  try {
    // parse (and currently ignore) the request body - there's no upstream API yet,
    // but we want to be able to use the apiKey in the future
    const requestBody = (await req.json()) as Prodia.API.Models.RequestBody;
    void requestBody.apiKey;
    return new NextResponse(JSON.stringify(HARDCODED_MODELS));
  } catch (error: any) {
    console.error('Handler failed:', error);
    return new NextResponse(`[Issue] ${error}`, { status: 400 });
  }
}
// noinspection JSUnusedGlobalSymbols
// Next.js API route config: deploy this handler on the Edge runtime
export const config = {
  runtime: 'edge',
};
+50
View File
@@ -0,0 +1,50 @@
// noinspection ExceptionCaughtLocallyJS
import { NextRequest, NextResponse } from 'next/server';
import { PasteGG } from '@/modules/pastegg/pastegg.types';
import { pasteGgPost } from '@/modules/pastegg/pastegg.server';
/**
 * Server-side 'proxy' that uploads a file to paste.gg on behalf of the UI.
 * Exists to avoid CORS issues: the browser cannot post to paste.gg directly.
 */
export default async function handler(req: NextRequest) {
  try {
    const { to, title, fileContent, fileName, origin }: PasteGG.API.Publish.RequestBody = await req.json();
    // only well-formed POSTs targeting paste.gg are accepted
    const validRequest = req.method === 'POST' && to === 'paste.gg' && !!title && !!fileContent && !!fileName;
    if (!validRequest)
      throw new Error('Invalid options');
    const paste = await pasteGgPost(title, fileName, fileContent, origin);
    console.log(`Posted to paste.gg`, paste);
    if (paste?.status !== 'success')
      throw new Error(`${paste?.error || 'Unknown error'}: ${paste?.message || 'Paste.gg Error'}`);
    const successPayload = {
      type: 'success',
      url: `https://paste.gg/${paste.result.id}`,
      expires: paste.result.expires || 'never',
      deletionKey: paste.result.deletion_key || 'none',
      created: paste.result.created_at,
    } satisfies PasteGG.API.Publish.Response;
    return new NextResponse(JSON.stringify(successPayload));
  } catch (error) {
    console.error('Error posting to paste.gg', error);
    const errorPayload = {
      type: 'error',
      error: error?.toString() || 'Network issue',
    } satisfies PasteGG.API.Publish.Response;
    return new NextResponse(JSON.stringify(errorPayload), { status: 500 });
  }
}
// noinspection JSUnusedGlobalSymbols
// Next.js API route config: deploy this handler on the Edge runtime
export const config = {
  runtime: 'edge',
};
+47
View File
@@ -0,0 +1,47 @@
import { NextRequest, NextResponse } from 'next/server';
import { Search } from '@/modules/search/search.types';
import { objectToQueryString } from '@/modules/search/search.client';
/**
 * Edge handler: proxies a Google Custom Search query and returns brief results
 * (title, link, snippet). Credentials come from the query string, falling back
 * to the server-side environment.
 */
export default async function handler(req: NextRequest): Promise<NextResponse> {
  const { searchParams } = new URL(req.url);
  const wireParams: Search.Wire.RequestParams = {
    q: searchParams.get('query') || '',
    cx: searchParams.get('cx') || process.env.GOOGLE_CSE_ID,
    key: searchParams.get('key') || process.env.GOOGLE_CLOUD_API_KEY,
    num: 5,
  };
  try {
    if (!wireParams.key || !wireParams.cx) {
      // noinspection ExceptionCaughtLocallyJS
      throw new Error('Missing API Key or Custom Search Engine ID');
    }
    const wireResponse = await fetch(`https://www.googleapis.com/customsearch/v1?${objectToQueryString(wireParams)}`);
    const data: Search.Wire.SearchResponse & { error?: { message?: string } } = await wireResponse.json();
    if (data.error) {
      // noinspection ExceptionCaughtLocallyJS
      throw new Error(`Google Custom Search API error: ${data.error?.message}`);
    }
    // project each result down to the brief shape the client expects
    const briefResults: Search.API.Response = (data.items || []).map((item): Search.API.BriefResult => ({
      title: item.title,
      link: item.link,
      snippet: item.snippet,
    }));
    return new NextResponse(JSON.stringify(briefResults));
  } catch (error: any) {
    console.error('Handler failed:', error);
    return new NextResponse(`A search error occurred: ${error}`, { status: 500 });
  }
}
// noinspection JSUnusedGlobalSymbols
// Next.js API route config: deploy this handler on the Edge runtime
export const config = {
  runtime: 'edge',
};
-38
View File
@@ -1,38 +0,0 @@
import { NextRequest } from 'next/server';
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
import { appRouter } from '~/modules/trpc/trpc.router';
import { createTRPCContext } from '~/modules/trpc/trpc.server';
/*
// NextJS (traditional, non-edge) API handler
import { createNextApiHandler } from '@trpc/server/adapters/next';
import { createTRPCContext } from '~/modules/trpc/trpc.server';
export default createNextApiHandler({
router: appRouter,
createContext: createTRPCContext,
onError:
process.env.NODE_ENV === 'development'
? ({ path, error }) => console.error(`❌ tRPC failed on ${path ?? '<no-path>'}:`, error)
: undefined,
});
*/
// Edge adapter for the tRPC app router: every /api/trpc/* request funnels through here
export default async function handler(req: NextRequest) {
  // log procedure failures in development only
  const isDev = process.env.NODE_ENV === 'development';
  return fetchRequestHandler({
    endpoint: '/api/trpc',
    router: appRouter,
    req,
    createContext: createTRPCContext,
    onError: isDev
      ? ({ path, error }) => console.error(`❌ tRPC failed on ${path ?? '<no-path>'}:`, error)
      : undefined,
  });
}
// noinspection JSUnusedGlobalSymbols
// Next.js route config: run the tRPC adapter on the Edge runtime
export const runtime = 'edge';
+44 -9
View File
@@ -1,18 +1,53 @@
import * as React from 'react';
import { AppChat } from '../src/apps/chat/AppChat';
import { useShowNewsOnUpdate } from '../src/apps/news/news.hooks';
import { Container, useTheme } from '@mui/joy';
import { AppLayout } from '~/common/layout/AppLayout';
import { NoSSR } from '@/common/components/NoSSR';
import { isValidOpenAIApiKey } from '@/modules/openai/openai.client';
import { useSettingsStore } from '@/common/state/store-settings';
import { Chat } from '../src/apps/chat/Chat';
import { SettingsModal } from '../src/apps/settings/SettingsModal';
export default function HomePage() {
// show the News page on updates
useShowNewsOnUpdate();
export default function Home() {
// state
const [settingsShown, setSettingsShown] = React.useState(false);
// external state
const theme = useTheme();
const apiKey = useSettingsStore(state => state.apiKey);
const centerMode = useSettingsStore(state => state.centerMode);
// show the Settings Dialog at startup if the API key is required but not set
React.useEffect(() => {
if (!process.env.HAS_SERVER_KEY_OPENAI && !isValidOpenAIApiKey(apiKey))
setSettingsShown(true);
}, [apiKey]);
return (
<AppLayout>
<AppChat />
</AppLayout>
/**
* Note the global NoSSR wrapper
* - Even the overall container could have hydration issues when using localStorage and non-default maxWidth
*/
<NoSSR>
<Container maxWidth={centerMode === 'full' ? false : centerMode === 'narrow' ? 'md' : 'xl'} disableGutters sx={{
boxShadow: {
xs: 'none',
md: centerMode === 'narrow' ? theme.vars.shadow.md : 'none',
xl: centerMode !== 'full' ? theme.vars.shadow.lg : 'none',
},
}}>
<Chat onShowSettings={() => setSettingsShown(true)} />
<SettingsModal open={settingsShown} onClose={() => setSettingsShown(false)} />
</Container>
</NoSSR>
);
}
-14
View File
@@ -1,14 +0,0 @@
import * as React from 'react';
import AppLabs from '../src/apps/labs/AppLabs';
import { AppLayout } from '~/common/layout/AppLayout';
// Page shell for the Labs app, wrapped in the shared application layout.
// 'suspendAutoModelsSetup' presumably defers the automatic models setup — confirm in AppLayout
export default function LabsPage() {
  return (
    <AppLayout suspendAutoModelsSetup>
      <AppLabs />
    </AppLayout>
  );
}
-18
View File
@@ -1,18 +0,0 @@
import * as React from 'react';
import AppNews from '../src/apps/news/AppNews';
import { useMarkNewsAsSeen } from '../src/apps/news/news.hooks';
import { AppLayout } from '~/common/layout/AppLayout';
// Page shell for the News app, wrapped in the shared application layout.
export default function NewsPage() {
  // update the last seen news version
  useMarkNewsAsSeen();
  return (
    <AppLayout suspendAutoModelsSetup>
      <AppNews />
    </AppLayout>
  );
}
-14
View File
@@ -1,14 +0,0 @@
import * as React from 'react';
import { AppPersonas } from '../src/apps/personas/AppPersonas';
import { AppLayout } from '~/common/layout/AppLayout';
// Page shell for the Personas app, wrapped in the shared application layout.
export default function HomePage() {
  return (
    <AppLayout>
      <AppPersonas />
    </AppLayout>
  );
}
-144
View File
@@ -1,144 +0,0 @@
import * as React from 'react';
import Image from 'next/image';
import { useRouter } from 'next/router';
import { Alert, Box, Button, CircularProgress, Typography } from '@mui/joy';
import ArrowBackIcon from '@mui/icons-material/ArrowBack';
import { useComposerStore } from '../src/apps/chat/components/composer/store-composer';
// import { callBrowseFetchSinglePage } from '~/modules/browse/browse.client';
import { AppLayout } from '~/common/layout/AppLayout';
import { asValidURL } from '~/common/util/urlUtils';
// App logo (32px favicon) with an optional large spinner overlaid while busy
const LogoProgress = (props: { showProgress: boolean }) =>
  <Box sx={{
    width: 64,
    height: 64,
    position: 'relative',
    display: 'flex',
    alignItems: 'center',
    justifyContent: 'center',
  }}>
    <Box sx={{ position: 'absolute', mt: 0.75 }}>
      <Image src='/icons/favicon-32x32.png' alt='App Logo' width={32} height={32} />
    </Box>
    {props.showProgress && <CircularProgress size='lg' sx={{ position: 'absolute' }} />}
  </Box>;
/**
 * This page will be invoked on mobile when sharing Text/URLs/Files from other APPs
 * Example URL: https://get.big-agi.com/share?title=This+Title&text=https%3A%2F%2Fexample.com%2Fapp%2Fpath
 *
 * Flow: read the share intent from the router query → classify it as URL or
 * plain text → queue it into the Composer and navigate back to the main app.
 */
export default function SharePage() {
  // state
  const [errorMessage, setErrorMessage] = React.useState<string | null>(null);
  const [intentText, setIntentText] = React.useState<string | null>(null);
  const [intentURL, setIntentURL] = React.useState<string | null>(null);
  const [isDownloading, setIsDownloading] = React.useState(false);
  // external state
  const { query, push: routerPush, replace: routerReplace } = useRouter();
  // stashes the text for the Composer and navigates home (replace: keep history clean)
  const queueComposerTextAndLaunchApp = React.useCallback((text: string) => {
    useComposerStore.getState().setStartupText(text);
    routerReplace('/').then(() => null);
  }, [routerReplace]);
  // Detect the share Intent from the query
  React.useEffect(() => {
    // skip when query is not parsed yet
    if (!Object.keys(query).length)
      return;
    // single item from the query
    // 'url' takes precedence over 'text'; array values keep only the first item
    let queryTextItem: string[] | string | null = query.url || query.text || null;
    if (Array.isArray(queryTextItem))
      queryTextItem = queryTextItem[0];
    // check if the item is a URL
    const url = asValidURL(queryTextItem);
    if (url)
      setIntentURL(url);
    else if (queryTextItem)
      setIntentText(queryTextItem);
    else
      setErrorMessage('No text or url. Received: ' + JSON.stringify(query));
  }, [query.url, query.text, query]);
  // Text -> Composer
  React.useEffect(() => {
    if (intentText)
      queueComposerTextAndLaunchApp(intentText);
  }, [intentText, queueComposerTextAndLaunchApp]);
  // URL -> download -> Composer
  React.useEffect(() => {
    if (intentURL) {
      setIsDownloading(true);
      // TEMP: until the Browse module is ready, just use the URL, verbatim
      queueComposerTextAndLaunchApp(intentURL);
      setIsDownloading(false);
      /*callBrowseFetchSinglePage(intentURL)
        .then(pageContent => {
          if (pageContent)
            queueComposerTextAndLaunchApp('\n\n```' + intentURL + '\n' + pageContent + '\n```\n');
          else
            setErrorMessage('Could not read any data');
        })
        .catch(error => setErrorMessage(error?.message || error || 'Unknown error'))
        .finally(() => setIsDownloading(false));*/
    }
  }, [intentURL, queueComposerTextAndLaunchApp]);
  return (
    <AppLayout suspendAutoModelsSetup>
      <Box sx={{
        backgroundColor: 'background.level2',
        display: 'flex', flexDirection: 'column', alignItems: 'center', justifyContent: 'center',
        flexGrow: 1,
      }}>
        {/* Logo with Circular Progress */}
        <LogoProgress showProgress={isDownloading} />
        {/* Title */}
        <Typography level='title-lg' sx={{ mt: 2, mb: 1 }}>
          {isDownloading ? 'Loading...' : errorMessage ? '' : intentURL ? 'Done' : 'Receiving...'}
        </Typography>
        {/* Possible Error */}
        {errorMessage && <>
          <Alert variant='soft' color='danger' sx={{ my: 1 }}>
            <Typography>{errorMessage}</Typography>
          </Alert>
          <Button
            variant='solid' color='danger'
            onClick={() => routerPush('/')}
            endDecorator={<ArrowBackIcon />}
            sx={{ mt: 2 }}
          >
            Cancel
          </Button>
        </>}
        {/* URL under analysis */}
        <Typography level='body-xs'>
          {intentURL}
        </Typography>
      </Box>
    </AppLayout>
  );
}
+4 -14
View File
@@ -1,8 +1,8 @@
{
"name": "big-AGI",
"short_name": "big-AGI",
"theme_color": "#32383E",
"background_color": "#9FA6AD",
"short_name": "AGI",
"theme_color": "#434356",
"background_color": "#B9B9C6",
"description": "Personal AGI App",
"display": "standalone",
"start_url": "/",
@@ -23,15 +23,5 @@
"sizes": "1024x1024",
"type": "image/png"
}
],
"share_target": {
"action": "/share",
"method": "GET",
"enctype": "application/x-www-form-urlencoded",
"params": {
"title": "title",
"text": "text",
"url": "url"
}
}
]
}
+1 -1
View File
File diff suppressed because one or more lines are too long
-276
View File
@@ -1,276 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { CmdRunProdia } from '~/modules/prodia/prodia.client';
import { CmdRunReact } from '~/modules/aifn/react/react';
import { FlattenerModal } from '~/modules/aifn/flatten/FlattenerModal';
import { imaginePromptFromText } from '~/modules/aifn/imagine/imaginePromptFromText';
import { useModelsStore } from '~/modules/llms/store-llms';
import { ConfirmationModal } from '~/common/components/ConfirmationModal';
import { createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
import { useLayoutPluggable } from '~/common/layout/store-applayout';
import { ChatDrawerItems } from './components/applayout/ChatDrawerItems';
import { ChatDropdowns } from './components/applayout/ChatDropdowns';
import { ChatMenuItems } from './components/applayout/ChatMenuItems';
import { ChatMessageList } from './components/ChatMessageList';
import { CmdAddRoleMessage, extractCommands } from './commands';
import { Composer } from './components/composer/Composer';
import { Ephemerals } from './components/Ephemerals';
import { TradeConfig, TradeModal } from './trade/TradeModal';
import { runAssistantUpdatingState } from './editors/chat-stream';
import { runImageGenerationUpdatingState } from './editors/image-generate';
import { runReActUpdatingState } from './editors/react-tangent';
// sentinel conversation id meaning "delete every chat", used by the delete confirmation flow
const SPECIAL_ID_ALL_CHATS = 'all-chats';
// definition of chat modes
export type ChatModeId = 'immediate' | 'immediate-follow-up' | 'react' | 'write-user';
// UI metadata for each chat mode: label and description for the mode picker;
// 'experimental' presumably gates visibility — confirm at the usage site
export const ChatModeItems: { [key in ChatModeId]: { label: string; description: string | React.JSX.Element; experimental?: boolean } } = {
  'immediate': {
    label: 'Chat',
    description: 'AI-powered responses',
  },
  'immediate-follow-up': {
    label: 'Chat & Follow-up',
    description: 'Chat with follow-up questions',
    experimental: true,
  },
  'react': {
    label: 'Reason+Act',
    description: 'Answer your questions with ReAct and search',
  },
  'write-user': {
    label: 'Write',
    description: 'No AI responses',
  },
};
export function AppChat() {
// state
const [chatModeId, setChatModeId] = React.useState<ChatModeId>('immediate');
const [isMessageSelectionMode, setIsMessageSelectionMode] = React.useState(false);
const [tradeConfig, setTradeConfig] = React.useState<TradeConfig | null>(null);
const [clearConfirmationId, setClearConfirmationId] = React.useState<string | null>(null);
const [deleteConfirmationId, setDeleteConfirmationId] = React.useState<string | null>(null);
const [flattenConversationId, setFlattenConversationId] = React.useState<string | null>(null);
// external state
const { activeConversationId, isConversationEmpty, duplicateConversation, deleteAllConversations, setMessages, systemPurposeId, setAutoTitle } = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === state.activeConversationId);
return {
activeConversationId: state.activeConversationId,
isConversationEmpty: conversation ? !conversation.messages.length : true,
// conversationsCount: state.conversations.length,
duplicateConversation: state.duplicateConversation,
deleteAllConversations: state.deleteAllConversations,
setMessages: state.setMessages,
systemPurposeId: conversation?.systemPurposeId ?? null,
setAutoTitle: state.setAutoTitle,
};
}, shallow);
// [0 to 1] create a conversation if there's none active
React.useEffect(() => {
if (!activeConversationId)
useChatStore.getState().conversations.length === 0 && useChatStore.getState().createConversation();
}, [activeConversationId]);
const handleExecuteConversation = async (chatModeId: ChatModeId, conversationId: string, history: DMessage[]) => {
const { chatLLMId } = useModelsStore.getState();
if (!conversationId || !chatLLMId) return;
// /command: overrides the chat mode
const lastMessage = history.length > 0 ? history[history.length - 1] : null;
if (lastMessage?.role === 'user') {
const pieces = extractCommands(lastMessage.text);
if (pieces.length == 2 && pieces[0].type === 'cmd' && pieces[1].type === 'text') {
const command = pieces[0].value;
const prompt = pieces[1].value;
if (CmdRunProdia.includes(command)) {
setMessages(conversationId, history);
return await runImageGenerationUpdatingState(conversationId, prompt);
}
if (CmdRunReact.includes(command) && chatLLMId) {
setMessages(conversationId, history);
return await runReActUpdatingState(conversationId, prompt, chatLLMId);
}
if (CmdAddRoleMessage.includes(command)) {
lastMessage.role = command.startsWith('/s') ? 'system' : command.startsWith('/a') ? 'assistant' : 'user';
lastMessage.sender = 'Bot';
lastMessage.text = prompt;
return setMessages(conversationId, history);
}
}
}
// synchronous long-duration tasks, which update the state as they go
if (chatModeId && chatLLMId && systemPurposeId) {
switch (chatModeId) {
case 'immediate':
case 'immediate-follow-up':
return await runAssistantUpdatingState(conversationId, history, chatLLMId, systemPurposeId, true, chatModeId === 'immediate-follow-up');
case 'react':
if (!lastMessage?.text)
break;
setMessages(conversationId, history);
return await runReActUpdatingState(conversationId, lastMessage.text, chatLLMId);
case 'write-user':
setMessages(conversationId, history);
return;
}
}
// ISSUE: if we're here, it means we couldn't do the job, at least sync the history
console.log('handleExecuteConversation: issue running', conversationId, lastMessage);
setMessages(conversationId, history);
};
const _findConversation = (conversationId: string) =>
conversationId ? useChatStore.getState().conversations.find(c => c.id === conversationId) ?? null : null;
const handleSendUserMessage = async (conversationId: string, userText: string) => {
const conversation = _findConversation(conversationId);
if (conversation)
return await handleExecuteConversation(chatModeId, conversationId, [...conversation.messages, createDMessage('user', userText)]);
};
const handleExecuteChatHistory = async (conversationId: string, history: DMessage[]) =>
await handleExecuteConversation(chatModeId, conversationId, history);
const handleImagineFromText = async (conversationId: string, messageText: string) => {
const conversation = _findConversation(conversationId);
if (conversation) {
const prompt = await imaginePromptFromText(messageText);
if (prompt)
return await handleExecuteConversation('immediate', conversationId, [...conversation.messages, createDMessage('user', `${CmdRunProdia[0]} ${prompt}`)]);
}
};
const handleClearConversation = (conversationId: string) => setClearConfirmationId(conversationId);
const handleConfirmedClearConversation = () => {
if (clearConfirmationId) {
setMessages(clearConfirmationId, []);
setAutoTitle(clearConfirmationId, '');
setClearConfirmationId(null);
}
};
const handleDeleteAllConversations = () => setDeleteConfirmationId(SPECIAL_ID_ALL_CHATS);
const handleConfirmedDeleteConversation = () => {
if (deleteConfirmationId) {
if (deleteConfirmationId === SPECIAL_ID_ALL_CHATS) {
deleteAllConversations();
}// else
// deleteConversation(deleteConfirmationId);
setDeleteConfirmationId(null);
}
};
const handleImportConversation = () => setTradeConfig({ dir: 'import' });
const handleExportConversation = (conversationId: string | null) => setTradeConfig({ dir: 'export', conversationId });
const handleFlattenConversation = (conversationId: string) => setFlattenConversationId(conversationId);
// Pluggable ApplicationBar components
const centerItems = React.useMemo(() =>
<ChatDropdowns conversationId={activeConversationId} />,
[activeConversationId],
);
const drawerItems = React.useMemo(() =>
<ChatDrawerItems
conversationId={activeConversationId}
onImportConversation={handleImportConversation}
onDeleteAllConversations={handleDeleteAllConversations}
/>,
[activeConversationId],
);
const menuItems = React.useMemo(() =>
<ChatMenuItems
conversationId={activeConversationId} isConversationEmpty={isConversationEmpty}
isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
onClearConversation={handleClearConversation}
onDuplicateConversation={duplicateConversation}
onExportConversation={handleExportConversation}
onFlattenConversation={handleFlattenConversation}
/>,
[activeConversationId, duplicateConversation, isConversationEmpty, isMessageSelectionMode],
);
useLayoutPluggable(centerItems, drawerItems, menuItems);
return <>
<ChatMessageList
conversationId={activeConversationId}
isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
onExecuteChatHistory={handleExecuteChatHistory}
onImagineFromText={handleImagineFromText}
sx={{
flexGrow: 1,
backgroundColor: 'background.level1',
overflowY: 'auto', // overflowY: 'hidden'
minHeight: 96,
}} />
<Ephemerals
conversationId={activeConversationId}
sx={{
// flexGrow: 0.1,
flexShrink: 0.5,
overflowY: 'auto',
minHeight: 64,
}} />
<Composer
conversationId={activeConversationId} messageId={null}
chatModeId={chatModeId} setChatModeId={setChatModeId}
isDeveloperMode={systemPurposeId === 'Developer'}
onSendMessage={handleSendUserMessage}
sx={{
zIndex: 21, // position: 'sticky', bottom: 0,
backgroundColor: 'background.surface',
borderTop: `1px solid`,
borderTopColor: 'divider',
p: { xs: 1, md: 2 },
}} />
{/* Import / Export */}
{!!tradeConfig && <TradeModal config={tradeConfig} onClose={() => setTradeConfig(null)} />}
{/* Flatten */}
{!!flattenConversationId && <FlattenerModal conversationId={flattenConversationId} onClose={() => setFlattenConversationId(null)} />}
{/* [confirmation] Reset Conversation */}
{!!clearConfirmationId && <ConfirmationModal
open onClose={() => setClearConfirmationId(null)} onPositive={handleConfirmedClearConversation}
confirmationText={'Are you sure you want to discard all the messages?'} positiveActionText={'Clear conversation'}
/>}
{/* [confirmation] Delete All */}
{!!deleteConfirmationId && <ConfirmationModal
open onClose={() => setDeleteConfirmationId(null)} onPositive={handleConfirmedDeleteConversation}
confirmationText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
? 'Are you absolutely sure you want to delete ALL conversations? This action cannot be undone.'
: 'Are you sure you want to delete this conversation?'}
positiveActionText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
? 'Yes, delete all'
: 'Delete conversation'}
/>}
</>;
}
+193
View File
@@ -0,0 +1,193 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, useTheme } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import { CmdRunProdia } from '@/modules/prodia/prodia.client';
import { CmdRunReact } from '@/modules/search/search.client';
import { PasteGG } from '@/modules/pastegg/pastegg.types';
import { PublishedModal } from '@/modules/pastegg/PublishedModal';
import { callPublish } from '@/modules/pastegg/pastegg.client';
import { ConfirmationModal } from '@/common/components/ConfirmationModal';
import { Link } from '@/common/components/Link';
import { conversationToMarkdown } from '@/common/util/conversationToMarkdown';
import { createDMessage, DMessage, useChatStore } from '@/common/state/store-chats';
import { extractCommands } from '@/common/util/extractCommands';
import { useComposerStore } from '@/common/state/store-composer';
import { useSettingsStore } from '@/common/state/store-settings';
import { ApplicationBar } from './components/appbar/ApplicationBar';
import { ChatMessageList } from './components/ChatMessageList';
import { Composer } from './components/composer/Composer';
import { Ephemerals } from './components/ephemerals/Ephemerals';
import { imaginePromptFromText } from './util/ai-functions';
import { runAssistantUpdatingState } from './util/agi-immediate';
import { runImageGenerationUpdatingState } from './util/imagine';
import { runReActUpdatingState } from './util/agi-react';
/**
 * Chat application screen: binds the active conversation from the chat store to the
 * message list, ephemerals, composer and application bar, and routes outgoing user
 * text to the right execution path (slash-command handlers, ReAct, or the immediate
 * assistant, depending on the composer's send mode).
 */
export function Chat(props: { onShowSettings: () => void, sx?: SxProps }) {

  // state
  const [isMessageSelectionMode, setIsMessageSelectionMode] = React.useState(false);
  const [publishConversationId, setPublishConversationId] = React.useState<string | null>(null);
  const [publishResponse, setPublishResponse] = React.useState<PasteGG.API.Publish.Response | null>(null);

  // external state
  const theme = useTheme();
  const { sendModeId } = useComposerStore(state => ({ sendModeId: state.sendModeId }), shallow);
  // model and purpose are read from the *active* conversation (null when none is active)
  const { activeConversationId, setMessages, chatModelId, systemPurposeId } = useChatStore(state => {
    const conversation = state.conversations.find(conversation => conversation.id === state.activeConversationId);
    return {
      activeConversationId: state.activeConversationId,
      setMessages: state.setMessages,
      chatModelId: conversation?.chatModelId ?? null,
      systemPurposeId: conversation?.systemPurposeId ?? null,
    };
  }, shallow);

  /**
   * Executes the given message history on a conversation.
   * Priority: 1) a trailing user slash-command (image generation / ReAct),
   * 2) the composer's send mode; in every remaining case the history is at
   * least written back to the store so the UI stays in sync.
   */
  const handleExecuteConversation = async (conversationId: string, history: DMessage[]) => {
    if (!conversationId) return;

    // Command - last user message is a cmd
    const lastMessage = history.length > 0 ? history[history.length - 1] : null;
    if (lastMessage?.role === 'user') {
      const pieces = extractCommands(lastMessage.text);
      // a valid command is exactly one 'cmd' piece followed by one 'text' piece
      if (pieces.length == 2 && pieces[0].type === 'cmd' && pieces[1].type === 'text') {
        const command = pieces[0].value;
        const prompt = pieces[1].value;
        if (CmdRunProdia.includes(command)) {
          setMessages(conversationId, history);
          return await runImageGenerationUpdatingState(conversationId, prompt);
        }
        if (CmdRunReact.includes(command) && chatModelId) {
          setMessages(conversationId, history);
          return await runReActUpdatingState(conversationId, prompt, chatModelId);
        }
        // if (CmdRunSearch.includes(command))
        //  return await run...
      }
    }

    // synchronous long-duration tasks, which update the state as they go
    if (sendModeId && chatModelId && systemPurposeId) {
      switch (sendModeId) {
        case 'immediate':
          return await runAssistantUpdatingState(conversationId, history, chatModelId, systemPurposeId);
        case 'react':
          if (lastMessage?.text) {
            setMessages(conversationId, history);
            return await runReActUpdatingState(conversationId, lastMessage.text, chatModelId);
          }
      }
    }

    // ISSUE: if we're here, it means we couldn't do the job, at least sync the history
    setMessages(conversationId, history);
  };

  // resolves a conversation id to the live store object, or null if not found
  const _findConversation = (conversationId: string) =>
    conversationId ? useChatStore.getState().conversations.find(c => c.id === conversationId) ?? null : null;

  // appends the user's text to the conversation and executes the new history
  const handleSendUserMessage = async (conversationId: string, userText: string) => {
    const conversation = _findConversation(conversationId);
    if (conversation)
      return await handleExecuteConversation(conversationId, [...conversation.messages, createDMessage('user', userText)]);
  };

  // turns a message's text into an image prompt, then re-enters execution as a Prodia command
  const handleImagineFromText = async (conversationId: string, messageText: string) => {
    const conversation = _findConversation(conversationId);
    if (conversation && chatModelId) {
      const prompt = await imaginePromptFromText(messageText, chatModelId);
      if (prompt)
        return await handleExecuteConversation(conversationId, [...conversation.messages, createDMessage('user', `${CmdRunProdia[0]} ${prompt}`)]);
    }
  };

  // opens the publish confirmation dialog for the given conversation
  const handlePublishConversation = (conversationId: string) => setPublishConversationId(conversationId);

  // uploads the conversation (as markdown) to paste.gg after the user confirmed
  const handleConfirmedPublishConversation = async () => {
    if (publishConversationId) {
      const conversation = _findConversation(publishConversationId);
      setPublishConversationId(null); // close the dialog before the (slow) upload
      if (conversation) {
        const markdownContent = conversationToMarkdown(conversation, !useSettingsStore.getState().showSystemMessages);
        const publishResponse = await callPublish('paste.gg', markdownContent);
        setPublishResponse(publishResponse);
      }
    }
  };

  return (
    <Box
      sx={{
        display: 'flex', flexDirection: 'column', height: '100vh',
        ...(props.sx || {}),
      }}>

      <ApplicationBar
        conversationId={activeConversationId}
        isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
        onPublishConversation={handlePublishConversation}
        onShowSettings={props.onShowSettings}
        sx={{
          zIndex: 20, // position: 'sticky', top: 0,
          // ...(process.env.NODE_ENV === 'development' ? { background: theme.vars.palette.danger.solidBg } : {}),
        }} />

      <ChatMessageList
        conversationId={activeConversationId}
        isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
        onExecuteConversation={handleExecuteConversation}
        onImagineFromText={handleImagineFromText}
        sx={{
          flexGrow: 1,
          background: theme.vars.palette.background.level2,
          overflowY: 'auto', // overflowY: 'hidden'
          minHeight: 96,
        }} />

      <Ephemerals
        conversationId={activeConversationId}
        sx={{
          // flexGrow: 0.1,
          flexShrink: 0.5,
          overflowY: 'auto',
          minHeight: 64,
        }} />

      <Composer
        conversationId={activeConversationId} messageId={null}
        isDeveloperMode={systemPurposeId === 'Developer'}
        onSendMessage={handleSendUserMessage}
        sx={{
          zIndex: 21, // position: 'sticky', bottom: 0,
          background: theme.vars.palette.background.surface,
          borderTop: `1px solid ${theme.vars.palette.divider}`,
          p: { xs: 1, md: 2 },
        }} />

      {/* Confirmation for Publishing */}
      <ConfirmationModal
        open={!!publishConversationId} onClose={() => setPublishConversationId(null)} onPositive={handleConfirmedPublishConversation}
        confirmationText={<>
          Share your conversation anonymously on <Link href='https://paste.gg' target='_blank'>paste.gg</Link>?
          It will be unlisted and available to share and read for 30 days. Keep in mind, deletion may not be possible.
          Are you sure you want to proceed?
        </>} positiveActionText={'Understood, upload to paste.gg'}
      />

      {/* Show the Published details */}
      {!!publishResponse && (
        <PublishedModal open onClose={() => setPublishResponse(null)} response={publishResponse} />
      )}

    </Box>
  );
}
+21 -38
View File
@@ -4,40 +4,29 @@ import { shallow } from 'zustand/shallow';
import { Box, List } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import { useChatLLM } from '~/modules/llms/store-llms';
import { createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
import { useUIPreferencesStore } from '~/common/state/store-ui';
import { createDMessage, DMessage, useChatStore } from '@/common/state/store-chats';
import { useSettingsStore } from '@/common/state/store-settings';
import { ChatMessage } from './message/ChatMessage';
import { CleanerMessage, MessagesSelectionHeader } from './message/CleanerMessage';
import { PersonaSelector } from './persona-selector/PersonaSelector';
import { ChatMessageSelectable, MessagesSelectionHeader } from './message/ChatMessageSelectable';
import { PurposeSelector } from './PurposeSelector';
/**
* A list of ChatMessages
*/
export function ChatMessageList(props: {
conversationId: string | null,
isMessageSelectionMode: boolean, setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void,
onExecuteChatHistory: (conversationId: string, history: DMessage[]) => void,
onImagineFromText: (conversationId: string, userText: string) => void,
sx?: SxProps
}) {
export function ChatMessageList(props: { conversationId: string | null, isMessageSelectionMode: boolean, setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void, onExecuteConversation: (conversationId: string, history: DMessage[]) => void, onImagineFromText: (conversationId: string, userText: string) => void, sx?: SxProps }) {
// state
const [selectedMessages, setSelectedMessages] = React.useState<Set<string>>(new Set());
// external state
const showSystemMessages = useUIPreferencesStore(state => state.showSystemMessages);
const { messages, editMessage, deleteMessage, historyTokenCount } = useChatStore(state => {
const showSystemMessages = useSettingsStore(state => state.showSystemMessages);
const { editMessage, deleteMessage } = useChatStore(state => ({ editMessage: state.editMessage, deleteMessage: state.deleteMessage }), shallow);
const messages = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
return {
messages: conversation ? conversation.messages : [],
editMessage: state.editMessage, deleteMessage: state.deleteMessage,
historyTokenCount: conversation ? conversation.tokenCount : 0,
};
return conversation ? conversation.messages : [];
}, shallow);
const { chatLLM } = useChatLLM();
const handleMessageDelete = (messageId: string) =>
props.conversationId && deleteMessage(props.conversationId, messageId);
@@ -50,11 +39,11 @@ export function ChatMessageList(props: {
const handleRestartFromMessage = (messageId: string, offset: number) => {
const truncatedHistory = messages.slice(0, messages.findIndex(m => m.id === messageId) + offset + 1);
props.conversationId && props.onExecuteChatHistory(props.conversationId, truncatedHistory);
props.conversationId && props.onExecuteConversation(props.conversationId, truncatedHistory);
};
const handleRunExample = (text: string) =>
props.conversationId && props.onExecuteChatHistory(props.conversationId, [...messages, createDMessage('user', text)]);
props.conversationId && props.onExecuteConversation(props.conversationId, [...messages, createDMessage('user', text)]);
// hide system messages if the user chooses so
@@ -65,7 +54,7 @@ export function ChatMessageList(props: {
if (!filteredMessages.length)
return props.conversationId ? (
<Box sx={props.sx || {}}>
<PersonaSelector conversationId={props.conversationId} runExample={handleRunExample} />
<PurposeSelector conversationId={props.conversationId} runExample={handleRunExample} />
</Box>
) : null;
@@ -79,14 +68,14 @@ export function ChatMessageList(props: {
const handleSelectAllMessages = (selected: boolean) => {
const newSelected = new Set<string>();
if (selected)
for (const message of messages)
for (let message of messages)
newSelected.add(message.id);
setSelectedMessages(newSelected);
};
const handleDeleteSelectedMessages = () => {
if (props.conversationId)
for (const selectedMessage of selectedMessages)
for (let selectedMessage of selectedMessages)
deleteMessage(props.conversationId, selectedMessage);
setSelectedMessages(new Set());
};
@@ -97,15 +86,15 @@ export function ChatMessageList(props: {
// '&::-webkit-scrollbar': {
// md: {
// width: 8,
// background: theme.palette.neutral.plainHoverBg,
// background: theme.vars.palette.neutral.plainHoverBg,
// },
// },
// '&::-webkit-scrollbar-thumb': {
// background: theme.palette.neutral.solidBg,
// background: theme.vars.palette.neutral.solidBg,
// borderRadius: 6,
// },
// '&::-webkit-scrollbar-thumb:hover': {
// background: theme.palette.neutral.solidHoverBg,
// background: theme.vars.palette.neutral.solidHoverBg,
// },
// };
@@ -120,24 +109,19 @@ export function ChatMessageList(props: {
{filteredMessages.map((message, idx) =>
props.isMessageSelectionMode ? (
<CleanerMessage
<ChatMessageSelectable
key={'sel-' + message.id} message={message}
isBottom={idx === 0} remainingTokens={(chatLLM ? chatLLM.contextTokens : 0) - historyTokenCount}
isBottom={idx === 0}
selected={selectedMessages.has(message.id)} onToggleSelected={handleToggleSelected}
/>
) : (
<ChatMessage
key={'msg-' + message.id} message={message}
isBottom={idx === 0}
onMessageDelete={() => handleMessageDelete(message.id)}
onMessageEdit={newText => handleMessageEdit(message.id, newText)}
onMessageRunFrom={(offset: number) => handleRestartFromMessage(message.id, offset)}
onImagine={handleImagineFromText}
/>
onImagine={handleImagineFromText} />
),
)}
@@ -146,7 +130,6 @@ export function ChatMessageList(props: {
<MessagesSelectionHeader
hasSelected={selectedMessages.size > 0}
isBottom={filteredMessages.length === 0}
sumTokens={historyTokenCount}
onClose={() => props.setIsMessageSelectionMode(false)}
onSelectAll={handleSelectAllMessages}
onDeleteMessages={handleDeleteSelectedMessages}
@@ -0,0 +1,227 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, Button, Checkbox, Grid, IconButton, Input, Stack, Textarea, Typography, useTheme } from '@mui/joy';
import ClearIcon from '@mui/icons-material/Clear';
import SearchIcon from '@mui/icons-material/Search';
import { SystemPurposeId, SystemPurposes } from '../../../data';
import { useChatStore } from '@/common/state/store-chats';
import { usePurposeStore } from '@/common/state/store-purposes';
import { useSettingsStore } from '@/common/state/store-settings';
// Constants for tile sizes / grid width - breakpoints need to be computed here to work around
// the "flex box cannot shrink over wrapped content" issue
//
// Absolutely dislike this workaround, but it's the only way I found to make it work
const bpTileSize = { xs: 116, md: 125, xl: 130 };  // tile side length (px) per breakpoint
const tileCols = [3, 4, 6];                        // grid columns per breakpoint, same order as the bpTileSize keys
const tileSpacing = 1;                             // gap between tiles, in MUI spacing units (1 unit = 8px below)
// max grid width per breakpoint: columns * (tile + gap), minus the gap trailing the last column
const bpMaxWidth = Object.entries(bpTileSize).reduce((acc, [key, value], index) => {
  acc[key] = tileCols[index] * (value + 8 * tileSpacing) - 8 * tileSpacing;
  return acc;
}, {} as Record<string, number>);
const bpTileGap = { xs: 2, md: 3 };                // internal gap of each tile's content, per breakpoint
/**
 * Returns a uniformly random element of `array`, or `undefined` when the array is empty.
 * Accepts readonly arrays, so `as const` tuples (e.g. purpose example lists) work too.
 */
const getRandomElement = <T, >(array: readonly T[]): T | undefined =>
  array.length > 0 ? array[Math.floor(Math.random() * array.length)] : undefined;
/**
 * Purpose selector for the current chat. Clicking on any item activates it for the current chat.
 *
 * Renders a grid of "purpose" tiles (optionally filtered by the search box), an
 * edit mode to show/hide tiles, a random example line for the selected purpose,
 * and a free-text system-message editor for the 'Custom' purpose.
 */
export function PurposeSelector(props: { conversationId: string, runExample: (example: string) => void }) {

  // state
  const [searchQuery, setSearchQuery] = React.useState('');
  const [filteredIDs, setFilteredIDs] = React.useState<SystemPurposeId[] | null>(null); // null = no filter active
  const [editMode, setEditMode] = React.useState(false);

  // external state
  const theme = useTheme();
  const showPurposeFinder = useSettingsStore(state => state.showPurposeFinder);
  const { systemPurposeId, setSystemPurposeId } = useChatStore(state => {
    const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
    return {
      systemPurposeId: conversation ? conversation.systemPurposeId : null,
      setSystemPurposeId: conversation ? state.setSystemPurposeId : null,
    };
  }, shallow);
  const { hiddenPurposeIDs, toggleHiddenPurposeId } = usePurposeStore(state => ({ hiddenPurposeIDs: state.hiddenPurposeIDs, toggleHiddenPurposeId: state.toggleHiddenPurposeId }), shallow);

  // safety check - shouldn't happen
  if (!systemPurposeId || !setSystemPurposeId)
    return null;

  // resets the search box and removes the filter
  const handleSearchClear = () => {
    setSearchQuery('');
    setFilteredIDs(null);
  };

  // filters purposes by title/description as the user types (case-insensitive)
  const handleSearchOnChange = (e: React.ChangeEvent<HTMLInputElement>) => {
    const query = e.target.value;
    if (!query)
      return handleSearchClear();
    setSearchQuery(query);

    // Filter results based on search term
    const ids = Object.keys(SystemPurposes)
      .filter(key => SystemPurposes.hasOwnProperty(key))
      .filter(key => {
        const purpose = SystemPurposes[key as SystemPurposeId];
        return purpose.title.toLowerCase().includes(query.toLowerCase())
          || (typeof purpose.description === 'string' && purpose.description.toLowerCase().includes(query.toLowerCase()));
      });
    setFilteredIDs(ids as SystemPurposeId[]);

    // If there's a search term, activate the first item
    if (ids.length && !ids.includes(systemPurposeId))
      handlePurposeChanged(ids[0] as SystemPurposeId);
  };

  // Escape clears the search
  const handleSearchOnKeyDown = (e: React.KeyboardEvent<HTMLInputElement>): void => {
    if (e.key == 'Escape')
      handleSearchClear();
  };

  const toggleEditMode = () => setEditMode(!editMode);

  // persists the new purpose on the conversation in the chat store
  const handlePurposeChanged = (purposeId: SystemPurposeId | null) => {
    if (purposeId)
      setSystemPurposeId(props.conversationId, purposeId);
  };

  const handleCustomSystemMessageChange = (v: React.ChangeEvent<HTMLTextAreaElement>): void => {
    // TODO: persist this change? Right now it's reset every time.
    // maybe we shall have a "save" button just save on a state to persist between sessions
    // NOTE(review): this mutates the shared SystemPurposes module object in place
    SystemPurposes['Custom'].systemMessage = v.target.value;
  };

  // we show them all if the filter is clear (null)
  const unfilteredPurposeIDs = (filteredIDs && showPurposeFinder) ? filteredIDs : Object.keys(SystemPurposes);
  // edit mode shows hidden purposes too (so they can be re-enabled)
  const purposeIDs = editMode ? unfilteredPurposeIDs : unfilteredPurposeIDs.filter(id => !hiddenPurposeIDs.includes(id));

  const selectedPurpose = purposeIDs.length ? (SystemPurposes[systemPurposeId] ?? null) : null;
  // a random example for the selected purpose, re-picked on every render
  const selectedExample = selectedPurpose?.examples && getRandomElement(selectedPurpose.examples) || null;

  return <>

    {/* optional search box, gated by the 'showPurposeFinder' setting */}
    {showPurposeFinder && <Box sx={{ p: 2 * tileSpacing }}>
      <Input
        fullWidth
        variant='outlined' color='neutral'
        value={searchQuery} onChange={handleSearchOnChange}
        onKeyDown={handleSearchOnKeyDown}
        placeholder='Search for purpose…'
        startDecorator={<SearchIcon />}
        endDecorator={searchQuery && (
          <IconButton variant='plain' color='neutral' onClick={handleSearchClear}>
            <ClearIcon />
          </IconButton>
        )}
        sx={{
          boxShadow: theme.vars.shadow.sm,
        }}
      />
    </Box>}

    <Stack direction='column' sx={{ minHeight: '60vh', justifyContent: 'center', alignItems: 'center' }}>

      <Box sx={{ maxWidth: bpMaxWidth }}>

        <Box sx={{ display: 'flex', flexDirection: 'row', alignItems: 'baseline', justifyContent: 'space-between', gap: 2, mb: 1 }}>
          <Typography level='body2' color='neutral'>
            Select an AI purpose
          </Typography>
          <Button variant='plain' color='neutral' size='sm' onClick={toggleEditMode}>
            {editMode ? 'Done' : 'Edit'}
          </Button>
        </Box>

        {/* the grid of purpose tiles; in edit mode each tile shows a show/hide checkbox */}
        <Grid container spacing={tileSpacing} sx={{ justifyContent: 'flex-start' }}>
          {purposeIDs.map((spId) => (
            <Grid key={spId}>
              <Button
                variant={(!editMode && systemPurposeId === spId) ? 'solid' : 'soft'}
                color={(!editMode && systemPurposeId === spId) ? 'primary' : SystemPurposes[spId as SystemPurposeId]?.highlighted ? 'warning' : 'neutral'}
                onClick={() => !editMode && handlePurposeChanged(spId as SystemPurposeId)}
                sx={{
                  flexDirection: 'column',
                  fontWeight: 500,
                  gap: bpTileGap,
                  height: bpTileSize,
                  width: bpTileSize,
                  ...((editMode || systemPurposeId !== spId) ? {
                    boxShadow: theme.vars.shadow.md,
                    ...(SystemPurposes[spId as SystemPurposeId]?.highlighted ? {} : { background: theme.vars.palette.background.level1 }),
                  } : {}),
                }}
              >
                {editMode && (
                  <Checkbox
                    label={<Typography level='body2'>show</Typography>}
                    checked={!hiddenPurposeIDs.includes(spId)} onChange={() => toggleHiddenPurposeId(spId)}
                    sx={{ alignSelf: 'flex-start' }}
                  />
                )}
                <div style={{ fontSize: '2rem' }}>
                  {SystemPurposes[spId as SystemPurposeId]?.symbol}
                </div>
                <div>
                  {SystemPurposes[spId as SystemPurposeId]?.title}
                </div>
              </Button>
            </Grid>
          ))}
        </Grid>

        {/* description, or a runnable random example, of the selected purpose */}
        <Typography
          level='body2'
          sx={{
            mt: selectedExample ? 1 : 3,
            display: 'flex', alignItems: 'center', gap: 1,
            // justifyContent: 'center',
            '&:hover > button': { opacity: 1 },
          }}>
          {!selectedPurpose
            ? 'Oops! No AI purposes found for your search.'
            : (selectedExample
                ? <>
                  <i>{selectedExample}</i>
                  <IconButton
                    variant='plain' color='neutral' size='md'
                    onClick={() => props.runExample(selectedExample)}
                    sx={{ opacity: 0, transition: 'opacity 0.3s' }}
                  >
                    💬
                  </IconButton>
                </>
                : selectedPurpose.description
            )}
        </Typography>

        {/* free-text system message, only for the 'Custom' purpose */}
        {systemPurposeId === 'Custom' && (
          <Textarea
            variant='outlined' autoFocus placeholder={'Craft your custom system message here…'}
            minRows={3}
            defaultValue={SystemPurposes['Custom']?.systemMessage} onChange={handleCustomSystemMessageChange}
            sx={{
              background: theme.vars.palette.background.level1,
              lineHeight: 1.75,
              mt: 1,
            }} />
        )}

      </Box>

    </Stack>

  </>;
}
@@ -0,0 +1,47 @@
import * as React from 'react';
import { Option, Select } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import KeyboardArrowDownIcon from '@mui/icons-material/KeyboardArrowDown';
/**
* A Select component that blends-in nicely (cleaner, easier to the eyes)
*/
export const AppBarDropdown = <TValue extends string>(props: { value: TValue, items: Record<string, { title: string }>, onChange: (event: any, value: TValue | null) => void, sx?: SxProps }) =>
<Select
variant='solid' color='neutral' size='md'
value={props.value} onChange={props.onChange}
indicator={<KeyboardArrowDownIcon />}
slotProps={{
root: {
sx: {
backgroundColor: 'transparent',
},
},
listbox: {
variant: 'plain', color: 'neutral', size: 'lg',
disablePortal: false,
sx: {
minWidth: 160,
},
},
indicator: {
sx: {
opacity: 0.5,
},
},
}}
sx={{
mx: 0,
/*fontFamily: theme.vars.fontFamily.code,*/
fontWeight: 500,
...(props.sx || {}),
}}
>
{Object.keys(props.items).map((key: string) => (
<Option key={key} value={key}>
{props.items[key].title}
</Option>
))}
</Select>;
@@ -0,0 +1,31 @@
import * as React from 'react';
import { AppBarDropdown } from './AppBarDropdown';
import { SxProps } from '@mui/joy/styles/types';
/**
 * Wrapper for AppBarDropdown that adds a symbol in front of each item's title
 * (e.g. '🧠 Developer'); items with an empty symbol keep their plain title.
 */
type Props<TValue extends string> = {
  value: TValue;
  items: Record<string, { title: string, symbol: string }>;
  onChange: (event: any, value: TValue | null) => void;
  sx?: SxProps;
};

export const AppBarDropdownWithSymbol = <TValue extends string>({ value, items, onChange, sx }: Props<TValue>) => {
  // decorate titles in a single pass (no intermediate array + second mapping);
  // `symbol ? …` replaces the redundant `!!symbol ? …` double negation
  const itemsWithSymbol = Object.fromEntries(
    Object.entries(items).map(([key, { title, symbol }]) =>
      [key, { title: symbol ? `${symbol} ${title}` : title }]),
  );
  return (
    <AppBarDropdown
      value={value}
      items={itemsWithSymbol}
      onChange={onChange}
      sx={sx}
    />
  );
};
@@ -0,0 +1,356 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { signIn, signOut, useSession } from 'next-auth/react';
import { Badge, Box, Button, IconButton, ListDivider, ListItem, ListItemDecorator, Menu, MenuItem, Sheet, Stack, SvgIcon, Switch, Typography, useColorScheme, useTheme } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import CheckBoxOutlineBlankOutlinedIcon from '@mui/icons-material/CheckBoxOutlineBlankOutlined';
import CheckBoxOutlinedIcon from '@mui/icons-material/CheckBoxOutlined';
import ClearIcon from '@mui/icons-material/Clear';
import DarkModeIcon from '@mui/icons-material/DarkMode';
import ExitToAppIcon from '@mui/icons-material/ExitToApp';
import FileDownloadIcon from '@mui/icons-material/FileDownload';
import GitHubIcon from '@mui/icons-material/GitHub';
import LoginIcon from '@mui/icons-material/Login';
import LogoutIcon from '@mui/icons-material/Logout';
import MenuIcon from '@mui/icons-material/Menu';
import MoreVertIcon from '@mui/icons-material/MoreVert';
import SettingsOutlinedIcon from '@mui/icons-material/SettingsOutlined';
import SettingsSuggestIcon from '@mui/icons-material/SettingsSuggest';
import { buildTimeAuthEnabled } from '@/modules/authentication/auth.client';
import { Brand } from '@/common/brand';
import { ChatModelId, ChatModels, SystemPurposeId, SystemPurposes } from '../../../../data';
import { ConfirmationModal } from '@/common/components/ConfirmationModal';
import { Link } from '@/common/components/Link';
import { cssRainbowColorKeyframes } from '@/common/theme';
import { downloadConversationJson, restoreConversationFromJson, useChatStore } from '@/common/state/store-chats';
import { useSettingsStore } from '@/common/state/store-settings';
import { AppBarDropdown } from './AppBarDropdown';
import { AppBarDropdownWithSymbol } from './AppBarDropdownWithSymbol';
import { ImportedModal, ImportedOutcome } from './ImportedModal';
import { PagesMenu } from './PagesMenu';
// missing from MUI, using Tabler for Discord
// FIX: 'stroke-linecap'/'stroke-linejoin' are not valid JSX DOM props — React requires
// the camelCase forms (strokeLinecap/strokeLinejoin), as already used by strokeWidth below
function DiscordIcon(props: { sx?: SxProps }) {
  return <SvgIcon viewBox='0 0 24 24' width='24' height='24' stroke='currentColor' fill='none' strokeLinecap='round' strokeLinejoin='round' {...props}>
    <path stroke='none' d='M0 0h24v24H0z' fill='none'></path>
    <path d='M14.983 3l.123 .006c2.014 .214 3.527 .672 4.966 1.673a1 1 0 0 1 .371 .488c1.876 5.315 2.373 9.987 1.451 12.28c-1.003 2.005 -2.606 3.553 -4.394 3.553c-.94 0 -2.257 -1.596 -2.777 -2.969l-.02 .005c.838 -.131 1.69 -.323 2.572 -.574a1 1 0 1 0 -.55 -1.924c-3.32 .95 -6.13 .95 -9.45 0a1 1 0 0 0 -.55 1.924c.725 .207 1.431 .373 2.126 .499l.444 .074c-.477 1.37 -1.695 2.965 -2.627 2.965c-1.743 0 -3.276 -1.555 -4.267 -3.644c-.841 -2.206 -.369 -6.868 1.414 -12.174a1 1 0 0 1 .358 -.49c1.392 -1.016 2.807 -1.475 4.717 -1.685a1 1 0 0 1 .938 .435l.063 .107l.652 1.288l.16 -.019c.877 -.09 1.718 -.09 2.595 0l.158 .019l.65 -1.287a1 1 0 0 1 .754 -.54l.123 -.01zm-5.983 6a2 2 0 0 0 -1.977 1.697l-.018 .154l-.005 .149l.005 .15a2 2 0 1 0 1.995 -2.15zm6 0a2 2 0 0 0 -1.977 1.697l-.018 .154l-.005 .149l.005 .15a2 2 0 1 0 1.995 -2.15z' strokeWidth='0' fill='currentColor'></path>
  </SvgIcon>;
}
function BringTheLove(props: { text: string, link: string, icon: JSX.Element }) {
const [loved, setLoved] = React.useState(false);
const icon = loved ? '❤️' : props.icon; // '❤️' : '🤍';
return <Button
color='neutral'
component={Link} noLinkStyle href={props.link} target='_blank'
onClick={() => setLoved(true)}
endDecorator={icon}
sx={{
background: 'transparent',
// '&:hover': { background: props.theme.palette.neutral.solidBg },
'&:hover': { animation: `${cssRainbowColorKeyframes} 5s linear infinite` },
}}>
{props.text}
</Button>;
}
/**
 * Footer row of the actions menu: shows the product name next to the
 * Discord and GitHub support links.
 */
function SupportItem() {
  const theme = useTheme();
  const dimmedColor = theme.palette.neutral.plainDisabledColor;
  const linkIconColor = '';

  // the outbound support links, rendered via BringTheLove below
  const loveLinks = [
    { text: 'Discord', icon: <DiscordIcon sx={{ color: linkIconColor }} />, link: Brand.URIs.SupportInvite },
    { text: 'GitHub', icon: <GitHubIcon sx={{ color: linkIconColor }} />, link: Brand.URIs.OpenRepo },
  ];

  return (
    <ListItem
      variant='solid' color='neutral'
      sx={{
        mb: -1, // absorb the bottom margin of the list
        mt: 1,
        // background: theme.palette.neutral.solidActiveBg,
        display: 'flex', flexDirection: 'row', gap: 1,
        justifyContent: 'space-between',
      }}>
      <Box
        sx={{
          mx: { xs: 1, sm: 2 },
          fontWeight: 600,
          color: dimmedColor,
        }}>
        {Brand.Meta.SiteName}
      </Box>
      {loveLinks.map(({ text, icon, link }) =>
        <BringTheLove key={text} text={text} icon={icon} link={link} />)}
    </ListItem>
  );
}
/**
 * The top bar of the application, with the model and purpose selection, and menu/settings icons.
 *
 * Also owns everything anchored to the bar: the Pages menu (left), the Actions menu (right),
 * the clear-conversation confirmation modal, the import-outcome modal, and the hidden
 * file input used to import conversation JSON files.
 */
export function ApplicationBar(props: {
  conversationId: string | null;
  isMessageSelectionMode: boolean; setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void;
  onPublishConversation: (conversationId: string) => void;
  onShowSettings: () => void;
  sx?: SxProps
}) {

  // state: anchor elements for the two menus, plus the ids/outcomes driving the modals
  const [actionsMenuAnchor, setActionsMenuAnchor] = React.useState<HTMLElement | null>(null);
  const [pagesMenuAnchor, setPagesMenuAnchor] = React.useState<HTMLElement | null>(null);
  const [clearConfirmationId, setClearConfirmationId] = React.useState<string | null>(null);
  const [conversationImportOutcome, setConversationImportOutcome] = React.useState<ImportedOutcome | null>(null);
  const conversationFileInputRef = React.useRef<HTMLInputElement>(null);

  // center buttons

  // NextAuth session; only rendered when buildTimeAuthEnabled is set (see sign in/out below)
  const { data: authSession } = useSession();

  const handleChatModelChange = (event: any, value: ChatModelId | null) =>
    value && props.conversationId && setChatModelId(props.conversationId, value);

  const handleSystemPurposeChange = (event: any, value: SystemPurposeId | null) =>
    value && props.conversationId && setSystemPurposeId(props.conversationId, value);

  // quick actions

  const closeActionsMenu = () => setActionsMenuAnchor(null);

  const { mode: colorMode, setMode: setColorMode } = useColorScheme();

  const { showSystemMessages, setShowSystemMessages, zenMode } = useSettingsStore(state => ({
    showSystemMessages: state.showSystemMessages, setShowSystemMessages: state.setShowSystemMessages,
    zenMode: state.zenMode,
  }), shallow);

  const handleDarkModeToggle = () => setColorMode(colorMode === 'dark' ? 'light' : 'dark');

  const handleSystemMessagesToggle = () => setShowSystemMessages(!showSystemMessages);

  const handleActionShowSettings = (e: React.MouseEvent) => {
    e.stopPropagation();
    props.onShowSettings();
    closeActionsMenu();
  };

  // conversation actions

  // bind to the active conversation, falling back to safe defaults when it is not found
  const { conversationsCount, isConversationEmpty, chatModelId, systemPurposeId, setMessages, setChatModelId, setSystemPurposeId, setAutoTitle, importConversation } = useChatStore(state => {
    const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
    return {
      conversationsCount: state.conversations.length,
      isConversationEmpty: conversation ? !conversation.messages.length : true,
      chatModelId: conversation ? conversation.chatModelId : null,
      systemPurposeId: conversation ? conversation.systemPurposeId : null,
      setMessages: state.setMessages,
      setChatModelId: state.setChatModelId,
      setSystemPurposeId: state.setSystemPurposeId,
      setAutoTitle: state.setAutoTitle,
      importConversation: state.importConversation,
    };
  }, shallow);

  const handleConversationPublish = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    props.conversationId && props.onPublishConversation(props.conversationId);
  };

  const handleConversationDownload = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    // read the store imperatively, to avoid subscribing this component to message updates
    const conversation = useChatStore.getState().conversations.find(conversation => conversation.id === props.conversationId);
    if (conversation)
      downloadConversationJson(conversation);
  };

  const handleToggleMessageSelectionMode = (e: React.MouseEvent) => {
    e.stopPropagation();
    closeActionsMenu();
    props.setIsMessageSelectionMode(!props.isMessageSelectionMode);
  };

  // clearing is a two-step flow: arm the confirmation modal first, then clear on confirm
  const handleConversationClear = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    setClearConfirmationId(props.conversationId);
  };

  const handleConfirmedClearConversation = () => {
    if (clearConfirmationId) {
      setMessages(clearConfirmationId, []);
      setAutoTitle(clearConfirmationId, '');
      setClearConfirmationId(null);
    }
  };

  // pages actions

  const closePagesMenu = () => setPagesMenuAnchor(null);

  // trigger the hidden <input type='file'> at the bottom of the render tree
  const handleConversationUpload = () => conversationFileInputRef.current?.click();

  // restore conversations from the selected JSON files, recording a per-file outcome
  const handleLoadConversations = async (e: React.ChangeEvent<HTMLInputElement>) => {
    const files = e.target?.files;
    if (!files || files.length < 1)
      return;

    // try to restore conversations from the selected files
    const outcomes: ImportedOutcome = { conversations: [] };
    for (const file of files) {
      const fileName = file.name || 'unknown file';
      try {
        const conversation = restoreConversationFromJson(await file.text());
        if (conversation) {
          importConversation(conversation);
          outcomes.conversations.push({ fileName, success: true, conversationId: conversation.id });
        } else {
          const fileDesc = `(${file.type}) ${file.size.toLocaleString()} bytes`;
          outcomes.conversations.push({ fileName, success: false, error: `Invalid file: ${fileDesc}` });
        }
      } catch (error) {
        console.error(error);
        outcomes.conversations.push({ fileName, success: false, error: (error as any)?.message || error?.toString() || 'unknown error' });
      }
    }

    // show the outcome of the import
    setConversationImportOutcome(outcomes);

    // this is needed to allow the same file to be selected again
    e.target.value = '';
  };

  return <>

    {/* Top Bar with 2 icons and Model/Purpose selectors */}
    <Sheet
      variant='solid' color='neutral' invertedColors
      sx={{
        p: 1,
        display: 'flex', flexDirection: 'row', justifyContent: 'space-between',
        ...(props.sx || {}),
      }}>

      {/* Pages menu trigger; the badge shows the chat count once there is more than one */}
      <IconButton variant='plain' onClick={event => setPagesMenuAnchor(event.currentTarget)}>
        <Badge variant='solid' size='sm' badgeContent={conversationsCount < 2 ? 0 : conversationsCount}>
          <MenuIcon />
        </Badge>
      </IconButton>

      <Stack direction='row' sx={{ my: 'auto' }}>

        {chatModelId && <AppBarDropdown items={ChatModels} value={chatModelId} onChange={handleChatModelChange} />}

        {/* zen 'cleaner' mode hides the persona symbols */}
        {systemPurposeId && (zenMode === 'cleaner'
            ? <AppBarDropdown items={SystemPurposes} value={systemPurposeId} onChange={handleSystemPurposeChange} />
            : <AppBarDropdownWithSymbol items={SystemPurposes} value={systemPurposeId} onChange={handleSystemPurposeChange} />
        )}

      </Stack>

      <Stack direction='row'>

        {/* Sign in/out button, present only when auth was enabled at build time */}
        {buildTimeAuthEnabled && (
          authSession?.user ? (
            <IconButton onClick={() => signOut()}>
              <LogoutIcon style={{ marginRight: '0.33em' }} />
              <Typography level='body3'>Sign out {authSession.user?.name ?? ''}</Typography>
            </IconButton>
          ) : (
            <IconButton onClick={() => signIn()}>
              <LoginIcon style={{ marginRight: '0.33em' }} />
              <Typography>Sign in </Typography>
            </IconButton>
          )
        )}

        <IconButton variant='plain' onClick={event => setActionsMenuAnchor(event.currentTarget)}>
          <MoreVertIcon />
        </IconButton>

      </Stack>
    </Sheet>

    {/* Left menu content */}
    <PagesMenu
      conversationId={props.conversationId}
      pagesMenuAnchor={pagesMenuAnchor}
      onClose={closePagesMenu}
      onImportConversation={handleConversationUpload}
    />

    {/* Right menu content */}
    <Menu
      variant='plain' color='neutral' size='lg' placement='bottom-end' sx={{ minWidth: 280 }}
      open={!!actionsMenuAnchor} anchorEl={actionsMenuAnchor} onClose={closeActionsMenu}
      disablePortal={false}>

      <MenuItem onClick={handleDarkModeToggle}>
        <ListItemDecorator><DarkModeIcon /></ListItemDecorator>
        Dark
        <Switch checked={colorMode === 'dark'} onChange={handleDarkModeToggle} sx={{ ml: 'auto' }} />
      </MenuItem>

      <MenuItem onClick={handleSystemMessagesToggle}>
        <ListItemDecorator><SettingsSuggestIcon /></ListItemDecorator>
        System text
        <Switch checked={showSystemMessages} onChange={handleSystemMessagesToggle} sx={{ ml: 'auto' }} />
      </MenuItem>

      <MenuItem onClick={handleActionShowSettings}>
        <ListItemDecorator><SettingsOutlinedIcon /></ListItemDecorator>
        Settings
      </MenuItem>

      <ListDivider />

      {/* the next four items require a non-empty conversation */}
      <MenuItem disabled={!props.conversationId || isConversationEmpty} onClick={handleConversationPublish}>
        <ListItemDecorator>
          {/*<Badge size='sm' color='primary'>*/}
          <ExitToAppIcon />
          {/*</Badge>*/}
        </ListItemDecorator>
        Share via paste.gg
      </MenuItem>

      <MenuItem disabled={!props.conversationId || isConversationEmpty} onClick={handleConversationDownload}>
        <ListItemDecorator>
          <FileDownloadIcon />
        </ListItemDecorator>
        Export conversation
      </MenuItem>

      <ListDivider />

      <MenuItem disabled={!props.conversationId || isConversationEmpty} onClick={handleToggleMessageSelectionMode}>
        <ListItemDecorator>{props.isMessageSelectionMode ? <CheckBoxOutlinedIcon /> : <CheckBoxOutlineBlankOutlinedIcon />}</ListItemDecorator>
        Cleanup ...
      </MenuItem>

      <MenuItem disabled={!props.conversationId || isConversationEmpty} onClick={handleConversationClear}>
        <ListItemDecorator><ClearIcon /></ListItemDecorator>
        Clear conversation
      </MenuItem>

      <SupportItem />

    </Menu>

    {/* Modals */}

    <ConfirmationModal
      open={!!clearConfirmationId} onClose={() => setClearConfirmationId(null)} onPositive={handleConfirmedClearConversation}
      confirmationText={'Are you sure you want to discard all the messages?'} positiveActionText={'Clear conversation'}
    />

    {!!conversationImportOutcome && (
      <ImportedModal open outcome={conversationImportOutcome} onClose={() => setConversationImportOutcome(null)} />
    )}

    {/* Files: hidden input, clicked programmatically by handleConversationUpload */}
    <input type='file' multiple hidden accept='.json' ref={conversationFileInputRef} onChange={handleLoadConversations} />

  </>;
}
@@ -0,0 +1,68 @@
import * as React from 'react';
import { Box, Button, Divider, List, ListItem, Modal, ModalDialog, Typography } from '@mui/joy';
// Outcome of a (possibly multi-file) conversation import: one record per attempted file.
export interface ImportedOutcome {
  conversations: {
    fileName: string;        // name of the selected file ('unknown file' when unavailable)
    success: boolean;        // true when the conversation was restored and added to the store
    conversationId?: string; // set on success: id of the imported conversation
    error?: string;          // set on failure: human-readable reason
  }[];
}
/**
 * Modal dialog summarizing the result of an import operation:
 * a headline, a success summary, and a per-file list of failures.
 *
 * Import operations supported:
 *  - JSON Chat
 */
export function ImportedModal(props: { open: boolean, outcome: ImportedOutcome, onClose: () => void, }) {
  const okItems = props.outcome.conversations.filter(c => c.success);
  const failedItems = props.outcome.conversations.filter(c => !c.success);

  // headline depends on whether anything was processed, and whether any file failed
  const anyResult = okItems.length > 0 || failedItems.length > 0;
  const title = !anyResult ? 'Import failed' : failedItems.length > 0 ? 'Import issues' : 'Import successful';

  return (
    <Modal open={props.open} onClose={props.onClose}>
      <ModalDialog variant='outlined' color='neutral' sx={{ maxWidth: '100vw' }}>

        <Typography level='h5'>
          {title}
        </Typography>

        <Divider sx={{ my: 2 }} />

        {/* success summary */}
        {okItems.length >= 1 && <>
          <Typography>
            Imported {okItems.length} conversation{okItems.length === 1 ? '' : 's'}.
          </Typography>
          <Typography>
            {okItems.length === 1 ? 'It' : 'They'} can be found in the Pages menu. Opening {okItems.length === 1 ? 'it' : 'the last one'}.
          </Typography>
        </>}

        {/* per-file failure list */}
        {failedItems.length >= 1 && <>
          <Typography variant='soft' color='danger'>
            Issues importing {failedItems.length} conversation{failedItems.length === 1 ? '' : 's'}:
          </Typography>
          <List>
            {failedItems.map((f, idx) =>
              <ListItem color='warning' key={'fail-' + idx}>{f.fileName}: {f.error}</ListItem>,
            )}
          </List>
        </>}

        <Box sx={{ display: 'flex', gap: 1, justifyContent: 'flex-end', mt: 2 }}>
          <Button variant='soft' color='neutral' onClick={props.onClose}>
            Close
          </Button>
        </Box>

      </ModalDialog>
    </Modal>
  );
}
@@ -0,0 +1,162 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, ListDivider, ListItemDecorator, Menu, MenuItem, Tooltip, Typography } from '@mui/joy';
import AddIcon from '@mui/icons-material/Add';
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
import FileUploadIcon from '@mui/icons-material/FileUpload';
import { ConfirmationModal } from '@/common/components/ConfirmationModal';
import { MAX_CONVERSATIONS, useChatStore } from '@/common/state/store-chats';
import { useSettingsStore } from '@/common/state/store-settings';
import { PagesMenuItem } from './PagesMenuItem';
// Sentinel id used in the delete-confirmation flow to mean "delete every conversation"
const SPECIAL_ID_ALL_CHATS = 'all-chats';

/**
 * Left-hand "Pages" menu: create/activate/delete conversations, plus import.
 *
 * FIXME: use a proper Pages drawer instead of this menu
 */
export function PagesMenu(props: { conversationId: string | null, pagesMenuAnchor: HTMLElement | null, onClose: () => void, onImportConversation: () => void }) {

  // state: which deletion awaits confirmation (a conversation id, or SPECIAL_ID_ALL_CHATS)
  const [deleteConfirmationId, setDeleteConfirmationId] = React.useState<string | null>(null);

  // external state
  const conversationIDs = useChatStore(state => state.conversations.map(conversation => conversation.id), shallow);
  const { setActiveConversationId, createConversation, deleteConversation, newConversationId } = useChatStore(state => ({
    setActiveConversationId: state.setActiveConversationId,
    createConversation: state.createConversation,
    deleteConversation: state.deleteConversation,
    // id of the top-of-stack conversation, but only while it is still empty (reusable as 'new')
    newConversationId: state.conversations.length ? state.conversations[0].messages.length === 0 ? state.conversations[0].id : null : null,
  }), shallow);
  const showSymbols = useSettingsStore(state => state.zenMode) !== 'cleaner';

  const hasChats = conversationIDs.length > 0;
  const singleChat = conversationIDs.length === 1;
  const maxReached = conversationIDs.length >= MAX_CONVERSATIONS;

  const handleNew = () => {
    // if the first in the stack is a new conversation, just activate it
    if (newConversationId)
      setActiveConversationId(newConversationId);
    else
      createConversation();
    props.onClose();
  };

  const handleConversationActivate = (conversationId: string) => setActiveConversationId(conversationId);

  // deletes immediately (no confirmation), unless this is the only remaining chat
  const handleConversationDelete = (e: React.MouseEvent, conversationId: string) => {
    if (!singleChat) {
      e.stopPropagation();
      // NOTE: the old behavior was good, keeping it for reference - now we'll only ask for confirmation when deleting all chats
      // // if the chat is empty, just delete it
      // if (conversationId === newConversationId)
      //   deleteConversation(conversationId);
      // // otherwise, ask for confirmation
      // else {
      //   setActiveConversationId(conversationId);
      //   setDeleteConfirmationId(conversationId);
      // }
      if (conversationId)
        deleteConversation(conversationId);
    }
  };

  // invoked by the confirmation modal; handles both single- and all-chats deletion
  const handleConfirmedDeleteConversation = () => {
    if (hasChats && deleteConfirmationId) {
      if (deleteConfirmationId === SPECIAL_ID_ALL_CHATS) {
        // create a fresh conversation first, so the app is never left without one
        createConversation();
        conversationIDs.forEach(conversationId => deleteConversation(conversationId));
      } else
        deleteConversation(deleteConfirmationId);
      setDeleteConfirmationId(null);
    }
  };

  const handleDeleteAll = (e: React.MouseEvent) => {
    e.stopPropagation();
    setDeleteConfirmationId(SPECIAL_ID_ALL_CHATS);
  };

  // warning shown next to 'New' once the conversations cap is reached
  const NewPrefix = maxReached && <Tooltip title={`Maximum limit: ${MAX_CONVERSATIONS} chats. Proceeding will remove the oldest chat.`}><Box sx={{ mr: 2 }}></Box></Tooltip>;

  return <>

    <Menu
      variant='plain' color='neutral' size='lg' placement='bottom-start' sx={{ minWidth: 320 }}
      open={!!props.pagesMenuAnchor} anchorEl={props.pagesMenuAnchor} onClose={props.onClose}
      disablePortal={false}>

      {/*<ListItem>*/}
      {/*  <Typography level='body2'>*/}
      {/*    Active chats*/}
      {/*  </Typography>*/}
      {/*</ListItem>*/}

      {/* 'New' is disabled when the reusable empty conversation is already active */}
      <MenuItem onClick={handleNew} disabled={!!newConversationId && newConversationId === props.conversationId}>
        <ListItemDecorator><AddIcon /></ListItemDecorator>
        {NewPrefix}New
      </MenuItem>

      <ListDivider />

      {conversationIDs.map(conversationId =>
        <PagesMenuItem
          key={'c-id-' + conversationId}
          conversationId={conversationId}
          isActive={conversationId === props.conversationId}
          isSingle={singleChat}
          showSymbols={showSymbols}
          conversationActivate={handleConversationActivate}
          conversationDelete={handleConversationDelete}
        />)}

      <ListDivider />

      <MenuItem onClick={props.onImportConversation}>
        <ListItemDecorator>
          <FileUploadIcon />
        </ListItemDecorator>
        Import conversation
      </MenuItem>

      <MenuItem disabled={!hasChats} onClick={handleDeleteAll}>
        <ListItemDecorator><DeleteOutlineIcon /></ListItemDecorator>
        <Typography>
          Delete all
        </Typography>
      </MenuItem>

      {/*<ListItem>*/}
      {/*  <Typography level='body2'>*/}
      {/*    Scratchpad*/}
      {/*  </Typography>*/}
      {/*</ListItem>*/}
      {/*<MenuItem>*/}
      {/*  <ListItemDecorator />*/}
      {/*  <Typography sx={{ opacity: 0.5 }}>*/}
      {/*    Feature <Link href={`${Brand.URIs.OpenRepo}/issues/17`} target='_blank'>#17</Link>*/}
      {/*  </Typography>*/}
      {/*</MenuItem>*/}

    </Menu>

    {/* Confirmations */}
    <ConfirmationModal
      open={!!deleteConfirmationId} onClose={() => setDeleteConfirmationId(null)} onPositive={handleConfirmedDeleteConversation}
      confirmationText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
        ? 'Are you absolutely sure you want to delete ALL conversations? This action cannot be undone.'
        : 'Are you sure you want to delete this conversation?'}
      positiveActionText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
        ? 'Yes, delete all'
        : 'Delete conversation'}
    />

  </>;
}
@@ -6,37 +6,30 @@ import { SxProps } from '@mui/joy/styles/types';
import CloseIcon from '@mui/icons-material/Close';
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
import { DConversation, useChatStore } from '~/common/state/store-chats';
import { InlineTextarea } from '~/common/components/InlineTextarea';
import { useUIPreferencesStore } from '~/common/state/store-ui';
import { InlineTextarea } from '@/common/components/InlineTextarea';
import { SystemPurposes } from '../../../../data';
import { conversationTitle, useChatStore } from '@/common/state/store-chats';
const DEBUG_CONVERSATION_IDs = false;
const conversationTitle = (conversation: DConversation): string =>
conversation.userTitle || conversation.autoTitle || 'new conversation'; // 👋💬🗨️
export function ConversationItem(props: {
export function PagesMenuItem(props: {
conversationId: string,
isActive: boolean, isSingle: boolean, showSymbols: boolean, maxChatMessages: number,
conversationActivate: (conversationId: string, closeMenu: boolean) => void,
conversationDelete: (conversationId: string) => void,
isActive: boolean, isSingle: boolean, showSymbols: boolean,
conversationActivate: (conversationId: string) => void,
conversationDelete: (e: React.MouseEvent, conversationId: string) => void,
}) {
// state
const [isEditingTitle, setIsEditingTitle] = React.useState(false);
const [deleteArmed, setDeleteArmed] = React.useState(false);
const doubleClickToEdit = useUIPreferencesStore(state => state.doubleClickToEdit);
// bind to conversation
const cState = useChatStore(state => {
const conversation = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
return conversation && {
isNew: conversation.messages.length === 0,
messageCount: conversation.messages.length,
assistantTyping: !!conversation.abortController,
systemPurposeId: conversation.systemPurposeId,
title: conversationTitle(conversation),
@@ -44,19 +37,14 @@ export function ConversationItem(props: {
};
}, shallow);
// auto-close the arming menu when clicking away
// NOTE: there currently is a bug (race condition) where the menu closes on a new item right after opening
// because the isActive prop is not yet updated
// auto-close the menu when clicking away
React.useEffect(() => {
if (deleteArmed && !props.isActive)
setDeleteArmed(false);
}, [deleteArmed, props.isActive]);
// sanity check: shouldn't happen, but just in case
if (!cState) return null;
const { isNew, messageCount, assistantTyping, setUserTitle, systemPurposeId, title } = cState;
const handleActivate = () => props.conversationActivate(props.conversationId, true);
if (!conversation) return null;
const handleEditBegin = () => setIsEditingTitle(true);
@@ -65,50 +53,33 @@ export function ConversationItem(props: {
setUserTitle(props.conversationId, text);
};
const handleDeleteBegin = (e: React.MouseEvent) => {
e.stopPropagation();
if (!props.isActive)
props.conversationActivate(props.conversationId, false);
else
setDeleteArmed(true);
};
const handleDeleteBegin = () => setDeleteArmed(true);
const handleDeleteConfirm = (e: React.MouseEvent) => {
if (deleteArmed) {
setDeleteArmed(false);
e.stopPropagation();
props.conversationDelete(props.conversationId);
props.conversationDelete(e, props.conversationId);
}
};
const handleDeleteCancel = () => setDeleteArmed(false);
const textSymbol = (systemPurposeId && SystemPurposes[systemPurposeId]?.symbol) || '❓';
const buttonSx: SxProps = { ml: 1, ...(props.isActive ? { color: 'white' } : {}) };
const progress = props.maxChatMessages ? 100 * messageCount / props.maxChatMessages : 0;
const { assistantTyping, setUserTitle, systemPurposeId, title } = conversation;
const textSymbol = SystemPurposes[systemPurposeId]?.symbol || '❓';
const buttonSx: SxProps = { ml: 1, ...(props.isActive ? { color: 'white' } : {}) };
return (
<MenuItem
variant={props.isActive ? 'solid' : 'plain'} color='neutral'
selected={props.isActive}
onClick={handleActivate}
onClick={() => props.conversationActivate(props.conversationId)}
sx={{
// py: 0,
position: 'relative',
border: 'none', // note, there's a default border of 1px and invisible.. hmm
'&:hover > button': { opacity: 1 },
}}
>
{/* Optional prgoress bar */}
{progress > 0 && (
<Box sx={{
backgroundColor: 'neutral.softActiveBg',
position: 'absolute', left: 0, bottom: 0, width: progress + '%', height: 4,
}} />
)}
{/* Icon */}
{props.showSymbols && <ListItemDecorator>
{assistantTyping
@@ -119,12 +90,12 @@ export function ConversationItem(props: {
sx={{
width: 24,
height: 24,
borderRadius: 'var(--joy-radius-sm)',
borderRadius: 8,
}}
/>
) : (
<Typography sx={{ fontSize: '18px' }}>
{isNew ? '' : textSymbol}
{conversation.isNew ? '' : textSymbol}
</Typography>
)}
</ListItemDecorator>}
@@ -132,7 +103,7 @@ export function ConversationItem(props: {
{/* Text */}
{!isEditingTitle ? (
<Box onDoubleClick={() => doubleClickToEdit ? handleEditBegin() : null} sx={{ flexGrow: 1 }}>
<Box onDoubleClick={handleEditBegin} sx={{ flexGrow: 1 }}>
{DEBUG_CONVERSATION_IDs ? props.conversationId.slice(0, 10) : title}{assistantTyping && '...'}
</Box>
@@ -142,7 +113,6 @@ export function ConversationItem(props: {
)}
{/* // TODO: Commented code */}
{/* Edit */}
{/*<IconButton*/}
{/* variant='plain' color='neutral'*/}
@@ -156,7 +126,7 @@ export function ConversationItem(props: {
{/* Delete Arming */}
{!props.isSingle && !deleteArmed && (
<IconButton
variant={props.isActive ? 'solid' : 'outlined'} color='neutral'
variant='outlined' color='neutral'
size='sm' sx={{ opacity: { xs: 1, sm: 0 }, transition: 'opacity 0.3s', ...buttonSx }}
onClick={handleDeleteBegin}>
<DeleteOutlineIcon />
@@ -168,11 +138,10 @@ export function ConversationItem(props: {
<IconButton size='sm' variant='solid' color='danger' sx={buttonSx} onClick={handleDeleteConfirm}>
<DeleteOutlineIcon />
</IconButton>
<IconButton size='sm' variant='solid' color='neutral' sx={buttonSx} onClick={handleDeleteCancel}>
<IconButton size='sm' variant='plain' color='neutral' sx={buttonSx} onClick={handleDeleteCancel}>
<CloseIcon />
</IconButton>
</>}
</MenuItem>
);
}
@@ -1,166 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, ListDivider, ListItemDecorator, MenuItem, Tooltip, Typography } from '@mui/joy';
import AddIcon from '@mui/icons-material/Add';
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
import FileUploadIcon from '@mui/icons-material/FileUpload';
import { MAX_CONVERSATIONS, useChatStore } from '~/common/state/store-chats';
import { setLayoutDrawerAnchor } from '~/common/layout/store-applayout';
import { useUIPreferencesStore } from '~/common/state/store-ui';
import { ConversationItem } from './ConversationItem';
import { OpenAIIcon } from '~/modules/llms/openai/OpenAIIcon';
// Ordering mode for the conversation list; 'persona' groups chats by systemPurposeId
type ListGrouping = 'off' | 'persona';

/**
 * Drawer content listing all conversations: 'New', the per-conversation items,
 * then import and delete-all actions.
 */
export function ChatDrawerItems(props: {
  conversationId: string | null
  onDeleteAllConversations: () => void,
  onImportConversation: () => void,
}) {

  // local state
  // grouping is currently fixed to 'off'; the 'persona' branch below is kept for a future toggle
  const [grouping] = React.useState<ListGrouping>('off');

  // external state
  const conversationIDs = useChatStore(state => state.conversations.map(
    conversation => conversation.id,
  ), shallow);
  const { topNewConversationId, maxChatMessages, setActiveConversationId, createConversation, deleteConversation } = useChatStore(state => ({
    // id of the first conversation, but only while it is still empty (reusable as 'new')
    topNewConversationId: state.conversations.length ? state.conversations[0].messages.length === 0 ? state.conversations[0].id : null : null,
    // longest message count across all conversations; scales the per-item progress bar
    maxChatMessages: state.conversations.reduce((longest, conversation) => Math.max(longest, conversation.messages.length), 0),
    setActiveConversationId: state.setActiveConversationId,
    createConversation: state.createConversation,
    deleteConversation: state.deleteConversation,
  }), shallow);
  const { experimentalLabs, showSymbols } = useUIPreferencesStore(state => ({
    experimentalLabs: state.experimentalLabs,
    showSymbols: state.zenMode !== 'cleaner',
  }), shallow);

  const hasChats = conversationIDs.length > 0;
  const singleChat = conversationIDs.length === 1;
  const maxReached = conversationIDs.length >= MAX_CONVERSATIONS;

  const closeDrawerMenu = () => setLayoutDrawerAnchor(null);

  const handleNew = () => {
    // if the first in the stack is a new conversation, just activate it
    if (topNewConversationId)
      setActiveConversationId(topNewConversationId);
    else
      createConversation();
    closeDrawerMenu();
  };

  const handleConversationActivate = React.useCallback((conversationId: string, closeMenu: boolean) => {
    setActiveConversationId(conversationId);
    if (closeMenu)
      closeDrawerMenu();
  }, [setActiveConversationId]);

  // never delete the last remaining chat
  const handleConversationDelete = React.useCallback((conversationId: string) => {
    if (!singleChat && conversationId)
      deleteConversation(conversationId);
  }, [deleteConversation, singleChat]);

  // warning shown next to 'New' once the conversations cap is reached
  const NewPrefix = maxReached && <Tooltip title={`Maximum limit: ${MAX_CONVERSATIONS} chats. Proceeding will remove the oldest chat.`}><Box sx={{ mr: 2 }}></Box></Tooltip>;

  // grouping
  let sortedIds = conversationIDs;
  if (grouping === 'persona') {
    // imperative read is fine here: this branch re-runs on every render anyway
    const conversations = useChatStore.getState().conversations;

    // group conversations by persona
    const groupedConversations: { [personaId: string]: string[] } = {};
    conversations.forEach(conversation => {
      const persona = conversation.systemPurposeId;
      if (persona) {
        if (!groupedConversations[persona])
          groupedConversations[persona] = [];
        groupedConversations[persona].push(conversation.id);
      }
    });

    // flatten grouped conversations
    sortedIds = Object.values(groupedConversations).flat();
  }

  return <>

    {/*<ListItem>*/}
    {/*  <Typography level='body-sm'>*/}
    {/*    Active chats*/}
    {/*  </Typography>*/}
    {/*</ListItem>*/}

    <MenuItem disabled={maxReached || (!!topNewConversationId && topNewConversationId === props.conversationId)} onClick={handleNew}>
      <ListItemDecorator><AddIcon /></ListItemDecorator>
      {NewPrefix}New
    </MenuItem>

    <ListDivider sx={{ mb: 0 }} />

    <Box sx={{ flex: 1, overflowY: 'auto' }}>
      {/*<ListItem sticky sx={{ justifyContent: 'space-between', boxShadow: 'sm' }}>*/}
      {/*  <Typography level='body-sm'>*/}
      {/*    Conversations*/}
      {/*  </Typography>*/}
      {/*  <ToggleButtonGroup variant='soft' size='sm' value={grouping} onChange={(_event, newValue) => newValue && setGrouping(newValue)}>*/}
      {/*    <IconButton value='off'>*/}
      {/*      <AccessTimeIcon />*/}
      {/*    </IconButton>*/}
      {/*    <IconButton value='persona'>*/}
      {/*      <PersonIcon />*/}
      {/*    </IconButton>*/}
      {/*  </ToggleButtonGroup>*/}
      {/*</ListItem>*/}

      {sortedIds.map(conversationId =>
        <ConversationItem
          key={'c-id-' + conversationId}
          conversationId={conversationId}
          isActive={conversationId === props.conversationId}
          isSingle={singleChat}
          showSymbols={showSymbols}
          maxChatMessages={experimentalLabs ? maxChatMessages : 0}
          conversationActivate={handleConversationActivate}
          conversationDelete={handleConversationDelete}
        />)}
    </Box>

    <ListDivider sx={{ mt: 0 }} />

    <MenuItem onClick={props.onImportConversation}>
      <ListItemDecorator>
        <FileUploadIcon />
      </ListItemDecorator>
      Import chats
      <OpenAIIcon sx={{ fontSize: 'xl', ml: 'auto' }} />
    </MenuItem>

    <MenuItem disabled={!hasChats} onClick={props.onDeleteAllConversations}>
      <ListItemDecorator><DeleteOutlineIcon /></ListItemDecorator>
      <Typography>
        Delete all
      </Typography>
    </MenuItem>

    {/*<ListItem>*/}
    {/*  <Typography level='body-sm'>*/}
    {/*    Scratchpad*/}
    {/*  </Typography>*/}
    {/*</ListItem>*/}
    {/*<MenuItem>*/}
    {/*  <ListItemDecorator />*/}
    {/*  <Typography sx={{ opacity: 0.5 }}>*/}
    {/*    Feature <Link href={`${Brand.URIs.OpenRepo}/issues/17`} target='_blank'>#17</Link>*/}
    {/*  </Typography>*/}
    {/*</MenuItem>*/}

  </>;
}
@@ -1,92 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { ListItemButton, ListItemDecorator, Typography } from '@mui/joy';
import BuildCircleIcon from '@mui/icons-material/BuildCircle';
import SettingsIcon from '@mui/icons-material/Settings';
import { DLLMId, DModelSourceId } from '~/modules/llms/llm.types';
import { SystemPurposeId, SystemPurposes } from '../../../../data';
import { useModelsStore } from '~/modules/llms/store-llms';
import { AppBarDropdown, DropdownItems } from '~/common/layout/AppBarDropdown';
import { useChatStore } from '~/common/state/store-chats';
import { useUIPreferencesStore, useUIStateStore } from '~/common/state/store-ui';
/**
 * The two app-bar dropdowns: chat model (LLM) selection and persona selection.
 */
export function ChatDropdowns(props: {
  conversationId: string | null
}) {

  // external state
  const { chatLLMId, setChatLLMId, llms } = useModelsStore(state => ({
    chatLLMId: state.chatLLMId,
    setChatLLMId: state.setChatLLMId,
    llms: state.llms,
  }), shallow);
  const { zenMode } = useUIPreferencesStore(state => ({ zenMode: state.zenMode }), shallow);
  // persona of the active conversation (null when no conversation is selected)
  const { systemPurposeValue, setSystemPurposeId } = useChatStore(state => {
    const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
    return {
      systemPurposeValue: conversation?.systemPurposeId ?? null,
      setSystemPurposeId: state.setSystemPurposeId,
    };
  }, shallow);
  const { openLLMOptions, openModelsSetup } = useUIStateStore(state => ({
    openLLMOptions: state.openLLMOptions, openModelsSetup: state.openModelsSetup,
  }), shallow);

  const handleChatModelChange = (event: any, value: DLLMId | null) =>
    value && props.conversationId && setChatLLMId(value);

  const handleSystemPurposeChange = (event: any, value: SystemPurposeId | null) =>
    value && props.conversationId && setSystemPurposeId(props.conversationId, value);

  const handleOpenLLMOptions = () => chatLLMId && openLLMOptions(chatLLMId);

  // build model menu items, filtering-out hidden models, and add Source separators
  const llmItems: DropdownItems = {};
  let prevSourceId: DModelSourceId | null = null;
  for (const llm of llms) {
    // hidden models are skipped, except for the one currently selected
    if (!llm.hidden || llm.id === chatLLMId) {
      // emit a separator when the source changes (but not before the first group)
      if (!prevSourceId || llm.sId !== prevSourceId) {
        if (prevSourceId)
          llmItems[`sep-${llm.id}`] = { type: 'separator', title: llm.sId };
        prevSourceId = llm.sId;
      }
      llmItems[llm.id] = { title: llm.label };
    }
  }

  return <>

    {/* Model selector, with appended 'Options' and 'Models' entries */}
    <AppBarDropdown
      items={llmItems}
      value={chatLLMId} onChange={handleChatModelChange}
      placeholder='Models …'
      appendOption={<>

        {chatLLMId && (
          <ListItemButton key='menu-opt' onClick={handleOpenLLMOptions}>
            <ListItemDecorator><SettingsIcon color='success' /></ListItemDecorator><Typography>Options</Typography>
          </ListItemButton>
        )}

        <ListItemButton key='menu-llms' onClick={openModelsSetup}>
          <ListItemDecorator><BuildCircleIcon color='success' /></ListItemDecorator><Typography>Models</Typography>
        </ListItemButton>

      </>}
    />

    {/* Persona selector */}
    <AppBarDropdown
      items={SystemPurposes} showSymbols={zenMode !== 'cleaner'}
      value={systemPurposeValue} onChange={handleSystemPurposeChange}
      placeholder='Personas …'
    />

  </>;
}
@@ -1,123 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { ListDivider, ListItemDecorator, MenuItem, Switch } from '@mui/joy';
import CheckBoxOutlineBlankOutlinedIcon from '@mui/icons-material/CheckBoxOutlineBlankOutlined';
import CheckBoxOutlinedIcon from '@mui/icons-material/CheckBoxOutlined';
import ClearIcon from '@mui/icons-material/Clear';
import CompressIcon from '@mui/icons-material/Compress';
import FileDownloadIcon from '@mui/icons-material/FileDownload';
import ForkRightIcon from '@mui/icons-material/ForkRight';
import SettingsSuggestIcon from '@mui/icons-material/SettingsSuggest';
import { setLayoutMenuAnchor } from '~/common/layout/store-applayout';
import { useUIPreferencesStore } from '~/common/state/store-ui';
/**
 * Items for the chat context menu: system-message toggle, duplicate, flatten,
 * cleanup mode, export, and reset. Most actions are delegated to the parent via props.
 */
export function ChatMenuItems(props: {
  conversationId: string | null, isConversationEmpty: boolean,
  isMessageSelectionMode: boolean, setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void,
  onClearConversation: (conversationId: string) => void,
  onDuplicateConversation: (conversationId: string) => void,
  onExportConversation: (conversationId: string | null) => void,
  onFlattenConversation: (conversationId: string) => void,
}) {

  // external state
  const { showSystemMessages, setShowSystemMessages } = useUIPreferencesStore(state => ({
    showSystemMessages: state.showSystemMessages, setShowSystemMessages: state.setShowSystemMessages,
  }), shallow);

  // derived state: most actions require a selected, non-empty conversation
  const disabled = !props.conversationId || props.isConversationEmpty;

  const closeContextMenu = () => setLayoutMenuAnchor(null);

  const handleSystemMessagesToggle = () => setShowSystemMessages(!showSystemMessages);

  // export passes null when disabled, letting the parent decide how to react
  const handleConversationExport = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    closeContextMenu();
    props.onExportConversation(!disabled ? props.conversationId : null);
  };

  const handleConversationDuplicate = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    closeContextMenu();
    props.conversationId && props.onDuplicateConversation(props.conversationId);
  };

  const handleConversationFlatten = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    closeContextMenu();
    props.conversationId && props.onFlattenConversation(props.conversationId);
  };

  const handleToggleMessageSelectionMode = (e: React.MouseEvent) => {
    e.stopPropagation();
    closeContextMenu();
    props.setIsMessageSelectionMode(!props.isMessageSelectionMode);
  };

  // note: clear does not close the menu; the parent shows a confirmation instead
  const handleConversationClear = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    props.conversationId && props.onClearConversation(props.conversationId);
  };

  return <>

    {/*<ListItem>*/}
    {/*  <Typography level='body-sm'>*/}
    {/*    Conversation*/}
    {/*  </Typography>*/}
    {/*</ListItem>*/}

    <MenuItem onClick={handleSystemMessagesToggle}>
      <ListItemDecorator><SettingsSuggestIcon /></ListItemDecorator>
      System message
      <Switch checked={showSystemMessages} onChange={handleSystemMessagesToggle} sx={{ ml: 'auto' }} />
    </MenuItem>

    <ListDivider inset='startContent' />

    <MenuItem disabled={disabled} onClick={handleConversationDuplicate}>
      <ListItemDecorator>
        {/*<Badge size='sm' color='success'>*/}
        <ForkRightIcon color='success' />
        {/*</Badge>*/}
      </ListItemDecorator>
      Duplicate
    </MenuItem>

    <MenuItem disabled={disabled} onClick={handleConversationFlatten}>
      <ListItemDecorator>
        {/*<Badge size='sm' color='success'>*/}
        <CompressIcon color='success' />
        {/*</Badge>*/}
      </ListItemDecorator>
      Flatten
    </MenuItem>

    <ListDivider inset='startContent' />

    <MenuItem disabled={disabled} onClick={handleToggleMessageSelectionMode}>
      <ListItemDecorator>{props.isMessageSelectionMode ? <CheckBoxOutlinedIcon /> : <CheckBoxOutlineBlankOutlinedIcon />}</ListItemDecorator>
      <span style={props.isMessageSelectionMode ? { fontWeight: 800 } : {}}>
        Cleanup ...
      </span>
    </MenuItem>

    <MenuItem onClick={handleConversationExport}>
      <ListItemDecorator>
        <FileDownloadIcon />
      </ListItemDecorator>
      Export
    </MenuItem>

    <MenuItem disabled={disabled} onClick={handleConversationClear}>
      <ListItemDecorator><ClearIcon /></ListItemDecorator>
      Reset
    </MenuItem>

  </>;
}
@@ -1,35 +0,0 @@
import * as React from 'react';
import { Button, IconButton } from '@mui/joy';
import AddAPhotoIcon from '@mui/icons-material/AddAPhoto';
import { hideOnDesktop, hideOnMobile } from '~/common/theme';
import { CameraCaptureModal } from './CameraCaptureModal';
// set to true (or to a NODE_ENV check) to also show the button on desktop
const showOnDesktop = false; // process.env.NODE_ENV === 'development';

/**
 * Mobile camera-capture entry point: an icon button that opens the
 * CameraCaptureModal, which delivers OCR'd text back through props.onOCR.
 */
export function CameraCaptureButton(props: { onOCR: (ocrText: string) => void }) {

  // whether the capture dialog is currently shown
  const [modalShown, setModalShown] = React.useState(false);

  const openModal = () => setModalShown(true);
  const closeModal = () => setModalShown(false);

  return <>

    {/* Mobile-only icon button that brings up the camera dialog */}
    <IconButton variant='plain' color='neutral' onClick={openModal} sx={hideOnDesktop}>
      <AddAPhotoIcon />
    </IconButton>

    {/* Optional desktop button, gated by the development-only flag above */}
    {showOnDesktop && <Button
      fullWidth variant='plain' color='neutral' onClick={openModal} startDecorator={<AddAPhotoIcon />}
      sx={{ ...hideOnMobile, justifyContent: 'flex-start' }}>
      OCR
    </Button>}

    {/* The capture dialog, streaming the camera video while open */}
    {modalShown && <CameraCaptureModal onCloseModal={closeModal} onOCR={props.onOCR} />}

  </>;
}
@@ -1,155 +0,0 @@
import * as React from 'react';
import { Box, Button, CircularProgress, IconButton, LinearProgress, Modal, ModalClose, Option, Select, Sheet, Typography } from '@mui/joy';
import DownloadIcon from '@mui/icons-material/Download';
import InfoIcon from '@mui/icons-material/Info';
import KeyboardArrowDownIcon from '@mui/icons-material/KeyboardArrowDown';
import { InlineError } from '~/common/components/InlineError';
import { useCameraCapture } from '~/common/components/useCameraCapture';
/**
 * Copies the current frame of a <video> element onto a fresh canvas,
 * so it can be encoded (PNG) or fed to OCR.
 */
function renderVideoFrameToCanvas(videoElement: HTMLVideoElement): HTMLCanvasElement {
  const frameCanvas = document.createElement('canvas');
  // fall back to a default resolution when the stream hasn't reported dimensions yet
  frameCanvas.width = videoElement.videoWidth || 640;
  frameCanvas.height = videoElement.videoHeight || 480;
  frameCanvas.getContext('2d')?.drawImage(videoElement, 0, 0);
  return frameCanvas;
}
/**
 * Snapshots the current video frame and triggers a browser download
 * of it as 'image.png' via a synthetic anchor click.
 */
function downloadVideoFrameAsPNG(videoElement: HTMLVideoElement) {
  // frame -> canvas -> PNG data URL
  const imageDataURL = renderVideoFrameToCanvas(videoElement).toDataURL('image/png');

  // auto-download through a temporary <a download> element
  const anchor = document.createElement('a');
  anchor.download = 'image.png';
  anchor.href = imageDataURL;
  anchor.click();
}
export function CameraCaptureModal(props: { onCloseModal: () => void, onOCR: (ocrText: string) => void }) {
// state
const [ocrProgress, setOCRProgress] = React.useState<number | null>(null);
const [showInfo, setShowInfo] = React.useState(false);
// camera operations
const {
videoRef,
cameras, cameraIdx, setCameraIdx,
zoomControl, info, error,
resetVideo,
} = useCameraCapture();
const stopAndClose = () => {
resetVideo();
props.onCloseModal();
};
const handleVideoOCRClicked = async () => {
if (!videoRef.current) return;
const renderedFrame = renderVideoFrameToCanvas(videoRef.current);
setOCRProgress(0);
const { recognize } = await import('tesseract.js');
const result = await recognize(renderedFrame, undefined, {
logger: m => {
// noinspection SuspiciousTypeOfGuard
if (typeof m.progress === 'number')
setOCRProgress(m.progress);
},
errorHandler: e => console.error(e),
});
setOCRProgress(null);
stopAndClose();
props.onOCR(result.data.text);
};
const handleVideoDownloadClicked = () => {
if (!videoRef.current) return;
downloadVideoFrameAsPNG(videoRef.current);
};
return (
<Modal open onClose={stopAndClose} sx={{ display: 'flex', alignItems: 'center', justifyContent: 'center' }}>
<Box sx={{
display: 'flex', flexDirection: 'column', m: 1,
borderRadius: 'md', overflow: 'hidden',
boxShadow: 'sm',
}}>
{/* Top bar */}
<Sheet variant='solid' invertedColors sx={{ zIndex: 10, display: 'flex', justifyContent: 'space-between', p: 1 }}>
<Select
variant='solid' color='neutral'
value={cameraIdx} onChange={(_event: any, value: number | null) => setCameraIdx(value === null ? -1 : value)}
indicator={<KeyboardArrowDownIcon />}
>
<Option value={-1}>
No Camera
</Option>
{cameras.map((device: MediaDeviceInfo, camIndex) => (
<Option key={'video-dev-' + camIndex} value={camIndex}>
{device.label}
</Option>
))}
</Select>
<ModalClose onClick={stopAndClose} sx={{ position: 'static' }} />
</Sheet>
{/* (main) Video */}
<Box sx={{ position: 'relative' }}>
<video
ref={videoRef} autoPlay playsInline
style={{
display: 'block', width: '100%', maxHeight: 'calc(100vh - 200px)',
background: '#8888', opacity: ocrProgress !== null ? 0.5 : 1,
}}
/>
{showInfo && !!info && <Typography
sx={{
position: 'absolute', top: 0, left: 0, right: 0, bottom: 0, zIndex: 1,
background: 'rgba(0,0,0,0.5)', color: 'white',
whiteSpace: 'pre', overflowY: 'scroll',
}}>
{info}
</Typography>}
{ocrProgress !== null && <CircularProgress sx={{ position: 'absolute', top: 'calc(50% - 34px / 2)', left: 'calc(50% - 34px / 2)', zIndex: 2 }} />}
</Box>
{/* Bottom controls (zoom, ocr, download) & progress */}
<Sheet variant='soft' sx={{ display: 'flex', flexDirection: 'column', zIndex: 20, gap: 1, p: 1 }}>
{!!error && <InlineError error={error} />}
{zoomControl}
{ocrProgress !== null && <LinearProgress color='primary' determinate value={100 * ocrProgress} sx={{ px: 2 }} />}
<Box sx={{ display: 'flex', gap: 1, justifyContent: 'space-between' }}>
<IconButton disabled={!info} variant='soft' color='neutral' size='lg' onClick={() => setShowInfo(info => !info)} sx={{ zIndex: 30 }}>
<InfoIcon />
</IconButton>
<Button disabled={ocrProgress !== null} fullWidth variant='solid' size='lg' onClick={handleVideoOCRClicked} sx={{ flex: 1, maxWidth: 260 }}>
Extract Text
</Button>
<IconButton variant='soft' color='neutral' size='lg' onClick={handleVideoDownloadClicked}>
<DownloadIcon />
</IconButton>
</Box>
</Sheet>
</Box>
</Modal>
);
}
@@ -1,34 +0,0 @@
import * as React from 'react';
import { Box, MenuItem, Radio, Typography } from '@mui/joy';
import { ChatModeId, ChatModeItems } from '../../AppChat';
import { CloseableMenu } from '~/common/components/CloseableMenu';
export const ChatModeMenu = (props: { anchorEl: HTMLAnchorElement | null, onClose: () => void, experimental: boolean, chatModeId: ChatModeId, onSetChatModeId: (chatMode: ChatModeId) => void }) =>
<CloseableMenu
placement='top-end' sx={{ minWidth: 320 }}
open anchorEl={props.anchorEl} onClose={props.onClose}
>
{/*<MenuItem color='neutral' selected>*/}
{/* Conversation Mode*/}
{/*</MenuItem>*/}
{/**/}
{/*<ListDivider />*/}
{/* ChatMode items */}
{Object.entries(ChatModeItems).filter(([, { experimental }]) => props.experimental || !experimental).map(([key, data]) =>
<MenuItem key={'chat-mode-' + key} onClick={() => props.onSetChatModeId(key as ChatModeId)}>
<Box sx={{ display: 'flex', flexDirection: 'row', alignItems: 'center', gap: 2 }}>
<Radio checked={key === props.chatModeId} />
<Box>
<Typography>{data.label}</Typography>
<Typography level='body-sm'>{data.description}</Typography>
</Box>
</Box>
</MenuItem>)}
</CloseableMenu>;
+141 -238
View File
@@ -1,44 +1,36 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, Button, ButtonGroup, Card, Grid, IconButton, ListDivider, ListItemDecorator, MenuItem, Stack, Textarea, Tooltip, Typography, useTheme } from '@mui/joy';
import { Box, Button, Card, Grid, IconButton, ListDivider, ListItemDecorator, Menu, MenuItem, Radio, Stack, Textarea, Tooltip, Typography, useTheme } from '@mui/joy';
import { ColorPaletteProp, SxProps, VariantProp } from '@mui/joy/styles/types';
import AttachFileOutlinedIcon from '@mui/icons-material/AttachFileOutlined';
import ClearIcon from '@mui/icons-material/Clear';
import ContentPasteGoIcon from '@mui/icons-material/ContentPasteGo';
import DataArrayIcon from '@mui/icons-material/DataArray';
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
import ExpandLessIcon from '@mui/icons-material/ExpandLess';
import FormatAlignCenterIcon from '@mui/icons-material/FormatAlignCenter';
import KeyboardArrowUpIcon from '@mui/icons-material/KeyboardArrowUp';
import MicIcon from '@mui/icons-material/Mic';
import PanToolIcon from '@mui/icons-material/PanTool';
import PictureAsPdfIcon from '@mui/icons-material/PictureAsPdf';
import PsychologyIcon from '@mui/icons-material/Psychology';
import SendIcon from '@mui/icons-material/Send';
import StopOutlinedIcon from '@mui/icons-material/StopOutlined';
import TelegramIcon from '@mui/icons-material/Telegram';
import UploadFileIcon from '@mui/icons-material/UploadFile';
import { ContentReducer } from '~/modules/aifn/summarize/ContentReducer';
import { LLMOptionsOpenAI } from '~/modules/llms/openai/openai.vendor';
import { useChatLLM } from '~/modules/llms/store-llms';
import { ChatModels, SendModeId, SendModes } from '../../../../data';
import { ConfirmationModal } from '@/common/components/ConfirmationModal';
import { countModelTokens } from '@/common/llm-util/token-counter';
import { htmlTableToMarkdown } from '@/common/util/htmlTableToMarkdown';
import { pdfToText } from '@/common/util/pdfToText';
import { useChatStore } from '@/common/state/store-chats';
import { useComposerStore } from '@/common/state/store-composer';
import { useSettingsStore } from '@/common/state/store-settings';
import { useSpeechRecognition } from '@/common/components/useSpeechRecognition';
import { CloseableMenu } from '~/common/components/CloseableMenu';
import { ConfirmationModal } from '~/common/components/ConfirmationModal';
import { SpeechResult, useSpeechRecognition } from '~/common/components/useSpeechRecognition';
import { countModelTokens } from '~/common/util/token-counter';
import { extractFilePathsWithCommonRadix } from '~/common/util/dropTextUtils';
import { hideOnDesktop, hideOnMobile } from '~/common/theme';
import { htmlTableToMarkdown } from '~/common/util/htmlTableToMarkdown';
import { pdfToText } from '~/common/util/pdfToText';
import { useChatStore } from '~/common/state/store-chats';
import { useUIPreferencesStore } from '~/common/state/store-ui';
import { CameraCaptureButton } from './CameraCaptureButton';
import { ChatModeId } from '../../AppChat';
import { ChatModeMenu } from './ChatModeMenu';
import { ContentReducerModal } from './ContentReducerModal';
import { TokenBadge } from './TokenBadge';
import { TokenProgressbar } from './TokenProgressbar';
import { useComposerStore } from './store-composer';
import { hideOnDesktop, hideOnMobile } from '@/common/theme';
// import { isValidProdiaApiKey, requireUserKeyProdia } from '@/modules/prodia/prodia.client';
/// Text template helpers
@@ -58,26 +50,26 @@ const expandPromptTemplate = (template: string, dict: object) => (inputValue: st
const attachFileLegend =
<Stack sx={{ p: 1, gap: 1 }}>
<Stack sx={{ p: 1, gap: 1, fontSize: '16px', fontWeight: 400 }}>
<Box sx={{ mb: 1, textAlign: 'center' }}>
<b>Attach a file to the message</b>
Attach a file to the message
</Box>
<table>
<tbody>
<tr>
<td width={32}><PictureAsPdfIcon /></td>
<td width={36}><PictureAsPdfIcon sx={{ width: 24, height: 24 }} /></td>
<td><b>PDF</b></td>
<td width={36} align='center' style={{ opacity: 0.5 }}></td>
<td>📝 Text (summarized)</td>
<td>📝 Text (split manually)</td>
</tr>
<tr>
<td><DataArrayIcon /></td>
<td><DataArrayIcon sx={{ width: 24, height: 24 }} /></td>
<td><b>Code</b></td>
<td align='center' style={{ opacity: 0.5 }}></td>
<td>📚 Markdown</td>
</tr>
<tr>
<td><FormatAlignCenterIcon /></td>
<td><FormatAlignCenterIcon sx={{ width: 24, height: 24 }} /></td>
<td><b>Text</b></td>
<td align='center' style={{ opacity: 0.5 }}></td>
<td>📝 As-is</td>
@@ -90,7 +82,7 @@ const attachFileLegend =
</Stack>;
const pasteClipboardLegend =
<Box sx={{ p: 1 }}>
<Box sx={{ p: 1, fontSize: '14px', fontWeight: 400 }}>
Converts Code and Tables to 📚 Markdown
</Box>;
@@ -103,43 +95,57 @@ const MicButton = (props: { variant: VariantProp, color: ColorPaletteProp, onCli
</Tooltip>;
const SendModeMenu = (props: { anchorEl: HTMLAnchorElement, sendMode: SendModeId, onSetSendMode: (sendMode: SendModeId) => void, onClose: () => void, }) =>
<Menu
variant='plain' color='neutral' size='md' placement='top-end' sx={{ minWidth: 320, overflow: 'auto' }}
open anchorEl={props.anchorEl} onClose={props.onClose}>
<MenuItem color='neutral' selected>Conversation Mode</MenuItem>
<ListDivider />
{Object.entries(SendModes).map(([key, data]) =>
<MenuItem key={'send-mode-' + key} onClick={() => props.onSetSendMode(key as SendModeId)}>
<Box sx={{ display: 'flex', flexDirection: 'row', alignItems: 'center', gap: 2 }}>
<Radio checked={key === props.sendMode} />
<Box>
<Typography>{data.label}</Typography>
<Typography level='body2'>{data.description}</Typography>
</Box>
</Box>
</MenuItem>)}
</Menu>;
const SentMessagesMenu = (props: {
anchorEl: HTMLAnchorElement, onClose: () => void,
messages: { date: number; text: string; count: number }[],
onPaste: (text: string) => void,
onClear: () => void,
}) =>
<CloseableMenu
placement='top-end' maxHeightGapPx={56 * 3} noTopPadding sx={{ minWidth: 320, maxWidth: '100dvw' }}
open={!!props.anchorEl} anchorEl={props.anchorEl} onClose={props.onClose}
>
<Menu
variant='plain' color='neutral' size='md' placement='top-end' sx={{ minWidth: 320, overflow: 'auto' }}
open anchorEl={props.anchorEl} onClose={props.onClose}>
<MenuItem variant='solid' selected>
Reuse messages 💬
</MenuItem>
<MenuItem color='neutral' selected>Reuse messages 💬</MenuItem>
<Box sx={{ display: 'flex', flexDirection: 'column', overflowY: 'auto' }}>
{props.messages.map((item, index) =>
<MenuItem
key={'composer-sent-' + index}
onClick={() => {
props.onPaste(item.text);
props.onClose();
}}
sx={{ textOverflow: 'ellipsis', whiteSpace: 'nowrap', display: 'inline-block', overflowX: 'hidden' }}
>
{item.count > 1 && <span style={{ marginRight: 1 }}>({item.count})</span>} {item.text?.length > 70 ? item.text.slice(0, 68) + '...' : item.text}
</MenuItem>)}
</Box>
<ListDivider />
{props.messages.map((item, index) =>
<MenuItem key={'composer-sent-' + index} onClick={() => props.onPaste(item.text)}>
{item.count > 1 && <Typography level='body2' color='neutral' sx={{ mr: 1 }}>({item.count})</Typography>}
{item.text?.length > 60 ? item.text.slice(0, 58) + '...' : item.text}
</MenuItem>)}
<ListDivider />
<MenuItem onClick={props.onClear}>
<ListItemDecorator><DeleteOutlineIcon /></ListItemDecorator>
Clear sent messages history
<ListItemDecorator><ClearIcon /></ListItemDecorator>
Clear all
</MenuItem>
</CloseableMenu>;
</Menu>;
/**
@@ -155,55 +161,44 @@ const SentMessagesMenu = (props: {
*/
export function Composer(props: {
conversationId: string | null; messageId: string | null;
chatModeId: ChatModeId, setChatModeId: (chatModeId: ChatModeId) => void;
isDeveloperMode: boolean;
onSendMessage: (conversationId: string, text: string) => void;
sx?: SxProps;
}) {
// state
const [composeText, setComposeText] = React.useState('');
const [speechInterimResult, setSpeechInterimResult] = React.useState<SpeechResult | null>(null);
const [isDragging, setIsDragging] = React.useState(false);
const [reducerText, setReducerText] = React.useState('');
const [reducerTextTokens, setReducerTextTokens] = React.useState(0);
const [chatModeMenuAnchor, setChatModeMenuAnchor] = React.useState<HTMLAnchorElement | null>(null);
const [sendModeMenuAnchor, setSendModeMenuAnchor] = React.useState<HTMLAnchorElement | null>(null);
const [sentMessagesAnchor, setSentMessagesAnchor] = React.useState<HTMLAnchorElement | null>(null);
const [confirmClearSent, setConfirmClearSent] = React.useState(false);
const attachmentFileInputRef = React.useRef<HTMLInputElement>(null);
// external state
const theme = useTheme();
const { enterToSend, experimentalLabs } = useUIPreferencesStore(state => ({
enterToSend: state.enterToSend,
experimentalLabs: state.experimentalLabs,
}), shallow);
const { sentMessages, appendSentMessage, clearSentMessages, startupText, setStartupText } = useComposerStore();
const { assistantTyping, tokenCount: conversationTokenCount, stopTyping } = useChatStore(state => {
const { sendModeId, setSendModeId, sentMessages, appendSentMessage, clearSentMessages } = useComposerStore();
const stopTyping = useChatStore(state => state.stopTyping);
const modelMaxResponseTokens = useSettingsStore(state => state.modelMaxResponseTokens);
const { assistantTyping, chatModelId, tokenCount: conversationTokenCount } = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
return {
assistantTyping: conversation ? !!conversation.abortController : false,
chatModelId: conversation ? conversation.chatModelId : null,
tokenCount: conversation ? conversation.tokenCount : 0,
stopTyping: state.stopTyping,
};
}, shallow);
const { chatLLMId, chatLLM } = useChatLLM();
// Effect: load initial text if queued up (e.g. by /share)
React.useEffect(() => {
if (startupText) {
setStartupText(null);
setComposeText(startupText);
}
}, [startupText, setStartupText]);
// derived state
const tokenLimit = chatLLM?.contextTokens || 0;
const tokenLimit = chatModelId ? ChatModels[chatModelId]?.contextWindowSize || 8192 : 0;
const directTokens = React.useMemo(() => {
return (!composeText || !chatLLMId) ? 4 : 4 + countModelTokens(composeText, chatLLMId, 'composer text');
}, [chatLLMId, composeText]);
const historyTokens = conversationTokenCount;
const responseTokens = (chatLLM?.options as LLMOptionsOpenAI /* FIXME: BIG ASSUMPTION */)?.llmResponseTokens || 0;
const remainingTokens = tokenLimit - directTokens - historyTokens - responseTokens;
return (!composeText || !chatModelId) ? 0 : 4 + countModelTokens(composeText, chatModelId, 'composer text');
}, [chatModelId, composeText]);
const indirectTokens = modelMaxResponseTokens + conversationTokenCount;
const remainingTokens = tokenLimit - directTokens - indirectTokens;
const handleSendClicked = () => {
@@ -215,75 +210,63 @@ export function Composer(props: {
}
};
const handleToggleChatMode = (event: React.MouseEvent<HTMLAnchorElement>) =>
setChatModeMenuAnchor(anchor => anchor ? null : event.currentTarget);
const handleShowSendMode = (event: React.MouseEvent<HTMLAnchorElement>) => setSendModeMenuAnchor(event.currentTarget);
const handleHideChatMode = () => setChatModeMenuAnchor(null);
const handleSetChatModeId = (chatModeId: ChatModeId) => {
handleHideChatMode();
props.setChatModeId(chatModeId);
};
const handleHideSendMode = () => setSendModeMenuAnchor(null);
const handleStopClicked = () => props.conversationId && stopTyping(props.conversationId);
const handleTextareaKeyDown = (e: React.KeyboardEvent) => {
if (e.key === 'Enter') {
const shiftOrAlt = e.shiftKey || e.altKey;
if (enterToSend ? !shiftOrAlt : shiftOrAlt) {
if (!assistantTyping)
handleSendClicked();
e.preventDefault();
}
const handleKeyPress = (e: React.KeyboardEvent) => {
if (e.key === 'Enter' && !e.shiftKey && !e.altKey) {
if (!assistantTyping)
handleSendClicked();
e.preventDefault();
}
};
const onSpeechResultCallback = React.useCallback((result: SpeechResult) => {
setSpeechInterimResult(result.done ? null : { ...result });
if (result.done) {
setComposeText(prevText => {
prevText = prevText.trim();
const transcript = result.transcript.trim();
return prevText ? prevText + ' ' + transcript : transcript;
});
}
const onSpeechResultCallback = React.useCallback((transcript: string) => {
setComposeText(current => {
current = current.trim();
transcript = transcript.trim();
if ((!current || current.endsWith('.') || current.endsWith('!') || current.endsWith('?')) && transcript.length)
transcript = transcript[0].toUpperCase() + transcript.slice(1);
return current ? current + ' ' + transcript : transcript;
});
}, []);
const { isSpeechEnabled, isSpeechError, isRecordingAudio, isRecordingSpeech, toggleRecording } = useSpeechRecognition(onSpeechResultCallback, 2000, 'm');
const { isSpeechEnabled, isSpeechError, isRecordingAudio, isRecordingSpeech, toggleRecording } = useSpeechRecognition(onSpeechResultCallback, 'm');
const handleMicClicked = () => toggleRecording();
const micColor: ColorPaletteProp = isSpeechError ? 'danger' : isRecordingSpeech ? 'primary' : isRecordingAudio ? 'neutral' : 'neutral';
const micVariant: VariantProp = isRecordingSpeech ? 'solid' : isRecordingAudio ? 'outlined' : 'plain';
const micColor = isSpeechError ? 'danger' : isRecordingSpeech ? 'warning' : isRecordingAudio ? 'warning' : 'neutral';
const micVariant = isRecordingSpeech ? 'solid' : isRecordingAudio ? 'solid' : 'plain';
async function loadAndAttachFiles(files: FileList, overrideFileNames: string[]) {
async function loadAndAttachFiles(files: FileList) {
// NOTE: we tried to get the common 'root prefix' of the files here, so that we could attach files with a name that's relative
// to the common root, but the files[].webkitRelativePath property is not providing that information
// perform loading and expansion
let newText = '';
for (let i = 0; i < files.length; i++) {
const file = files[i];
const fileName = overrideFileNames.length === files.length ? overrideFileNames[i] : file.name;
for (let file of files) {
let fileText = '';
try {
if (file.type === 'application/pdf')
fileText = await pdfToText(file);
else
fileText = await file.text();
newText = expandPromptTemplate(PromptTemplates.PasteFile, { fileName: fileName, fileText })(newText);
newText = expandPromptTemplate(PromptTemplates.PasteFile, { fileName: file.name, fileText })(newText);
} catch (error) {
// show errors in the prompt box itself - FUTURE: show in a toast
console.error(error);
newText = `${newText}\n\nError loading file ${fileName}: ${error}\n`;
newText = `${newText}\n\nError loading file ${file.name}: ${error}\n`;
}
}
// see how we fare on budget
if (chatLLMId) {
const newTextTokens = countModelTokens(newText, chatLLMId, 'reducer trigger');
if (chatModelId) {
const newTextTokens = countModelTokens(newText, chatModelId, 'reducer trigger');
// simple trigger for the reduction dialog
if (newTextTokens > remainingTokens) {
@@ -311,18 +294,17 @@ export function Composer(props: {
const handleLoadAttachment = async (e: React.ChangeEvent<HTMLInputElement>) => {
const files = e.target?.files;
if (files && files.length >= 1)
await loadAndAttachFiles(files, []);
await loadAndAttachFiles(files);
// this is needed to allow the same file to be selected again
e.target.value = '';
};
const handleCameraOCR = (text: string) => text && setComposeText(expandPromptTemplate(PromptTemplates.PasteMarkdown, { clipboard: text }));
const handlePasteButtonClicked = async () => {
for (const clipboardItem of await navigator.clipboard.read()) {
const handlePasteFromClipboard = async () => {
for (let clipboardItem of await navigator.clipboard.read()) {
// when pasting html, only process tables as markdown (e.g. from Excel), or fallback to text
// find the text/html item if any
try {
const htmlItem = await clipboardItem.getType('text/html');
const htmlString = await htmlItem.text();
@@ -334,7 +316,7 @@ export function Composer(props: {
}
// TODO: paste html to markdown (tried Turndown, but the gfm plugin is not good - need to find another lib with minimal footprint)
} catch (error) {
// ignore missing html: fallback to text/plain
// ignore missing html
}
// find the text/plain item if any
@@ -352,18 +334,6 @@ export function Composer(props: {
}
};
const handleTextareaCtrlV = async (e: React.ClipboardEvent) => {
// paste local files
if (e.clipboardData.files.length > 0) {
e.preventDefault();
await loadAndAttachFiles(e.clipboardData.files, []);
return;
}
// paste not intercepted, continue with default behavior
};
const showSentMessages = (event: React.MouseEvent<HTMLAnchorElement>) => setSentMessagesAnchor(event.currentTarget);
@@ -386,7 +356,7 @@ export function Composer(props: {
e.stopPropagation();
};
const handleTextareaDragEnter = (e: React.DragEvent) => {
const handleMessageDragEnter = (e: React.DragEvent) => {
eatDragEvent(e);
setIsDragging(true);
};
@@ -406,19 +376,12 @@ export function Composer(props: {
setIsDragging(false);
// dropped files
if (e.dataTransfer.files?.length >= 1) {
// Workaround: as we don't have the full path in the File object, we need to get it from the text/plain data
let overrideFileNames: string[] = [];
if (e.dataTransfer.types?.includes('text/plain')) {
const plainText = e.dataTransfer.getData('text/plain');
overrideFileNames = extractFilePathsWithCommonRadix(plainText);
}
return loadAndAttachFiles(e.dataTransfer.files, overrideFileNames);
}
if (e.dataTransfer.files?.length >= 1)
return loadAndAttachFiles(e.dataTransfer.files);
// special case: detect failure of dropping from VSCode
// VSCode: Drag & Drop does not transfer the File object: https://github.com/microsoft/vscode/issues/98629#issuecomment-634475572
if (e.dataTransfer.types?.includes('codeeditors'))
if ('codeeditors' in e.dataTransfer.types)
return setComposeText(test => test + 'Pasting from VSCode is not supported! Fixme. Anyone?');
// dropped text
@@ -433,23 +396,10 @@ export function Composer(props: {
// const prodiaApiKey = isValidProdiaApiKey(useSettingsStore(state => state.prodiaApiKey));
// const isProdiaConfigured = !requireUserKeyProdia || prodiaApiKey;
const textPlaceholder: string = props.isDeveloperMode
? 'Chat with me · drop source files · attach code...'
? 'Tell me what you need, and drop source files...'
: /*isProdiaConfigured ?*/ 'Chat · /react · /imagine · drop text files...' /*: 'Chat · /react · drop text files...'*/;
// const isImmediate = props.chatModeId === 'immediate';
const isFollowUp = props.chatModeId === 'immediate-follow-up';
const isReAct = props.chatModeId === 'react';
const isWriteUser = props.chatModeId === 'write-user';
const chatButton = (
<Button
fullWidth variant={isWriteUser ? 'soft' : 'solid'} color={isReAct ? 'success' : isFollowUp ? 'warning' : 'primary'} disabled={!props.conversationId || !chatLLM}
onClick={handleSendClicked} onDoubleClick={handleToggleChatMode}
endDecorator={isWriteUser ? <SendIcon sx={{ fontSize: 18 }} /> : isReAct ? <PsychologyIcon /> : <TelegramIcon />}
>
{isWriteUser ? 'Write' : isReAct ? 'ReAct' : isFollowUp ? 'Chat+' : 'Chat'}
</Button>
);
const isReAct = sendModeId === 'react';
return (
<Box sx={props.sx}>
@@ -459,35 +409,35 @@ export function Composer(props: {
<Grid xs={12} md={9}><Stack direction='row' spacing={{ xs: 1, md: 2 }}>
{/* Vertical Buttons Bar */}
<Box sx={{ display: 'flex', flexDirection: 'column', gap: { xs: 0, md: 2 } }}>
<Stack>
{/*<Typography level='body-xs' sx={{mb: 2}}>Context</Typography>*/}
{/*<Typography level='body3' sx={{mb: 2}}>Context</Typography>*/}
{isSpeechEnabled && <Box sx={hideOnDesktop}>
{isSpeechEnabled && <Box sx={{ mb: { xs: 1, md: 2 }, ...hideOnDesktop }}>
<MicButton variant={micVariant} color={micColor} onClick={handleMicClicked} />
</Box>}
<CameraCaptureButton onOCR={handleCameraOCR} />
<IconButton onClick={handleShowFilePicker} sx={{ ...hideOnDesktop }}>
<AttachFileOutlinedIcon />
<IconButton variant='plain' color='neutral' onClick={handleShowFilePicker} sx={{ ...hideOnDesktop }}>
<UploadFileIcon />
</IconButton>
<Tooltip
variant='solid' placement='top-start'
title={attachFileLegend}>
<Button fullWidth variant='plain' color='neutral' onClick={handleShowFilePicker} startDecorator={<AttachFileOutlinedIcon />}
<Button fullWidth variant='plain' color='neutral' onClick={handleShowFilePicker} startDecorator={<UploadFileIcon />}
sx={{ ...hideOnMobile, justifyContent: 'flex-start' }}>
Attach
</Button>
</Tooltip>
<IconButton onClick={handlePasteButtonClicked} sx={{ ...hideOnDesktop }}>
<Box sx={{ mt: { xs: 1, md: 2 } }} />
<IconButton variant='plain' color='neutral' onClick={handlePasteFromClipboard} sx={{ ...hideOnDesktop }}>
<ContentPasteGoIcon />
</IconButton>
<Tooltip
variant='solid' placement='top-start'
title={pasteClipboardLegend}>
<Button fullWidth variant='plain' color='neutral' startDecorator={<ContentPasteGoIcon />} onClick={handlePasteButtonClicked}
<Button fullWidth variant='plain' color='neutral' startDecorator={<ContentPasteGoIcon />} onClick={handlePasteFromClipboard}
sx={{ ...hideOnMobile, justifyContent: 'flex-start' }}>
{props.isDeveloperMode ? 'Paste code' : 'Paste'}
</Button>
@@ -495,7 +445,7 @@ export function Composer(props: {
<input type='file' multiple hidden ref={attachmentFileInputRef} onChange={handleLoadAttachment} />
</Box>
</Stack>
{/* Edit box, with Drop overlay */}
<Box sx={{ flexGrow: 1, position: 'relative' }}>
@@ -503,18 +453,15 @@ export function Composer(props: {
<Box sx={{ position: 'relative' }}>
<Textarea
variant='outlined' color={isReAct ? 'success' : 'neutral'}
variant='outlined' color={isReAct ? 'info' : 'neutral'}
autoFocus
minRows={5} maxRows={10}
minRows={4} maxRows={12}
onKeyDown={handleKeyPress}
onDragEnter={handleMessageDragEnter}
placeholder={textPlaceholder}
value={composeText}
onChange={(e) => setComposeText(e.target.value)}
onDragEnter={handleTextareaDragEnter}
onKeyDown={handleTextareaKeyDown}
onPasteCapture={handleTextareaCtrlV}
value={composeText} onChange={(e) => setComposeText(e.target.value)}
slotProps={{
textarea: {
enterKeyHint: enterToSend ? 'send' : 'enter',
sx: {
...(isSpeechEnabled ? { pr: { md: 5 } } : {}),
mb: 0.5,
@@ -522,47 +469,18 @@ export function Composer(props: {
},
}}
sx={{
backgroundColor: 'background.level1',
'&:focus-within': {
backgroundColor: 'background.popup',
},
// fontSize: '16px',
background: theme.vars.palette.background.level1,
fontSize: '16px',
lineHeight: 1.75,
}} />
{tokenLimit > 0 && (directTokens > 0 || (historyTokens + responseTokens) > 0) && <TokenProgressbar history={historyTokens} response={responseTokens} direct={directTokens} limit={tokenLimit} />}
{tokenLimit > 0 && (directTokens > 0 || indirectTokens > 0) && <TokenProgressbar direct={directTokens} indirect={indirectTokens} limit={tokenLimit} />}
</Box>
{isSpeechEnabled && (
<MicButton variant={micVariant} color={micColor} onClick={handleMicClicked} sx={{
...hideOnMobile,
position: 'absolute', top: 0, right: 0,
zIndex: 21,
m: 1,
}} />
)}
{isSpeechEnabled && <MicButton variant={micVariant} color={micColor} onClick={handleMicClicked} sx={{ ...hideOnMobile, position: 'absolute', top: 0, right: 0, margin: 1 }} />}
{!!tokenLimit && <TokenBadge directTokens={directTokens} indirectTokens={historyTokens + responseTokens} tokenLimit={tokenLimit} absoluteBottomRight />}
{!!speechInterimResult && (
<Card
color='primary' invertedColors variant='soft'
sx={{
display: 'flex',
position: 'absolute', bottom: 0, left: 0, right: 0, top: 0,
// alignItems: 'center', justifyContent: 'center',
border: `1px solid ${theme.palette.primary.solidBg}`,
borderRadius: theme.radius.xs,
zIndex: 20,
px: 1.5, py: 1,
}}>
<Typography>
{speechInterimResult.transcript}{' '}
<span style={{ opacity: 0.5 }}>{speechInterimResult.interimTranscript}</span>
</Typography>
</Card>
)}
{!!tokenLimit && <TokenBadge directTokens={directTokens} indirectTokens={indirectTokens} tokenLimit={tokenLimit} absoluteBottomRight />}
<Card
color='primary' invertedColors variant='soft'
@@ -571,14 +489,13 @@ export function Composer(props: {
position: 'absolute', bottom: 0, left: 0, right: 0, top: 0,
alignItems: 'center', justifyContent: 'space-evenly',
border: '2px dashed',
borderRadius: theme.radius.xs,
zIndex: 10,
}}
onDragLeave={handleOverlayDragLeave}
onDragOver={handleOverlayDragOver}
onDrop={handleOverlayDrop}>
<PanToolIcon sx={{ width: 40, height: 40, pointerEvents: 'none' }} />
<Typography level='body-sm' sx={{ pointerEvents: 'none' }}>
<Typography level='body2' sx={{ pointerEvents: 'none' }}>
I will hold on to this for you
</Typography>
</Card>
@@ -595,35 +512,25 @@ export function Composer(props: {
{/* [mobile-only] Sent messages arrow */}
{sentMessages.length > 0 && (
<IconButton disabled={!!sentMessagesAnchor} onClick={showSentMessages} sx={{ ...hideOnDesktop, mr: { xs: 1, md: 2 } }}>
<IconButton variant='plain' color='neutral' onClick={showSentMessages} sx={{ ...hideOnDesktop, mr: { xs: 1, md: 2 } }}>
<KeyboardArrowUpIcon />
</IconButton>
)}
{/* Send / Stop */}
{assistantTyping
? (
<Button
fullWidth variant='soft' color={isReAct ? 'success' : 'primary'} disabled={!props.conversationId}
onClick={handleStopClicked}
endDecorator={<StopOutlinedIcon />}
>
Stop
</Button>
) : (
<ButtonGroup variant={isWriteUser ? 'solid' : 'solid'} color={isReAct ? 'success' : isFollowUp ? 'warning' : 'primary'} sx={{ flexGrow: 1 }}>
{chatButton}
<IconButton disabled={!props.conversationId || !chatLLM || !!chatModeMenuAnchor} onClick={handleToggleChatMode}>
<ExpandLessIcon />
</IconButton>
</ButtonGroup>
)}
? <Button fullWidth variant='soft' color={isReAct ? 'info' : 'primary'} disabled={!props.conversationId} onClick={handleStopClicked} endDecorator={<StopOutlinedIcon />}>
Stop
</Button>
: <Button fullWidth variant='solid' color={isReAct ? 'info' : 'primary'} disabled={!props.conversationId} onClick={handleSendClicked} onDoubleClick={handleShowSendMode} endDecorator={isReAct ? <PsychologyIcon /> : <TelegramIcon />}>
{isReAct ? 'ReAct' : 'Chat'}
</Button>}
</Box>
{/* [desktop-only] row with Sent Messages button */}
<Stack direction='row' spacing={1} sx={{ ...hideOnMobile, flexDirection: { xs: 'column', md: 'row' }, justifyContent: 'flex-end' }}>
{sentMessages.length > 0 && (
<Button disabled={!!sentMessagesAnchor} fullWidth variant='plain' color='neutral' startDecorator={<KeyboardArrowUpIcon />} onClick={showSentMessages}>
<Button fullWidth variant='plain' color='neutral' startDecorator={<KeyboardArrowUpIcon />} onClick={showSentMessages}>
History
</Button>
)}
@@ -634,12 +541,8 @@ export function Composer(props: {
{/* Mode selector */}
{!!chatModeMenuAnchor && (
<ChatModeMenu
anchorEl={chatModeMenuAnchor} onClose={handleHideChatMode}
experimental={experimentalLabs}
chatModeId={props.chatModeId} onSetChatModeId={handleSetChatModeId}
/>
{!!sendModeMenuAnchor && (
<SendModeMenu anchorEl={sendModeMenuAnchor} sendMode={sendModeId} onSetSendMode={setSendModeId} onClose={handleHideSendMode} />
)}
{/* Sent messages menu */}
@@ -651,8 +554,8 @@ export function Composer(props: {
)}
{/* Content reducer modal */}
{reducerText?.length >= 1 &&
<ContentReducer
{reducerText?.length >= 1 && chatModelId &&
<ContentReducerModal
initialText={reducerText} initialTokens={reducerTextTokens} tokenLimit={remainingTokens}
onReducedText={handleContentReducerText} onClose={handleContentReducerClose}
/>
@@ -1,16 +1,13 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Alert, Box, Button, CircularProgress, Divider, FormControl, FormHelperText, FormLabel, Modal, ModalClose, ModalDialog, Option, Select, Slider, Stack, Textarea, Typography } from '@mui/joy';
import { DLLM, DLLMId } from '~/modules/llms/llm.types';
import { summerizeToFitContextBudget } from '~/modules/aifn/summarize/summerize';
import { useModelsStore } from '~/modules/llms/store-llms';
import { ChatModelId, ChatModels, fastChatModelId } from '../../../../data';
import { Section } from '@/common/components/Section';
import { countModelTokens } from '@/common/llm-util/token-counter';
import { summerizeToFitContextBudget } from '@/common/llm-util/summerize';
import { Section } from '~/common/components/Section';
import { countModelTokens } from '~/common/util/token-counter';
import { TokenBadge } from '../../../apps/chat/components/composer/TokenBadge';
import { TokenBadge } from './TokenBadge';
function TokenUsageAlert({ usedTokens, tokenLimit }: { usedTokens: number, tokenLimit: number }) {
@@ -27,7 +24,7 @@ function TokenUsageAlert({ usedTokens, tokenLimit }: { usedTokens: number, token
/**
* Dialog to compress a PDF
*/
export function ContentReducer(props: {
export function ContentReducerModal(props: {
initialText: string,
initialTokens: number,
tokenLimit: number,
@@ -35,30 +32,26 @@ export function ContentReducer(props: {
onReducedText: (text: string) => void,
}) {
// external state
const { llms, fastLLMId } = useModelsStore(state => ({ llms: state.llms, fastLLMId: state.fastLLMId }), shallow);
// state
const [reducerModelId, setReducerModelId] = React.useState<DLLMId | null>(fastLLMId);
const [reducerModelId, setReducerModelId] = React.useState<ChatModelId>(fastChatModelId);
const [compressionLevel, setCompressionLevel] = React.useState(3);
const [reducedText, setReducedText] = React.useState('');
const [processing, setProcessing] = React.useState(false);
// derived state
const reducedTokens = reducerModelId ? countModelTokens(reducedText, reducerModelId, 'content reducer reduce') : 0;
const reducedTokens = countModelTokens(reducedText, reducerModelId, 'content reducer reduce');
const remainingTokens = props.tokenLimit - reducedTokens;
const handleReducerModelChange = (event: any, value: DLLMId | null) => value && setReducerModelId(value);
const handleChatModelChange = (event: any, value: ChatModelId | null) => value && setReducerModelId(value);
const handleCompressionLevelChange = (event: Event, newValue: number | number[]) => setCompressionLevel(newValue as number);
const handlePreviewClicked = async () => {
setProcessing(true);
if (reducerModelId) {
const reducedText = await summerizeToFitContextBudget(props.initialText, props.tokenLimit, reducerModelId);
setReducedText(reducedText);
}
const reducedText = await summerizeToFitContextBudget(props.initialText, props.tokenLimit, reducerModelId);
setReducedText(reducedText);
setProcessing(false);
};
@@ -82,7 +75,7 @@ export function ContentReducer(props: {
<ModalClose />
<Typography level='title-lg'>Content Reducer (preview)</Typography>
<Typography level='h5'>Content Reducer (preview)</Typography>
<Divider sx={{ my: 2 }} />
@@ -91,7 +84,7 @@ export function ContentReducer(props: {
<Section>
<Stack direction='column' sx={{ gap: 2 }}>
<Typography level='body-sm'>
<Typography level='body2'>
Input: <b>{props.initialTokens.toLocaleString()}</b> tokens · Limit: <b>{props.tokenLimit.toLocaleString()}</b> tokens
<br />
compression needed <b>{props.tokenLimit ? Math.round(100 * props.initialTokens / props.tokenLimit) : 0}</b> %
@@ -100,12 +93,12 @@ export function ContentReducer(props: {
<FormControl orientation='horizontal' sx={{ justifyContent: 'space-between' }}>
<Box sx={{ minWidth: 120 }}>
<FormLabel>Reducer model</FormLabel>
<FormHelperText>{llms.find(llm => llm.id === reducerModelId)?.description?.slice(0, 10) ?? null}</FormHelperText>
<FormHelperText>{ChatModels[reducerModelId]?.tradeoff}</FormHelperText>
</Box>
{reducerModelId && <Select value={reducerModelId} onChange={handleReducerModelChange} sx={{ minWidth: 140 }}>
{llms.map((llm: DLLM) => (
<Option key={llm.id} value={llm.id}>
{llm.label} {llm.id === fastLLMId && '*'}
{reducerModelId && <Select value={reducerModelId} onChange={handleChatModelChange} sx={{ minWidth: 140 }}>
{Object.keys(ChatModels).map((key: string) => (
<Option key={key} value={key}>
{ChatModels[key as ChatModelId].title}
</Option>
))}
</Select>}
@@ -157,8 +150,8 @@ export function ContentReducer(props: {
{processing && (
<Box sx={{ position: 'absolute', top: 0, left: 0, right: 0, bottom: 0, display: 'flex', alignItems: 'center', justifyContent: 'center', flexDirection: 'column' }}>
<CircularProgress />
<Typography level='body-sm' sx={{ mt: 1 }}>Reduction in progress.</Typography>
<Typography level='body-xs'>This can take a few minutes</Typography>
<Typography level='body2' sx={{ mt: 1 }}>Reduction in progress.</Typography>
<Typography level='body3'>This can take a few minutes</Typography>
</Box>
)}
@@ -1,25 +1,9 @@
import * as React from 'react';
import { Badge, ColorPaletteProp, Tooltip, useTheme } from '@mui/joy';
import { Badge, Tooltip, useTheme } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
export function tokensPrettyMath(tokenLimit: number | 0, directTokens: number, indirectTokens?: number): { message: string, color: ColorPaletteProp } {
const usedTokens = directTokens + (indirectTokens || 0);
const remainingTokens = tokenLimit - usedTokens;
let message: string = (tokenLimit && remainingTokens < 0) ? '⚠️ ' : '';
if (!tokenLimit) {
message += `Requested: ${usedTokens.toLocaleString()} tokens`;
} else if (indirectTokens) {
message += `${remainingTokens.toLocaleString()} available tokens\n\n= Model capacity: ${tokenLimit.toLocaleString()}\n- Request: ${usedTokens.toLocaleString()} tokens`;
message += ` (Chat: ${directTokens.toLocaleString()}${indirectTokens ? ', History + Response: ' + indirectTokens?.toLocaleString() : ''})`;
} else
message += `${(tokenLimit + usedTokens).toLocaleString()} available tokens = Currently free: ${tokenLimit.toLocaleString()} + This message: ${usedTokens.toLocaleString()} tokens`;
const color: ColorPaletteProp = (tokenLimit && remainingTokens < 1) ? 'danger' : remainingTokens < tokenLimit / 4 ? 'warning' : 'primary';
return { message, color };
}
/**
* Simple little component to show the token count (and a tooltip on hover)
*/
@@ -28,14 +12,24 @@ export function TokenBadge({ directTokens, indirectTokens, tokenLimit, absoluteB
// external state
const theme = useTheme();
// derived state
const usedTokens = directTokens + (indirectTokens || 0);
const remainingTokens = tokenLimit - usedTokens;
let message: string = remainingTokens < 0 ? '⚠️ ' : '';
if (indirectTokens) {
message += `${remainingTokens.toLocaleString()} remaining tokens · Model capacity: ${tokenLimit.toLocaleString()} - Request: ${usedTokens.toLocaleString()} tokens`;
message += ` (Chat: ${directTokens.toLocaleString()}${indirectTokens ? ', History & Response: ' + indirectTokens?.toLocaleString() + ')' : ''})`;
} else
message += `${remainingTokens.toLocaleString()} remaining tokens · Allowed: ${tokenLimit.toLocaleString()} - Requested: ${usedTokens.toLocaleString()} tokens`;
const color = remainingTokens < 1 ? 'danger' : remainingTokens < tokenLimit / 4 ? 'warning' : 'primary';
const fontSx: SxProps = { fontFamily: theme.fontFamily.code, ...(sx || {}) };
const outerSx: SxProps = absoluteBottomRight ? { position: 'absolute', bottom: 8, right: 8 } : {};
const innerSx: SxProps = (absoluteBottomRight || inline) ? { position: 'static', transform: 'none', ...fontSx } : fontSx;
const { message, color } = tokensPrettyMath(tokenLimit, directTokens, indirectTokens);
const badgeContent = directTokens > 0
? <Tooltip title={<span style={{ whiteSpace: 'pre' }}>{message}</span>} color={color} sx={fontSx}><span>{directTokens.toLocaleString()}</span></Tooltip>
? <Tooltip title={message} color={color} sx={fontSx}><span>{directTokens.toLocaleString()}</span></Tooltip>
: null;
return (
@@ -1,8 +1,6 @@
import * as React from 'react';
import { Box, Tooltip, useTheme } from '@mui/joy';
import { tokensPrettyMath } from './TokenBadge';
import { Box, useTheme } from '@mui/joy';
/**
@@ -10,75 +8,53 @@ import { tokensPrettyMath } from './TokenBadge';
*
* The Textarea contains it within the Composer (at least).
*/
export function TokenProgressbar(props: { history: number, response: number, direct: number, limit: number }) {
export function TokenProgressbar(props: { indirect: number, direct: number, limit: number }) {
// external state
const theme = useTheme();
if (!(props.limit > 0) || (!props.direct && !props.history && !props.response)) return null;
if (!(props.limit > 0) || (!props.direct && !props.indirect)) return null;
// compute percentages
let historyPct = 100 * props.history / props.limit;
let responsePct = 100 * props.response / props.limit;
let directPct = 100 * props.direct / props.limit;
const totalPct = historyPct + responsePct + directPct;
let indirectPct = 100 * props.indirect / props.limit;
let totalPct = 100 * (props.indirect + props.direct) / props.limit;
const isOverflow = totalPct >= 100;
if (isOverflow) {
let scale = 100 / totalPct;
scale *= scale; // make proportional space for the 'danger' (overflow) representation
historyPct *= scale;
responsePct *= scale;
directPct *= scale;
indirectPct *= 100 / totalPct;
totalPct = 100 * 100 / totalPct;
}
// bar colors
const historyColor = theme.palette.neutral.softHoverBg;
const directColor = theme.palette.primary.solidBg;
const responseColor = theme.palette.neutral.softHoverBg;
const overflowColor = theme.palette.danger.solidBg;
const directColor = theme.vars.palette.primary.softHoverBg;
const indirectColor = theme.vars.palette.neutral.softHoverBg;
const overflowColor = theme.vars.palette.danger.solidBg;
// tooltip message/color
const { message, color } = tokensPrettyMath(props.limit, props.direct, props.history + props.response);
// sizes
const containerHeight = 8;
const height = isOverflow ? 8 : 4;
return (
<Tooltip title={<span style={{ whiteSpace: 'pre' }}>{message}</span>} color={color} sx={{ fontFamily: theme.fontFamily.code }}>
<Box sx={{
position: 'absolute', left: 1, right: 1, bottom: 1, height: containerHeight,
overflow: 'hidden', borderBottomLeftRadius: 7, borderBottomRightRadius: 7,
}}>
<Box sx={{
position: 'absolute', left: 1, right: 1, bottom: 1, height: containerHeight,
overflow: 'hidden', borderBottomLeftRadius: 7, borderBottomRightRadius: 7,
}}>
{/* Indirect */}
{indirectPct > 0 && <Box sx={{
background: indirectColor,
position: 'absolute', left: 0, bottom: 0, width: indirectPct + '%', height,
}} />}
{/* History */}
{historyPct > 0 && <Box sx={{
background: historyColor,
position: 'absolute', left: 0, bottom: 0, width: historyPct + '%', height,
}} />}
{/* Direct */}
{totalPct > indirectPct && <Box sx={{
background: directColor,
position: 'absolute', left: indirectPct + '%', bottom: 0, width: (totalPct - indirectPct) + '%', height,
}} />}
{/* Direct */}
{directPct > 0 && <Box sx={{
background: directColor,
position: 'absolute', left: historyPct + '%', bottom: 0, width: directPct + '%', height,
}} />}
{/* Overflow */}
{isOverflow && <Box sx={{
background: overflowColor,
position: 'absolute', left: totalPct + '%', right: 0, bottom: 0, height,
}} />}
{/* Response */}
{responsePct > 0 && <Box sx={{
background: responseColor,
position: 'absolute', left: (totalPct > 100 ? (historyPct + directPct) : (100 - responsePct)) + '%', bottom: 0, width: responsePct + '%', height,
}} />}
{/* Overflow */}
{isOverflow && <Box sx={{
background: overflowColor,
position: 'absolute', left: (historyPct + directPct + responsePct) + '%', right: 0, bottom: 0, height,
}} />}
</Box>
</Tooltip>
</Box>
);
}
@@ -5,7 +5,7 @@ import { Box, Grid, IconButton, Sheet, Stack, styled, Typography, useTheme } fro
import { SxProps } from '@mui/joy/styles/types';
import CloseIcon from '@mui/icons-material/Close';
import { DEphemeral, useChatStore } from '~/common/state/store-chats';
import { DEphemeral, useChatStore } from '@/common/state/store-chats';
const StateLine = styled(Typography)(({ theme }) => ({
@@ -42,7 +42,7 @@ function ListRenderer({ name, list }: { name: string, list: any[] }) {
return <StateLine><b>{name}</b>[{list.length ? list.length : ''}]: {list.length ? '(not displayed)' : 'empty'}</StateLine>;
}
function ObjectRenderer({ name }: { name: string }) {
function ObjectRenderer({ name, value }: { name: string, value: object }) {
return <StateLine><b>{name}</b>: <i>object not displayed</i></StateLine>;
}
@@ -55,19 +55,19 @@ function StateRenderer(props: { state: object }) {
return (
<Stack>
<Typography level='body-sm' sx={{ mb: 1 }}>
<Typography level='body2' sx={{ mb: 1 }}>
Internal State
</Typography>
<Sheet>
{!entries && <Typography level='body-sm'>No state variables</Typography>}
{!entries && <Typography level='body2'>No state variables</Typography>}
{entries.map(([key, value]) =>
isPrimitive(value)
? <PrimitiveRender key={'state-' + key} name={key} value={value} />
: Array.isArray(value)
? <ListRenderer key={'state-' + key} name={key} list={value} />
: typeof value === 'object'
? <ObjectRenderer key={'state-' + key} name={key} />
: <Typography key={'state-' + key} level='body-sm'>{key}: {value}</Typography>,
? <ObjectRenderer key={'state-' + key} name={key} value={value} />
: <Typography key={'state-' + key} level='body2'>{key}: {value}</Typography>,
)}
</Sheet>
</Stack>
@@ -81,7 +81,7 @@ function EphemeralItem({ conversationId, ephemeral }: { conversationId: string,
sx={{
p: { xs: 1, md: 2 },
position: 'relative',
// border: (i < ephemerals.length - 1) ? `2px solid ${theme.palette.divider}` : undefined,
// border: (i < ephemerals.length - 1) ? `2px solid ${theme.vars.palette.divider}` : undefined,
'&:hover > button': { opacity: 1 },
}}>
@@ -138,9 +138,10 @@ export function Ephemerals(props: { conversationId: string | null, sx?: SxProps
return (
<Sheet
variant='soft' color='success' invertedColors
variant='soft' color='info' invertedColors
sx={{
border: `4px dashed ${theme.palette.divider}`,
border: `4px dashed ${theme.vars.palette.divider}`,
boxShadow: `inset 0 0 12px ${theme.vars.palette.background.popup}`,
...(props.sx || {}),
}}>
+397 -149
View File
@@ -1,7 +1,20 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Avatar, Box, Button, CircularProgress, IconButton, ListDivider, ListItem, ListItemDecorator, MenuItem, Stack, Theme, Tooltip, Typography, useTheme } from '@mui/joy';
import ReactMarkdown from 'react-markdown';
import remarkGfm from 'remark-gfm';
import Prism from 'prismjs';
import 'prismjs/themes/prism.css';
import 'prismjs/components/prism-bash';
import 'prismjs/components/prism-css';
import 'prismjs/components/prism-java';
import 'prismjs/components/prism-javascript';
import 'prismjs/components/prism-json';
import 'prismjs/components/prism-markdown';
import 'prismjs/components/prism-python';
import 'prismjs/components/prism-typescript';
import { Alert, Avatar, Box, Button, Chip, CircularProgress, IconButton, ListDivider, ListItem, ListItemDecorator, Menu, MenuItem, Stack, Theme, Tooltip, Typography, useTheme } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import ClearIcon from '@mui/icons-material/Clear';
import ContentCopyIcon from '@mui/icons-material/ContentCopy';
@@ -14,44 +27,346 @@ import PaletteOutlinedIcon from '@mui/icons-material/PaletteOutlined';
import RecordVoiceOverIcon from '@mui/icons-material/RecordVoiceOver';
import ReplayIcon from '@mui/icons-material/Replay';
import SettingsSuggestIcon from '@mui/icons-material/SettingsSuggest';
import ShapeLineOutlinedIcon from '@mui/icons-material/ShapeLineOutlined';
import SmartToyOutlinedIcon from '@mui/icons-material/SmartToyOutlined';
import ZoomOutMapIcon from '@mui/icons-material/ZoomOutMap';
import { canUseElevenLabs, speakText } from '~/modules/elevenlabs/elevenlabs.client';
import { canUseProdia } from '~/modules/prodia/prodia.client';
import { requireUserKeyElevenLabs, speakText } from '@/modules/elevenlabs/elevenlabs.client';
import { requireUserKeyProdia } from '@/modules/prodia/prodia.client';
import { CloseableMenu } from '~/common/components/CloseableMenu';
import { DMessage } from '~/common/state/store-chats';
import { InlineError } from '~/common/components/InlineError';
import { InlineTextarea } from '~/common/components/InlineTextarea';
import { Link } from '~/common/components/Link';
import { DMessage } from '@/common/state/store-chats';
import { InlineTextarea } from '@/common/components/InlineTextarea';
import { Link } from '@/common/components/Link';
import { SystemPurposeId, SystemPurposes } from '../../../../data';
import { copyToClipboard } from '~/common/util/copyToClipboard';
import { cssRainbowColorKeyframes } from '~/common/theme';
import { prettyBaseModel } from '../../trade/trade.markdown';
import { useUIPreferencesStore } from '~/common/state/store-ui';
import { cssRainbowColorKeyframes } from '@/common/theme';
import { extractCommands } from '@/common/util/extractCommands';
import { prettyBaseModel } from '@/common/util/conversationToMarkdown';
import { useSettingsStore } from '@/common/state/store-settings';
import { RenderCode } from './RenderCode';
import { RenderHtml } from './RenderHtml';
import { RenderImage } from './RenderImage';
import { RenderMarkdown } from './RenderMarkdown';
import { RenderText } from './RenderText';
import { parseBlocks } from './blocks';
import { OpenInCodepen } from './OpenInCodepen';
import { OpenInReplit } from './OpenInReplit';
/// Utilities to parse messages into blocks of text and code
type Block = TextBlock | CodeBlock | ImageBlock;
type TextBlock = { type: 'text'; content: string; };
type CodeBlock = { type: 'code'; content: string; language: string | null; complete: boolean; code: string; };
type ImageBlock = { type: 'image'; url: string; };
const inferCodeLanguage = (markdownLanguage: string, code: string): string | null => {
let detectedLanguage;
// we have an hint
if (markdownLanguage) {
// no dot: assume is the syntax-highlight name
if (!markdownLanguage.includes('.'))
return markdownLanguage;
// dot: there's probably a file extension
const extension = markdownLanguage.split('.').pop();
if (extension) {
const languageMap: { [key: string]: string } = {
cs: 'csharp', html: 'html', java: 'java', js: 'javascript', json: 'json', jsx: 'javascript',
md: 'markdown', py: 'python', sh: 'bash', ts: 'typescript', tsx: 'typescript', xml: 'xml',
};
detectedLanguage = languageMap[extension];
if (detectedLanguage)
return detectedLanguage;
}
}
// based on how the code starts, return the language
const codeStarts = [
{ starts: ['<!DOCTYPE html', '<html'], language: 'html' },
{ starts: ['<'], language: 'xml' },
{ starts: ['from '], language: 'python' },
{ starts: ['import ', 'export '], language: 'typescript' }, // or python
{ starts: ['interface ', 'function '], language: 'typescript' }, // ambiguous
{ starts: ['package '], language: 'java' },
{ starts: ['using '], language: 'csharp' },
];
for (const codeStart of codeStarts) {
if (codeStart.starts.some((start) => code.startsWith(start))) {
return codeStart.language;
}
}
// If no language detected based on code start, use Prism to tokenize and detect language
const languages = ['bash', 'css', 'java', 'javascript', 'json', 'markdown', 'python', 'typescript']; // matches Prism component imports
let maxTokens = 0;
languages.forEach((language) => {
const grammar = Prism.languages[language];
const tokens = Prism.tokenize(code, grammar);
const tokenCount = tokens.filter((token) => typeof token !== 'string').length;
if (tokenCount > maxTokens) {
maxTokens = tokenCount;
detectedLanguage = language;
}
});
return detectedLanguage || null;
};
/**
* FIXME: expensive function, especially as it's not been used in incremental fashion
*/
const parseBlocks = (forceText: boolean, text: string): Block[] => {
if (forceText)
return [{ type: 'text', content: text }];
if (text.startsWith('https://images.prodia.xyz/') && text.endsWith('.png') && text.length > 60 && text.length < 70)
return [{ type: 'image', url: text.trim() }];
const codeBlockRegex = /`{3,}([\w\\.+-_]+)?\n([\s\S]*?)(`{3,}|$)/g;
const result: Block[] = [];
let lastIndex = 0;
let match;
while ((match = codeBlockRegex.exec(text)) !== null) {
const markdownLanguage = (match[1] || '').trim();
const code = match[2].trim();
const blockEnd: string = match[3];
// Load the specified language if it's not loaded yet
// NOTE: this is commented out because it inflates the size of the bundle by 200k
// if (!Prism.languages[language]) {
// try {
// require(`prismjs/components/prism-${language}`);
// } catch (e) {
// console.warn(`Prism language '${language}' not found, falling back to 'typescript'`);
// }
// }
const codeLanguage = inferCodeLanguage(markdownLanguage, code);
const highlightLanguage = codeLanguage || 'typescript';
const highlightedCode = Prism.highlight(
code,
Prism.languages[highlightLanguage] || Prism.languages.typescript,
highlightLanguage,
);
result.push({ type: 'text', content: text.slice(lastIndex, match.index) });
result.push({ type: 'code', content: highlightedCode, language: codeLanguage, complete: blockEnd.startsWith('```'), code });
lastIndex = match.index + match[0].length;
}
if (lastIndex < text.length) {
result.push({ type: 'text', content: text.slice(lastIndex) });
}
return result;
};
/// Renderers for the different types of message blocks
function RenderCode(props: { codeBlock: CodeBlock, sx?: SxProps }) {
const [showSVG, setShowSVG] = React.useState(true);
const hasSVG = props.codeBlock.code.startsWith('<svg') && props.codeBlock.code.endsWith('</svg>');
const renderSVG = hasSVG && showSVG;
const languagesCodepen = ['html', 'css', 'javascript', 'json', 'typescript'];
const hasCodepenLanguage = hasSVG || (props.codeBlock.language && languagesCodepen.includes(props.codeBlock.language));
const languagesReplit = ['python', 'java', 'csharp'];
const hasReplitLanguage = props.codeBlock.language && languagesReplit.includes(props.codeBlock.language);
const handleCopyToClipboard = (e: React.MouseEvent) => {
e.stopPropagation();
copyToClipboard(props.codeBlock.code);
};
return (
<Box
component='code'
sx={{
position: 'relative', mx: 0, p: 1.5, // this block gets a thicker border
display: 'block', fontWeight: 500,
whiteSpace: 'break-spaces',
'&:hover > .code-buttons': { opacity: 1 },
...(props.sx || {}),
}}>
{/* Buttons */}
<Box
className='code-buttons'
sx={{
backdropFilter: 'blur(6px) grayscale(0.8)',
position: 'absolute', top: 0, right: 0, zIndex: 10, pt: 0.5, pr: 0.5,
display: 'flex', flexDirection: 'row', gap: 1,
opacity: 0, transition: 'opacity 0.3s',
}}>
{hasSVG && (
<Tooltip title={renderSVG ? 'Show Code' : 'Render SVG'} variant='solid'>
<IconButton variant={renderSVG ? 'solid' : 'soft'} color='neutral' onClick={() => setShowSVG(!showSVG)}>
<ShapeLineOutlinedIcon />
</IconButton>
</Tooltip>
)}
{hasCodepenLanguage &&
<OpenInCodepen codeBlock={{ code: props.codeBlock.code, language: props.codeBlock.language || undefined }} />
}
{hasReplitLanguage &&
<OpenInReplit codeBlock={{ code: props.codeBlock.code, language: props.codeBlock.language || undefined }} />
}
<Tooltip title='Copy Code' variant='solid'>
<IconButton variant='outlined' color='neutral' onClick={handleCopyToClipboard}>
<ContentCopyIcon />
</IconButton>
</Tooltip>
</Box>
{/* Highlighted Code / SVG render */}
<Box
dangerouslySetInnerHTML={{ __html: renderSVG ? props.codeBlock.code : props.codeBlock.content }}
sx={renderSVG ? { lineHeight: 0 } : {}}
/>
</Box>
);
}
const RenderMarkdown = ({ textBlock }: { textBlock: TextBlock }) => {
const theme = useTheme();
return <Box
className={`markdown-body ${theme.palette.mode === 'dark' ? 'markdown-body-dark' : 'markdown-body-light'}`}
sx={{
mx: '12px !important', // margin: 1.5 like other blocks
'& table': { width: 'inherit !important' }, // un-break auto-width (tables have 'max-content', which overflows)
'--color-canvas-default': 'transparent !important', // remove the default background color
fontFamily: `inherit !important`, // use the default font family
lineHeight: '1.75 !important', // line-height: 1.75 like the text block
}}>
<ReactMarkdown remarkPlugins={[remarkGfm]}>{textBlock.content}</ReactMarkdown>
</Box>;
};
const RenderText = ({ textBlock, sx }: { textBlock: TextBlock; sx?: SxProps; }) => {
const elements = extractCommands(textBlock.content);
return (
<Typography
sx={{
lineHeight: 1.75,
mx: 1.5,
display: 'flex', alignItems: 'baseline',
overflowWrap: 'anywhere',
whiteSpace: 'break-spaces',
...(sx || {}),
}}
>
{elements.map((element, index) =>
element.type === 'cmd'
? <Chip key={index} component='span' size='md' variant='solid' color='neutral' sx={{ mr: 1 }}>{element.value}</Chip>
: <span key={index}>{element.value}</span>,
)}
</Typography>
);
};
const RenderImage = (props: { imageBlock: ImageBlock, allowRunAgain: boolean, onRunAgain: (e: React.MouseEvent) => void }) =>
<Box
sx={theme => ({
display: 'flex', justifyContent: 'center', alignItems: 'center', position: 'relative',
mx: 1.5,
// p: 1, border: '1px solid', borderColor: 'divider', borderRadius: 1,
minWidth: 32, minHeight: 32, boxShadow: theme.vars.shadow.md,
background: theme.palette.neutral.solidBg,
'& picture': { display: 'flex' },
'& img': { maxWidth: '100%', maxHeight: '100%' },
'&:hover > .image-buttons': { opacity: 1 },
})}>
{/* External Image */}
<picture><img src={props.imageBlock.url} alt='Generated Image' /></picture>
{/* Image Buttons */}
<Box
className='image-buttons'
sx={{
position: 'absolute', top: 0, right: 0, zIndex: 10, pt: 0.5, px: 0.5,
display: 'flex', flexDirection: 'row', gap: 0.5,
opacity: 0, transition: 'opacity 0.3s',
}}>
{props.allowRunAgain && (
<Tooltip title='Draw again' variant='solid'>
<IconButton variant='solid' color='neutral' onClick={props.onRunAgain}>
<ReplayIcon />
</IconButton>
</Tooltip>
)}
<IconButton component={Link} href={props.imageBlock.url} target='_blank' variant='solid' color='neutral'>
<ZoomOutMapIcon />
</IconButton>
</Box>
</Box>;
function copyToClipboard(text: string) {
if (typeof navigator !== 'undefined')
navigator.clipboard.writeText(text)
.then(() => console.log('Message copied to clipboard'))
.catch((err) => console.error('Failed to copy message: ', err));
}
/**
 * Inspects a (presumed error) assistant message and maps well-known OpenAI
 * failure payloads to a friendlier JSX explanation for the user.
 *
 * @param text the raw message text to inspect
 * @param isAssistant true when the message was produced by the assistant role
 * @param modelId optional model name, used to personalize the explanation
 * @returns errorMessage (JSX, or null when the error is not recognized or the
 *          message is not an error at all) and the isAssistantError flag
 */
function explainErrorInMessage(text: string, isAssistant: boolean, modelId?: string) {
  let errorMessage: JSX.Element | null = null;
  // error reports are expected to carry one of these prefixes - TODO confirm against the producer
  const isAssistantError = isAssistant && (text.startsWith('[Issue] ') || text.startsWith('[OpenAI Issue]'));
  if (isAssistantError) {
    // NOTE(review): this branch looks unreachable - isAssistantError already
    // requires an '[Issue]'/'[OpenAI Issue]' prefix, so text cannot ALSO start
    // with 'OpenAI API error:'; confirm whether includes() was intended
    if (text.startsWith('OpenAI API error: 429 Too Many Requests')) {
      // TODO: retry at the api/chat level a few times instead of showing this error
      errorMessage = <>
        The model appears to be occupied at the moment. Kindly select <b>GPT-3.5 Turbo</b>,
        or give it another go by selecting <b>Run again</b> from the message menu.
      </>;
    } else if (text.includes('"model_not_found"')) {
      // note that "model_not_found" is different than "The model `gpt-xyz` does not exist" message
      errorMessage = <>
        The API key appears to be unauthorized for {modelId || 'this model'}. You can change to <b>GPT-3.5
        Turbo</b> and simultaneously <Link noLinkStyle href='https://openai.com/waitlist/gpt-4-api' target='_blank'>request
        access</Link> to the desired model.
      </>;
    } else if (text.includes('"context_length_exceeded"')) {
      // TODO: propose to summarize or split the input?
      // extract the "requested > maximum" token counts from the server's error text
      const pattern: RegExp = /maximum context length is (\d+) tokens.+you requested (\d+) tokens/;
      const match = pattern.exec(text);
      const usedText = match ? <b>{parseInt(match[2] || '0').toLocaleString()} tokens &gt; {parseInt(match[1] || '0').toLocaleString()}</b> : '';
      errorMessage = <>
        This thread <b>surpasses the maximum size</b> allowed for {modelId || 'this model'}. {usedText}.
        Please consider removing some earlier messages from the conversation, start a new conversation,
        choose a model with larger context, or submit a shorter new message.
      </>;
    } else if (text.includes('"invalid_api_key"')) {
      errorMessage = <>
        The API key appears to not be correct or to have expired.
        Please <Link noLinkStyle href='https://openai.com/account/api-keys' target='_blank'>check your API key</Link> and
        update it in the <b>Settings</b> menu.
      </>;
    } else if (text.includes('"insufficient_quota"')) {
      errorMessage = <>
        The API key appears to have <b>insufficient quota</b>. Please
        check <Link noLinkStyle href='https://platform.openai.com/account/usage' target='_blank'>your usage</Link> and
        make sure the usage is under <Link noLinkStyle href='https://platform.openai.com/account/billing/limits' target='_blank'>the limits</Link>.
      </>;
    }
  }
  return { errorMessage, isAssistantError };
}
/**
 * Resolves the background color for a chat message row from the theme,
 * based on the message role and its error/edited state.
 *
 * @param theme the active Joy UI theme
 * @param messageRole 'system' | 'user' | 'assistant'
 * @param wasEdited true when the user manually edited the message
 * @param unknownAssistantIssue true for unrecognized assistant error messages
 * @returns a CSS color string
 *
 * NOTE(review): this block contained merge residue - every statement appeared
 * twice, once reading `theme.palette.*` and once `theme.vars.palette.*`. The
 * duplicate `const` was a redeclaration error and the second `return` of each
 * pair was unreachable dead code; the first (`theme.palette.*`) variants are
 * kept. Confirm against the branch being merged whether the CSS-vars
 * (`theme.vars`) flavor was the intended survivor.
 */
export function messageBackground(theme: Theme, messageRole: DMessage['role'], wasEdited: boolean, unknownAssistantIssue: boolean): string {
  const defaultBackground = theme.palette.background.surface;
  switch (messageRole) {
    case 'system':
      // a user-edited system message gets a warning tint (auto-update disabled)
      return wasEdited ? theme.palette.warning.softHoverBg : defaultBackground;
    case 'user':
      return theme.palette.primary.plainHoverBg; // was .background.level1
    case 'assistant':
      // unrecognized assistant errors get a danger tint
      return unknownAssistantIssue ? theme.palette.danger.softBg : defaultBackground;
  }
  return defaultBackground;
}
export function makeAvatar(messageAvatar: string | null, messageRole: DMessage['role'], messageOriginLLM: string | undefined, messagePurposeId: SystemPurposeId | undefined, messageSender: string, messageTyping: boolean, size: 'sm' | undefined = undefined): React.JSX.Element {
export function makeAvatar(messageAvatar: string | null, messageRole: DMessage['role'], messageOriginLLM: string | undefined, messagePurposeId: SystemPurposeId | undefined, messageSender: string, messageTyping: boolean, size: 'sm' | undefined = undefined): JSX.Element {
if (typeof messageAvatar === 'string' && messageAvatar)
return <Avatar alt={messageSender} src={messageAvatar} />;
const iconSx = { width: 40, height: 40 };
@@ -70,7 +385,7 @@ export function makeAvatar(messageAvatar: string | null, messageRole: DMessage['
: messageOriginLLM?.startsWith('react-')
? 'https://i.giphy.com/media/l44QzsOLXxcrigdgI/giphy.webp'
: 'https://i.giphy.com/media/jJxaUysjzO9ri/giphy.webp'}
sx={{ ...mascotSx, borderRadius: 'var(--joy-radius-sm)' }}
sx={{ ...mascotSx, borderRadius: 8 }}
/>;
}
// display the purpose symbol
@@ -96,49 +411,6 @@ export function makeAvatar(messageAvatar: string | null, messageRole: DMessage['
return <Avatar alt={messageSender} />;
}
/**
 * Inspects a (presumed error) assistant message and maps well-known OpenAI
 * failure payloads to a friendlier JSX explanation for the user.
 *
 * @param text the raw message text to inspect
 * @param isAssistant true when the message was produced by the assistant role
 * @param modelId optional model name, used to personalize the explanation
 * @returns errorMessage (JSX, or null when the error is not recognized or the
 *          message is not an error at all) and the isAssistantError flag
 */
function explainErrorInMessage(text: string, isAssistant: boolean, modelId?: string) {
  let errorMessage: React.JSX.Element | null = null;
  // error reports are expected to carry one of these prefixes - TODO confirm against the producer
  const isAssistantError = isAssistant && (text.startsWith('[Issue] ') || text.startsWith('[OpenAI Issue]'));
  if (isAssistantError) {
    // NOTE(review): this branch looks unreachable - isAssistantError already
    // requires an '[Issue]'/'[OpenAI Issue]' prefix, so text cannot ALSO start
    // with 'OpenAI API error:'; confirm whether includes() was intended
    if (text.startsWith('OpenAI API error: 429 Too Many Requests')) {
      // TODO: retry at the api/chat level a few times instead of showing this error
      errorMessage = <>
        The model appears to be occupied at the moment. Kindly select <b>GPT-3.5 Turbo</b>,
        or give it another go by selecting <b>Run again</b> from the message menu.
      </>;
    } else if (text.includes('"model_not_found"')) {
      // note that "model_not_found" is different than "The model `gpt-xyz` does not exist" message
      errorMessage = <>
        The API key appears to be unauthorized for {modelId || 'this model'}. You can change to <b>GPT-3.5
        Turbo</b> and simultaneously <Link noLinkStyle href='https://openai.com/waitlist/gpt-4-api' target='_blank'>request
        access</Link> to the desired model.
      </>;
    } else if (text.includes('"context_length_exceeded"')) {
      // TODO: propose to summarize or split the input?
      // extract the "requested > maximum" token counts from the server's error text
      const pattern = /maximum context length is (\d+) tokens.+you requested (\d+) tokens/;
      const match = pattern.exec(text);
      const usedText = match ? <b>{parseInt(match[2] || '0').toLocaleString()} tokens &gt; {parseInt(match[1] || '0').toLocaleString()}</b> : '';
      errorMessage = <>
        This thread <b>surpasses the maximum size</b> allowed for {modelId || 'this model'}. {usedText}.
        Please consider removing some earlier messages from the conversation, start a new conversation,
        choose a model with larger context, or submit a shorter new message.
      </>;
    } else if (text.includes('"invalid_api_key"')) {
      errorMessage = <>
        The API key appears to not be correct or to have expired.
        Please <Link noLinkStyle href='https://openai.com/account/api-keys' target='_blank'>check your API key</Link> and
        update it in the <b>Settings</b> menu.
      </>;
    } else if (text.includes('"insufficient_quota"')) {
      errorMessage = <>
        The API key appears to have <b>insufficient quota</b>. Please
        check <Link noLinkStyle href='https://platform.openai.com/account/usage' target='_blank'>your usage</Link> and
        make sure the usage is under <Link noLinkStyle href='https://platform.openai.com/account/billing/limits' target='_blank'>the limits</Link>.
      </>;
    }
  }
  return { errorMessage, isAssistantError };
}
/**
* The Message component is a customizable chat message UI component that supports
@@ -174,16 +446,11 @@ export function ChatMessage(props: { message: DMessage, isBottom: boolean, onMes
// external state
const theme = useTheme();
const { showAvatars, renderMarkdown: _renderMarkdown, doubleClickToEdit } = useUIPreferencesStore(state => ({
showAvatars: state.zenMode !== 'cleaner',
renderMarkdown: state.renderMarkdown,
doubleClickToEdit: state.doubleClickToEdit,
}), shallow);
const renderMarkdown = _renderMarkdown && !fromSystem;
const isImaginable = canUseProdia();
const showAvatars = useSettingsStore(state => state.zenMode) !== 'cleaner';
const renderMarkdown = useSettingsStore(state => state.renderMarkdown) && !fromSystem;
const isImaginable = !!useSettingsStore(state => state.prodiaModelId) || !requireUserKeyProdia;
const isImaginableEnabled = messageText?.length > 5 && !messageText.startsWith('https://images.prodia.xyz/') && !(messageText.startsWith('/imagine') || messageText.startsWith('/img'));
const isSpeakable = canUseElevenLabs();
const isSpeakableEnabled = isImaginableEnabled;
const isSpeakable = !!useSettingsStore(state => state.elevenLabsVoiceId) || !requireUserKeyElevenLabs;
const closeOperationsMenu = () => setMenuAnchor(null);
@@ -194,7 +461,6 @@ export function ChatMessage(props: { message: DMessage, isBottom: boolean, onMes
};
const handleMenuEdit = (e: React.MouseEvent) => {
if (messageTyping && !isEditing) return; // don't allow editing while typing
setIsEditing(!isEditing);
e.preventDefault();
closeOperationsMenu();
@@ -236,27 +502,24 @@ export function ChatMessage(props: { message: DMessage, isBottom: boolean, onMes
const { isAssistantError, errorMessage } = explainErrorInMessage(messageText, fromAssistant, messageOriginLLM);
// style
const background = messageBackground(theme, messageRole, wasEdited, isAssistantError && !errorMessage);
let background = messageBackground(theme, messageRole, wasEdited, isAssistantError && !errorMessage);
// avatar
const avatarEl: React.JSX.Element | null = React.useMemo(
const avatarEl: JSX.Element | null = React.useMemo(
() => showAvatars ? makeAvatar(messageAvatar, messageRole, messageOriginLLM, messagePurposeId, messageSender, messageTyping) : null,
[messageAvatar, messageOriginLLM, messagePurposeId, messageRole, messageSender, messageTyping, showAvatars],
);
// per-blocks css
const blockSx: SxProps = {
// text box css
const cssBlocks = {
my: 'auto',
};
const codeSx: SxProps = {
// backgroundColor: fromAssistant ? 'background.level1' : 'background.level1',
backgroundColor: fromAssistant ? 'neutral.plainHoverBg' : 'primary.plainActiveBg',
boxShadow: 'xs',
const cssCode = {
background: theme.vars.palette.background.level1,
fontFamily: theme.fontFamily.code,
fontSize: '14px',
fontVariantLigatures: 'none',
lineHeight: 1.75,
borderRadius: 'var(--joy-radius-sm)',
};
// user message truncation
@@ -274,10 +537,10 @@ export function ChatMessage(props: { message: DMessage, isBottom: boolean, onMes
return (
<ListItem sx={{
display: 'flex', flexDirection: !fromAssistant ? 'row-reverse' : 'row', alignItems: 'flex-start',
gap: { xs: 0, md: 1 }, px: { xs: 1, md: 2 }, py: 2,
gap: 1, px: { xs: 1, md: 2 }, py: 2,
background,
borderBottom: '1px solid',
borderBottomColor: 'divider',
borderBottom: `1px solid ${theme.vars.palette.divider}`,
// borderBottomColor: `rgba(${theme.vars.palette.neutral.mainChannel} / 0.2)`,
position: 'relative',
...(props.isBottom && { mb: 'auto' }),
'&:hover > button': { opacity: 1 },
@@ -285,7 +548,7 @@ export function ChatMessage(props: { message: DMessage, isBottom: boolean, onMes
{/* Avatar */}
{showAvatars && <Stack
sx={{ alignItems: 'center', minWidth: { xs: 50, md: 64 }, maxWidth: 80, textAlign: 'center' }}
sx={{ alignItems: 'center', minWidth: { xs: 50, md: 64 }, textAlign: 'center' }}
onMouseEnter={() => setIsHovering(true)} onMouseLeave={() => setIsHovering(false)}
onClick={event => setMenuAnchor(event.currentTarget)}>
@@ -297,14 +560,12 @@ export function ChatMessage(props: { message: DMessage, isBottom: boolean, onMes
avatarEl
)}
{/* Assistant model name */}
{fromAssistant && (
<Tooltip title={messageOriginLLM || 'unk-model'} variant='solid'>
<Typography level='body-sm' sx={{
fontSize: { xs: 'xs', sm: 'sm' }, fontWeight: 500,
overflowWrap: 'anywhere',
...(messageTyping ? { animation: `${cssRainbowColorKeyframes} 5s linear infinite` } : {}),
}}>
<Typography level='body2' sx={messageTyping
? { animation: `${cssRainbowColorKeyframes} 5s linear infinite`, fontWeight: 500 }
: { fontWeight: 500 }
}>
{prettyBaseModel(messageOriginLLM)}
</Typography>
</Tooltip>
@@ -316,52 +577,35 @@ export function ChatMessage(props: { message: DMessage, isBottom: boolean, onMes
{/* Edit / Blocks */}
{!isEditing ? (
<Box
onDoubleClick={(e) => doubleClickToEdit ? handleMenuEdit(e) : null}
sx={{
...blockSx,
flexGrow: 0,
overflowX: 'auto',
}}>
<Box sx={{ ...cssBlocks, flexGrow: 0 }} onDoubleClick={handleMenuEdit}>
{/* Warn about user-edited system message */}
{fromSystem && wasEdited && (
<Typography level='body-sm' color='warning' sx={{ mt: 1, mx: 1.5 }}>modified by user - auto-update disabled</Typography>
<Typography level='body2' color='warning' sx={{ mt: 1, mx: 1.5 }}>modified by user - auto-update disabled</Typography>
)}
{!errorMessage && parseBlocks(fromSystem, collapsedText).map((block, index) =>
block.type === 'html'
? <RenderHtml key={'html-' + index} htmlBlock={block} sx={codeSx} />
: block.type === 'code'
? <RenderCode key={'code-' + index} codeBlock={block} sx={codeSx} />
: block.type === 'image'
? <RenderImage key={'image-' + index} imageBlock={block} allowRunAgain={props.isBottom} onRunAgain={handleMenuRunAgain} />
: renderMarkdown
? <RenderMarkdown key={'text-md-' + index} textBlock={block} />
: <RenderText key={'text-' + index} textBlock={block} />,
block.type === 'code'
? <RenderCode key={'code-' + index} codeBlock={block} sx={cssCode} />
: block.type === 'image'
? <RenderImage key={'image-' + index} imageBlock={block} allowRunAgain={props.isBottom} onRunAgain={handleMenuRunAgain} />
: renderMarkdown
? <RenderMarkdown key={'text-md-' + index} textBlock={block} />
: <RenderText key={'text-' + index} textBlock={block} />,
)}
{errorMessage && (
<Tooltip title={<Typography sx={{ maxWidth: 800 }}>{collapsedText}</Typography>} variant='soft'>
<InlineError error={errorMessage} />
<Alert variant='soft' color='warning' sx={{ mt: 1 }}><Typography>{errorMessage}</Typography></Alert>
</Tooltip>
)}
{isCollapsed && (
<Button variant='plain' onClick={handleExpand}>... expand ...</Button>
)}
{/* import VisibilityIcon from '@mui/icons-material/Visibility'; */}
{/*<br />*/}
{/*<Chip variant='outlined' color='warning' sx={{ mt: 1, fontSize: '0.75em' }} startDecorator={<VisibilityIcon />}>*/}
{/* BlockAction*/}
{/*</Chip>*/}
{isCollapsed && <Button variant='plain' onClick={handleExpand}>... expand ...</Button>}
</Box>
) : (
<InlineTextarea initialText={messageText} onEdit={handleTextEdited} sx={{ ...blockSx, lineHeight: 1.75, flexGrow: 1 }} />
<InlineTextarea initialText={messageText} onEdit={handleTextEdited} sx={{ ...cssBlocks, flexGrow: 1 }} />
)}
@@ -383,44 +627,48 @@ export function ChatMessage(props: { message: DMessage, isBottom: boolean, onMes
{/* Message Operations menu */}
{!!menuAnchor && (
<CloseableMenu
placement='bottom-end' sx={{ minWidth: 280 }}
open anchorEl={menuAnchor} onClose={closeOperationsMenu}
>
<Box sx={{ display: 'flex', alignItems: 'center' }}>
<MenuItem variant='plain' onClick={handleMenuEdit} sx={{ flex: 1 }}>
<ListItemDecorator><EditIcon /></ListItemDecorator>
{isEditing ? 'Discard' : 'Edit'}
{/*{!isEditing && <span style={{ opacity: 0.5, marginLeft: '8px' }}>{doubleClickToEdit ? '(double-click)' : ''}</span>}*/}
</MenuItem>
<MenuItem onClick={handleMenuCopy} sx={{ flex: 1 }}>
<ListItemDecorator><ContentCopyIcon /></ListItemDecorator>
Copy
</MenuItem>
</Box>
<ListDivider />
<MenuItem onClick={handleMenuRunAgain}>
<ListItemDecorator>{fromAssistant ? <ReplayIcon /> : <FastForwardIcon />}</ListItemDecorator>
{fromAssistant ? 'Retry' : 'Run from here'}
<Menu
variant='plain' color='neutral' size='lg' placement='bottom-end' sx={{ minWidth: 280 }}
open anchorEl={menuAnchor} onClose={closeOperationsMenu}>
<MenuItem onClick={handleMenuCopy}>
<ListItemDecorator><ContentCopyIcon /></ListItemDecorator>
Copy
</MenuItem>
<MenuItem onClick={handleMenuEdit}>
<ListItemDecorator><EditIcon /></ListItemDecorator>
{isEditing ? 'Discard' : 'Edit'}
{!isEditing && <span style={{ opacity: 0.5, marginLeft: '8px' }}> (double-click)</span>}
</MenuItem>
{isImaginable && isImaginableEnabled && (
<MenuItem onClick={handleMenuImagine} disabled={!isImaginableEnabled || isImagining}>
<ListItemDecorator>{isImagining ? <CircularProgress size='sm' /> : <FormatPaintIcon color='success' />}</ListItemDecorator>
<ListItemDecorator>{isImagining ? <CircularProgress size='sm' /> : <FormatPaintIcon />}</ListItemDecorator>
Imagine
</MenuItem>
)}
{isSpeakable && isSpeakableEnabled && (
{isSpeakable && (
<MenuItem onClick={handleMenuSpeak} disabled={isSpeaking}>
<ListItemDecorator>{isSpeaking ? <CircularProgress size='sm' /> : <RecordVoiceOverIcon color='success' />}</ListItemDecorator>
<ListItemDecorator>{isSpeaking ? <CircularProgress size='sm' /> : <RecordVoiceOverIcon />}</ListItemDecorator>
Speak
</MenuItem>
)}
<ListDivider />
{fromAssistant && (
<MenuItem onClick={handleMenuRunAgain}>
<ListItemDecorator><ReplayIcon /></ListItemDecorator>
Retry
</MenuItem>
)}
{fromUser && (
<MenuItem onClick={handleMenuRunAgain}>
<ListItemDecorator><FastForwardIcon /></ListItemDecorator>
Run Again
</MenuItem>
)}
<MenuItem onClick={props.onMessageDelete} disabled={false /*fromSystem*/}>
<ListItemDecorator><ClearIcon /></ListItemDecorator>
Delete
</MenuItem>
</CloseableMenu>
</Menu>
)}
</ListItem>
@@ -4,7 +4,7 @@ import { Box, Button, Checkbox, IconButton, ListItem, Sheet, Typography, useThem
import ClearIcon from '@mui/icons-material/Clear';
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
import { DMessage } from '~/common/state/store-chats';
import { DMessage } from '@/common/state/store-chats';
import { TokenBadge } from '../composer/TokenBadge';
import { makeAvatar, messageBackground } from './ChatMessage';
@@ -13,16 +13,14 @@ import { makeAvatar, messageBackground } from './ChatMessage';
/**
* Header bar for controlling the operations during the Selection mode
*/
export const MessagesSelectionHeader = (props: { hasSelected: boolean, isBottom: boolean, sumTokens: number, onClose: () => void, onSelectAll: (selected: boolean) => void, onDeleteMessages: () => void }) =>
<Sheet color='warning' variant='solid' invertedColors sx={{
export const MessagesSelectionHeader = (props: { hasSelected: boolean, isBottom: boolean, onClose: () => void, onSelectAll: (selected: boolean) => void, onDeleteMessages: () => void }) =>
<Sheet color='neutral' variant='solid' invertedColors sx={{
display: 'flex', flexDirection: 'row', alignItems: 'center',
position: 'fixed', top: 0, left: 0, right: 0, zIndex: 101,
boxShadow: 'md',
gap: { xs: 1, sm: 2 }, px: { xs: 1, md: 2 }, py: 1,
}}>
<Checkbox size='md' onChange={event => props.onSelectAll(event.target.checked)} sx={{ minWidth: 24, justifyContent: 'center' }} />
<Box>Select all ({props.sumTokens})</Box>
<Box>Select All</Box>
<Button variant='solid' disabled={!props.hasSelected} onClick={props.onDeleteMessages} sx={{ ml: 'auto', mr: 'auto', minWidth: 150 }} endDecorator={<DeleteOutlineIcon />}>
Delete
@@ -39,7 +37,7 @@ export const MessagesSelectionHeader = (props: { hasSelected: boolean, isBottom:
*
* Shall look similarly to the main ChatMessage, for consistency, but just allow a simple checkbox selection
*/
export function CleanerMessage(props: { message: DMessage, isBottom: boolean, selected: boolean, remainingTokens?: number, onToggleSelected?: (messageId: string, selected: boolean) => void }) {
export function ChatMessageSelectable(props: { message: DMessage, isBottom: boolean, selected: boolean, onToggleSelected: (messageId: string, selected: boolean) => void }) {
// external state
const theme = useTheme();
@@ -62,42 +60,41 @@ export function CleanerMessage(props: { message: DMessage, isBottom: boolean, se
const background = messageBackground(theme, messageRole, !!messageUpdated, isAssistantError);
const avatarEl: React.JSX.Element | null = React.useMemo(() =>
const avatarEl: JSX.Element | null = React.useMemo(() =>
makeAvatar(messageAvatar, messageRole, messageOriginLLM, messagePurposeId, messageSender, messageTyping, 'sm'),
[messageAvatar, messageOriginLLM, messagePurposeId, messageRole, messageSender, messageTyping],
);
const handleCheckedChange = (event: React.ChangeEvent<HTMLInputElement>) =>
props.onToggleSelected && props.onToggleSelected(messageId, event.target.checked);
const handleCheckedChange = (event: React.ChangeEvent<HTMLInputElement>) => props.onToggleSelected(messageId, event.target.checked);
return (
<ListItem sx={{
display: 'flex', flexDirection: !fromAssistant ? 'row' : 'row', alignItems: 'center',
gap: { xs: 1, sm: 2 }, px: { xs: 1, md: 2 }, py: 2,
background,
borderBottom: `1px solid ${theme.palette.divider}`,
borderBottom: `1px solid ${theme.vars.palette.divider}`,
// position: 'relative',
...(props.isBottom && { mb: 'auto' }),
'&:hover > button': { opacity: 1 },
}}>
{!!props.onToggleSelected && <Box sx={{ display: 'flex', minWidth: 24, justifyContent: 'center' }}>
<Box sx={{ display: 'flex', minWidth: 24, justifyContent: 'center' }}>
<Checkbox size='md' checked={props.selected} onChange={handleCheckedChange} />
</Box>}
</Box>
<Box sx={{ display: 'flex', minWidth: { xs: 40, sm: 48 }, justifyContent: 'center' }}>
{avatarEl}
</Box>
<Typography level='body-sm' sx={{ minWidth: 64 }}>
<Typography level='body2' sx={{ minWidth: 64 }}>
{messageRole}
</Typography>
{props.remainingTokens !== undefined && <Box sx={{ display: 'flex', minWidth: { xs: 32, sm: 45 }, justifyContent: 'flex-end' }}>
<TokenBadge directTokens={messageTokenCount} tokenLimit={props.remainingTokens} inline />
</Box>}
<Box sx={{ display: 'flex', minWidth: { xs: 32, sm: 45 }, justifyContent: 'flex-end' }}>
<TokenBadge directTokens={messageTokenCount} tokenLimit={12345} inline />
</Box>
<Typography sx={{ flexGrow: 1, textOverflow: 'ellipsis', overflow: 'hidden', whiteSpace: 'nowrap' }}>
<Typography level='body1' sx={{ flexGrow: 1, textOverflow: 'ellipsis', overflow: 'hidden', whiteSpace: 'nowrap' }}>
{messageText}
</Typography>
@@ -1,5 +1,3 @@
import * as React from 'react';
import { Button, Tooltip } from '@mui/joy';
interface CodeBlockProps {
@@ -9,7 +7,7 @@ interface CodeBlockProps {
};
}
export function OpenInCodepen({ codeBlock }: CodeBlockProps): React.JSX.Element {
export function OpenInCodepen({ codeBlock }: CodeBlockProps): JSX.Element {
const { code, language } = codeBlock;
const hasCSS = language === 'css';
const hasJS = ['javascript', 'json', 'typescript'].includes(language || '');
@@ -1,5 +1,3 @@
import * as React from 'react';
import { Button, Tooltip } from '@mui/joy';
interface CodeBlockProps {
@@ -9,7 +7,7 @@ interface CodeBlockProps {
};
}
export function OpenInReplit({ codeBlock }: CodeBlockProps): React.JSX.Element {
export function OpenInReplit({ codeBlock }: CodeBlockProps): JSX.Element {
const { language } = codeBlock;
const replitLanguageMap: { [key: string]: string } = {
@@ -1,122 +0,0 @@
import * as React from 'react';
import { useQuery } from '@tanstack/react-query';
import { Box, IconButton, Tooltip } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import ContentCopyIcon from '@mui/icons-material/ContentCopy';
import SchemaIcon from '@mui/icons-material/Schema';
import ShapeLineOutlinedIcon from '@mui/icons-material/ShapeLineOutlined';
import { copyToClipboard } from '~/common/util/copyToClipboard';
import { CodeBlock } from './blocks';
import { OpenInCodepen } from './OpenInCodepen';
import { OpenInReplit } from './OpenInReplit';
/**
 * Renders a code block: pre-highlighted HTML by default, with optional inline
 * previews for SVG and PlantUML payloads, plus hover buttons for
 * copy-to-clipboard and "open in Codepen / Replit".
 *
 * @param props.codeBlock the parsed code block (raw `code`, highlighted
 *        `content` HTML, and detected `language`)
 * @param props.sx optional style overrides merged into the container
 */
export function RenderCode(props: { codeBlock: CodeBlock, sx?: SxProps }) {
  // user toggles: render (vs show as code) the SVG / PlantUML payloads
  const [showSVG, setShowSVG] = React.useState(true);
  const [showPlantUML, setShowPlantUML] = React.useState(true);

  // an SVG payload is a code block that IS a single <svg>...</svg> document
  const hasSVG = props.codeBlock.code.startsWith('<svg') && props.codeBlock.code.endsWith('</svg>');
  const renderSVG = hasSVG && showSVG;

  // a PlantUML payload is recognized by its @startuml/@enduml envelope
  const hasPlantUML = props.codeBlock.code.startsWith('@startuml') && props.codeBlock.code.endsWith('@enduml');
  let renderPlantUML = hasPlantUML && showPlantUML;
  // fetch the rendered diagram from the public plantuml.com server; the result
  // is cached by react-query for a day, keyed on the exact source text
  const { data: plantUmlSvgData } = useQuery({
    enabled: renderPlantUML,
    queryKey: ['plantuml', props.codeBlock.code],
    queryFn: async () => {
      try {
        // Dynamically import the PlantUML encoder - it's a large library that slows down app loading
        const { encode: plantUmlEncode } = await import('plantuml-encoder');

        // retrieve and manually adapt the SVG, to remove the background
        const encodedPlantUML: string = plantUmlEncode(props.codeBlock.code);
        const response = await fetch(`https://www.plantuml.com/plantuml/svg/${encodedPlantUML}`);
        const svg = await response.text();
        const start = svg.indexOf('<svg ');
        const end = svg.indexOf('</svg>');
        if (start < 0 || end <= start)
          return null;
        // strip the white background so the diagram blends with the block
        return svg.slice(start, end + 6).replace('background:#FFFFFF;', '');
      } catch (e) {
        // ignore errors, and disable the component in that case
        return null;
      }
    },
    staleTime: 24 * 60 * 60 * 1000, // 1 day
  });
  // only switch to diagram view once the SVG has actually been fetched
  renderPlantUML = renderPlantUML && !!plantUmlSvgData;

  // languages the external playgrounds can host
  const languagesCodepen = ['html', 'css', 'javascript', 'json', 'typescript'];
  const hasCodepenLanguage = hasSVG || (props.codeBlock.language && languagesCodepen.includes(props.codeBlock.language));
  const languagesReplit = ['python', 'java', 'csharp'];
  const hasReplitLanguage = props.codeBlock.language && languagesReplit.includes(props.codeBlock.language);

  const handleCopyToClipboard = (e: React.MouseEvent) => {
    // don't let the click bubble up to the enclosing message handlers
    e.stopPropagation();
    copyToClipboard(props.codeBlock.code);
  };

  return (
    <Box
      component='code'
      className={`language-${props.codeBlock.language}`}
      sx={{
        position: 'relative', mx: 0, p: 1.5, // this block gets a thicker border
        display: 'block', fontWeight: 500,
        whiteSpace: 'pre', // was 'break-spaces' before we implmented per-block scrolling
        overflowX: 'auto',
        '&:hover > .code-buttons': { opacity: 1 }, // reveal the buttons on hover
        ...(props.sx || {}),
      }}>

      {/* Buttons */}
      <Box
        className='code-buttons'
        sx={{
          backdropFilter: 'blur(6px) grayscale(0.8)',
          position: 'absolute', top: 0, right: 0, zIndex: 10, pt: 0.5, pr: 0.5,
          display: 'flex', flexDirection: 'row', gap: 1,
          opacity: 0, transition: 'opacity 0.3s',
        }}>
        {hasSVG && (
          <Tooltip title={renderSVG ? 'Show Code' : 'Render SVG'} variant='solid'>
            <IconButton variant={renderSVG ? 'solid' : 'soft'} color='neutral' onClick={() => setShowSVG(!showSVG)}>
              <ShapeLineOutlinedIcon />
            </IconButton>
          </Tooltip>
        )}
        {hasPlantUML && (
          <Tooltip title={renderPlantUML ? 'Show Code' : 'Render PlantUML'} variant='solid'>
            <IconButton variant={renderPlantUML ? 'solid' : 'soft'} color='neutral' onClick={() => setShowPlantUML(!showPlantUML)}>
              <SchemaIcon />
            </IconButton>
          </Tooltip>
        )}
        {hasCodepenLanguage &&
          <OpenInCodepen codeBlock={{ code: props.codeBlock.code, language: props.codeBlock.language || undefined }} />
        }
        {hasReplitLanguage &&
          <OpenInReplit codeBlock={{ code: props.codeBlock.code, language: props.codeBlock.language || undefined }} />
        }
        <Tooltip title='Copy Code' variant='solid'>
          <IconButton variant='outlined' color='neutral' onClick={handleCopyToClipboard}>
            <ContentCopyIcon />
          </IconButton>
        </Tooltip>
      </Box>

      {/* Highlighted Code / SVG render */}
      {/* NOTE(review): injected via dangerouslySetInnerHTML; `content` is the
          Prism-highlighted HTML built in parseBlocks, while the SVG/PlantUML
          paths inject the (model-generated or remote) markup directly */}
      <Box
        dangerouslySetInnerHTML={{ __html: (renderPlantUML && plantUmlSvgData) ? plantUmlSvgData : renderSVG ? props.codeBlock.code : props.codeBlock.content }}
        sx={{
          ...(renderSVG ? { lineHeight: 0 } : {}),
          ...(renderPlantUML ? { textAlign: 'center' } : {}),
        }}
      />
    </Box>
  );
}
@@ -1,95 +0,0 @@
import * as React from 'react';
import { Box, Button, IconButton, Tooltip, Typography } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import WebIcon from '@mui/icons-material/Web';
import { HtmlBlock } from './blocks';
/**
 * Hosts an HTML string inside an iframe by writing it directly into the
 * frame's document; the document is rewritten whenever the string changes.
 * No `sandbox` attribute is set here - the caller gates rendering behind an
 * explicit user confirmation.
 */
const IFrameComponent = (props: { htmlString: string }) => {
  const iframeRef = React.useRef<HTMLIFrameElement>(null);
  React.useEffect(() => {
    if (iframeRef.current) {
      const iframeDoc = iframeRef.current.contentWindow?.document;
      if (iframeDoc) {
        // replace the frame contents wholesale with the provided HTML
        iframeDoc.open();
        iframeDoc.write(props.htmlString);
        iframeDoc.close();
      }
    }
  }, [props.htmlString]);
  return (
    <iframe
      ref={iframeRef}
      style={{
        flexGrow: 1,
        width: '100%',
        height: '50svh', // half of the small-viewport height
        border: 'none',
        boxSizing: 'border-box',
      }}
      title='HTML content'
    />
  );
};
/**
 * Renders an HTML block: shows a safety warning first, and only embeds the
 * page in an iframe after the user explicitly opts in.
 *
 * @param props.htmlBlock the parsed HTML block to (conditionally) display
 * @param props.sx optional container styles; font* properties are stripped so
 *        the code-block typography doesn't leak into this container
 */
export function RenderHtml(props: { htmlBlock: HtmlBlock, sx?: SxProps }) {
  const [showHTML, setShowHTML] = React.useState(false);

  // remove the font* properties from sx
  // work on a shallow copy: the original code deleted keys from props.sx
  // itself, mutating an object owned (and possibly reused) by the caller
  const sx: any = { ...(props.sx || {}) };
  for (const key in sx)
    if (key.startsWith('font'))
      delete sx[key];

  return (
    <Box
      sx={{
        position: 'relative', mx: 0, p: 1.5, // this block gets a thicker border
        minWidth: { xs: '300px', md: '750px', lg: '900px', xl: '1100px' },
        '&:hover > .code-buttons': { opacity: 1 }, // reveal the buttons on hover
        ...sx,
      }}>

      {/* Buttons */}
      <Box
        className='code-buttons'
        sx={{
          position: 'absolute', top: 0, right: 0, zIndex: 10, mr: 7,
          display: 'flex', flexDirection: 'row', gap: 1,
          opacity: 0, transition: 'opacity 0.3s',
        }}>
        <Tooltip title={showHTML ? 'Hide' : 'Show Web Page'} variant='solid'>
          <IconButton variant={showHTML ? 'solid' : 'soft'} color='danger' onClick={() => setShowHTML(!showHTML)}>
            <WebIcon />
          </IconButton>
        </Tooltip>
      </Box>

      {/* Highlighted Code / SVG render */}
      {showHTML
        ? <IFrameComponent htmlString={props.htmlBlock.html} />
        : <Box>
          <Typography>
            <b>CAUTION</b> - The content you are about to access is an HTML page. It is possible that an
            unauthorized entity is monitoring this connection and has generated this content.
            Please exercise caution and do not trust the contents blindly. Be aware that proceeding
            may pose potential risks. Click the button to view the content, if you wish to proceed.
          </Typography>
          <Box sx={{ display: 'flex', gap: 1, justifyContent: 'flex-end', mt: 2 }}>
            <Button variant='plain' color='neutral' onClick={() => setShowHTML(false)}>
              Ignore
            </Button>
            <Button variant='solid' color='danger' onClick={() => setShowHTML(true)}>
              Show Web Page
            </Button>
          </Box>
        </Box>
      }
    </Box>
  );
}
@@ -1,51 +0,0 @@
import * as React from 'react';
import { Box, IconButton, Tooltip } from '@mui/joy';
import ReplayIcon from '@mui/icons-material/Replay';
import ZoomOutMapIcon from '@mui/icons-material/ZoomOutMap';
import { Link } from '~/common/components/Link';
import { ImageBlock } from './blocks';
/**
 * Renders the image(s) of an image block. The block's `url` may carry several
 * newline-separated URLs; each becomes its own framed image with hover
 * buttons for "draw again" (bottom message only) and open-in-new-tab.
 */
export const RenderImage = (props: { imageBlock: ImageBlock, allowRunAgain: boolean, onRunAgain: (e: React.MouseEvent) => void }) => {
  // one image per line of the url field
  const imageUrls = props.imageBlock.url.split('\n');
  return imageUrls.map((url, index) => (
    <Box
      key={'gen-img-' + index}
      sx={theme => ({
        display: 'flex', flexDirection: 'column', justifyContent: 'center', alignItems: 'center', position: 'relative',
        mx: 1.5, mt: index > 0 ? 1.5 : 0, // space between stacked images
        // p: 1, border: '1px solid', borderColor: 'divider', borderRadius: 1,
        minWidth: 32, minHeight: 32, boxShadow: theme.shadow.md,
        background: theme.palette.neutral.solidBg,
        '& picture': { display: 'flex' },
        '& img': { maxWidth: '100%', maxHeight: '100%' },
        '&:hover > .image-buttons': { opacity: 1 }, // reveal the buttons on hover
      })}>

      {/* External Image */}
      <picture><img src={url} alt='Generated Image' /></picture>

      {/* Image Buttons */}
      <Box
        className='image-buttons'
        sx={{
          position: 'absolute', top: 0, right: 0, zIndex: 10, pt: 0.5, px: 0.5,
          display: 'flex', flexDirection: 'row', gap: 0.5,
          opacity: 0, transition: 'opacity 0.3s',
        }}>
        {props.allowRunAgain && (
          <Tooltip title='Draw again' variant='solid'>
            <IconButton variant='solid' color='neutral' onClick={props.onRunAgain}>
              <ReplayIcon />
            </IconButton>
          </Tooltip>
        )}
        <IconButton component={Link} href={url} target='_blank' variant='solid' color='neutral'>
          <ZoomOutMapIcon />
        </IconButton>
      </Box>
    </Box>
  ));
};
@@ -1,43 +0,0 @@
import * as React from 'react';
import { Box, useTheme } from '@mui/joy';
import { TextBlock } from './blocks';
// Dynamically import ReactMarkdown using React.lazy
// (keeps react-markdown and remark-gfm out of the main bundle; both chunks
// are fetched in parallel, and remark-gfm is pre-wired as a remark plugin)
const ReactMarkdown = React.lazy(async () => {

  const [markdownModule, remarkGfmModule] = await Promise.all([
    import('react-markdown'),
    import('remark-gfm')
  ]);

  // Pass the dynamically imported remarkGfm as children
  const ReactMarkdownWithRemarkGfm = (props: any) => (
    <markdownModule.default remarkPlugins={[remarkGfmModule.default]} {...props} />
  );

  // React.lazy expects a module-like { default: Component } shape
  return { default: ReactMarkdownWithRemarkGfm };
});
/**
 * Renders a text block as GitHub-flavored Markdown. The renderer itself is
 * lazy-loaded (see ReactMarkdown above in this file), so a Suspense fallback
 * is shown while the chunk downloads.
 */
export const RenderMarkdown = ({ textBlock }: { textBlock: TextBlock }) => {
  const theme = useTheme();
  return (
    <Box
      className={`markdown-body ${theme.palette.mode === 'dark' ? 'markdown-body-dark' : 'markdown-body-light'}`}
      sx={{
        mx: '12px !important', // margin: 1.5 like other blocks
        '& table': { width: 'inherit !important' }, // un-break auto-width (tables have 'max-content', which overflows)
        '--color-canvas-default': 'transparent !important', // remove the default background color
        fontFamily: `inherit !important`, // use the default font family
        lineHeight: '1.75 !important', // line-height: 1.75 like the text block
      }}>
      {/* Using React.Suspense / React.Lazy loading this */}
      <React.Suspense fallback={<div>Loading...</div>}>
        <ReactMarkdown>{textBlock.content}</ReactMarkdown>
      </React.Suspense>
    </Box>
  );
};
@@ -1,31 +0,0 @@
import * as React from 'react';
import { Chip, Typography } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import { extractCommands } from '../../commands';
import { TextBlock } from './blocks';
export const RenderText = ({ textBlock, sx }: { textBlock: TextBlock; sx?: SxProps; }) => {
const elements = extractCommands(textBlock.content);
return (
<Typography
sx={{
lineHeight: 1.75,
mx: 1.5,
display: 'flex', alignItems: 'baseline',
overflowWrap: 'anywhere',
whiteSpace: 'break-spaces',
...(sx || {}),
}}
>
{elements.map((element, index) =>
element.type === 'cmd'
? <Chip key={index} component='span' size='md' variant='solid' color='neutral' sx={{ mr: 1 }}>{element.value}</Chip>
: <span key={index}>{element.value}</span>,
)}
</Typography>
);
};
-129
View File
@@ -1,129 +0,0 @@
import Prism from 'prismjs';
// per-language plugins
import 'prismjs/components/prism-bash';
import 'prismjs/components/prism-css';
import 'prismjs/components/prism-java';
import 'prismjs/components/prism-javascript';
import 'prismjs/components/prism-json';
import 'prismjs/components/prism-markdown';
import 'prismjs/components/prism-python';
import 'prismjs/components/prism-typescript';
// Union of all renderable block kinds produced by parseBlocks()
export type Block = TextBlock | CodeBlock | ImageBlock | HtmlBlock;
// plain (non-code) text run
export type TextBlock = { type: 'text'; content: string; };
// content: Prism-highlighted HTML; code: the raw source; complete: whether the closing fence was seen
export type CodeBlock = { type: 'code'; content: string; language: string | null; complete: boolean; code: string; };
// a bare image URL (whole-message special case)
export type ImageBlock = { type: 'image'; url: string; };
// a raw HTML document (whole-message special case)
export type HtmlBlock = { type: 'html'; html: string; };
/**
 * Splits a chat message into renderable blocks: plain text, fenced code blocks
 * (Prism-highlighted), a bare generated-image URL, or a raw HTML document.
 *
 * TODO: expensive function, especially as it's not been used in incremental fashion
 *
 * @param forceText when true, skip all detection and return the whole input as one text block
 * @param text the raw message text
 * @returns ordered blocks covering the whole input
 */
export const parseBlocks = (forceText: boolean, text: string): Block[] => {
  if (forceText)
    return [{ type: 'text', content: text }];
  // whole-message special cases: a generated image link, or a full HTML document
  if (text.startsWith('https://images.prodia.xyz/') && text.endsWith('.png') && text.length > 60)
    return [{ type: 'image', url: text.trim() }];
  if (text.startsWith('<!DOCTYPE html') || text.startsWith('<head>\n'))
    return [{ type: 'html', html: text }];
  // fenced code: ``` + optional language tag, the body, then ``` (or end-of-input while streaming)
  // FIX: '-' moved to the end of the character class; placed mid-class ('+-_') it formed an
  // accidental range U+002B..U+005F that matched punctuation never valid in a language tag
  const codeBlockRegex = /`{3,}([\w\\.+_-]+)?\n([\s\S]*?)(`{3,}|$)/g;
  const result: Block[] = [];
  let lastIndex = 0;
  let match;
  while ((match = codeBlockRegex.exec(text)) !== null) {
    const markdownLanguage = (match[1] || '').trim();
    const code = match[2].trim();
    const blockEnd: string = match[3]; // empty string when the fence is still open (streaming)
    // Load the specified language if it's not loaded yet
    // NOTE: this is commented out because it inflates the size of the bundle by 200k
    // if (!Prism.languages[language]) {
    //   try {
    //     require(`prismjs/components/prism-${language}`);
    //   } catch (e) {
    //     console.warn(`Prism language '${language}' not found, falling back to 'typescript'`);
    //   }
    // }
    const codeLanguage = inferCodeLanguage(markdownLanguage, code);
    const highlightLanguage = codeLanguage || 'typescript'; // fall back to a grammar we always bundle
    const highlightedCode = Prism.highlight(
      code,
      Prism.languages[highlightLanguage] || Prism.languages.typescript,
      highlightLanguage,
    );
    // text preceding the fence, then the code block itself
    result.push({ type: 'text', content: text.slice(lastIndex, match.index) });
    result.push({ type: 'code', content: highlightedCode, language: codeLanguage, complete: blockEnd.startsWith('```'), code });
    lastIndex = match.index + match[0].length;
  }
  // trailing text after the last fence
  if (lastIndex < text.length) {
    result.push({ type: 'text', content: text.slice(lastIndex) });
  }
  return result;
};
/**
 * Best-effort mapping from a markdown fence tag (or the code itself) to a Prism
 * grammar name. Returns null when nothing could be inferred.
 */
function inferCodeLanguage(markdownLanguage: string, code: string): string | null {
  let detected: string | undefined;

  // 1) the fence carried a hint
  if (markdownLanguage) {
    // no dot: treat the hint as the grammar name itself
    if (!markdownLanguage.includes('.'))
      return markdownLanguage;

    // dot: the hint looks like a file name - map its extension to a grammar
    const extension = markdownLanguage.split('.').pop();
    if (extension) {
      const extensionToLanguage: { [key: string]: string } = {
        cs: 'csharp', html: 'html', java: 'java', js: 'javascript', json: 'json', jsx: 'javascript',
        md: 'markdown', py: 'python', sh: 'bash', ts: 'typescript', tsx: 'typescript', xml: 'xml',
      };
      detected = extensionToLanguage[extension];
      if (detected)
        return detected;
    }
  }

  // 2) heuristics on the code's leading characters
  const prefixRules: { starts: string[]; language: string }[] = [
    { starts: ['<!DOCTYPE html', '<html'], language: 'html' },
    { starts: ['<'], language: 'xml' },
    { starts: ['from '], language: 'python' },
    { starts: ['import ', 'export '], language: 'typescript' }, // or python
    { starts: ['interface ', 'function '], language: 'typescript' }, // ambiguous
    { starts: ['package '], language: 'java' },
    { starts: ['using '], language: 'csharp' },
  ];
  for (const rule of prefixRules)
    if (rule.starts.some(prefix => code.startsWith(prefix)))
      return rule.language;

  // 3) last resort: tokenize with each bundled grammar and keep the best scorer
  const candidates = ['bash', 'css', 'java', 'javascript', 'json', 'markdown', 'python', 'typescript']; // matches Prism component imports
  let bestTokenCount = 0;
  for (const language of candidates) {
    const grammar = Prism.languages[language];
    // count only real tokens; plain-string leftovers don't indicate a match
    const tokenCount = Prism.tokenize(code, grammar).filter(token => typeof token !== 'string').length;
    if (tokenCount > bestTokenCount) {
      bestTokenCount = tokenCount;
      detected = language;
    }
  }
  return detected || null;
}
@@ -1,173 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, Button, Grid, IconButton, Stack, Typography } from '@mui/joy';
import ScienceIcon from '@mui/icons-material/Science';
import TelegramIcon from '@mui/icons-material/Telegram';
import { Link } from '~/common/components/Link';
import { useChatStore } from '~/common/state/store-chats';
import { useUIPreferencesStore } from '~/common/state/store-ui';
import { SystemPurposeId, SystemPurposes } from '../../../../data';
// Tile sizing / grid width constants - breakpoint widths must be pre-computed here to
// work around the "flex box cannot shrink over wrapped content" issue
// (ugly workaround, but the only one found that works)
const bpTileSize = { xs: 116, md: 125, xl: 130 };
const tileCols = [3, 4, 6];
const tileSpacing = 1;
// max grid width per breakpoint: cols * (tile + gap) - trailing gap, gap unit being 8px
const bpMaxWidth = Object.fromEntries(
  Object.entries(bpTileSize).map(([breakpoint, size], column) =>
    [breakpoint, tileCols[column] * (size + 8 * tileSpacing) - 8 * tileSpacing]),
) as Record<string, number>;
const bpTileGap = { xs: 2, md: 3 };
// utility: uniformly-random element of an array, or undefined when empty
function getRandomElement<T>(array: T[]): T | undefined {
  if (!array.length)
    return undefined;
  return array[Math.floor(Math.random() * array.length)];
}
/**
 * Purpose selector for the current chat. Clicking on any item activates it for the current chat.
 *
 * Renders a breakpoint-sized grid of persona tiles (one per entry in SystemPurposes),
 * an extra 'YouTube persona creator' tile when experimental labs are enabled, and below
 * the grid either a random usage example for the selected persona or its description.
 *
 * @param props.conversationId the conversation whose systemPurposeId is shown/changed
 * @param props.runExample invoked with the example text when the user clicks the send icon
 */
export function PersonaSelector(props: { conversationId: string, runExample: (example: string) => void }) {
  // state
  // const [editMode, setEditMode] = React.useState(false);
  const editMode = false; // edit mode is scaffolded but currently disabled (see commented-out UI below)
  // external state
  const { experimentalLabs } = useUIPreferencesStore(state => ({
    experimentalLabs: state.experimentalLabs,
  }), shallow);
  // subscribe only to this conversation's purpose id (and the setter) to limit re-renders
  const { systemPurposeId, setSystemPurposeId } = useChatStore(state => {
    const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
    return {
      systemPurposeId: conversation ? conversation.systemPurposeId : null,
      setSystemPurposeId: state.setSystemPurposeId,
    };
  }, shallow);
  // persist the clicked persona on the conversation
  const handlePurposeChanged = (purposeId: SystemPurposeId | null) => {
    if (purposeId)
      setSystemPurposeId(props.conversationId, purposeId);
  };
  // we show them all if the filter is clear (null)
  const purposeIDs = Object.keys(SystemPurposes);
  const selectedPurpose = (purposeIDs.length && systemPurposeId) ? (SystemPurposes[systemPurposeId] ?? null) : null;
  // note: a fresh random example is picked on every render (if the persona provides any)
  const selectedExample = selectedPurpose?.examples && getRandomElement(selectedPurpose.examples) || null;
  return <>
    <Stack direction='column' sx={{ minHeight: '60vh', justifyContent: 'center', alignItems: 'center' }}>
      <Box sx={{ maxWidth: bpMaxWidth }}>
        {/* header row: title (and the disabled edit toggle) */}
        <Box sx={{ display: 'flex', flexDirection: 'row', alignItems: 'baseline', justifyContent: 'space-between', gap: 2, mb: 1 }}>
          <Typography level='title-sm'>
            AI Persona
          </Typography>
          {/*<Button variant='plain' color='neutral' size='sm' onClick={toggleEditMode}>*/}
          {/*  {editMode ? 'Done' : 'Edit'}*/}
          {/*</Button>*/}
        </Box>
        {/* the persona tile grid; the active tile renders 'solid', the rest 'soft' with a shadow */}
        <Grid container spacing={tileSpacing} sx={{ justifyContent: 'flex-start' }}>
          {purposeIDs.map((spId) => (
            <Grid key={spId}>
              <Button
                variant={(!editMode && systemPurposeId === spId) ? 'solid' : 'soft'}
                color={(!editMode && systemPurposeId === spId) ? 'primary' : SystemPurposes[spId as SystemPurposeId]?.highlighted ? 'warning' : 'neutral'}
                onClick={() => !editMode && handlePurposeChanged(spId as SystemPurposeId)}
                sx={{
                  flexDirection: 'column',
                  fontWeight: 500,
                  gap: bpTileGap,
                  height: bpTileSize,
                  width: bpTileSize,
                  ...((editMode || systemPurposeId !== spId) ? {
                    boxShadow: 'md',
                    ...(SystemPurposes[spId as SystemPurposeId]?.highlighted ? {} : { backgroundColor: 'background.surface' }),
                  } : {}),
                }}
              >
                {/*{editMode && (*/}
                {/*  <Checkbox*/}
                {/*    label={<Typography level='body-sm'>show</Typography>}*/}
                {/*    checked={!hiddenPurposeIDs.includes(spId)} onChange={() => toggleHiddenPurposeId(spId)}*/}
                {/*    sx={{ alignSelf: 'flex-start' }}*/}
                {/*  />*/}
                {/*)}*/}
                <div style={{ fontSize: '2rem' }}>
                  {SystemPurposes[spId as SystemPurposeId]?.symbol}
                </div>
                <div>
                  {SystemPurposes[spId as SystemPurposeId]?.title}
                </div>
              </Button>
            </Grid>
          ))}
          {/* Button to start the YouTube persona creator */}
          {experimentalLabs && <Grid>
            <Button
              variant='soft' color='neutral'
              component={Link} noLinkStyle href='/personas'
              sx={{
                '--Icon-fontSize': '2rem',
                flexDirection: 'column',
                fontWeight: 500,
                // gap: bpTileGap,
                height: bpTileSize,
                width: bpTileSize,
                border: `1px dashed`,
                boxShadow: 'md',
                backgroundColor: 'background.surface',
              }}
            >
              <div>
                <ScienceIcon />
              </div>
              <div>
                YouTube persona creator
              </div>
            </Button>
          </Grid>}
        </Grid>
        {/* footer: example (with a hover-revealed run button) or description of the selection */}
        <Typography
          level='body-sm'
          sx={{
            mt: selectedExample ? 1 : 3,
            display: 'flex', alignItems: 'center', gap: 1,
            // justifyContent: 'center',
            '&:hover > button': { opacity: 1 },
          }}>
          {!selectedPurpose
            ? 'Oops! No AI persona found for your search.'
            : (selectedExample
                ? <>
                  Example: {selectedExample}
                  <IconButton
                    variant='plain' color='primary' size='md'
                    onClick={() => props.runExample(selectedExample)}
                    sx={{ opacity: 0, transition: 'opacity 0.3s' }}
                  >
                    <TelegramIcon />
                  </IconButton>
                </>
                : selectedPurpose.description
            )}
        </Typography>
      </Box>
    </Stack>
  </>;
}
-79
View File
@@ -1,79 +0,0 @@
import { DLLMId } from '~/modules/llms/llm.types';
import { SystemPurposeId } from '../../../data';
import { autoTitle } from '~/modules/aifn/autotitle/autoTitle';
import { speakText } from '~/modules/elevenlabs/elevenlabs.client';
import { streamChat } from '~/modules/llms/llm.client';
import { useElevenlabsStore } from '~/modules/elevenlabs/store-elevenlabs';
import { DMessage, useChatStore } from '~/common/state/store-chats';
import { createAssistantTypingMessage, updatePurposeInHistory } from './editors';
/**
 * The main "chat" function: injects the purpose's system message into the history,
 * then streams the assistant reply into a freshly-created 'typing' message.
 * TODO: this is here so we can soon move it to the data model.
 */
export async function runAssistantUpdatingState(conversationId: string, history: DMessage[], assistantLlmId: DLLMId, systemPurpose: SystemPurposeId, _autoTitle: boolean, _autoSuggestions: boolean) {
  // make sure the history starts with the active Purpose's system message (unless manually edited)
  history = updatePurposeInHistory(conversationId, history, systemPurpose);

  // placeholder assistant message, shown in 'typing' state while streaming
  const assistantMessageId = createAssistantTypingMessage(conversationId, assistantLlmId, history[0].purposeId, '...');

  // while an abort controller is registered, the UI switches to the "stop" mode
  const abortController = new AbortController();
  const { startTyping, editMessage } = useChatStore.getState();
  startTyping(conversationId, abortController);

  // stream the reply, patching the placeholder message on every update
  await streamAssistantMessage(assistantLlmId, history, abortController.signal, (updatedMessage) =>
    editMessage(conversationId, assistantMessageId, updatedMessage, false));

  // de-register the abort controller: clear to send, again
  startTyping(conversationId, null);

  // optionally refresh the conversation title
  if (_autoTitle)
    await autoTitle(conversationId);
}
/**
 * Streams tokens from the LLM into the provided editMessage callback; optionally
 * speaks the first paragraph via TTS when so configured. Always clears the
 * 'typing' flag at the end (on success, abort, or error).
 */
async function streamAssistantMessage(
  llmId: DLLMId, history: DMessage[],
  abortSignal: AbortSignal,
  editMessage: (updatedMessage: Partial<DMessage>) => void,
) {
  // 📢 TTS: whether to speak the first line, per the user's ElevenLabs settings
  const speakFirstLine = useElevenlabsStore.getState().elevenLabsAutoSpeak === 'firstLine';
  let firstLineSpoken = false;

  try {
    const messages = history.map(({ role, text }) => ({ role, content: text }));
    await streamChat(llmId, messages, abortSignal, (updatedMessage: Partial<DMessage>) => {
      // update the message in the store (and thus schedule a re-render)
      editMessage(updatedMessage);

      // 📢 TTS: once enough text has streamed in, speak up to the last break point
      if (speakFirstLine && !firstLineSpoken && updatedMessage?.text) {
        const text = updatedMessage.text;
        const newlineAt = text.lastIndexOf('\n');
        // prefer the last newline; fall back to the last sentence boundary
        const cutPoint = newlineAt >= 0 ? newlineAt : text.lastIndexOf('. ');
        if (cutPoint > 100 && cutPoint < 400) {
          firstLineSpoken = true;
          const firstParagraph = text.substring(0, cutPoint);
          speakText(firstParagraph).then(() => false /* fire and forget, we don't want to stall this loop */);
        }
      }
    });
  } catch (error: any) {
    // aborts are user-initiated and expected; anything else gets logged
    if (error?.name !== 'AbortError') {
      console.error('Fetch request error:', error);
      // TODO: show an error to the UI?
    }
  }

  // finally, stop the typing animation
  editMessage({ typing: false });
}
-27
View File
@@ -1,27 +0,0 @@
import { DLLMId } from '~/modules/llms/llm.types';
import { SystemPurposeId, SystemPurposes } from '../../../data';
import { createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
/**
 * Appends a new assistant message in the 'typing' state to the conversation,
 * tagged with its purpose and originating model label, and returns its id.
 */
export function createAssistantTypingMessage(conversationId: string, assistantLlmLabel: DLLMId | 'prodia' | 'react-...' | string, assistantPurposeId: SystemPurposeId | undefined, text: string): string {
  const message: DMessage = Object.assign(createDMessage('assistant', text), {
    typing: true,
    purposeId: assistantPurposeId,
    originLLM: assistantLlmLabel,
  });
  useChatStore.getState().appendMessage(conversationId, message);
  return message.id;
}
/**
 * Ensures the history starts with the Purpose's system message. An existing system
 * message is kept if the user edited it ('updated' set); otherwise its text is
 * refreshed from the purpose template. Also persists the messages to the store.
 * NOTE: mutates and returns the same `history` array.
 */
export function updatePurposeInHistory(conversationId: string, history: DMessage[], purposeId: SystemPurposeId): DMessage[] {
  // pull the existing system message out of the history, or start from a blank one
  const index = history.findIndex(message => message.role === 'system');
  const systemMessage: DMessage = index < 0 ? createDMessage('system', '') : history.splice(index, 1)[0];
  // refresh the text from the purpose template, unless manually edited
  if (!systemMessage.updated && purposeId && SystemPurposes[purposeId]?.systemMessage) {
    systemMessage.purposeId = purposeId;
    systemMessage.text = SystemPurposes[purposeId].systemMessage.replaceAll('{{Today}}', new Date().toISOString().split('T')[0]);
  }
  history.unshift(systemMessage);
  useChatStore.getState().setMessages(conversationId, history);
  return history;
}
-64
View File
@@ -1,64 +0,0 @@
import { apiAsync } from '~/modules/trpc/trpc.client';
import { prodiaDefaultModelId } from '~/modules/prodia/prodia.models';
import { useProdiaStore } from '~/modules/prodia/store-prodia';
import { useChatStore } from '~/common/state/store-chats';
import { createAssistantTypingMessage } from './editors';
/**
 * The main 'image generation' function - for now specialized to the 'imagine' command.
 *
 * Creates a 'typing' assistant message, calls the Prodia API `count` times in
 * parallel, then replaces the message text with the newline-joined image URLs
 * (or an apologetic error message on failure).
 *
 * @param conversationId the conversation to append the generated image(s) to
 * @param imageText the prompt; may end in " xN" or " [N]" to request N images
 */
export async function runImageGenerationUpdatingState(conversationId: string, imageText: string) {
  // if the imageText ends with " xN" or " [N]" (where N is a number), then we'll generate N images
  const match = imageText.match(/\sx(\d+)$|\s\[(\d+)]$/);
  const count = match ? parseInt(match[1] || match[2], 10) : 1;
  if (count > 1)
    imageText = imageText.replace(/x(\d+)$|\[(\d+)]$/, '').trim(); // Remove the "xN" or "[N]" part from the imageText
  // create a blank and 'typing' message for the assistant
  // (long prompts are referred to as 'that' instead of being echoed back)
  const assistantMessageId = createAssistantTypingMessage(conversationId, 'prodia', undefined,
    `Give me a few seconds while I draw ${imageText?.length > 20 ? 'that' : '"' + imageText + '"'}...`);
  // reference the state editing functions
  const { editMessage } = useChatStore.getState();
  try {
    // snapshot the user's Prodia settings once, shared by all parallel requests
    const {
      prodiaApiKey: prodiaKey, prodiaModelId,
      prodiaNegativePrompt: negativePrompt, prodiaSteps: steps, prodiaCfgScale: cfgScale,
      prodiaAspectRatio: aspectRatio, prodiaUpscale: upscale,
      prodiaSeed: seed,
    } = useProdiaStore.getState();
    // Run the image generation count times in parallel
    // (optional settings are only sent when set; 'square' is the server default aspect ratio)
    const imageUrls = await Promise.all(
      Array(count).fill(undefined).map(async () => {
        const { imageUrl } = await apiAsync.prodia.imagine.query({
          ...(!!prodiaKey && { prodiaKey }),
          prodiaModel: prodiaModelId || prodiaDefaultModelId,
          prompt: imageText,
          ...(!!negativePrompt && { negativePrompt }),
          ...(!!steps && { steps }),
          ...(!!cfgScale && { cfgScale }),
          ...(!!aspectRatio && aspectRatio !== 'square' && { aspectRatio }),
          ...((upscale && { upscale })),
          ...(!!seed && { seed }),
        });
        return imageUrl;
      }),
    );
    // Concatenate all the resulting URLs and update the assistant message with these URLs
    const allImageUrls = imageUrls.join('\n');
    editMessage(conversationId, assistantMessageId, { text: allImageUrls, typing: false }, false);
  } catch (error: any) {
    // NOTE: Promise.all is all-or-nothing - one failed request discards all generated images
    const errorMessage = error?.message || error?.toString() || 'Unknown error';
    editMessage(conversationId, assistantMessageId, { text: `Sorry, I couldn't create an image for you. ${errorMessage}`, typing: false }, false);
  }
}
-147
View File
@@ -1,147 +0,0 @@
import * as React from 'react';
import { Box, Button, Typography } from '@mui/joy';
import ExitToAppIcon from '@mui/icons-material/ExitToApp';
import FileDownloadIcon from '@mui/icons-material/FileDownload';
import { ExportPublishedModal } from './ExportPublishedModal';
import { PublishedSchema } from '~/modules/sharing/sharing.router';
import { apiAsync } from '~/modules/trpc/trpc.client';
import { Brand } from '~/common/brand';
import { ConfirmationModal } from '~/common/components/ConfirmationModal';
import { Link } from '~/common/components/Link';
import { conversationToMarkdown } from './trade.markdown';
import { useChatStore } from '~/common/state/store-chats';
import { useUIPreferencesStore } from '~/common/state/store-ui';
import { downloadDAllJson, downloadDConversationJson } from './trade.json';
export type ExportConfig = { dir: 'export', conversationId: string | null };

/// Returns a pretty link to the current page, for promo: scheme-less and without a
/// trailing slash, falling back to the public repo URL when local or not in a browser
function linkToOrigin() {
  const href = (typeof window !== 'undefined') ? window.location.href : '';
  let pretty = (!href || href.includes('//localhost')) ? Brand.URIs.OpenRepo : href;
  pretty = pretty.replace('https://', '');
  return pretty.endsWith('/') ? pretty.slice(0, -1) : pretty;
}
// Looks up a conversation by id in the chat store; null when the id is falsy or not found
function findConversation(conversationId: string) {
  if (!conversationId)
    return null;
  const found = useChatStore.getState().conversations.find(conversation => conversation.id === conversationId);
  return found ?? null;
}
/**
 * Export Buttons and functionality
 * Supports Share to Paste.gg and Download in own format
 *
 * Three actions: publish the current conversation to paste.gg (with confirmation),
 * download it as JSON, or download all conversations as a single backup file.
 *
 * @param props.config carries the conversationId to export (may be null: per-conversation buttons disable)
 * @param props.onClose called after the publish-outcome dialog is dismissed
 */
export function ExportChats(props: { config: ExportConfig, onClose: () => void }) {
  // state
  const [publishConversationId, setPublishConversationId] = React.useState<string | null>(null); // non-null while awaiting publish confirmation
  const [publishResponse, setPublishResponse] = React.useState<PublishedSchema | null>(null); // set once paste.gg responds
  const [downloadedState, setDownloadedState] = React.useState<'ok' | 'fail' | null>(null);
  const [downloadedAllState, setDownloadedAllState] = React.useState<'ok' | 'fail' | null>(null);
  // publish
  const handlePublishConversation = () => setPublishConversationId(props.config.conversationId);
  // runs after the user confirms: converts to markdown and uploads to paste.gg
  const handlePublishConfirmed = async () => {
    if (!publishConversationId) return;
    const conversation = findConversation(publishConversationId);
    setPublishConversationId(null);
    if (!conversation) return;
    // hide system messages in the export when the user has them hidden in the UI
    const markdownContent = conversationToMarkdown(conversation, !useUIPreferencesStore.getState().showSystemMessages);
    try {
      const paste = await apiAsync.sharing.publishTo.mutate({
        to: 'paste.gg',
        title: '🤖💬 Chat Conversation',
        fileContent: markdownContent,
        fileName: 'my-chat.md',
        origin: linkToOrigin(),
      });
      setPublishResponse(paste);
    } catch (error: any) {
      alert(`Failed to publish conversation: ${error?.message ?? error?.toString() ?? 'unknown error'}`);
      setPublishResponse(null);
    }
  };
  const handlePublishResponseClosed = () => {
    setPublishResponse(null);
    props.onClose();
  };
  // download
  const handleDownloadConversation = () => {
    if (!props.config.conversationId) return;
    const conversation = findConversation(props.config.conversationId);
    if (!conversation) return;
    downloadDConversationJson(conversation)
      .then(() => setDownloadedState('ok'))
      .catch(() => setDownloadedState('fail'));
  };
  const handleDownloadAllConversations = () => {
    downloadDAllJson()
      .then(() => setDownloadedAllState('ok'))
      .catch(() => setDownloadedAllState('fail'));
  };
  const hasConversation = !!props.config.conversationId;
  return <>
    <Box sx={{ display: 'flex', flexDirection: 'column', gap: 2, alignItems: 'center', py: 1 }}>
      <Typography level='body-sm'>
        Share or download this conversation
      </Typography>
      <Button variant='soft' size='md' disabled={!hasConversation} endDecorator={<ExitToAppIcon />} sx={{ minWidth: 240, justifyContent: 'space-between' }}
              onClick={handlePublishConversation}>
        Share to Paste.gg
      </Button>
      {/* download buttons recolor and swap their icon to reflect the last attempt's outcome */}
      <Button variant='soft' size='md' disabled={!hasConversation} sx={{ minWidth: 240, justifyContent: 'space-between' }}
              color={downloadedState === 'ok' ? 'success' : downloadedState === 'fail' ? 'warning' : 'primary'}
              endDecorator={downloadedState === 'ok' ? '✔' : downloadedState === 'fail' ? '✘' : <FileDownloadIcon />}
              onClick={handleDownloadConversation}>
        Download JSON
      </Button>
      <Typography level='body-sm' sx={{ mt: 2 }}>
        Store or transfer between devices
      </Typography>
      <Button variant='soft' size='md' sx={{ minWidth: 240, justifyContent: 'space-between' }}
              color={downloadedAllState === 'ok' ? 'success' : downloadedAllState === 'fail' ? 'warning' : 'primary'}
              endDecorator={downloadedAllState === 'ok' ? '✔' : downloadedAllState === 'fail' ? '✘' : <FileDownloadIcon />}
              onClick={handleDownloadAllConversations}>
        Backup all
      </Button>
    </Box>
    {/* [publish] confirmation */}
    {publishConversationId && <ConfirmationModal
      open onClose={() => setPublishConversationId(null)} onPositive={handlePublishConfirmed}
      confirmationText={<>
        Share your conversation anonymously on <Link href='https://paste.gg' target='_blank'>paste.gg</Link>?
        It will be unlisted and available to share and read for 30 days. Keep in mind, deletion may not be possible.
        Are you sure you want to proceed?
      </>} positiveActionText={'Understood, upload to paste.gg'}
    />}
    {/* [publish] outcome */}
    {!!publishResponse && <ExportPublishedModal open onClose={handlePublishResponseClosed} response={publishResponse} />}
  </>;
}
-176
View File
@@ -1,176 +0,0 @@
import * as React from 'react';
import { fileOpen, FileWithHandle } from 'browser-fs-access';
import { Box, Button, FormControl, FormLabel, Input, Sheet, Typography } from '@mui/joy';
import FileUploadIcon from '@mui/icons-material/FileUpload';
import type { ChatGptSharedChatSchema } from '~/modules/sharing/import.chatgpt';
import { OpenAIIcon } from '~/modules/llms/openai/OpenAIIcon';
import { apiAsync } from '~/modules/trpc/trpc.client';
import { Brand } from '~/common/brand';
import { createDConversation, createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
import { ImportedOutcome, ImportOutcomeModal } from './ImportOutcomeModal';
import { restoreDConversationsFromJSON } from './trade.json';
export type ImportConfig = { dir: 'import' };

/**
 * Components and functionality to import conversations
 * Supports our own JSON files, and ChatGPT Share Links
 *
 * Two flows: pick local JSON backup file(s), or paste a chat.openai.com share URL
 * which is fetched server-side and converted to our conversation format. Either way
 * the results (including per-file errors) are summarized in an ImportOutcomeModal.
 *
 * @param props.onClose called after the outcome dialog is dismissed
 */
export function ImportConversations(props: { onClose: () => void }) {
  // state
  const [chatGptEdit, setChatGptEdit] = React.useState(false); // whether the share-link input panel is shown
  const [chatGptUrl, setChatGptUrl] = React.useState('');
  const [importOutcome, setImportOutcome] = React.useState<ImportedOutcome | null>(null);
  // derived state
  const chatGptUrlValid = chatGptUrl.startsWith('https://chat.openai.com/share/') && chatGptUrl.length > 40;
  const handleImportFromFiles = async () => {
    // pick file(s)
    let blobs: FileWithHandle[];
    try {
      blobs = await fileOpen({ description: `${Brand.Title.Base} JSON`, mimeTypes: ['application/json'], multiple: true, startIn: 'downloads' });
    } catch (error) {
      // user cancelled the picker: not an error
      return;
    }
    // begin
    const outcome: ImportedOutcome = { conversations: [] };
    // unroll files to conversations
    for (const blob of blobs) {
      const fileName = blob.name || 'unknown file';
      try {
        const fileString = await blob.text();
        const fileObject = JSON.parse(fileString);
        restoreDConversationsFromJSON(fileName, fileObject, outcome);
      } catch (error: any) {
        // unparseable file: record the failure, keep processing the rest
        outcome.conversations.push({ success: false, fileName, error: `Invalid file: ${error?.message || error?.toString() || 'unknown error'}` });
      }
    }
    // import conversations (warning - will overwrite things)
    // reversed so the first successful conversation ends up as the active one
    for (let conversation of [...outcome.conversations].reverse()) {
      if (conversation.success)
        useChatStore.getState().importConversation(conversation.conversation);
    }
    // show the outcome of the import
    setImportOutcome(outcome);
  };
  const handleChatGptToggleShown = () => setChatGptEdit(!chatGptEdit);
  const handleChatGptLoadFromURL = async () => {
    if (!chatGptUrlValid)
      return;
    const outcome: ImportedOutcome = { conversations: [] };
    // load the conversation (fetched server-side to avoid CORS)
    let conversationId: string, data: ChatGptSharedChatSchema;
    try {
      ({ conversationId, data } = await apiAsync.sharing.importChatGptShare.query({ url: chatGptUrl }));
    } catch (error) {
      outcome.conversations.push({ fileName: 'chatgpt', success: false, error: (error as any)?.message || error?.toString() || 'unknown error' });
      setImportOutcome(outcome);
      return;
    }
    // transform to our data structure
    const conversation = createDConversation();
    conversation.id = conversationId;
    conversation.created = Math.round(data.create_time * 1000);
    conversation.updated = Math.round(data.update_time * 1000);
    conversation.autoTitle = data.title;
    // keep only non-empty user/assistant messages; parts of a node are joined with newlines
    conversation.messages = data.linear_conversation.map(msgNode => {
      const message = msgNode.message;
      if (message && message.content.parts) {
        const role = message.author.role;
        const joinedText = message.content.parts.join('\n');
        if ((role === 'user' || role === 'assistant') && joinedText.length >= 1) {
          const dMessage = createDMessage(role, joinedText);
          dMessage.id = message.id;
          if (message.create_time)
            dMessage.created = Math.round(message.create_time * 1000);
          return dMessage;
        }
      }
      return null;
    }).filter(msg => !!msg) as DMessage[];
    // outcome
    const success = conversation.messages.length >= 1;
    if (success) {
      useChatStore.getState().importConversation(conversation);
      outcome.conversations.push({ success: true, fileName: 'chatgpt', conversation });
    } else
      outcome.conversations.push({ success: false, fileName: 'chatgpt', error: `Empty conversation` });
    setImportOutcome(outcome);
  };
  const handleImportOutcomeClosed = () => {
    setImportOutcome(null);
    props.onClose();
  };
  return <>
    <Box sx={{ display: 'flex', flexDirection: 'column', gap: 2, alignItems: 'center', py: 1 }}>
      <Typography level='body-sm'>
        Select where to import from
      </Typography>
      <Button variant='soft' size='md' endDecorator={<FileUploadIcon />} sx={{ minWidth: 240, justifyContent: 'space-between' }}
              onClick={handleImportFromFiles}>
        Upload JSON
      </Button>
      {!chatGptEdit && (
        <Button variant='soft' size='md' endDecorator={<OpenAIIcon />} sx={{ minWidth: 240, justifyContent: 'space-between' }}
                color={chatGptEdit ? 'neutral' : 'primary'}
                onClick={handleChatGptToggleShown}>
          ChatGPT shared link
        </Button>
      )}
    </Box>
    {/* [chatgpt] data & controls */}
    {chatGptEdit && <Sheet variant='soft' color='primary' sx={{ display: 'flex', flexDirection: 'column', borderRadius: 'md', p: 1, gap: 1 }}>
      <OpenAIIcon sx={{ mx: 'auto', my: 1 }} />
      <FormControl>
        <FormLabel>
          Shared Chat URL
        </FormLabel>
        <Input
          variant='outlined' placeholder='https://chat.openai.com/share/...'
          required error={!chatGptUrlValid}
          value={chatGptUrl} onChange={event => setChatGptUrl(event.target.value)}
        />
      </FormControl>
      <Box sx={{ display: 'flex', gap: 1 }}>
        <Button variant='soft' color='primary' onClick={handleChatGptToggleShown} sx={{ mr: 'auto' }}>
          Cancel
        </Button>
        <Button color='primary' disabled={!chatGptUrlValid} onClick={handleChatGptLoadFromURL} sx={{ minWidth: 150 }}>
          Import Chat
        </Button>
      </Box>
    </Sheet>}
    {/* import outcome */}
    {!!importOutcome && <ImportOutcomeModal outcome={importOutcome} onClose={handleImportOutcomeClosed} />}
  </>;
}
@@ -1,67 +0,0 @@
import * as React from 'react';
import { Alert, Box, Divider, List, ListItem, Typography } from '@mui/joy';
import { GoodModal } from '~/common/components/GoodModal';
import { DConversation } from '~/common/state/store-chats';
// Per-file (or per-URL) import result: either a restored conversation, or an error message
type ConversationOutcome = {
  success: true;
  fileName: string;
  conversation: DConversation;
} | {
  success: false;
  fileName: string;
  error: string;
}

// Aggregated results of one import operation (possibly spanning many files)
export interface ImportedOutcome {
  conversations: ConversationOutcome[];
}
/**
 * Displays the result of an import operation as a modal dialog.
 *
 * The title and body adapt to the mix of successes and failures in the outcome;
 * each failed file is listed with its error message.
 *
 * @param props.outcome per-file results of the import
 * @param props.onClose dismisses the dialog
 */
export function ImportOutcomeModal(props: { outcome: ImportedOutcome, onClose: () => void, }) {
  // partition the results
  const { conversations } = props.outcome;
  const successes = conversations.filter(c => c.success);
  const failures = conversations.filter(c => !c.success);
  const hasAnyResults = successes.length > 0 || failures.length > 0;
  const hasAnyFailures = failures.length > 0;
  return (
    <GoodModal open title={hasAnyResults ? hasAnyFailures ? 'Import issues' : 'Import successful' : 'Import failed'} strongerTitle onClose={props.onClose}>
      <Divider />
      {/* success summary */}
      {successes.length >= 1 && <>
        <Alert variant='soft' color='success'>
          <Typography>
            Imported {successes.length} conversation{successes.length === 1 ? '' : 's'}.
          </Typography>
        </Alert>
        <Typography>
          The conversation{successes.length === 1 ? '' : 's'} can be found in the menu,
          and {successes.length === 1 ? 'it' : 'the last one'} is now active.
        </Typography>
      </>}
      {/* per-file failure list */}
      {failures.length >= 1 && <Box>
        <Alert variant='soft' color='danger'>
          <Typography>
            Issues importing {failures.length} conversation{failures.length === 1 ? '' : 's'}:
          </Typography>
        </Alert>
        <List>
          {failures.map((f, idx) =>
            <ListItem variant='soft' color='warning' key={'fail-' + idx}><b>{f.fileName}</b>: {(f as any).error}</ListItem>,
          )}
        </List>
      </Box>}
    </GoodModal>
  );
}
-21
View File
@@ -1,21 +0,0 @@
import * as React from 'react';
import { Divider } from '@mui/joy';
import { GoodModal } from '~/common/components/GoodModal';
import { ImportConfig, ImportConversations } from './ImportChats';
import { ExportConfig, ExportChats } from './ExportChats';
export type TradeConfig = ImportConfig | ExportConfig;

/**
 * Modal dialog hosting either the Import or the Export panel, chosen by config.dir.
 */
export function TradeModal(props: { config: TradeConfig, onClose: () => void }) {
  const { config, onClose } = props;
  const verb = config.dir === 'import' ? 'Import ' : config.dir === 'export' ? 'Export ' : '';
  return (
    <GoodModal title={<><b>{verb}</b> conversations</>} open onClose={onClose}>
      <Divider />
      {config.dir === 'import' && <ImportConversations onClose={onClose} />}
      {config.dir === 'export' && <ExportChats config={config} onClose={onClose} />}
      <Divider />
    </GoodModal>
  );
}
-100
View File
@@ -1,100 +0,0 @@
import { fileSave } from 'browser-fs-access';
import { DModelSource } from '~/modules/llms/llm.types';
import { useModelsStore } from '~/modules/llms/store-llms';
import { DConversation, useChatStore } from '~/common/state/store-chats';
import { ImportedOutcome } from './ImportOutcomeModal';
/**
 * Download a single conversation as a pretty-printed JSON file, for backup and future restore
 * @throws {Error} if the user closes the dialog, or file could not be saved
 */
export async function downloadDConversationJson(conversation: DConversation) {
  // strip runtime-only fields, then serialize indented for human readability
  const exportable: ExportedConversationJsonV1 = cleanConversationForExport(conversation);
  const blob = new Blob([JSON.stringify(exportable, null, 2)], { type: 'application/json' });
  // prompt the user for the save location
  await fileSave(blob, { fileName: `conversation-${conversation.id}.json`, extensions: ['.json'] });
}
/**
 * Download all conversations (plus the configured model sources) as a single JSON
 * file, for backup and future restore
 * @throws {Error} if the user closes the dialog, or file could not be saved
 */
export async function downloadDAllJson() {
  // snapshot conversations and model sources
  const payload: ExportedAllJsonV1 = {
    conversations: useChatStore.getState().conversations.map(cleanConversationForExport),
    models: { sources: useModelsStore.getState().sources },
  };
  const blob = new Blob([JSON.stringify(payload)], { type: 'application/json' });
  // timestamped file name (':' is replaced, as it is invalid on some filesystems)
  const isoDate = new Date().toISOString().replace(/:/g, '-');
  await fileSave(blob, { fileName: `conversations-${isoDate}.json`, extensions: ['.json'] });
}
// Drops the runtime-only fields (abortController, ephemerals) before persisting a conversation
function cleanConversationForExport(conversation: DConversation): Partial<DConversation> {
  const { abortController: _ac, ephemerals: _eph, ...exportable } = conversation;
  return exportable;
}
/**
 * Restores a single conversation from its (partial) JSON form and appends the result
 * (the restored conversation, or an error entry) to `outcome.conversations`.
 * Runtime-only fields are re-initialized and missing optional fields get defaults.
 */
function restoreDConversationFromJson(fileName: string, part: Partial<DConversation>, outcome: ImportedOutcome) {
  // minimal validation: an id and a messages array are required
  if (!part || !part.id || !part.messages) {
    // FIX: optional chaining - 'part' may be null/undefined (e.g. a null array entry in a
    // hand-edited backup), and the previous `${part.id}` threw instead of recording the error
    outcome.conversations.push({ success: false, fileName, error: `Invalid conversation: ${part?.id}` });
    return;
  }
  const restored: DConversation = {
    id: part.id,
    messages: part.messages,
    systemPurposeId: part.systemPurposeId || undefined,
    // optional titles are only set when present, to keep the object shape lean
    ...(part.userTitle && { userTitle: part.userTitle }),
    ...(part.autoTitle && { autoTitle: part.autoTitle }),
    tokenCount: part.tokenCount || 0,
    created: part.created || Date.now(),
    updated: part.updated || Date.now(),
    // add these back - these fields are not exported
    abortController: null,
    ephemerals: [],
  };
  outcome.conversations.push({ success: true, fileName, conversation: restored });
}
/**
 * Restores one or more conversations from a parsed JSON value: either a full
 * backup (ExportedAllJsonV1, produced by downloadDAllJson) or a single
 * conversation export (ExportedConversationJsonV1).
 */
export function restoreDConversationsFromJSON(fileName: string, obj: any, outcome: ImportedOutcome) {
  // FIX: guard first - JSON.parse can yield null or a primitive, on which
  // obj.hasOwnProperty(...) would throw
  if (!obj || typeof obj !== 'object') {
    outcome.conversations.push({ success: false, fileName, error: `Invalid file: ${fileName}` });
    return;
  }
  // heuristics: a full backup has 'conversations', a single export has 'messages'
  // (safe call avoids the no-prototype-builtins pitfall on parsed objects)
  const hasConversations = Object.prototype.hasOwnProperty.call(obj, 'conversations');
  const hasMessages = Object.prototype.hasOwnProperty.call(obj, 'messages');
  // parse ExportedAllJsonV1
  if (hasConversations && !hasMessages) {
    const payload = obj as ExportedAllJsonV1;
    for (const conversation of payload.conversations)
      restoreDConversationFromJson(fileName, conversation, outcome);
  }
  // parse ExportedConversationJsonV1
  else if (hasMessages && !hasConversations) {
    restoreDConversationFromJson(fileName, obj as ExportedConversationJsonV1, outcome);
  }
  // neither (or both) markers present: not a recognized format
  else {
    outcome.conversations.push({ success: false, fileName, error: `Invalid file: ${fileName}` });
  }
}
/// do not change these - consider people's backups
// V1 single-conversation export: a DConversation minus the runtime-only fields
type ExportedConversationJsonV1 = Partial<DConversation>;
// V1 full-backup export: every conversation plus the configured model sources
type ExportedAllJsonV1 = {
  conversations: ExportedConversationJsonV1[];
  models: { sources: DModelSource[] };
}
+150
View File
@@ -0,0 +1,150 @@
import { ChatModelId, SystemPurposeId, SystemPurposes } from '../../../data';
import { createDMessage, DMessage, useChatStore } from '@/common/state/store-chats';
import { OpenAI } from '@/modules/openai/openai.types';
import { getOpenAISettings } from '@/modules/openai/openai.client';
import { speakText } from '@/modules/elevenlabs/elevenlabs.client';
import { useSettingsStore } from '@/common/state/store-settings';
import { updateAutoConversationTitle } from './ai-functions';
/**
 * The main "chat" function. TODO: this is here so we can soon move it to the data model.
 * Streams the assistant reply into a fresh message, then refreshes the auto title.
 */
export const runAssistantUpdatingState = async (conversationId: string, history: DMessage[], assistantModel: ChatModelId, systemPurpose: SystemPurposeId) => {

  // refresh the system message from the active Purpose, unless manually edited
  history = updatePurposeInHistory(conversationId, history, systemPurpose);

  // placeholder 'typing' message that the stream will fill in
  const assistantMessageId = createAssistantTypingMessage(conversationId, assistantModel, history[0].purposeId, '...');

  // while an abort controller is set, the UI shows the "stop" control
  const abortController = new AbortController();
  const { startTyping, editMessage } = useChatStore.getState();
  startTyping(conversationId, abortController);

  await streamAssistantMessage(conversationId, assistantMessageId, history, assistantModel, editMessage, abortController.signal);

  // streaming is over: clear the abort controller
  startTyping(conversationId, null);

  // derive a conversation title, if one is still needed
  await updateAutoConversationTitle(conversationId);
};
/**
 * Ensures the history begins with a system message matching the given purpose.
 * Mutates (and returns) the history, and writes it back to the chat store.
 */
export function updatePurposeInHistory(conversationId: string, history: DMessage[], purposeId: SystemPurposeId): DMessage[] {
  // pull out the existing system message, or start from a blank one
  const systemIndex = history.findIndex(message => message.role === 'system');
  const systemMessage: DMessage = systemIndex < 0 ? createDMessage('system', '') : history.splice(systemIndex, 1)[0];

  // overwrite the text only when the user hasn't manually edited the system message
  const purposeTemplate = purposeId ? SystemPurposes[purposeId]?.systemMessage : undefined;
  if (!systemMessage.updated && purposeTemplate) {
    systemMessage.purposeId = purposeId;
    // expand the {{Today}} template variable to the current ISO date (YYYY-MM-DD)
    systemMessage.text = purposeTemplate.replaceAll('{{Today}}', new Date().toISOString().split('T')[0]);
  }

  history.unshift(systemMessage);
  useChatStore.getState().setMessages(conversationId, history);
  return history;
}
/**
 * Appends a new 'typing' assistant message to the conversation and returns its id.
 * The caller is responsible for clearing the typing flag when done.
 */
export function createAssistantTypingMessage(conversationId: string, assistantModel: ChatModelId | 'prodia' | 'react-...', assistantPurposeId: SystemPurposeId | undefined, text: string): string {
  const message: DMessage = createDMessage('assistant', text);
  message.typing = true;
  message.purposeId = assistantPurposeId;
  message.originLLM = assistantModel;
  useChatStore.getState().appendMessage(conversationId, message);
  return message.id;
}
/**
 * Main function to send the chat to the assistant and receive a response (streaming)
 *
 * Posts the history to /api/openai/stream-chat and incrementally writes the decoded
 * bytes into the assistant message via editMessage. The stream may begin with a small
 * JSON object carrying the effective model name (streaming workaround).
 *
 * @param conversationId chat to update
 * @param assistantMessageId message that receives the streamed text
 * @param history messages to send, mapped to { role, content } pairs
 * @param chatModelId model identifier forwarded to the API
 * @param editMessage chat-store mutator used to update the assistant message
 * @param abortSignal aborts the fetch when the user presses "stop"
 */
async function streamAssistantMessage(
  conversationId: string, assistantMessageId: string, history: DMessage[],
  chatModelId: string,
  editMessage: (conversationId: string, messageId: string, updatedMessage: Partial<DMessage>, touch: boolean) => void,
  abortSignal: AbortSignal,
) {
  const { modelTemperature, modelMaxResponseTokens, elevenLabsAutoSpeak } = useSettingsStore.getState();

  // request body for the streaming endpoint
  const payload: OpenAI.API.Chat.Request = {
    api: getOpenAISettings(),
    model: chatModelId,
    messages: history.map(({ role, text }) => ({
      role: role,
      content: text,
    })),
    temperature: modelTemperature,
    max_tokens: modelMaxResponseTokens,
  };

  try {
    const response = await fetch('/api/openai/stream-chat', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload),
      signal: abortSignal,
    });

    if (response.body) {
      const reader = response.body.getReader();
      // stream-mode decoding: multi-byte UTF-8 sequences may span chunk boundaries
      const decoder = new TextDecoder('utf-8');

      // loop forever until the read is done, or the abort controller is triggered
      let incrementalText = '';
      let parsedFirstPacket = false;
      let sentFirstParagraph = false;
      while (true) {
        const { value, done } = await reader.read();
        if (done) break;
        incrementalText += decoder.decode(value, { stream: true });

        // there may be a JSON object at the beginning of the message, which contains the model name (streaming workaround)
        if (!parsedFirstPacket && incrementalText.startsWith('{')) {
          const endOfJson = incrementalText.indexOf('}');
          if (endOfJson > 0) {
            // split the JSON prefix off the user-visible text
            const json = incrementalText.substring(0, endOfJson + 1);
            incrementalText = incrementalText.substring(endOfJson + 1);
            try {
              const parsed: OpenAI.API.Chat.StreamingFirstResponse = JSON.parse(json);
              editMessage(conversationId, assistantMessageId, { originLLM: parsed.model }, false);
              parsedFirstPacket = true;
            } catch (e) {
              // error parsing JSON, ignore
              console.log('Error parsing JSON: ' + e);
            }
          }
        }

        // if the first paragraph (after the first packet) is complete, call the callback
        // NOTE(review): the 100..400 char window looks like a heuristic for a "speakable" first paragraph - confirm
        if (parsedFirstPacket && elevenLabsAutoSpeak === 'firstLine' && !sentFirstParagraph) {
          let cutPoint = incrementalText.lastIndexOf('\n');
          if (cutPoint < 0)
            cutPoint = incrementalText.lastIndexOf('. ');
          if (cutPoint > 100 && cutPoint < 400) {
            sentFirstParagraph = true;
            const firstParagraph = incrementalText.substring(0, cutPoint);
            speakText(firstParagraph).then(() => false /* fire and forget, we don't want to stall this loop */);
          }
        }

        // push the partial text into the message on every chunk
        editMessage(conversationId, assistantMessageId, { text: incrementalText }, false);
      }
    }
  } catch (error: any) {
    if (error?.name === 'AbortError') {
      // expected, the user clicked the "stop" button
    } else {
      // TODO: show an error to the UI
      console.error('Fetch request error:', error);
    }
  }

  // finally, stop the typing animation
  editMessage(conversationId, assistantMessageId, { typing: false }, false);
}
@@ -1,30 +1,29 @@
import { Agent } from '~/modules/aifn/react/react';
import { DLLMId } from '~/modules/llms/llm.types';
import { Agent } from '@/common/llm-util/react';
import { ChatModelId } from '../../../data';
import { createEphemeral, DMessage, useChatStore } from '@/common/state/store-chats';
import { createDEphemeral, DMessage, useChatStore } from '~/common/state/store-chats';
import { createAssistantTypingMessage } from './editors';
import { createAssistantTypingMessage } from './agi-immediate';
/**
* Synchronous ReAct chat function - TODO: event loop, auto-ui, cleanups, etc.
*/
export async function runReActUpdatingState(conversationId: string, question: string, assistantLlmId: DLLMId) {
export const runReActUpdatingState = async (conversationId: string, question: string, assistantModelId: ChatModelId) => {
const { appendEphemeral, updateEphemeralText, updateEphemeralState, deleteEphemeral, editMessage } = useChatStore.getState();
// create a blank and 'typing' message for the assistant - to be filled when we're done
const assistantModelLabel = 'react-' + assistantLlmId.slice(4, 7); // HACK: this is used to change the Avatar animation
const assistantMessageId = createAssistantTypingMessage(conversationId, assistantModelLabel, undefined, '...');
const assistantModelStr = 'react-' + assistantModelId.slice(4, 7); // HACK: this is used to change the Avatar animation
const assistantMessageId = createAssistantTypingMessage(conversationId, assistantModelStr as ChatModelId, undefined, '...');
const updateAssistantMessage = (update: Partial<DMessage>) =>
editMessage(conversationId, assistantMessageId, update, false);
// create an ephemeral space
const ephemeral = createDEphemeral(`Reason+Act`, 'Initializing ReAct..');
const ephemeral = createEphemeral(`Reason+Act`, 'Initializing ReAct..');
appendEphemeral(conversationId, ephemeral);
let ephemeralText = '';
let ephemeralText: string = '';
const logToEphemeral = (text: string) => {
console.log(text);
ephemeralText += (text.length > 300 ? text.slice(0, 300) + '...' : text) + '\n';
@@ -35,7 +34,7 @@ export async function runReActUpdatingState(conversationId: string, question: st
// react loop
const agent = new Agent();
const reactResult = await agent.reAct(question, assistantLlmId, 5,
const reactResult = await agent.reAct(question, assistantModelId, 5,
logToEphemeral,
(state: object) => updateEphemeralState(conversationId, ephemeral.id, state),
);
@@ -46,6 +45,6 @@ export async function runReActUpdatingState(conversationId: string, question: st
} catch (error: any) {
console.error(error);
logToEphemeral(ephemeralText + `\nIssue: ${error || 'unknown'}`);
updateAssistantMessage({ text: 'Issue: ReAct did not produce an answer.', typing: false });
updateAssistantMessage({ text: 'Issue: ReAct did nor produce an answer.', typing: false });
}
}
};
+87
View File
@@ -0,0 +1,87 @@
import { ChatModelId, fastChatModelId } from '../../../data';
import { callChat } from '@/modules/openai/openai.client';
import { useChatStore } from '@/common/state/store-chats';
/**
 * Creates the AI title for a conversation, by taking the last 5 first-lines and asking AI what's that about.
 * Best-effort and fire-and-forget: a failed LLM call is logged, never surfaced.
 */
export async function updateAutoConversationTitle(conversationId: string) {

  // external state
  const conversations = useChatStore.getState().conversations;

  // only operate on valid conversations, without any title
  const conversation = conversations.find(c => c.id === conversationId) ?? null;
  if (!conversation || conversation.autoTitle || conversation.userTitle) return;

  // first line of the last 5 non-system messages, truncated to 50 chars each
  const historyLines: string[] = conversation.messages.filter(m => m.role !== 'system').slice(-5).map(m => {
    let text = m.text.split('\n')[0];
    text = text.length > 50 ? text.substring(0, 50) + '...' : text;
    text = `${m.role === 'user' ? 'You' : 'Assistant'}: ${text}`;
    return `- ${text}`;
  });

  // LLM
  callChat(fastChatModelId, [
    { role: 'system', content: `You are an AI conversation titles assistant who specializes in creating expressive yet few-words chat titles.` },
    {
      role: 'user', content:
        'Analyze the given short conversation (every line is truncated) and extract a concise chat title that ' +
        'summarizes the conversation in as little as a couple of words.\n' +
        'Only respond with the lowercase short title and nothing else.\n' +
        '\n' +
        '```\n' +
        historyLines.join('\n') +
        '```\n',
    },
  ]).then(chatResponse => {
    // strip common decorations the model may add around the title
    const title = chatResponse?.message?.content
      ?.trim()
      ?.replaceAll('"', '')
      ?.replace('Title: ', '')
      ?.replace('title: ', '');
    if (title)
      useChatStore.getState().setAutoTitle(conversationId, title);
  }).catch(error => {
    // FIX: was fire-and-forget with no rejection handler - log instead of leaving an unhandled rejection
    console.error('updateAutoConversationTitle: title generation failed:', error);
  });
}
// https://www.youtube.com/watch?v=XLG-qtZwxIw
/*const promptNew =
'I want you to act as a prompt engineer. You will help me write prompts for an ai art generator.\n' +
'\n' +
'I will provide you with short content ideas and your job is to elaborate these into full, detailed, coherent prompts.\n' +
'\n' +
'Prompts involve describing the content and style of images in concise accurate language. It is useful to be explicit and use references to popular culture, artists and mediums. Your focus needs to be on nouns and adjectives. I will give you some example prompts for your reference. Please define the exact camera that should be used\n' +
'\n' +
'Here is a formula for you to use(content insert nouns here)(medium: insert artistic medium here)(style: insert references to genres, artists and popular culture here)(lighting, reference the lighting here)(colours reference color styles and palettes here)(composition: reference cameras, specific lenses, shot types and positional elements here)\n' +
'\n' +
'when giving a prompt remove the brackets, speak in natural language and be more specific, use precise, articulate language.';
*/
// NOTE: formerly using this for GPT3.5Turbo
// 'You are an AI prompt writer for AI art generation. I will provide you with an input that may include ideas or context, and your task is to create coherent and complete prompts that guide the AI in creating visually captivating artwork.\n' +
// 'Prompts involve crafting descriptive compelling captions that describe scenes, settings, or subjects at a high level, using mostly adjectives and nouns to provide clear and focused guidance. You may also include references to artistic styles, techniques, or cultural influences to help achieve the desired aesthetic.\n' +
// 'To ensure the AI can interpret and generate the artwork based on the provided guidance, the output must be the lowercase prompt and nothing else.',
const simpleImagineSystemPrompt: string = 'As an AI art prompt writer, create captivating prompts using adjectives, nouns, and artistic references that a non-technical person can understand. Craft creative, coherent and descriptive captions to guide the AI in generating visually striking artwork. Provide output as a lowercase prompt and nothing else.';
/**
 * Creates a caption for a drawing or photo given some description - used to elevate the quality of the imaging
 * @returns the generated prompt, or null when the LLM call fails
 */
export async function imaginePromptFromText(messageText: string, modelId: ChatModelId): Promise<string | null> {
  // cap the input at 1000 chars and wrap it in a fenced block for the model
  const userPrompt = 'Write a prompt, based on the following input.\n\n```\n' + messageText.slice(0, 1000) + '\n```\n';
  try {
    const chatResponse = await callChat(modelId, [
      { role: 'system', content: simpleImagineSystemPrompt },
      { role: 'user', content: userPrompt },
    ]);
    return chatResponse.message?.content?.trim() ?? null;
  } catch (error: any) {
    console.error('imaginePromptFromText: fetch request error:', error);
    return null;
  }
}
+54
View File
@@ -0,0 +1,54 @@
import { Prodia } from '@/modules/prodia/prodia.types';
import { prodiaDefaultModelId } from '@/modules/prodia/prodia.client';
import { useChatStore } from '@/common/state/store-chats';
import { useSettingsStore } from '@/common/state/store-settings';
import { createAssistantTypingMessage } from './agi-immediate';
/**
 * The main 'image generation' function - for now specialized to the 'imagine' command.
 *
 * Posts the prompt (plus the user's Prodia settings) to /api/prodia/imagine, then
 * replaces the placeholder assistant message with the resulting image URL, or with
 * an error explanation. Always clears the 'typing' flag at the end.
 */
export const runImageGenerationUpdatingState = async (conversationId: string, imageText: string) => {

  // reference the state editing functions
  const { editMessage } = useChatStore.getState();

  // create a blank and 'typing' message for the assistant
  const assistantMessageId = createAssistantTypingMessage(conversationId, 'prodia', undefined,
    `Give me a few seconds while I draw ${imageText?.length > 20 ? 'that' : '"' + imageText + '"'}...`);

  // generate the image - only include the optional knobs the user actually set
  const { prodiaApiKey: apiKey, prodiaModelId, prodiaNegativePrompt: negativePrompt, prodiaSteps: steps, prodiaCfgScale: cfgScale, prodiaSeed: seed } = useSettingsStore.getState();
  const input: Prodia.API.Imagine.RequestBody = {
    ...(apiKey && { apiKey }),
    prompt: imageText,
    prodiaModelId: prodiaModelId || prodiaDefaultModelId,
    ...(!!negativePrompt && { negativePrompt }),
    ...(!!steps && { steps }),
    ...(!!cfgScale && { cfgScale }),
    ...(!!seed && { seed }),
  };

  try {
    const response = await fetch('/api/prodia/imagine', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(input),
    });
    if (response.ok) {
      const imagineResponse: Prodia.API.Imagine.Response = await response.json();
      // edit the assistant message to be the image (the message text carries the plain URL)
      if (imagineResponse.status === 'success') {
        editMessage(conversationId, assistantMessageId, { text: imagineResponse.imageUrl }, false);
        // NOTE: imagineResponse shall have an altText which contains some description we could show on mouse hover
        //       Would be hard to do it with the current plain-text URL tho - shall consider changing the workaround format
      }
    } else
      editMessage(conversationId, assistantMessageId, { text: `Sorry, I had issues requesting this image. Check your API key?` }, false);
  } catch (error: any) {
    editMessage(conversationId, assistantMessageId, { text: `Sorry, I couldn't generate an image for that. Issue: ${error}.` }, false);
  }

  // finally, stop the typing animation
  editMessage(conversationId, assistantMessageId, { typing: false }, false);
};
-67
View File
@@ -1,67 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, Button, Card, CardContent, Container, Switch, Typography } from '@mui/joy';
import ScienceIcon from '@mui/icons-material/Science';
import { Link } from '~/common/components/Link';
import { useUIPreferencesStore } from '~/common/state/store-ui';
/**
 * Settings page for toggling the app's experimental "Labs" features,
 * backed by the 'experimentalLabs' flag in the UI preferences store.
 */
export default function AppLabs() {

  // external state
  const { experimentalLabs, setExperimentalLabs } = useUIPreferencesStore(state => ({
    experimentalLabs: state.experimentalLabs, setExperimentalLabs: state.setExperimentalLabs,
  }), shallow);

  // the Switch writes straight through to the store
  const handleLabsChange = (event: React.ChangeEvent<HTMLInputElement>) => setExperimentalLabs(event.target.checked);

  return (
    <Box sx={{
      backgroundColor: 'background.level1',
      display: 'flex', flexDirection: 'column', alignItems: 'center', justifyContent: 'center',
      flexGrow: 1,
      overflowY: 'auto',
      minHeight: 96,
      p: { xs: 3, md: 6 },
      gap: 4,
    }}>

      <Typography level='h1' sx={{ fontSize: '3.6rem' }}>
        Labs <ScienceIcon sx={{ fontSize: '3.3rem' }} />
      </Typography>

      {/* the master on/off toggle */}
      <Switch checked={experimentalLabs} onChange={handleLabsChange}
              endDecorator={experimentalLabs ? 'On' : 'Off'}
              slotProps={{ endDecorator: { sx: { minWidth: 26 } } }} />

      <Container disableGutters maxWidth='sm'>
        <Card>
          <CardContent>
            <Typography>
              The Labs section is where we experiment with new features and ideas.
            </Typography>
            <Typography level='title-md' sx={{ mt: 2 }}>
              Features {experimentalLabs ? 'enabled' : 'disabled'}:
            </Typography>
            <ul style={{ marginTop: 8, marginBottom: 8, paddingInlineStart: 32 }}>
              <li><b>YouTube persona synthesizer</b> - 90% complete</li>
              <li><b>Chat mode: Follow-up augmentation</b> - almost done</li>
              <li><b>Relative chats size</b> - complete</li>
            </ul>
            <Typography sx={{ mt: 2 }}>
              For any questions and creative idea, please join us on Discord, and let&apos;s talk!
            </Typography>
          </CardContent>
        </Card>
      </Container>

      <Button variant='solid' color='neutral' size='lg' component={Link} href='/' noLinkStyle>
        Got it!
      </Button>

    </Box>
  );
}
-122
View File
@@ -1,122 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Button, ButtonGroup, Divider, FormControl, FormLabel, Input, Switch, Typography } from '@mui/joy';
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
import VisibilityIcon from '@mui/icons-material/Visibility';
import VisibilityOffIcon from '@mui/icons-material/VisibilityOff';
import { DLLMId } from '~/modules/llms/llm.types';
import { useModelsStore } from '~/modules/llms/store-llms';
import { GoodModal } from '~/common/components/GoodModal';
import { useUIStateStore } from '~/common/state/store-ui';
import { VendorLLMOptions } from './VendorLLMOptions';
/**
 * Modal with the per-LLM options: vendor-specific settings, rename,
 * default-role assignment (Chat/Fast/Func), visibility toggle, a collapsible
 * details line, and deletion of the LLM.
 */
export function LLMOptionsModal(props: { id: DLLMId }) {

  // state
  const [showDetails, setShowDetails] = React.useState(false);

  // external state
  const closeLLMOptions = useUIStateStore(state => state.closeLLMOptions);
  const {
    llm,
    removeLLM, updateLLM,
    isChatLLM, setChatLLMId,
    isFastLLM, setFastLLMId,
    isFuncLLM, setFuncLLMId,
  } = useModelsStore(state => ({
    llm: state.llms.find(llm => llm.id === props.id),
    removeLLM: state.removeLLM,
    updateLLM: state.updateLLM,
    isChatLLM: state.chatLLMId === props.id,
    isFastLLM: state.fastLLMId === props.id,
    isFuncLLM: state.funcLLMId === props.id,
    setChatLLMId: state.setChatLLMId,
    setFastLLMId: state.setFastLLMId,
    setFuncLLMId: state.setFuncLLMId,
  }), shallow);

  // defensive: the id may no longer resolve to a model
  if (!llm)
    return <>Options issue: LLM not found for id {props.id}</>;

  const handleLlmLabelSet = (event: React.ChangeEvent<HTMLInputElement>) => updateLLM(llm.id, { label: event.target.value || '' });

  const handleLlmVisibilityToggle = () => updateLLM(llm.id, { hidden: !llm.hidden });

  // deleting also dismisses this modal
  const handleLlmDelete = () => {
    removeLLM(llm.id);
    closeLLMOptions();
  };

  return (
    <GoodModal
      title={<><b>{llm.label}</b> options</>}
      open={!!props.id} onClose={closeLLMOptions}
      startButton={
        <Button variant='plain' color='neutral' onClick={handleLlmDelete} startDecorator={<DeleteOutlineIcon />}>
          Delete
        </Button>
      }
    >

      {/* vendor-specific options for this LLM */}
      <VendorLLMOptions id={props.id} />

      <Divider />

      <FormControl orientation='horizontal' sx={{ flexWrap: 'wrap' }}>
        <FormLabel sx={{ minWidth: 80 }}>
          Name
        </FormLabel>
        <Input variant='outlined' value={llm.label} onChange={handleLlmLabelSet} />
      </FormControl>

      {/* assign this LLM as the app-wide default for one of the roles; clicking an active role clears it */}
      <FormControl orientation='horizontal' sx={{ flexWrap: 'wrap', alignItems: 'center' }}>
        <FormLabel sx={{ minWidth: 80 }}>
          Defaults
        </FormLabel>
        <ButtonGroup orientation='horizontal' size='sm' variant='outlined'>
          <Button variant={isChatLLM ? 'solid' : undefined} onClick={() => setChatLLMId(isChatLLM ? null : props.id)}>Chat</Button>
          <Button variant={isFastLLM ? 'solid' : undefined} onClick={() => setFastLLMId(isFastLLM ? null : props.id)}>Fast</Button>
          <Button variant={isFuncLLM ? 'solid' : undefined} onClick={() => setFuncLLMId(isFuncLLM ? null : props.id)}>Func</Button>
        </ButtonGroup>
      </FormControl>

      <FormControl orientation='horizontal' sx={{ flexWrap: 'wrap', alignItems: 'center' }}>
        <FormLabel sx={{ minWidth: 80 }}>
          Visible
        </FormLabel>
        <Switch checked={!llm.hidden} onChange={handleLlmVisibilityToggle}
                endDecorator={!llm.hidden ? <VisibilityIcon /> : <VisibilityOffIcon />}
                slotProps={{ endDecorator: { sx: { minWidth: 26 } } }}
                sx={{ ml: 0, mr: 'auto' }} />
      </FormControl>

      {/*<FormControl orientation='horizontal' sx={{ flexWrap: 'wrap', alignItems: 'center' }}>*/}
      {/*  <FormLabel sx={{ minWidth: 80 }}>*/}
      {/*    Flags*/}
      {/*  </FormLabel>*/}
      {/*  <Checkbox color='neutral' checked={llm.tags?.includes('chat')} readOnly disabled label='Chat' sx={{ ml: 4 }} />*/}
      {/*  <Checkbox color='neutral' checked={llm.tags?.includes('stream')} readOnly disabled label='Stream' sx={{ ml: 4 }} />*/}
      {/*</FormControl>*/}

      {/* clicking the label toggles the details line */}
      <FormControl orientation='horizontal' sx={{ flexWrap: 'nowrap' }}>
        <FormLabel onClick={() => setShowDetails(!showDetails)} sx={{ minWidth: 80, cursor: 'pointer', textDecoration: 'underline' }}>
          Details
        </FormLabel>
        {showDetails && <Typography level='body-sm' sx={{ display: 'block' }}>
          [{llm.id}]: {llm.options.llmRef && `${llm.options.llmRef} · `} context tokens: {llm.contextTokens?.toLocaleString()} · {
          llm.created && `created: ${(new Date(llm.created * 1000)).toLocaleString()}`} · description: {llm.description}
          {/*· tags: {llm.tags.join(', ')}*/}
        </Typography>}
      </FormControl>

    </GoodModal>
  );
}
-109
View File
@@ -1,109 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, Chip, IconButton, List, ListItem, ListItemButton, Tooltip, Typography } from '@mui/joy';
import SettingsOutlinedIcon from '@mui/icons-material/SettingsOutlined';
import VisibilityOffOutlinedIcon from '@mui/icons-material/VisibilityOffOutlined';
import { DLLM, DModelSourceId, ModelVendor } from '~/modules/llms/llm.types';
import { findVendorById } from '~/modules/llms/vendor.registry';
import { useModelsStore } from '~/modules/llms/store-llms';
import { useUIStateStore } from '~/common/state/store-ui';
/**
 * One row in the models list: LLM label (with source/description tooltip),
 * role chips (chat/fast/fn), visibility indicator, and a settings affordance.
 * Clicking anywhere on the row opens the per-LLM options modal.
 */
function ModelItem(props: { llm: DLLM, vendor: ModelVendor, chipChat: boolean, chipFast: boolean, chipFunc: boolean }) {

  // external state
  const openLLMOptions = useUIStateStore(state => state.openLLMOptions);

  // derived
  const llm = props.llm;
  const label = llm.label;
  const tooltip = `${llm._source.label} - ${llm.description}`;

  return (
    <ListItem>
      <ListItemButton onClick={() => openLLMOptions(llm.id)} sx={{ alignItems: 'center', gap: 1 }}>

        {/* Model Name (dimmed when the LLM is hidden) */}
        <Tooltip title={tooltip}>
          <Typography sx={llm.hidden ? { color: 'neutral.plainDisabledColor' } : undefined}>
            {label}
          </Typography>
        </Tooltip>

        {/* --> */}
        <Box sx={{ flex: 1 }} />

        {props.chipChat && <Chip size='sm' variant='plain' sx={{ boxShadow: 'sm' }}>chat</Chip>}
        {props.chipFast && <Chip size='sm' variant='plain' sx={{ boxShadow: 'sm' }}>fast</Chip>}
        {props.chipFunc && <Chip size='sm' variant='plain' sx={{ boxShadow: 'sm' }}>𝑓n</Chip>}

        {llm.hidden && (
          <IconButton disabled size='sm' variant='plain' color='neutral'>
            <VisibilityOffOutlinedIcon />
          </IconButton>
        )}

        <IconButton size='sm'>
          <SettingsOutlinedIcon />
        </IconButton>

      </ListItemButton>
    </ListItem>
  );
}
/**
 * Lists all configured LLMs, optionally filtered to a single source,
 * inserting a centered source-label header whenever the group changes.
 */
export function ModelsList(props: {
  filterSourceId: DModelSourceId | null
}) {

  // external state
  const { chatLLMId, fastLLMId, funcLLMId, llms } = useModelsStore(state => ({
    chatLLMId: state.chatLLMId,
    fastLLMId: state.fastLLMId,
    funcLLMId: state.funcLLMId,
    llms: state.llms.filter(llm => !props.filterSourceId || llm.sId === props.filterSourceId),
  }), shallow);

  // find out if there's more than 1 sourceLabel in the llms array
  const multiSources = llms.length >= 2 && llms.find(llm => llm._source !== llms[0]._source);
  const showAllSources = !props.filterSourceId;
  let lastGroupLabel = '';

  // generate the list items, prepending headers when necessary
  const items: React.JSX.Element[] = [];
  for (const llm of llms) {

    // prepend label if changing source
    const groupLabel = llm._source.label;
    if ((multiSources || showAllSources) && groupLabel !== lastGroupLabel) {
      lastGroupLabel = groupLabel;
      items.push(
        <ListItem key={'lab-' + llm._source.id} sx={{ justifyContent: 'center' }}>
          <Typography>
            {groupLabel}
          </Typography>
        </ListItem>,
      );
    }

    // for safety, ensure the vendor exists (skip the row otherwise)
    const vendor = findVendorById(llm._source.vId);
    !!vendor && items.push(
      <ModelItem key={'llm-' + llm.id} llm={llm} vendor={vendor} chipChat={llm.id === chatLLMId} chipFast={llm.id === fastLLMId} chipFunc={llm.id === funcLLMId} />,
    );
  }

  return (
    <List variant='soft' size='sm' sx={{
      borderRadius: 'sm',
      pl: { xs: 0, md: 1 },
    }}>
      {items}
    </List>
  );
}
-86
View File
@@ -1,86 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Checkbox, Divider } from '@mui/joy';
import { GoodModal } from '~/common/components/GoodModal';
import { useUIStateStore } from '~/common/state/store-ui';
import { DModelSourceId } from '~/modules/llms/llm.types';
import { createModelSourceForDefaultVendor } from '~/modules/llms/vendor.registry';
import { useModelsStore } from '~/modules/llms/store-llms';
import { LLMOptionsModal } from './LLMOptionsModal';
import { ModelsList } from './ModelsList';
import { ModelsSourceSelector } from './ModelsSourceSelector';
import { VendorSourceSetup } from './VendorSourceSetup';
/**
 * Top-level modal for configuring model sources and their LLMs.
 * Auto-opens when no source is configured (unless suspended) and seeds the
 * default vendor source on a cold start. Also hosts the per-LLM options modal.
 */
export function ModelsModal(props: { suspendAutoModelsSetup?: boolean }) {

  // local state
  const [_selectedSourceId, setSelectedSourceId] = React.useState<DModelSourceId | null>(null);
  const [showAllSources, setShowAllSources] = React.useState<boolean>(false);

  // external state
  const { modelsSetupOpen, openModelsSetup, closeModelsSetup, llmOptionsId } = useUIStateStore();
  const { modelSources, llmCount } = useModelsStore(state => ({
    modelSources: state.sources,
    llmCount: state.llms.length,
  }), shallow);

  // auto-select the first source - note: we could use a useEffect() here, but this is more efficient
  // also note that state-persistence is unneeded
  const selectedSourceId = _selectedSourceId ?? modelSources[0]?.id ?? null;
  const activeSource = modelSources.find(source => source.id === selectedSourceId);
  const multiSource = modelSources.length > 1;

  // if no sources at startup, open the modal
  React.useEffect(() => {
    if (!selectedSourceId && !props.suspendAutoModelsSetup)
      openModelsSetup();
  }, [selectedSourceId, openModelsSetup, props.suspendAutoModelsSetup]);

  // add the default source on cold - will require setup
  React.useEffect(() => {
    const { addSource, sources } = useModelsStore.getState();
    if (!sources.length)
      addSource(createModelSourceForDefaultVendor(sources));
  }, []);

  return <>

    {/* Sources Setup */}
    {modelsSetupOpen && <GoodModal
      title={<>Configure <b>AI Models</b></>}
      startButton={
        multiSource ? <Checkbox
          label='all vendors' sx={{ my: 'auto' }}
          checked={showAllSources} onChange={() => setShowAllSources(all => !all)}
        /> : undefined
      }
      open={modelsSetupOpen} onClose={closeModelsSetup}
    >

      <ModelsSourceSelector selectedSourceId={selectedSourceId} setSelectedSourceId={setSelectedSourceId} />

      {!!activeSource && <Divider />}

      {!!activeSource && <VendorSourceSetup source={activeSource} />}

      {!!llmCount && <Divider />}

      {!!llmCount && <ModelsList filterSourceId={showAllSources ? null : selectedSourceId} />}

      <Divider />

    </GoodModal>}

    {/* per-LLM options */}
    {!!llmOptionsId && <LLMOptionsModal id={llmOptionsId} />}

  </>;
}
@@ -1,160 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, Button, IconButton, ListItemDecorator, MenuItem, Option, Select, Typography } from '@mui/joy';
import AddIcon from '@mui/icons-material/Add';
import CloudDoneOutlinedIcon from '@mui/icons-material/CloudDoneOutlined';
import CloudOutlinedIcon from '@mui/icons-material/CloudOutlined';
import ComputerIcon from '@mui/icons-material/Computer';
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
import { DModelSourceId, ModelVendor, ModelVendorId } from '~/modules/llms/llm.types';
import { createModelSourceForVendor, findAllVendors, findVendorById } from '~/modules/llms/vendor.registry';
import { hasServerKeyOpenAI } from '~/modules/llms/openai/openai.vendor';
import { useModelsStore } from '~/modules/llms/store-llms';
import { CloseableMenu } from '~/common/components/CloseableMenu';
import { ConfirmationModal } from '~/common/components/ConfirmationModal';
import { hideOnDesktop, hideOnMobile } from '~/common/theme';
// Icon hinting where a vendor runs: cloud-with-check when the OpenAI server key is
// configured, a computer for local vendors, a plain cloud otherwise, null if unknown
function locationIcon(vendor?: ModelVendor | null) {
  if (vendor && vendor.id === 'openai' && hasServerKeyOpenAI)
    return <CloudDoneOutlinedIcon />;
  return !vendor ? null : vendor.location === 'local' ? <ComputerIcon /> : <CloudOutlinedIcon />;
}
// The vendor's own icon component, or null when the vendor (or its Icon) is missing
function vendorIcon(vendor?: ModelVendor | null) {
  const Icon = !vendor ? null : vendor.Icon;
  return Icon ? <Icon /> : null;
}
/**
 * Selector for the active model source (a configured provider instance):
 * a dropdown of existing sources, an 'Add' control that opens a per-vendor menu,
 * and a delete button guarded by a confirmation modal.
 */
export function ModelsSourceSelector(props: {
  selectedSourceId: DModelSourceId | null, setSelectedSourceId: (sourceId: DModelSourceId | null) => void,
}) {
  // state
  const [vendorsMenuAnchor, setVendorsMenuAnchor] = React.useState<HTMLElement | null>(null);
  const [confirmDeletionSourceId, setConfirmDeletionSourceId] = React.useState<DModelSourceId | null>(null);

  // external state
  const { modelSources, addModelSource, removeModelSource } = useModelsStore(state => ({
    modelSources: state.sources,
    addModelSource: state.addSource, removeModelSource: state.removeSource,
  }), shallow);

  const handleShowVendors = (event: React.MouseEvent<HTMLElement>) => setVendorsMenuAnchor(event.currentTarget);
  const closeVendorsMenu = () => setVendorsMenuAnchor(null);

  const handleAddSourceFromVendor = React.useCallback((vendorId: ModelVendorId) => {
    closeVendorsMenu();
    // read the store directly for the freshest sources list (avoids a stale closure)
    const { sources: modelSources } = useModelsStore.getState();
    const modelSource = createModelSourceForVendor(vendorId, modelSources);
    if (modelSource) {
      addModelSource(modelSource);
      props.setSelectedSourceId(modelSource.id);
    }
  }, [addModelSource, props]);

  // deletion is only enabled while more than one source remains
  const enableDeleteButton = !!props.selectedSourceId && (modelSources.length > 1 /*|| (process.env.NODE_ENV === 'development')*/);

  const handleDeleteSource = (id: DModelSourceId) => setConfirmDeletionSourceId(id);

  const handleDeleteSourceConfirmed = React.useCallback(() => {
    if (confirmDeletionSourceId) {
      // select any other remaining source (or none) before removing this one
      props.setSelectedSourceId(modelSources.find(source => source.id !== confirmDeletionSourceId)?.id ?? null);
      removeModelSource(confirmDeletionSourceId);
      setConfirmDeletionSourceId(null);
    }
  }, [confirmDeletionSourceId, modelSources, props, removeModelSource]);

  // vendor list items: one menu entry per vendor, disabled once its instance limit is reached
  const vendorItems = React.useMemo(() => findAllVendors().filter(v => !!v.instanceLimit).map(vendor => {
    const sourceCount = modelSources.filter(source => source.vId === vendor.id).length;
    const enabled = vendor.instanceLimit > sourceCount;
    return {
      vendor,
      enabled,
      sourceCount,
      component: (
        <MenuItem key={vendor.id} disabled={!enabled} onClick={() => handleAddSourceFromVendor(vendor.id)}>
          <ListItemDecorator>
            {vendorIcon(vendor)}
          </ListItemDecorator>
          {vendor.name}{/*{sourceCount > 0 && ` (added)`}*/}
        </MenuItem>
      ),
    };
  }), [handleAddSourceFromVendor, modelSources]);

  // source items: one <Option> per configured source, with a location icon decorator
  const sourceItems = React.useMemo(() => modelSources.map(source => {
    return {
      source,
      icon: locationIcon(findVendorById(source.vId)),
      component: <Option key={source.id} value={source.id}>{source.label}</Option>,
    };
  }), [modelSources]);

  const selectedSourceItem = sourceItems.find(item => item.source.id === props.selectedSourceId);
  const noSources = !sourceItems.length;

  return (
    <Box sx={{ display: 'flex', flexDirection: 'row', flexWrap: 'wrap', alignItems: 'center', gap: 1 }}>

      {/* Models: [Select] Add Delete */}
      <Typography sx={{ mr: 1, ...hideOnMobile }}>
        Vendor:
      </Typography>

      <Select
        variant='outlined'
        value={props.selectedSourceId}
        disabled={noSources}
        onChange={(_event, value) => value && props.setSelectedSourceId(value)}
        startDecorator={selectedSourceItem?.icon}
        slotProps={{
          root: { sx: { minWidth: 190 } },
          indicator: { sx: { opacity: 0.5 } },
        }}
      >
        {sourceItems.map(item => item.component)}
      </Select>

      {/* Add: icon-only on mobile, labeled button on desktop */}
      <IconButton variant={noSources ? 'solid' : 'plain'} color='primary' onClick={handleShowVendors} disabled={!!vendorsMenuAnchor} sx={{ ...hideOnDesktop }}>
        <AddIcon />
      </IconButton>
      <Button variant={noSources ? 'solid' : 'plain'} onClick={handleShowVendors} disabled={!!vendorsMenuAnchor} startDecorator={<AddIcon />} sx={{ ...hideOnMobile }}>
        Add
      </Button>

      <IconButton
        variant='plain' color='neutral' disabled={!enableDeleteButton} sx={{ ml: 'auto' }}
        onClick={() => props.selectedSourceId && handleDeleteSource(props.selectedSourceId)}
      >
        <DeleteOutlineIcon />
      </IconButton>

      {/* vendors popup, for adding */}
      <CloseableMenu
        placement='bottom-start' zIndex={10000} sx={{ minWidth: 280 }}
        open={!!vendorsMenuAnchor} anchorEl={vendorsMenuAnchor} onClose={closeVendorsMenu}
      >
        {vendorItems.map(item => item.component)}
      </CloseableMenu>

      {/* source delete confirmation */}
      <ConfirmationModal
        open={!!confirmDeletionSourceId} onClose={() => setConfirmDeletionSourceId(null)} onPositive={handleDeleteSourceConfirmed}
        confirmationText={'Are you sure you want to remove these models? The configuration data will be lost and you may have to enter it again.'} positiveActionText={'Remove'}
      />

    </Box>
  );
}
@@ -1,22 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { DLLMId } from '~/modules/llms/llm.types';
import { findVendorById } from '~/modules/llms/vendor.registry';
import { useModelsStore } from '~/modules/llms/store-llms';
export function VendorLLMOptions(props: { id: DLLMId }) {
  // subscribe to just this LLM (warning: any change to any LLM field re-renders children)
  const llm = useModelsStore(state => state.llms.find(candidate => candidate.id === props.id), shallow);
  if (!llm)
    return <>Configuration issue: LLM not found for id {props.id}</>;

  // resolve the vendor that owns this LLM's source
  const vendor = findVendorById(llm._source.vId);
  if (!vendor)
    return <>Configuration issue: Vendor not found for LLM {llm.id}, source: {llm.sId}</>;

  // each vendor provides its own per-LLM options UI
  const { LLMOptionsComponent } = vendor;
  return <LLMOptionsComponent llm={llm} />;
}
@@ -1,14 +0,0 @@
import * as React from 'react';
import { DModelSource } from '~/modules/llms/llm.types';
import { findVendorById } from '~/modules/llms/vendor.registry';
export function VendorSourceSetup(props: { source: DModelSource }) {
  // each vendor supplies its own setup/configuration UI for a source
  const vendor = findVendorById(props.source.vId);
  if (!vendor)
    return <>Configuration issue: Vendor not found for Source {props.source.id}</>;
  const { SourceSetupComponent } = vendor;
  return <SourceSetupComponent sourceId={props.source.id} />;
}
-94
View File
@@ -1,94 +0,0 @@
import * as React from 'react';
import { Box, Button, Card, CardContent, Container, IconButton, Typography } from '@mui/joy';
import ExpandMoreIcon from '@mui/icons-material/ExpandMore';
import { Brand } from '~/common/brand';
import { Link } from '~/common/components/Link';
import { capitalizeFirstLetter } from '~/common/util/textUtils';
import { NewsItems } from './news.data';
/**
 * Full-page 'news' surface: shows release-note cards newest-first, revealing
 * one more (older) version each time the expander chevron is clicked.
 */
export default function AppNews() {
  // state: index of the oldest news item currently revealed
  const [lastNewsIdx, setLastNewsIdx] = React.useState<number>(0);

  // news selection: all items from the newest up to and including lastNewsIdx
  const news = NewsItems.filter((_, idx) => idx <= lastNewsIdx);
  const firstNews = news[0] ?? null;

  return (
    <Box sx={{
      backgroundColor: 'background.level1',
      display: 'flex', flexDirection: 'column', alignItems: 'center', justifyContent: 'center',
      flexGrow: 1,
      overflowY: 'auto',
      minHeight: 96,
      p: { xs: 3, md: 6 },
      gap: 4,
    }}>

      <Typography level='h1' sx={{fontSize: '3.6rem'}}>
        New updates!
      </Typography>

      <Typography>
        {capitalizeFirstLetter(Brand.Title.Base)} has been updated to version {firstNews?.versionName}. Enjoy what&apos;s new:
      </Typography>

      {/* NOTE(review): '!!news' is always truthy (filter returns an array) - the guard is a no-op */}
      {!!news && <Container disableGutters maxWidth='sm'>
        {news?.map((item, idx) => {
          const firstCard = idx === 0;
          // the expander only shows on the last visible card, while older items remain hidden
          const hasCardAfter = news.length < NewsItems.length;
          const showExpander = hasCardAfter && (idx === news.length - 1);
          const addPadding = !firstCard || showExpander;
          return <Card key={'news-' + idx} sx={{ mb: 2, minHeight: 32 }}>
            <CardContent sx={{ position: 'relative', pr: addPadding ? 4 : 0 }}>
              {!!item.text && <Typography component='div'>
                {item.text}
              </Typography>}
              {!!item.items && (item.items.length > 0) && <ul style={{ marginTop: 8, marginBottom: 8, paddingInlineStart: 32 }}>
                {item.items.map((item, idx) => <li key={idx}>
                  <Typography component='div'>
                    {item.text}
                  </Typography>
                </li>)}
              </ul>}
              {/* version badge on all but the newest card */}
              {!firstCard && (
                <Typography level='body-sm' sx={{ position: 'absolute', right: 0, top: 0 }}>
                  {item.versionName}
                </Typography>
              )}
              {showExpander && (
                <IconButton
                  variant='plain' size='sm'
                  onClick={() => setLastNewsIdx(idx + 1)}
                  sx={{ position: 'absolute', right: 0, bottom: 0, mr: -1, mb: -1 }}
                >
                  <ExpandMoreIcon />
                </IconButton>
              )}
            </CardContent>
          </Card>;
        })}
      </Container>}

      <Button variant='solid' color='neutral' size='lg' component={Link} href='/' noLinkStyle>
        Got it!
      </Button>

      {/*<Typography sx={{ textAlign: 'center' }}>*/}
      {/*  Enjoy!*/}
      {/*  <br /><br />*/}
      {/*  -- The {Brand.Title.Base} Team*/}
      {/*</Typography>*/}

    </Box>
  );
}
-55
View File
@@ -1,55 +0,0 @@
import * as React from 'react';
import { Box, Typography } from '@mui/joy';
import { Brand } from '~/common/brand';
import { Link } from '~/common/components/Link';
import { clientUtmSource } from '~/common/util/pwaUtils';
// update this variable every time you want to broadcast a new version to clients
export const incrementalVersion: number = 4;

// news and feature surfaces
// Items are ordered newest-first; index 0 is shown by default on the news page.
export const NewsItems: NewsItem[] = [
  {
    versionName: '1.3.5',
    items: [
      // { text: <>(Labs mode) YouTube personas creator</> },
      { text: <>Backup chats (export all)</> },
      { text: <>Import ChatGPT shared chats</> },
      { text: <>Cleaner, better, newer UI, including relative chats size</> },
      // -- version separator --
      { text: <>AI in the real world with <Typography color='success' sx={{ fontWeight: 600 }}>camera OCR</Typography> - MOBILE-ONLY</> },
      { text: <><Typography color='success' sx={{ fontWeight: 600 }}>Anthropic</Typography> models full support</> },
    ],
  },
  {
    versionName: '1.3.1',
    items: [
      { text: <><Typography color='success'>Flattener</Typography> - 4-mode conversations summarizer</> },
      { text: <><Typography color='success'>Forking</Typography> - branch your conversations</> },
      { text: <><Typography color='success'>/s</Typography> and <Typography color='success'>/a</Typography> to append a <i>system</i> or <i>assistant</i> message</> },
      { text: <>Local LLMs with <Link href='https://github.com/enricoros/big-agi/blob/main/docs/local-llm-text-web-ui.md' target='_blank'>Oobabooga server</Link></> },
      { text: 'NextJS STOP bug.. squashed, with Vercel!' },
    ],
  },
  {
    versionName: '1.2.1',
    // text: '',
    items: [
      { text: <>New home page: <b><Link href={Brand.URIs.Home + clientUtmSource()} target='_blank'>{Brand.URIs.Home.replace('https://', '')}</Link></b></> },
      { text: 'Support 𝑓unction models' }, // (n)
      { text: <Box sx={{ display: 'flex', alignItems: 'center' }}>Labs: experiments</Box> }, // ⚗️🧬🔬🥼 🥽🧪 <ScienceIcon sx={{ fontSize: 24, opacity: 0.5 }} />
    ],
  },
];

// A single release entry: an optional intro text and/or a list of bullet items
interface NewsItem {
  versionName: string;
  text?: string | React.JSX.Element;
  items?: {
    text: string | React.JSX.Element;
  }[];
}
-29
View File
@@ -1,29 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { useRouter } from 'next/router';
import { useAppStateStore } from '~/common/state/store-appstate';
import { incrementalVersion } from './news.data';
/**
 * Navigates to the '/news' page when the client has not yet seen the current
 * news version - but only once the app has been used a few times.
 */
export function useShowNewsOnUpdate() {
  const { push } = useRouter();
  const { usageCount, lastSeenNewsVersion } = useAppStateStore(state => ({
    usageCount: state.usageCount,
    lastSeenNewsVersion: state.lastSeenNewsVersion,
  }), shallow);
  React.useEffect(() => {
    const isNewsOutdated = (lastSeenNewsVersion || 0) < incrementalVersion;
    if (isNewsOutdated && usageCount > 2) {
      // redirect to the news page; skipped for brand-new users (usageCount must exceed 2)
      // NOTE(review): a prior comment said 'Disable for now' but the redirect is active
      push('/news').then(() => null);
    }
  }, [lastSeenNewsVersion, push, usageCount]);
}
/**
 * Marks the current news version as seen, once, when the component mounts.
 */
export function useMarkNewsAsSeen() {
  React.useEffect(() => {
    useAppStateStore.getState().setLastSeenNewsVersion(incrementalVersion);
  }, []);
}
-39
View File
@@ -1,39 +0,0 @@
import * as React from 'react';
import { Box, Container, ListDivider, Sheet, Typography } from '@mui/joy';
import { YTPersonaCreator } from './YTPersonaCreator';
import ScienceIcon from '@mui/icons-material/Science';
/**
 * Personas page: a scrollable sheet hosting the (experimental) YouTube
 * persona creator.
 */
export function AppPersonas() {
  return (
    <Sheet sx={{
      flexGrow: 1,
      overflowY: 'auto',
      backgroundColor: 'background.level1',
      p: { xs: 3, md: 6 },
    }}>
      <Container disableGutters maxWidth='md' sx={{ display: 'flex', flexDirection: 'column', gap: 1 }}>

        <Typography level='title-lg' sx={{ textAlign: 'center' }}>
          Advanced AI Personas
        </Typography>

        {/* 'Experimental' badge */}
        <Box sx={{ display: 'flex', alignItems: 'center', justifyContent: 'center', gap: 1 }}>
          <Typography>
            Experimental
          </Typography>
          <ScienceIcon color='primary' />
        </Box>

        <ListDivider sx={{ my: 2 }} />

        <YTPersonaCreator />

      </Container>
    </Sheet>
  );
}
-266
View File
@@ -1,266 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Alert, Box, Button, Card, CardContent, CircularProgress, Grid, IconButton, Input, LinearProgress, Modal, ModalDialog, Radio, RadioGroup, Tooltip, Typography } from '@mui/joy';
import ContentCopyIcon from '@mui/icons-material/ContentCopy';
import WhatshotIcon from '@mui/icons-material/Whatshot';
import YouTubeIcon from '@mui/icons-material/YouTube';
import { apiQuery } from '~/modules/trpc/trpc.client';
import { useModelsStore } from '~/modules/llms/store-llms';
import { copyToClipboard } from '~/common/util/copyToClipboard';
import { LLMChainStep, useLLMChain } from './useLLMChain';
/**
 * Extracts the 11-character YouTube video ID from common URL formats
 * (watch?v=..., embed/..., youtu.be/... short links).
 *
 * @param videoURL the URL (protocol and 'www.' are optional)
 * @returns the video ID, or null when the URL doesn't match or the ID length is wrong
 */
function extractVideoID(videoURL: string): string | null {
  // never reassigned: prefer const over let; strict equality over '=='
  const regExp = /^(?:https?:\/\/)?(?:www\.)?(?:youtube\.com\/(?:watch\?v=|embed\/)|youtu\.be\/)([^#&?]*).*/;
  const match = videoURL.match(regExp);
  // valid YouTube video IDs are exactly 11 characters long
  return (match && match[1]?.length === 11) ? match[1] : null;
}
/**
 * Fetches (and caches forever) the transcript for a YouTube video.
 * The query stays disabled until a video ID is provided.
 */
function useTranscriptFromVideo(videoID: string | null) {
  const query = apiQuery.ytpersona.getTranscript.useQuery({ videoId: videoID || '' }, {
    enabled: !!videoID,
    refetchOnWindowFocus: false,
    staleTime: Infinity,
  });
  // normalize missing data to nulls for easy consumption
  return {
    title: query.data?.videoTitle ?? null,
    thumbnailUrl: query.data?.thumbnailUrl ?? null,
    transcript: query.data?.transcript?.trim() ?? null,
    isFetching: query.isFetching,
    isError: query.isError, error: query.error,
  };
}
// The 3-step LLM chain that turns a video transcript into a persona system prompt:
// 1) analyze the transcript, 2) draft the 'You are a...' character sheet, 3) validate & refine it.
const YouTubePersonaSteps: LLMChainStep[] = [
  {
    name: 'Analyzing the transcript',
    setSystem: 'You are skilled in analyzing and embodying diverse characters. You meticulously study transcripts to capture key attributes, draft comprehensive character sheets, and refine them for authenticity. Feel free to make assumptions without hedging, be concise and be creative.',
    addUserInput: true,
    addUser: 'Conduct comprehensive research on the provided transcript. Identify key characteristics of the speaker, including age, professional field, distinct personality traits, style of communication, narrative context, and self-awareness. Additionally, consider any unique aspects such as their use of humor, their cultural background, core values, passions, fears, personal history, and social interactions. Your output for this stage is an in-depth written analysis that exhibits an understanding of both the superficial and more profound aspects of the speaker\'s persona.',
  },
  {
    name: 'Defining the character',
    addPrevAssistant: true,
    addUser: 'Craft your documented analysis into a draft of the \'You are a...\' character sheet. It should encapsulate all crucial personality dimensions, along with the motivations and aspirations of the persona. Keep in mind to balance succinctness and depth of detail for each dimension. The deliverable here is a comprehensive draft of the character sheet that captures the speaker\'s unique essence.',
  },
  {
    name: 'Crossing the t\'s',
    addPrevAssistant: true,
    addUser: 'Compare the draft character sheet with the original transcript, validating its content and ensuring it captures both the speakers overt characteristics and the subtler undertones. Omit unknown information, fine-tune any areas that require clarity, have been overlooked, or require more authenticity. Use clear and illustrative examples from the transcript to refine your sheet and offer meaningful, tangible reference points. Your output is a coherent, comprehensive, and nuanced instruction that begins with \'You are a...\' and serves as a go-to guide for an actor recreating the persona.',
  },
  // {
  //   name: 'Shrink',
  //   addPrevAssistant: true,
  //   addUser: 'Now remove all the uncertain information, omit unknown information, Your output is a coherent, comprehensive, and nuanced instruction that begins with \'You are a...\' and serves as a go-to guide for a recreating the persona.',
  // },
];
/**
 * UI to create an AI persona system prompt from a YouTube video:
 * fetches the transcript for a pasted URL, runs it through the
 * YouTubePersonaSteps LLM chain, and shows progress, intermediates,
 * and the final copy-able system prompt.
 */
export function YTPersonaCreator() {
  // state
  const [videoURL, setVideoURL] = React.useState('');
  const [selectedModelType, setSelectedModelType] = React.useState<'chat' | 'fast'>('fast');
  // const [selectedLLMLabel, setSelectedLLMLabel] = React.useState<string | null>(null);
  const [videoID, setVideoID] = React.useState('');
  const [personaTranscript, setPersonaTranscript] = React.useState<string | null>(null);

  // external state: resolve the configured 'chat' and 'fast' LLMs from the store
  const { chatLLM, fastLLM } = useModelsStore(state => {
    const { chatLLMId, fastLLMId } = state;
    const chatLLM = state.llms.find(llm => llm.id === chatLLMId) ?? null;
    const fastLLM = state.llms.find(llm => llm.id === fastLLMId) ?? null;
    return {
      chatLLM: chatLLM,
      fastLLM: /*chatLLM === fastLLM ? null :*/ fastLLM,
    };
  }, shallow);

  // fetch transcript when the Video ID is ready, then store it
  const { transcript, thumbnailUrl, title, isFetching, isError, error: transcriptError } =
    useTranscriptFromVideo(videoID);
  React.useEffect(() => setPersonaTranscript(transcript), [transcript]);

  // use the transformation sequence to create a persona
  const llm = selectedModelType === 'chat' ? chatLLM : fastLLM;
  const { isFinished, isTransforming, chainProgress, chainIntermediates, chainStepName, chainOutput, chainError, abortChain } =
    useLLMChain(YouTubePersonaSteps, llm?.id, personaTranscript ?? undefined);

  const handleVideoIdChange = (e: React.ChangeEvent<HTMLInputElement>) => setVideoURL(e.target.value);

  const handleFetchTranscript = (e: React.FormEvent<HTMLFormElement>) => {
    e.preventDefault(); // stop the form submit
    const videoId = extractVideoID(videoURL);
    if (!videoId) {
      setVideoURL('Invalid');
    } else {
      // reset the transcript first, so the chain restarts when the new one arrives
      setPersonaTranscript(null);
      setVideoID(videoId);
    }
  };

  return <>

    <Box sx={{ display: 'flex', flexDirection: 'row', alignItems: 'center', gap: 1 }}>
      <YouTubeIcon sx={{ color: '#f00' }} />
      <Typography level='title-lg'>
        YouTube -&gt; AI persona
      </Typography>
    </Box>

    {/* URL entry + 'Create' submit; the fire icon fills in a demo URL */}
    <form onSubmit={handleFetchTranscript}>
      <Box sx={{ display: 'flex', flexDirection: 'row', gap: 2 }}>
        <Input
          required
          type='url'
          fullWidth
          variant='outlined'
          placeholder='YouTube Video URL'
          value={videoURL} onChange={handleVideoIdChange}
          endDecorator={
            <IconButton
              variant='outlined' color='neutral'
              onClick={() => setVideoURL('https://www.youtube.com/watch?v=M_wZpSEvOkc')}
            >
              <WhatshotIcon />
            </IconButton>
          }
        />
        <Button
          type='submit'
          variant='solid' disabled={isFetching || isTransforming} loading={isFetching}
          sx={{ minWidth: 120 }}>
          Create
        </Button>
      </Box>
    </form>

    {/* LLM selector (chat vs fast) */}
    {!isTransforming && !isFinished && !!chatLLM && !!fastLLM && (
      <RadioGroup
        orientation='horizontal'
        value={selectedModelType}
        onChange={(event: React.ChangeEvent<HTMLInputElement>) => setSelectedModelType(event.target.value as 'chat' | 'fast')}
      >
        <Radio value='chat' label={chatLLM.label.startsWith('GPT-4') ? chatLLM.label + ' (slow, accurate)' : chatLLM.label} />
        <Radio value='fast' label={fastLLM.label} />
      </RadioGroup>
    )}

    {/* 1. Transcript*/}
    {personaTranscript && (
      <Card sx={{ mt: 2, boxShadow: 'md' }}>
        <CardContent>
          <Typography level='title-md' sx={{ mb: 1 }}>
            {title || 'Transcript'}
          </Typography>
          <Box>
            {!!thumbnailUrl && <picture><img src={thumbnailUrl} alt='YouTube Video Image' height={80} style={{ float: 'left', marginRight: 8 }} /></picture>}
            <Typography level='body-sm'>
              {personaTranscript.slice(0, 280)}...
            </Typography>
          </Box>
        </CardContent>
      </Card>
    )}

    {/* Errors: transcript fetch error, then chain error */}
    {isError && (
      <Alert color='warning' sx={{ mt: 1 }}>
        <Typography component='div'>{transcriptError?.message || 'Unknown error'}</Typography>
      </Alert>
    )}
    {!!chainError && (
      <Alert color='warning' sx={{ mt: 1 }}>
        <Typography component='div'>{chainError}</Typography>
      </Alert>
    )}

    {/* Persona! */}
    {chainOutput && <Box sx={{ mt: 2 }}>
      <Typography level='title-lg'>
        YouTuber Persona System Prompt
      </Typography>
      <Card sx={{ boxShadow: 'md' }}>
        <CardContent sx={{
          position: 'relative',
          '&:hover > button': { opacity: 1 },
        }}>
          <Alert variant='soft' color='success' sx={{ mb: 1 }}>
            You can now copy the following text and use it as Custom prompt!
          </Alert>
          <Tooltip title='Copy system prompt' variant='solid'>
            <IconButton
              variant='outlined' color='neutral' onClick={() => copyToClipboard(chainOutput)}
              sx={{
                position: 'absolute', right: 0, zIndex: 10,
                // opacity: 0, transition: 'opacity 0.3s',
              }}>
              <ContentCopyIcon />
            </IconButton>
          </Tooltip>
          <Typography level='body-sm'>
            {chainOutput}
          </Typography>
        </CardContent>
      </Card>
    </Box>}

    {/* Intermediate outputs rendered as cards in a grid */}
    {chainIntermediates && chainIntermediates.length > 0 && <Box sx={{ mt: 2 }}>
      <Typography level='title-lg'>
        {isTransforming ? 'Working...' : 'Intermediate Work'}
      </Typography>
      <Grid container spacing={2}>
        {chainIntermediates.map((intermediate, i) =>
          <Grid xs={12} sm={6} md={4} key={i}>
            <Card>
              <CardContent>
                <Typography level='title-sm' sx={{ mb: 1 }}>
                  {i + 1}. {YouTubePersonaSteps[i].name}
                </Typography>
                <Typography level='body-sm'>
                  {intermediate?.slice(0, 140)}...
                </Typography>
              </CardContent>
            </Card>
          </Grid>,
        )}
      </Grid>
    </Box>}

    {/* Embodiment Progress: modal blocking dialog while the chain runs */}
    {isTransforming && <Modal open>
      <ModalDialog>
        <Box sx={{ display: 'flex', flexDirection: 'column', alignItems: 'center', my: 2 }}>
          <CircularProgress color='primary' value={Math.max(10, 100 * chainProgress)} />
        </Box>
        <Typography color='success' level='title-lg' sx={{ mt: 1 }}>
          Embodying Persona ...
        </Typography>
        <Typography color='success' level='title-sm' sx={{ mt: 1, fontWeight: 600 }}>
          {chainStepName}
        </Typography>
        <LinearProgress color='success' determinate value={Math.max(10, 100 * chainProgress)} sx={{ mt: 1, mb: 2 }} />
        <Typography level='title-sm'>
          This may take 1-2 minutes. Do not close this window or the progress will be lost.
          If you experience any errors (e.g. LLM timeouts, or context overflows for larger videos)
          please try again with faster/smaller models.
        </Typography>
        <Button variant='soft' color='neutral' onClick={abortChain} sx={{ ml: 'auto', minWidth: 100, mt: 5 }}>
          Cancel
        </Button>
      </ModalDialog>
    </Modal>}

  </>;
}
-186
View File
@@ -1,186 +0,0 @@
import * as React from 'react';
import { DLLMId } from '~/modules/llms/llm.types';
import { callChatGenerate, VChatMessageIn } from '~/modules/llms/llm.client';
import { useModelsStore } from '~/modules/llms/store-llms';
export interface LLMChainStep {
name: string;
setSystem?: string;
addPrevAssistant?: boolean;
addUserInput?: boolean;
addUser?: string;
}
/**
 * React hook to manage a chain of LLM transformations.
 *
 * The chain restarts whenever the steps, LLM, or input change; each step
 * builds a message list per its instruction flags, calls the LLM, and stores
 * the output, until the last step's output becomes the chain output.
 * Aborting (unmount, input change, or user cancel) stops further steps.
 */
export function useLLMChain(steps: LLMChainStep[], llmId: DLLMId | undefined, chainInput: string | undefined) {
  const [chain, setChain] = React.useState<ChainState | null>(null);
  const [error, setError] = React.useState<string | null>(null);
  // one controller per chain run; replaced on every restart
  const chainAbortController = React.useRef(new AbortController());

  // restart Chain on inputs change
  React.useEffect(() => {
    // abort any ongoing chain, if any
    chainAbortController.current.abort();
    chainAbortController.current = new AbortController();
    setChain(null);

    // error if no LLM
    setError(!llmId ? 'LLM not provided' : null);

    // abort if no input
    if (!chainInput || !llmId)
      return;

    // start the chain
    setChain(initChainState(llmId, chainInput, steps));
    return () => chainAbortController.current.abort();
  }, [chainInput, llmId, steps]);

  // perform Step on Chain update
  React.useEffect(() => {
    // skip step if the chain has been aborted
    const _chainAbortController = chainAbortController.current;
    if (_chainAbortController.signal.aborted) return;

    // skip if there is no chain
    if (!chain || !llmId) return;

    // skip if no next unprocessed step
    const stepIdx = chain.steps.findIndex((step) => !step.isComplete);
    if (stepIdx === -1) return;

    // safety check (re-processing the same step shall never happen)
    const chainStep = chain.steps[stepIdx];
    if (chainStep.output)
      return console.log('WARNING - Output overlap - why is this happening?', chainStep);

    // execute step instructions: build the message list from the chat history plus flags
    let llmChatInput: VChatMessageIn[] = [...chain.chatHistory];
    const instruction = chainStep.ref;
    if (instruction.setSystem) {
      // replace any prior system message with this step's
      llmChatInput = llmChatInput.filter((msg) => msg.role !== 'system');
      llmChatInput.unshift({ role: 'system', content: instruction.setSystem });
    }
    if (instruction.addUserInput)
      // the original chain input, truncated head+tail to fit the safe length
      llmChatInput.push({ role: 'user', content: implodeText(chain.input, chain.safeInputLength) });
    if (instruction.addPrevAssistant && stepIdx > 0)
      llmChatInput.push({ role: 'assistant', content: implodeText(chain.steps[stepIdx - 1].output!, chain.safeInputLength) });
    if (instruction.addUser)
      llmChatInput.push({ role: 'user', content: instruction.addUser });

    // monitor for cleanup before the result
    let stepDone = false;
    const stepAbortController = new AbortController();
    const globalToStepListener = () => stepAbortController.abort('chain aborted');
    _chainAbortController.signal.addEventListener('abort', globalToStepListener);

    // LLM call
    callChatGenerate(llmId, llmChatInput, chain.overrideResponseTokens)
      .then(({ content }) => {
        stepDone = true;
        if (!stepAbortController.signal.aborted)
          setChain(updateChainState(chain, llmChatInput, stepIdx, content));
      })
      .catch((err) => {
        stepDone = true;
        if (!stepAbortController.signal.aborted)
          setError(`Transformation Error: ${err?.message || err?.toString() || err || 'unknown'}`);
      });

    // abort if unmounted before the LLM call ends, or if the full chain has been aborted
    return () => {
      if (!stepDone)
        stepAbortController.abort('step aborted');
      _chainAbortController.signal.removeEventListener('abort', globalToStepListener);
    };
  }, [chain, llmId]);

  return {
    isFinished: !!chain?.output,
    isTransforming: !!chain?.steps?.length && !chain?.output && !error,
    chainOutput: chain?.output ?? null,
    chainProgress: chain?.progress ?? 0,
    chainStepName: chain?.steps?.find((step) => !step.isComplete)?.ref.name ?? null,
    chainIntermediates: chain?.steps?.map((step) => step.output ?? null)?.filter(out => out) ?? [],
    chainError: error,
    abortChain: () => {
      chainAbortController.current.abort('user canceled');
      setError('Canceled');
    },
  };
}
// Internal state of a running chain
interface ChainState {
  steps: StepState[];             // one entry per instruction, in order
  chatHistory: VChatMessageIn[];  // message list carried across steps
  progress: number;               // 0..1, fraction of completed steps
  safeInputLength: number;        // max characters of input / prior output to include per message
  overrideResponseTokens: number; // response token budget for each LLM call
  input: string;                  // the original chain input text
  output: string | null;          // final output, set when the last step completes
}

// Per-step runtime state
interface StepState {
  ref: LLMChainStep;  // the instruction this state tracks
  output?: string;    // the step's LLM output, once complete
  isComplete: boolean;
  isLast: boolean;
}
/**
 * Builds the initial ChainState: budgets the LLM's context window between
 * input and response, and creates one pending StepState per instruction.
 * Throws when the LLM id is unknown.
 */
function initChainState(llmId: DLLMId, input: string, steps: LLMChainStep[]): ChainState {
  // max token allocation for the job
  const { llms } = useModelsStore.getState();
  const llm = llms.find(candidate => candidate.id === llmId);
  if (!llm)
    throw new Error(`LLM ${llmId} not found`);

  // reserve a third of the context for the response; the rest is for input
  const maxTokens = llm.contextTokens;
  const overrideResponseTokens = Math.floor(maxTokens / 3);
  const inputTokens = maxTokens - overrideResponseTokens;
  const safeInputLength = Math.floor(inputTokens * 2); // it's deemed around 4

  const lastIndex = steps.length - 1;
  return {
    steps: steps.map((step, index) => ({
      ref: step,
      output: undefined,
      isComplete: false,
      isLast: index === lastIndex,
    })),
    chatHistory: [],
    overrideResponseTokens,
    safeInputLength,
    progress: 0,
    input: input,
    output: null,
  };
}
function updateChainState(chain: ChainState, history: VChatMessageIn[], stepIdx: number, output: string): ChainState {
const steps = chain.steps.length;
return {
...chain,
steps: chain.steps.map((step, i) =>
(i === stepIdx) ? {
...step,
output: output,
isComplete: true,
} : step),
chatHistory: history,
progress: Math.round(100 * (stepIdx + 1) / steps) / 100,
output: (stepIdx === steps - 1) ? output : null,
};
}
/**
 * Truncates text to roughly maxLength characters by keeping the head and
 * tail halves, joined with an '...' line; short text passes through intact.
 */
function implodeText(text: string, maxLength: number) {
  if (text.length <= maxLength)
    return text;
  const half = Math.floor(maxLength / 2);
  const head = text.substring(0, half);
  const tail = text.substring(text.length - half);
  return `${head}\n...\n${tail}`;
}
-108
View File
@@ -1,108 +0,0 @@
// noinspection ExceptionCaughtLocallyJS
import { TRPCError } from '@trpc/server';
import { z } from 'zod';
import { createTRPCRouter, publicProcedure } from '~/modules/trpc/trpc.server';
// tRPC input: the YouTube video ID whose transcript is requested
const inputSchema = z.object({
  videoId: z.string().nonempty(),
});

// Shape of YouTube's 'json3' timedtext (captions) payload: a list of timed
// events, each optionally carrying utf8 text segments
const youtubeTranscriptionSchema = z.object({
  wireMagic: z.literal('pb3'),
  events: z.array(
    z.object({
      tStartMs: z.number(),
      dDurationMs: z.number().optional(),
      aAppend: z.number().optional(),
      segs: z.array(
        z.object({
          utf8: z.string(),
          tOffsetMs: z.number().optional(),
        }),
      ).optional(),
    }),
  ),
});
export const ytPersonaRouter = createTRPCRouter({

  /**
   * Get the transcript for a YouTube video ID.
   * Scrapes the watch page HTML for the captions URL, title and thumbnail,
   * fetches the 'json3' captions, and flattens them into a single string.
   * Throws TRPCError on scraping/parsing failures.
   */
  getTranscript: publicProcedure
    .input(inputSchema)
    .query(async ({ input }) => {
      const { videoId } = input;

      try {

        // 1. find the captions URL within the video HTML page
        const data = await fetch(`https://www.youtube.com/watch?v=${videoId}`);
        const html = await data.text();
        const captionsUrlEnc = extractFromTo(html, 'https://www.youtube.com/api/timedtext', '"', 'Captions URL');
        const captionsUrl = decodeURIComponent(captionsUrlEnc.replaceAll('\\u0026', '&'));
        const thumbnailUrl = extractFromTo(html, 'https://i.ytimg.com/vi/', '"', 'Thumbnail URL').replaceAll('maxres', 'hq');
        const videoTitle = extractFromTo(html, '<title>', '</title>', 'Video Title').slice(7).replaceAll(' - YouTube', '').trim();

        // 2. fetch the captions
        // note: the desktop player appends this much: &fmt=json3&xorb=2&xobt=3&xovt=3&cbr=Chrome&cbrver=114.0.0.0&c=WEB&cver=2.20230628.07.00&cplayer=UNIPLAYER&cos=Windows&cosver=10.0&cplatform=DESKTOP
        const captionsData = await fetch(captionsUrl + `&fmt=json3`);
        const captions = await captionsData.json();
        const safeData = youtubeTranscriptionSchema.safeParse(captions);
        if (!safeData.success) {
          console.error(safeData.error);
          throw new TRPCError({ code: 'INTERNAL_SERVER_ERROR', message: '[YouTube API Issue] Could not parse the captions' });
        }

        // 3. flatten to text: concatenate every utf8 segment of every event
        const transcript = safeData.data.events
          .flatMap(event => event.segs ?? [])
          .map(seg => seg.utf8)
          .join('');

        return {
          videoId,
          videoTitle,
          thumbnailUrl,
          transcript,
        };

      } catch (error: any) {
        // preserve TRPCErrors; wrap anything else
        throw error instanceof TRPCError ? error
          : new TRPCError({ code: 'INTERNAL_SERVER_ERROR', message: `[YouTube Transcript Issue] Error: ${error?.message || error?.toString() || 'unknown'}` });
      }
    }),

});
/**
 * Returns the substring of html starting at the first occurrence of `from`
 * (inclusive) up to the next occurrence of `to` (exclusive).
 * Throws a BAD_REQUEST TRPCError (tagged with `label`) when either marker is missing.
 */
function extractFromTo(html: string, from: string, to: string, label: string): string {
  const start = html.indexOf(from);
  const end = html.indexOf(to, start);
  if (start < 0 || end <= start)
    throw new TRPCError({ code: 'BAD_REQUEST', message: `[YouTube API Issue] Could not find ${label}` });
  return html.substring(start, end);
}
/*
"Analyze the provided YouTube transcript, identifying and interpreting key characteristics such as the speaker's professional background, personality traits, style of communication, and core motivations, while specifically focusing on their age, industry knowledge, and narrative context. From this analysis, create a succinct yet comprehensive 'You are a...' character sheet that encapsulates the persona's multifaceted traits. Be sure to infuse the sheet with vivid illustrations drawn from the transcript that bring the character to life, equipping an actor with enough actionable insights for an accurate, engaging portrayal of the persona. The ultimate objective is to transform the text analysis into a tangible character, capturing the essence and complexities of the persona in one complete character sheet."
1. Analysis: "Conduct a comprehensive study of the YouTube transcript. Pinpoint and document key attributes of the speaker, such as age, professional expertise, standout personality traits, unique style of communication, narrative context, and levels of self-awareness. Scrutinize tone, language use, industry knowledge depth, humour usage, and motivations. Your deliverable is a detailed written analysis that effectively chronicles all aspects of the speaker's persona."
2. Character Sheet Drafting: "Translate the completed written analysis into a draft 'You are a...' character sheet. Ensure your draft covers all notable characteristics of the persona, including personality traits, professional background, communication style, knowledge base, context, self-awareness, and motivational aspects. The deliverable at this stage is a comprehensive draft of the character sheet."
3. Validation and Refinement: "Perform a detailed comparison of your character sheet draft and the original transcript. Ensure the sheet captures the speaker's essence and aligns with the transcript content. Integrate distinctive examples from the transcript for tangible, actionable references and refine as necessary for clarity and authenticity. Your final product is a perfected 'You are a...' character sheet, serving as a definitive guide for an actor embodying the persona."
1. Analysis: Conduct comprehensive research on the provided transcript. Identify key characteristics of the speaker, including age, professional field, distinct personality traits, style of communication, narrative context, and self-awareness. Additionally, consider any unique aspects such as their use of humor, their cultural background, core values, passions, fears, personal history, and social interactions. Your output for this stage is an in-depth written analysis that exhibits an understanding of both the superficial and more profound aspects of the speaker's persona.
2. Character Sheet Drafting: Craft your documented analysis into a draft of the 'You are a...' character sheet. It should encapsulate all crucial personality dimensions, along with the motivations and aspirations of the persona. Keep in mind to balance succinctness and depth of detail for each dimension. The deliverable here is a comprehensive draft of the character sheet that captures the speaker's unique essence.
3. Validation and Refinement: Compare the draft character sheet with the original transcript, validating its content and ensuring it captures both the speakers overt characteristics and the subtler undertones. Fine-tune any areas that require clarity, have been overlooked, or require more authenticity. Use clear and illustrative examples from the transcript to refine your sheet and offer meaningful, tangible reference points. Your finalized deliverable is a coherent, comprehensive, and nuanced 'You are a...' character sheet that serves as a go-to guide for an actor recreating the persona.
*/
-87
View File
@@ -1,87 +0,0 @@
import * as React from 'react';
import { Button, Divider, Tab, TabList, TabPanel, Tabs, useTheme } from '@mui/joy';
import { tabClasses } from '@mui/joy/Tab';
import BuildCircleIcon from '@mui/icons-material/BuildCircle';
import { ElevenlabsSettings } from '~/modules/elevenlabs/ElevenlabsSettings';
import { ProdiaSettings } from '~/modules/prodia/ProdiaSettings';
import { GoodModal } from '~/common/components/GoodModal';
import { useUIStateStore } from '~/common/state/store-ui';
import { ToolsSettings } from './ToolsSettings';
import { UISettings } from './UISettings';
/**
* Component that allows the User to modify the application settings,
* persisted on the client via localStorage.
*/
export function SettingsModal() {
// external state
const theme = useTheme();
const { settingsOpenTab, closeSettings, openModelsSetup } = useUIStateStore();
const tabFixSx = { fontFamily: theme.fontFamily.body, flex: 1, p: 0, m: 0 };
return (
<GoodModal title={`Preferences`} open={!!settingsOpenTab} onClose={closeSettings}
startButton={
<Button variant='soft' color='success' onClick={openModelsSetup} startDecorator={<BuildCircleIcon />} sx={{
'--Icon-fontSize': 'var(--joy-fontSize-xl2)',
}}>
Models
</Button>
}
sx={{ p: { xs: 1, sm: 2, lg: 2.5 } }}>
{/*<Divider />*/}
<Tabs aria-label='Settings tabbed menu' defaultValue={settingsOpenTab}>
<TabList
variant='soft'
disableUnderline
sx={{
'--ListItem-minHeight': '2.4rem',
mb: 2,
p: 0.5,
borderRadius: 'lg',
fontSize: 'md',
gap: 1,
overflow: 'hidden',
[`& .${tabClasses.root}[aria-selected="true"]`]: {
bgcolor: 'background.surface',
boxShadow: 'sm',
fontWeight: 'lg',
},
}}
>
<Tab disableIndicator value={1} sx={tabFixSx}>Chat</Tab>
<Tab disableIndicator value={2} sx={tabFixSx}>Draw</Tab>
<Tab disableIndicator value={3} sx={tabFixSx}>Speak</Tab>
<Tab disableIndicator value={4} sx={tabFixSx}>Tools</Tab>
</TabList>
<TabPanel value={1} sx={{ p: 'var(--Tabs-gap)' }}>
<UISettings />
</TabPanel>
<TabPanel value={2} sx={{ p: 'var(--Tabs-gap)' }}>
<ProdiaSettings />
</TabPanel>
<TabPanel value={3} sx={{ p: 'var(--Tabs-gap)' }}>
<ElevenlabsSettings />
</TabPanel>
<TabPanel value={4} sx={{ p: 'var(--Tabs-gap)' }}>
<ToolsSettings />
</TabPanel>
</Tabs>
<Divider />
</GoodModal>
);
}
-24
View File
@@ -1,24 +0,0 @@
import * as React from 'react';
import { FormHelperText, Stack } from '@mui/joy';
import { GoogleSearchSettings } from '~/modules/google/GoogleSearchSettings';
import { settingsGap } from '~/common/theme';
/**
 * Settings tab hosting the optional tool integrations
 * (currently only Google Search).
 */
export function ToolsSettings() {

  // static intro line shown above the tool sections
  const intro =
    <FormHelperText>
      🛠 Tools enable additional capabilities if enabled and correctly configured
    </FormHelperText>;

  return (
    <Stack direction='column' sx={{ gap: settingsGap }}>
      {intro}
      <GoogleSearchSettings />
    </Stack>
  );
}
-123
View File
@@ -1,123 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, FormControl, FormHelperText, FormLabel, Radio, RadioGroup, Stack, Switch } from '@mui/joy';
import InfoOutlinedIcon from '@mui/icons-material/InfoOutlined';
import ScienceIcon from '@mui/icons-material/Science';
import TelegramIcon from '@mui/icons-material/Telegram';
import WidthNormalIcon from '@mui/icons-material/WidthNormal';
import WidthWideIcon from '@mui/icons-material/WidthWide';
import { Link } from '~/common/components/Link';
import { hideOnMobile, settingsGap } from '~/common/theme';
import { isPwa } from '~/common/util/pwaUtils';
import { useUIPreferencesStore, useUIStateStore } from '~/common/state/store-ui';
/**
 * Chat-related user preferences panel (the 'Chat' tab of the Settings modal).
 *
 * Edits: window centering (hidden when running as a PWA), enter-to-send,
 * double-click message editing, markdown rendering, zen appearance, and the
 * experimental labs toggle. All values are read from and written through to
 * the persisted UI preferences store (zustand), so changes apply immediately.
 */
export function UISettings() {
  // external state
  const {
    centerMode, setCenterMode,
    doubleClickToEdit, setDoubleClickToEdit,
    enterToSend, setEnterToSend,
    experimentalLabs, setExperimentalLabs,
    renderMarkdown, setRenderMarkdown,
    zenMode, setZenMode,
  } = useUIPreferencesStore(state => ({
    centerMode: state.centerMode, setCenterMode: state.setCenterMode,
    doubleClickToEdit: state.doubleClickToEdit, setDoubleClickToEdit: state.setDoubleClickToEdit,
    enterToSend: state.enterToSend, setEnterToSend: state.setEnterToSend,
    experimentalLabs: state.experimentalLabs, setExperimentalLabs: state.setExperimentalLabs,
    renderMarkdown: state.renderMarkdown, setRenderMarkdown: state.setRenderMarkdown,
    zenMode: state.zenMode, setZenMode: state.setZenMode,
  }), shallow);
  // closeSettings is used so the 'Experiments' link can dismiss the modal on navigation
  const { closeSettings } = useUIStateStore(state => ({ closeSettings: state.closeSettings }), shallow);

  // write-through handlers: every change is persisted by the store right away
  // (the `|| 'wide'` fallback guards against an empty radio value)
  const handleCenterModeChange = (event: React.ChangeEvent<HTMLInputElement>) => setCenterMode(event.target.value as 'narrow' | 'wide' | 'full' || 'wide');

  const handleEnterToSendChange = (event: React.ChangeEvent<HTMLInputElement>) => setEnterToSend(event.target.checked);

  const handleDoubleClickToEditChange = (event: React.ChangeEvent<HTMLInputElement>) => setDoubleClickToEdit(event.target.checked);

  const handleZenModeChange = (event: React.ChangeEvent<HTMLInputElement>) => setZenMode(event.target.value as 'clean' | 'cleaner');

  const handleRenderMarkdownChange = (event: React.ChangeEvent<HTMLInputElement>) => setRenderMarkdown(event.target.checked);

  const handleExperimentalLabsChange = (event: React.ChangeEvent<HTMLInputElement>) => setExperimentalLabs(event.target.checked);

  return (
    <Stack direction='column' sx={{ gap: settingsGap }}>

      {/* Centering: hidden when installed as a PWA, and on mobile (hideOnMobile) */}
      {!isPwa() && <FormControl orientation='horizontal' sx={{ ...hideOnMobile, alignItems: 'center', justifyContent: 'space-between' }}>
        <Box>
          <FormLabel>Centering</FormLabel>
          <FormHelperText>{centerMode === 'full' ? 'Full screen chat' : centerMode === 'narrow' ? 'Narrow chat' : 'Wide'}</FormHelperText>
        </Box>
        <RadioGroup orientation='horizontal' value={centerMode} onChange={handleCenterModeChange}>
          <Radio value='narrow' label={<WidthNormalIcon sx={{ width: 25, height: 24, mt: -0.25 }} />} />
          <Radio value='wide' label={<WidthWideIcon sx={{ width: 25, height: 24, mt: -0.25 }} />} />
          <Radio value='full' label='Full' />
        </RadioGroup>
      </FormControl>}

      {/* Enter key behavior: send the message vs insert a newline */}
      <FormControl orientation='horizontal' sx={{ justifyContent: 'space-between' }}>
        <Box>
          <FormLabel>Enter to send</FormLabel>
          <FormHelperText>{enterToSend ? <>Sends message<TelegramIcon /></> : 'New line'}</FormHelperText>
        </Box>
        <Switch checked={enterToSend} onChange={handleEnterToSendChange}
                endDecorator={enterToSend ? 'On' : 'Off'}
                slotProps={{ endDecorator: { sx: { minWidth: 26 } } }} />
      </FormControl>

      {/* Message editing gesture: double-click vs the three-dots menu */}
      <FormControl orientation='horizontal' sx={{ justifyContent: 'space-between' }}>
        <Box>
          <FormLabel>Double click to edit</FormLabel>
          <FormHelperText>{doubleClickToEdit ? 'Double click' : 'Three dots'}</FormHelperText>
        </Box>
        <Switch checked={doubleClickToEdit} onChange={handleDoubleClickToEditChange}
                endDecorator={doubleClickToEdit ? 'On' : 'Off'}
                slotProps={{ endDecorator: { sx: { minWidth: 26 } } }} />
      </FormControl>

      {/* Render chat messages as markdown or as plain text */}
      <FormControl orientation='horizontal' sx={{ justifyContent: 'space-between' }}>
        <Box>
          <FormLabel>Markdown</FormLabel>
          <FormHelperText>{renderMarkdown ? 'Render markdown' : 'As text'}</FormHelperText>
        </Box>
        <Switch checked={renderMarkdown} onChange={handleRenderMarkdownChange}
                endDecorator={renderMarkdown ? 'On' : 'Off'}
                slotProps={{ endDecorator: { sx: { minWidth: 26 } } }} />
      </FormControl>

      {/* Appearance: 'clean' shows message senders, 'cleaner' is a minimal zen UI */}
      <FormControl orientation='horizontal' sx={{ alignItems: 'center', justifyContent: 'space-between' }}>
        <Box>
          <FormLabel>Appearance</FormLabel>
          <FormHelperText>{zenMode === 'clean' ? 'Show senders' : 'Minimal UI'}</FormHelperText>
        </Box>
        <RadioGroup orientation='horizontal' value={zenMode} onChange={handleZenModeChange}>
          {/*<Radio value='clean' label={<Face6Icon sx={{ width: 24, height: 24, mt: -0.25 }} />} />*/}
          <Radio value='clean' label='Clean' />
          <Radio value='cleaner' label='Zen' />
        </RadioGroup>
      </FormControl>

      {/* Experimental labs toggle; the label links to /labs and closes this modal */}
      <FormControl orientation='horizontal' sx={{ justifyContent: 'space-between' }}>
        <Box>
          <FormLabel component={Link} href='/labs' onClick={closeSettings}>
            <u>Experiments</u>
            <InfoOutlinedIcon sx={{ mx: 0.5 }} />
          </FormLabel>
          <FormHelperText>{experimentalLabs ? <>Enabled <ScienceIcon /></> : 'Disabled'}</FormHelperText>
        </Box>
        <Switch checked={experimentalLabs} onChange={handleExperimentalLabsChange}
                endDecorator={experimentalLabs ? 'On' : 'Off'}
                slotProps={{ endDecorator: { sx: { minWidth: 26 } } }} />
      </FormControl>

    </Stack>
  );
}
+52
View File
@@ -0,0 +1,52 @@
import * as React from 'react';
import { Box, Button, Modal, ModalClose, ModalDialog, ModalOverflow, Typography } from '@mui/joy';
import { ElevenlabsSettings } from '@/modules/elevenlabs/ElevenlabsSettings';
import { OpenAIAdvancedSettings } from '@/modules/openai/OpenAIAdvancedSettings';
import { OpenAISettings } from '@/modules/openai/OpenAISettings';
import { ProdiaSettings } from '@/modules/prodia/ProdiaSettings';
import { SearchSettings } from '@/modules/search/SearchSettings';
import { UISettings } from './UISettings';
/**
* Component that allows the User to modify the application settings,
* persisted on the client via localStorage.
*
* @param {boolean} open Whether the Settings modal is open
* @param {() => void} onClose Call this to close the dialog from outside
*/
/**
 * Component that allows the User to modify the application settings,
 * persisted on the client via localStorage.
 *
 * Shows all settings sections stacked in a single scrollable dialog.
 * `open` controls visibility; `onClose` is invoked by the close affordances.
 */
export function SettingsModal(props: { open: boolean, onClose: () => void; }) {
  const { open, onClose } = props;
  return (
    <Modal open={open} onClose={onClose}>
      <ModalOverflow>
        <ModalDialog sx={{ maxWidth: 500, display: 'flex', p: { xs: 1, sm: 2, lg: '20px' } }}>

          <Typography level='h6' sx={{ mb: 2 }}>Settings</Typography>
          <ModalClose />

          {/* settings sections, in display order */}
          <OpenAISettings />
          <UISettings />
          <ElevenlabsSettings />
          <ProdiaSettings />
          <SearchSettings />
          <OpenAIAdvancedSettings />

          {/* bottom-right dismiss button */}
          <Box sx={{ mt: 4, display: 'flex', justifyContent: 'flex-end' }}>
            <Button variant='solid' onClick={onClose}>Close</Button>
          </Box>

        </ModalDialog>
      </ModalOverflow>
    </Modal>
  );
}
+145
View File
@@ -0,0 +1,145 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, FormControl, FormHelperText, FormLabel, Option, Radio, RadioGroup, Select, Stack, Switch, Tooltip } from '@mui/joy';
import InfoOutlinedIcon from '@mui/icons-material/InfoOutlined';
import KeyboardArrowDownIcon from '@mui/icons-material/KeyboardArrowDown';
import WidthNormalIcon from '@mui/icons-material/WidthNormal';
import WidthWideIcon from '@mui/icons-material/WidthWide';
import { Section } from '@/common/components/Section';
import { hideOnMobile, settingsGap } from '@/common/theme';
import { useSettingsStore } from '@/common/state/store-settings';
// languages is defined as a JSON file
import languages from './languages.json' assert { type: 'json' };
/**
 * Language picker for speech input/output, bound to the persisted
 * 'preferredLanguage' setting.
 *
 * Options come from the bundled languages.json, where each language maps
 * either to a single locale code (string) or to a per-country map of codes.
 */
function LanguageSelect() {
  // external state
  const { preferredLanguage, setPreferredLanguage } = useSettingsStore(state => ({ preferredLanguage: state.preferredLanguage, setPreferredLanguage: state.setPreferredLanguage }), shallow);

  // FIXED: the event parameter was typed 'any'; it is unused, so 'unknown' is the
  // safe choice (and remains assignable to the Select onChange listener type).
  // Also dropped the redundant 'as string' cast: newValue is already narrowed.
  const handleLanguageChanged = (_event: unknown, newValue: string | null) => {
    if (!newValue) return;
    setPreferredLanguage(newValue);
    // NOTE: disabled, to make sure the code can be adapted at runtime - will re-enable to trigger translations, if not dynamically switchable
    //if (typeof window !== 'undefined')
    //  window.location.reload();
  };

  // build the flat list of <Option>s once; languages.json is a static import, so no deps
  const languageOptions = React.useMemo(() => Object.entries(languages).map(([language, localesOrCode]) =>
    typeof localesOrCode === 'string'
      ? (
        // simple case: language -> single locale code
        <Option key={localesOrCode} value={localesOrCode}>
          {language}
        </Option>
      ) : (
        // nested case: language -> { country: code }; React flattens the nested arrays
        Object.entries(localesOrCode).map(([country, code]) => (
          <Option key={code} value={code}>
            {`${language} (${country})`}
          </Option>
        ))
      )), []);

  return (
    <Select value={preferredLanguage} onChange={handleLanguageChanged}
            indicator={<KeyboardArrowDownIcon />}
            slotProps={{
              root: { sx: { minWidth: 200 } },
              indicator: { sx: { opacity: 0.5 } },
            }}>
      {languageOptions}
    </Select>
  );
}
/**
 * General UI preferences section of the Settings modal.
 *
 * Edits: window centering, zen appearance, markdown rendering, the purpose
 * finder search bar, and the speech language. All values are read from and
 * written through to the persisted settings store (zustand), so changes
 * apply immediately.
 */
export function UISettings() {
  // external state
  const {
    centerMode, setCenterMode,
    renderMarkdown, setRenderMarkdown,
    showPurposeFinder, setShowPurposeFinder,
    zenMode, setZenMode,
  } = useSettingsStore(state => ({
    centerMode: state.centerMode, setCenterMode: state.setCenterMode,
    renderMarkdown: state.renderMarkdown, setRenderMarkdown: state.setRenderMarkdown,
    showPurposeFinder: state.showPurposeFinder, setShowPurposeFinder: state.setShowPurposeFinder,
    zenMode: state.zenMode, setZenMode: state.setZenMode,
  }), shallow);

  // write-through handlers: every change is persisted by the store right away
  // (the `|| 'wide'` fallback guards against an empty radio value)
  const handleCenterModeChange = (event: React.ChangeEvent<HTMLInputElement>) => setCenterMode(event.target.value as 'narrow' | 'wide' | 'full' || 'wide');

  const handleZenModeChange = (event: React.ChangeEvent<HTMLInputElement>) => setZenMode(event.target.value as 'clean' | 'cleaner');

  const handleRenderMarkdownChange = (event: React.ChangeEvent<HTMLInputElement>) => setRenderMarkdown(event.target.checked);

  const handleShowSearchBarChange = (event: React.ChangeEvent<HTMLInputElement>) => setShowPurposeFinder(event.target.checked);

  return (
    <Section>
      <Stack direction='column' sx={{ gap: settingsGap }}>

        {/* Centering: hidden on mobile (hideOnMobile) */}
        <FormControl orientation='horizontal' sx={{ ...hideOnMobile, alignItems: 'center', justifyContent: 'space-between' }}>
          <Box>
            <FormLabel>Centering</FormLabel>
            <FormHelperText>{centerMode === 'full' ? 'Full screen' : centerMode === 'narrow' ? 'Narrow' : 'Wide'} chat</FormHelperText>
          </Box>
          <RadioGroup orientation='horizontal' value={centerMode} onChange={handleCenterModeChange}>
            <Radio value='narrow' label={<WidthNormalIcon sx={{ width: 25, height: 24, mt: -0.25 }} />} />
            <Radio value='wide' label={<WidthWideIcon sx={{ width: 25, height: 24, mt: -0.25 }} />} />
            <Radio value='full' label='Full' />
          </RadioGroup>
        </FormControl>

        {/* Appearance: 'clean' shows senders, 'cleaner' hides senders and menus */}
        <FormControl orientation='horizontal' sx={{ alignItems: 'center', justifyContent: 'space-between' }}>
          <Box>
            <FormLabel>Appearance</FormLabel>
            <FormHelperText>{zenMode === 'clean' ? 'Show senders' : 'Hide senders and menus'}</FormHelperText>
          </Box>
          <RadioGroup orientation='horizontal' value={zenMode} onChange={handleZenModeChange}>
            {/*<Radio value='clean' label={<Face6Icon sx={{ width: 24, height: 24, mt: -0.25 }} />} />*/}
            <Radio value='clean' label='Clean' />
            <Radio value='cleaner' label='Empty' />
          </RadioGroup>
        </FormControl>

        {/* Render chat messages as markdown or as plain text */}
        <FormControl orientation='horizontal' sx={{ justifyContent: 'space-between' }}>
          <Box>
            <FormLabel>Markdown</FormLabel>
            <FormHelperText>{renderMarkdown ? 'Render markdown' : 'Text only'}</FormHelperText>
          </Box>
          <Switch checked={renderMarkdown} onChange={handleRenderMarkdownChange}
                  endDecorator={renderMarkdown ? 'On' : 'Off'}
                  slotProps={{ endDecorator: { sx: { minWidth: 26 } } }} />
        </FormControl>

        {/* Toggle the purpose-finder search bar */}
        <FormControl orientation='horizontal' sx={{ justifyContent: 'space-between' }}>
          <Box>
            <FormLabel>Purpose finder</FormLabel>
            <FormHelperText>{showPurposeFinder ? 'Show search bar' : 'Hide search bar'}</FormHelperText>
          </Box>
          <Switch checked={showPurposeFinder} onChange={handleShowSearchBarChange}
                  endDecorator={showPurposeFinder ? 'On' : 'Off'}
                  slotProps={{ endDecorator: { sx: { minWidth: 26 } } }} />
        </FormControl>

        {/* Preferred language for speech input/output (see LanguageSelect) */}
        <FormControl orientation='horizontal' sx={{ alignItems: 'center', justifyContent: 'space-between' }}>
          <Box>
            <Tooltip title='Currently for Microphone input and Voice output. Microphone support varies by browser (iPhone/Safari lacks speech input). We will use the ElevenLabs MultiLanguage model if a language other than English is selected.'>
              <FormLabel>
                Language <InfoOutlinedIcon sx={{ mx: 0.5 }} />
              </FormLabel>
            </Tooltip>
            <FormHelperText>
              Speech input
            </FormHelperText>
          </Box>
          <LanguageSelect />
        </FormControl>

      </Stack>
    </Section>
  );
}
+12 -10
View File
@@ -2,27 +2,29 @@
* Application Identity (Brand)
*
* Also note that the 'Brand' is used in the following places:
 *  - README.md all over
 *  - package.json app-slug and version
 *  - public/manifest.json name, short_name, description, theme_color, background_color
*/
export const Brand = {
  // Name: 'big-AGI',
  // UpperName: 'BIG-AGI',
  Title: {
    Base: 'big-AGI',
    // window/tab title; the '[DEV] ' prefix makes development builds easy to spot
    Common: (process.env.NODE_ENV === 'development' ? '[DEV] ' : '') + 'big-AGI',
  },
  // SEO / social metadata.
  // FIXED: the Meta literal contained duplicate keys (Description, SiteName,
  // ThemeColor, TwitterSite) - a TypeScript compile error, and last-wins at
  // runtime; the later occurrences are the ones kept here.
  Meta: {
    SiteName: 'big-AGI',
    Title: 'big-AGI: Personal AGI App',
    Description: 'big-AGI is a free, open-source project to build a general artificial intelligence (AGI) that can solve any problem.',
    Keywords: 'artificial general intelligence, agi, openai, gpt-4, ai personas, code execution, pdf import, voice i/o, ai chat, artificial intelligence',
    ThemeColor: '#434356',
    TwitterSite: '@bigagienergy',
  },
  URIs: {
    // Slug: 'big-agi',
    Home: 'https://big-agi.com',
    // App: 'https://get.big-agi.com',
    CardImage: 'https://big-agi.com/icons/card-dark-1200.png',
    OpenRepo: 'https://github.com/enricoros/big-agi',
    SupportInvite: 'https://discord.gg/MkH4qj2Jp9',
    // Twitter: 'https://www.twitter.com/enricoros',
  },
};
-93
View File
@@ -1,93 +0,0 @@
import * as React from 'react';
import { KeyboardEvent } from 'react';
import { ClickAwayListener, Popper, PopperPlacementType } from '@mui/base';
import { MenuList, styled, VariantProp } from '@mui/joy';
import { SxProps } from '@mui/system';
// Popper extended with 'sx' prop support; the default zIndex of 1000 keeps menus above page content
const Popup = styled(Popper)({
  zIndex: 1000,
});

/**
 * Workaround to the Menu in Joy 5-beta.0.
 *
 * This component addresses major changes in the Menu component in Joy 5-beta.0:
 *  - missing callback for onClose
 *  - clickaway listener not working
 *  - dynamic menus unsupported
 *  - ...
 */
export function CloseableMenu(props: {
  open: boolean, anchorEl: HTMLElement | null, onClose: () => void,
  variant?: VariantProp,
  // color?: ColorPaletteProp,
  // size?: 'sm' | 'md' | 'lg',
  placement?: PopperPlacementType,
  maxHeightGapPx?: number,
  noTopPadding?: boolean,
  noBottomPadding?: boolean,
  sx?: SxProps,
  zIndex?: number,
  children?: React.ReactNode,
}) {

  // stop the triggering event from bubbling further, then notify the owner
  const dismiss = (event: MouseEvent | TouchEvent | KeyboardEvent) => {
    event.stopPropagation();
    props.onClose();
  };

  // keyboard handling: Tab closes; Escape refocuses the anchor, then closes
  const handleListKeyDown = (event: KeyboardEvent) => {
    if (event.key === 'Escape') {
      props.anchorEl?.focus();
      dismiss(event);
    } else if (event.key === 'Tab')
      dismiss(event);
  };

  // assemble the MenuList styling from the optional props
  const menuListSx: SxProps = {
    '--Icon-fontSize': 'var(--joy-fontSize-xl2)',
    '--ListItem-minHeight': '3rem',
    '--ListItemDecorator-size': '2.75rem',
    backgroundColor: 'background.popup',
    boxShadow: 'md',
    ...(props.maxHeightGapPx !== undefined ? { maxHeight: `calc(100dvh - ${props.maxHeightGapPx}px)`, overflowY: 'auto' } : {}),
    ...(props.noTopPadding ? { pt: 0 } : {}),
    ...(props.noBottomPadding ? { pb: 0 } : {}),
    ...(props.sx || {}),
  };

  return (
    <Popup
      role={undefined}
      open={props.open && props.anchorEl !== null}
      anchorEl={props.anchorEl}
      placement={props.placement}
      disablePortal={false}
      modifiers={[{
        name: 'offset',
        options: {
          offset: [0, 4],
        },
      }]}
      // note: a truthy check, so an explicit zIndex of 0 falls back to the default
      sx={props.zIndex
        ? { zIndex: props.zIndex }
        : {}
      }
    >
      <ClickAwayListener onClickAway={dismiss}>
        <MenuList
          variant={props.variant}
          // color={props.color}
          onKeyDown={handleListKeyDown}
          sx={menuListSx}
        >
          {props.children}
        </MenuList>
      </ClickAwayListener>
    </Popup>
  );
}

Some files were not shown because too many files have changed in this diff Show More