Compare commits


4 Commits

Author SHA1 Message Date
Enrico Ros 3a9a6b0273 Merge branch 'jondwillis-feature/auth' into jondwillis-feature/auth-merged 2023-05-01 23:44:07 -07:00
Enrico Ros 3b51c39fc3 Small bits 2023-05-01 22:13:34 -07:00
Enrico Ros 05293ba557 Merge branch 'feature/auth' of https://github.com/jondwillis/nextjs-chatgpt-app into jondwillis-feature/auth 2023-05-01 22:13:02 -07:00
jon d18d5323aa auth squash and rebase 2023-04-11 13:25:39 -07:00
337 changed files with 7473 additions and 21288 deletions
-38
View File
@@ -1,38 +0,0 @@
# big-AGI non-code files
/docs/
README.md
# Node build artifacts
/node_modules
/.pnp
.pnp.js
# next.js
/.next/
/out/
# production
/build
# versioning
.git/
.github/
# IDEs
.idea/
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
.pnpm-debug.log*
# local env files
.env*.local
# vercel
.vercel
# typescript
*.tsbuildinfo
next-env.d.ts
+33
View File
@@ -0,0 +1,33 @@
# [Recommended for local deployments] Backend API key for OpenAI, so that users don't need one (UI > this > '')
OPENAI_API_KEY=
# [Not needed] Set the backend host for the OpenAI API, to enable platforms such as Helicone (UI > this > api.openai.com)
OPENAI_API_HOST=
# [Not needed] Sets the "OpenAI-Organization" header field to support organization users (UI > this > '')
OPENAI_API_ORG_ID=
# [Optional] Enables ElevenLabs credentials on the server side - for optional text-to-speech
ELEVENLABS_API_KEY=
ELEVENLABS_API_HOST=
ELEVENLABS_VOICE_ID=
# [Optional] Prodia credentials on the server side - for optional image generation
PRODIA_API_KEY=
# [Optional, Search] Google Cloud API Key
# https://console.cloud.google.com/apis/credentials
GOOGLE_CLOUD_API_KEY=
# [Optional, Search] Google Custom/Programmable Search Engine ID
# https://programmablesearchengine.google.com/
GOOGLE_CSE_ID=
# see docs/authentication.md to configure this section
AUTH_TYPE=
# [At least one required if AUTH_TYPE == credential] You may declare credentials for users from 0 to 99.
AUTH_USER_0=
AUTH_PASSWORD_0=
# [Required if AUTH_TYPE == credential and not in development mode] See: https://next-auth.js.org/configuration/options#nextauth_url
NEXTAUTH_URL=
# [Required if AUTH_TYPE == credential] See: https://next-auth.js.org/configuration/options#secret
NEXTAUTH_SECRET=
-13
View File
@@ -1,13 +0,0 @@
# These are supported funding model platforms
github: enricoros # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
-25
View File
@@ -1,25 +0,0 @@
---
name: Bug report
about: Omg what's happening?
title: "[BUG]"
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
Where is it happening?
- Which device [Mobile/Desktop, os version]:
- Which browser:
- Which website:
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots / context**
If applicable, please add screenshots or additional context
@@ -1,33 +0,0 @@
---
name: Maintainers-Release
about: Maintainers
title: Release 1.2.3
labels: ''
assignees: enricoros
---
Release checklist:
- [ ] Update the [Roadmap](https://github.com/users/enricoros/projects/4/views/2) calling out shipped features
- [ ] Create and update a [Milestone](https://github.com/enricoros/big-agi/milestones) for the release
- [ ] Assign this task
- [ ] Assign all the shipped roadmap Issues
- [ ] Assign the relevant [recently closed Issues](https://github.com/enricoros/big-agi/issues?q=is%3Aclosed+sort%3Aupdated-desc)
- Code changes:
- [ ] Create a release branch 'release-x.y.z', and commit:
- [ ] Update the release version in package.json, and `npm i`
- [ ] Update in-app News [src/apps/news/news.data.tsx](src/apps/news/news.data.tsx)
- [ ] Update the in-app News version number
- [ ] Update the readme with the new release
- [ ] Copy the highlights to the [changelog](docs/changelog.md)
- Release:
- [ ] merge onto main
- [ ] verify deployment on Vercel
- [ ] verify container on GitHub Packages
- create a GitHub release
- [ ] name it 'vX.Y.Z'
- [ ] copy the release notes and link appropriate artifacts
- Announce:
- [ ] Discord announcement
- [ ] Twitter announcement
-17
View File
@@ -1,17 +0,0 @@
---
name: Roadmap request
about: Suggest a roadmap item
title: "[Roadmap]"
labels: ''
assignees: ''
---
**Why**
The reason behind the request - we'd love it framed as "users will be able to do x" rather than as a quick-aging hype-tech-of-the-day request
**Concise description**
A clear and concise description of what you want to happen.
**Requirements**
If you can, please detail the changes you expect in UX, user workflows, technology, architecture (if not, the reviewers will do it for you)
+1 -2
View File
@@ -26,8 +26,7 @@ yarn-error.log*
.pnpm-debug.log*
# local env files
.env
.env.*
.env*.local
# vercel
.vercel
+1 -2
View File
@@ -1,7 +1,6 @@
{
"singleAttributePerLine": false,
"singleQuote": true,
"trailingComma": "all",
"endOfLine": "lf",
"printWidth": 160
}
}
+24 -39
View File
@@ -1,56 +1,41 @@
# Base
FROM node:18-alpine AS base
ENV NEXT_TELEMETRY_DISABLED 1
# Test
FROM node:18-alpine as test-target
ENV NODE_ENV=development
ENV PATH $PATH:/usr/src/app/node_modules/.bin
# Dependencies
FROM base AS deps
WORKDIR /app
WORKDIR /usr/src/app
# Dependency files
COPY package*.json ./
COPY prisma ./prisma
# Install dependencies, including dev (release builds should use npm ci)
ENV NODE_ENV development
RUN npm ci
# CI and release builds should use npm ci to fully respect the lockfile.
# Local development may use npm install for opportunistic package updates.
ARG npm_install_command=ci
RUN npm $npm_install_command
# Builder
FROM base AS builder
WORKDIR /app
# Copy development deps and source
COPY --from=deps /app/node_modules ./node_modules
COPY . .
# Build the application
ENV NODE_ENV production
# Build
FROM test-target as build-target
ENV NODE_ENV=production
# Use build tools, installed as development packages, to produce a release build.
RUN npm run build
# Reduce installed packages to production-only
# Reduce installed packages to production-only.
RUN npm prune --production
# Runner
FROM base AS runner
WORKDIR /app
# Archive
FROM node:18-alpine as archive-target
ENV NODE_ENV=production
ENV PATH $PATH:/usr/src/app/node_modules/.bin
# As user
RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 nextjs
WORKDIR /usr/src/app
# Copy Built app
COPY --from=builder --chown=nextjs:nodejs /app/public public
COPY --from=builder --chown=nextjs:nodejs /app/.next .next
COPY --from=builder --chown=nextjs:nodejs /app/node_modules node_modules
# Minimal ENV for production
ENV NODE_ENV production
ENV PATH $PATH:/app/node_modules/.bin
# Run as non-root user
USER nextjs
# Include only the release build and production packages.
COPY --from=build-target /usr/src/app/node_modules node_modules
COPY --from=build-target /usr/src/app/.next .next
# Expose port 3000 for the application to listen on
EXPOSE 3000
# Start the application
CMD ["next", "start"]
CMD ["next", "start"]
+81 -79
View File
@@ -1,63 +1,37 @@
# BIG-AGI 🧠✨
# `BIG-AGI` 🤖💬
Welcome to big-AGI 👋, the GPT application for Pro users that combines utility,
simplicity, and speed. Powered by the latest models from 7 vendors, including
open-source, `big-AGI` offers best-in-class Voice and Chat with AI Personas,
visualizations, coding, drawing, calling, and much more -- all in a polished UX.
Welcome to `big-AGI`, FKA `nextjs-chatgpt-app`. 👋🎉
Personal AGI App, powered by `OpenAI GPT-4` and beyond. Designed for smart humans and super-heroes,
this responsive web app comes with Personas, Drawing, Code Execution, PDF imports, Voice support,
data Rendering, AGI functions, chats and more. Show your friends some `#big-AGI-energy` 🚀
Pros use big-AGI. 🚀 Developers love big-AGI. 🤖
[![Official Website](https://img.shields.io/badge/BIG--AGI.com-%23096bde?style=for-the-badge&logo=vercel&label=launch)](https://big-agi.com)
[![Official Website](https://img.shields.io/badge/BIG--AGI.com-%23096bde?style=for-the-badge&logo=vercel&label=demo)](https://big-agi.com)
Or fork & run on Vercel
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-agi&env=OPENAI_API_KEY,OPENAI_API_HOST&envDescription=OpenAI%20KEY%20for%20your%20deployment.%20Set%20HOST%20only%20if%20non-default.)
## 🗺️ Explore the Roadmap
The development of big-AGI is an open book. Our **[public roadmap](https://github.com/users/enricoros/projects/4/views/2)** is
live, providing a detailed look at the current and future development of the application.
- Got a suggestion? [_Add your roadmap ideas_](https://github.com/enricoros/big-agi/issues/new?&template=roadmap-request.md)
- Want to contribute? [_Pick up a task!_](https://github.com/users/enricoros/projects/4/views/4) - _easy_ to _pro_
### What's New in 1.5.0 🌟
- **Continued Voice**: Engage with hands-free interaction for a seamless experience
- **Visualization Tool**: Create data representations with our new visualization capabilities
- **Ollama Local Models**: Leverage local models support with our comprehensive guide
- **Text Tools**: Enjoy tools including highlight differences to refine your content
- **Mermaid Diagramming**: Render complex diagrams with our Mermaid language support
- **OpenAI 1106 Chat Models**: Experience the cutting-edge capabilities of the latest OpenAI models
- **SDXL Support**: Enhance your image generation with SDXL support for Prodia
- **Cloudflare OpenAI API Gateway**: Integrate with Cloudflare for a robust API gateway
- **Helicone for Anthropic**: Utilize Helicone's tools for Anthropic models
Check out the [big-AGI open roadmap](https://github.com/users/enricoros/projects/4/views/2), or
the [past releases changelog](docs/changelog.md).
## ✨ Key Features 👊
## Useful 👊
![Ask away, paste a ton, copy the gems](docs/pixels/big-AGI-compo1.png)
[More](docs/pixels/big-AGI-compo2b.png), [screenshots](docs/pixels).
- **AI Personas**: Tailor your AI interactions with customizable personas
- **Sleek UI/UX**: A smooth, intuitive, and mobile-responsive interface
- **Efficient Interaction**: Voice commands, OCR, and drag-and-drop file uploads
- **Multiple AI Models**: Choose from a variety of leading AI providers
- **Privacy First**: Self-host and use your own API keys for full control
- **Advanced Tools**: Execute code, import PDFs, and summarize documents
- **Seamless Integrations**: Enhance functionality with various third-party services
- **Open Roadmap**: Contribute to the progress of big-AGI
- Engaging AI Personas
- Clean UX, w/ token counters
- Privacy: user-owned API keys and localStorage
- Human I/O: Advanced voice support (TTS, STT)
- Machine I/O: PDF import & Summarization, code execution
- Many more updates & integrations: ElevenLabs, Helicone, Paste.gg, Prodia
- Coming up: automatic-AGI reasoning
## 💖 Support
## Support 🙌
[//]: # ([![Official Discord](https://img.shields.io/discord/1098796266906980422?label=discord&logo=discord&logoColor=%23fff&style=for-the-badge)](https://discord.gg/MkH4qj2Jp9))
[![Official Discord](https://discordapp.com/api/guilds/1098796266906980422/widget.png?style=banner2)](https://discord.gg/MkH4qj2Jp9)
* Enjoy the hosted open-source app on [big-AGI.com](https://big-agi.com)
* [Chat with us](https://discord.gg/MkH4qj2Jp9)
* Deploy your [fork](https://github.com/enricoros/big-agi/fork) for your friends and family
* Enjoy the hosted open-source app on [big-AGI.com](https://get.big-agi.com)
* [Chat with us](https://discord.gg/MkH4qj2Jp9). We just started!
* Deploy your [fork](https://github.com/enricoros/big-agi/fork) and surprise your friends with big-GPT
energy!
* send PRs! ...
🎭[Editing Personas](https://github.com/enricoros/big-agi/issues/35),
🧩[Reasoning Systems](https://github.com/enricoros/big-agi/issues/36),
@@ -66,7 +40,64 @@ the [past releases changelog](docs/changelog.md).
<br/>
## 🧩 Develop
## Latest Drops 🚀
#### 🚨 May: mature #big-agi-energy
- 🎉 **Authentication** basic user authentication framework
#### April: #big-agi-energy grows
- 🎉 **[Google Search](docs/pixels/feature_react_google.png)** active in ReAct - add your keys to Settings > Google Search
- 🎉 **[Reason+Act](docs/pixels/feature_react_turn_on.png)** preview feature - activate with 2-taps on the 'Chat' button
- 🎉 **[Image Generation](docs/pixels/feature_imagine_command.png)** using Prodia (BYO Keys) - /imagine - or menu option
- 🎉 **[Voice Synthesis](docs/pixels/feature_voice_1.png)** 📣 with ElevenLabs, including selection of custom voices
- 🎉 **[Precise Token Counter](docs/pixels/feature_token_counter.png)** 📈 extra-useful to pack the context window
- 🎉 **[Install Mobile APP](docs/pixels/feature_pwa.png)** 📲 looks like native (@harlanlewis)
- 🎉 **[UI language](docs/pixels/feature_language.png)** with auto-detect, and future app language! (@tbodyston)
- 🎉 **PDF Summarization** 🧩🤯 - ask questions to a PDF! (@fredliubojin)
- 🎉 **Code Execution: [Codepen](https://codepen.io/)/[Replit](https://replit.com/)** 💻 (@harlanlewis)
- 🎉 **[SVG Drawing](docs/pixels/feature_svg_drawing.png)** - draw with AI 🎨
- 🎉 Chats: multiple chats, AI titles, Import/Export, Selection mode
- 🎉 Rendering: Markdown, SVG, improved Code blocks
- 🎉 Integrations: OpenAI organization ID
- 🎉 [Cloudflare deployment instructions](docs/deploy-cloudflare.md),
[awesome-agi](https://github.com/enricoros/awesome-agi)
- 🎉 [Typing Avatars](docs/pixels/gif_typing_040123.gif) ⌨️
<!-- p><a href="docs/pixels/gif_typing_040123.gif"><img src="docs/pixels/gif_typing_040123.gif" width='700' alt="New Typing Avatars"/></a></p -->
#### March: first release
- 🎉 **[AI Personas](docs/pixels/feature_purpose_two.png)** - including Code, Science, Corporate, and Chat 🎭
- 🎉 **Privacy**: user-owned API keys 🔑 and localStorage 🛡️
- 🎉 **Context** - Attach or [Drag & Drop files](docs/pixels/feature_drop_target.png) to add them to the prompt 📁
- 🎉 **Syntax highlighting** - for multiple languages 🌈
- 🎉 **Code Execution: Sandpack** - [now on branch](https://github.com/enricoros/big-agi/commit/f678a0d463d5e9cf0733f577e11bd612b7902d89) `variant-code-execution`
- 🎉 Chat with GPT-4 and 3.5 Turbo 🧠💨
- 🎉 Real-time streaming of AI responses ⚡
- 🎉 **Voice Input** 🎙️ - works great on Chrome / Windows
- 🎉 Integration: **[Paste.gg](docs/pixels/feature_paste_gg.png)** integration for chat sharing 📥
- 🎉 Integration: **[Helicone](https://www.helicone.ai/)** integration for API observability 📊
- 🌙 Dark mode - Wide mode ⛶
<br/>
### Basic Authentication for public deployments 🔐
To protect the web app owner from incurring unauthorized costs when deploying the app with a backend API key (`OPENAI_API_KEY`), you can [set up basic authentication](/docs/authentication.md).
## Why this? 💡
Because the official Chat ___lacks important features___, is ___more limited than the API___, is at times
___slow or unavailable___, and you cannot deploy it yourself, remix it, add features, or share it with
your friends.
Our users report that ___big-AGI is faster___, ___more reliable___, and ___feature-rich___,
with the features that matter to them.
![Much features, so fun](docs/pixels/big-AGI-compo2b.png)
## Code 🧩
![TypeScript](https://img.shields.io/badge/TypeScript-007ACC?style=&logo=typescript&logoColor=white)
![React](https://img.shields.io/badge/React-61DAFB?style=&logo=react&logoColor=black)
@@ -81,44 +112,15 @@ npm install
npm run dev
```
The app will be running on `http://localhost:3000`
Now the app should be running on `http://localhost:3000`
Integrations:
### Integrations:
* Local models: Ollama, Oobabooga, LocalAi, etc.
* [ElevenLabs](https://elevenlabs.io/) Voice Synthesis (bring your own voice too) - Settings > Text To Speech
* [Helicone](https://www.helicone.ai/) LLM Observability Platform - Models > OpenAI > Advanced > API Host: 'oai.hconeai.com'
* [Helicone](https://www.helicone.ai/) LLM Observability Platform - Settings > Advanced > API Host: 'oai.hconeai.com'
* [Paste.gg](https://paste.gg/) Paste Sharing - Chat Menu > Share via paste.gg
* [Prodia](https://prodia.com/) Image Generation - Settings > Image Generation > Api Key & Model
## 🐳 Deploy with Docker
For more detailed information on deploying with Docker, please refer to the [docker deployment documentation](docs/deploy-docker.md).
Build and run:
```bash
docker build -t big-agi .
docker run -d -p 3000:3000 big-agi
```
Or run the official container:
- manually: `docker run -d -p 3000:3000 ghcr.io/enricoros/big-agi`
- or, with docker-compose: `docker-compose up`
## ☁️ Deploy on Cloudflare Pages
Please refer to the [Cloudflare deployment documentation](docs/deploy-cloudflare.md).
## 🚀 Deploy on Vercel
Create your GitHub fork, create a Vercel project over that fork, and deploy it. Or press the button below for convenience.
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-agi&env=OPENAI_API_KEY,OPENAI_API_HOST&envDescription=OpenAI%20KEY%20for%20your%20deployment.%20Set%20HOST%20only%20if%20non-default.)
<br/>
This project is licensed under the MIT License.
@@ -130,4 +132,4 @@ This project is licensed under the MIT License.
[//]: # ([![GitHub issues]&#40;https://img.shields.io/github/issues/enricoros/big-agi&#41;]&#40;https://github.com/enricoros/big-agi/issues&#41;)
Made with 💙
Made with 💙
-52
View File
@@ -1,52 +0,0 @@
import { createEmptyReadableStream, safeErrorString, serverFetchOrThrow } from '~/server/wire';
import { elevenlabsAccess, elevenlabsVoiceId, ElevenlabsWire, speechInputSchema } from '~/modules/elevenlabs/elevenlabs.router';
/* NOTE: Why does this file even exist?
This file is a workaround for a limitation in tRPC; it does not support ArrayBuffer responses,
and that would force us to use base64 encoding for the audio data, which would be a waste of
bandwidth. So instead, we use this file to make the request to ElevenLabs, and then return the
response as an ArrayBuffer. Unfortunately this means duplicating the code in the server-side
and client-side vs. the tRPC implementation. So at least we recycle the input structures.
*/
const handler = async (req: Request) => {
try {
// construct the upstream request
const {
elevenKey, text, voiceId, nonEnglish,
streaming, streamOptimization,
} = speechInputSchema.parse(await req.json());
const path = `/v1/text-to-speech/${elevenlabsVoiceId(voiceId)}` + (streaming ? `/stream?optimize_streaming_latency=${streamOptimization || 1}` : '');
const { headers, url } = elevenlabsAccess(elevenKey, path);
const body: ElevenlabsWire.TTSRequest = {
text: text,
...(nonEnglish && { model_id: 'eleven_multilingual_v1' }),
};
// elevenlabs POST
const upstreamResponse: Response = await serverFetchOrThrow(url, 'POST', headers, body);
// NOTE: this is disabled, as we pass-through what we get upstream for speed, as it is not worthy
// to wait for the entire audio to be downloaded before we send it to the client
// if (!streaming) {
// const audioArrayBuffer = await upstreamResponse.arrayBuffer();
// return new NextResponse(audioArrayBuffer, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
// }
// stream the data to the client
const audioReadableStream = upstreamResponse.body || createEmptyReadableStream();
return new Response(audioReadableStream, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
} catch (error: any) {
const fetchOrVendorError = safeErrorString(error) + (error?.cause ? ' · ' + error.cause : '');
console.log(`api/elevenlabs/speech: fetch issue: ${fetchOrVendorError}`);
return new Response(`[Issue] elevenlabs: ${fetchOrVendorError}`, { status: 500 });
}
};
export const runtime = 'edge';
export { handler as POST };
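As a usage illustration, here is a minimal browser-side sketch of calling this route. The `/api/elevenlabs/speech` path is inferred from the log line above, and the request fields mirror the `speechInputSchema` destructuring; the exact schema requirements are an assumption:

```ts
// Hypothetical caller for the speech route above; the path and accepted fields
// are inferred from the handler, not confirmed against the actual schema.
async function speak(text: string): Promise<void> {
  const res = await fetch('/api/elevenlabs/speech', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ text, streaming: false }),
  });
  if (!res.ok) throw new Error(`speech failed: ${await res.text()}`);
  // the handler responds with an 'audio/mpeg' body
  const blob = new Blob([await res.arrayBuffer()], { type: 'audio/mpeg' });
  await new Audio(URL.createObjectURL(blob)).play();
}
```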
-2
View File
@@ -1,2 +0,0 @@
export const runtime = 'edge';
export { openaiStreamingRelayHandler as POST } from '~/modules/llms/transports/server/openai/openai.streaming';
-19
View File
@@ -1,19 +0,0 @@
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
import { appRouterEdge } from '~/server/api/trpc.router';
import { createTRPCFetchContext } from '~/server/api/trpc.server';
const handlerEdgeRoutes = (req: Request) =>
fetchRequestHandler({
router: appRouterEdge,
endpoint: '/api/trpc-edge',
req,
createContext: createTRPCFetchContext,
onError:
process.env.NODE_ENV === 'development'
? ({ path, error }) => console.error(`❌ tRPC-edge failed on ${path ?? '<no-path>'}:`, error)
: undefined,
});
export const runtime = 'edge';
export { handlerEdgeRoutes as GET, handlerEdgeRoutes as POST };
-19
View File
@@ -1,19 +0,0 @@
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
import { appRouterNode } from '~/server/api/trpc.router';
import { createTRPCFetchContext } from '~/server/api/trpc.server';
const handlerNodeRoutes = (req: Request) =>
fetchRequestHandler({
router: appRouterNode,
endpoint: '/api/trpc-node',
req,
createContext: createTRPCFetchContext,
onError:
process.env.NODE_ENV === 'development'
? ({ path, error }) => console.error(`❌ tRPC-node failed on ${path ?? '<no-path>'}:`, error)
: undefined,
});
export const runtime = 'nodejs';
export { handlerNodeRoutes as GET, handlerNodeRoutes as POST };
-10
View File
@@ -1,10 +0,0 @@
version: '3.9'
services:
big-agi:
image: ghcr.io/enricoros/big-agi:main
ports:
- "3000:3000"
env_file:
- .env
command: [ "next", "start", "-p", "3000" ]
+37
View File
@@ -0,0 +1,37 @@
### Authentication with NextAuth.js 🔐
To protect the web app owner from incurring unauthorized costs when deploying the app with a backend API
key (`OPENAI_API_KEY`), you can set up basic authentication using [NextAuth.js](https://next-auth.js.org/).
#### Configuration
Update your `.env` file or Environment Variables with the following variables:
```
# [Optional] Set the authentication type to "credential" to enable basic username/password authentication
AUTH_TYPE=credential
# [Required if AUTH_TYPE == credential] Define credentials for users - you can declare up to 100 users
AUTH_USER_0=your_username
AUTH_PASSWORD_0=your_password
AUTH_USER_1=...
AUTH_PASSWORD_1=...
...
# [Required if AUTH_TYPE == credential and *not in development mode*] See: https://next-auth.js.org/configuration/options#nextauth_url
NEXTAUTH_URL=https://example.com
# [Required if AUTH_TYPE == credential] See: https://next-auth.js.org/configuration/options#secret
NEXTAUTH_SECRET=your_nextauth_secret
```
You can add multiple users by incrementing the index, e.g., `AUTH_USER_1`, `AUTH_PASSWORD_1`, and so on. They do not
need to be contiguous.
#### Usage
Once you have set up basic authentication, users will be prompted to enter their credentials when accessing the app.
Only users with valid credentials will be able to use the app and make requests to the OpenAI API.
For more information on configuring and using NextAuth.js, refer to
the [official documentation](https://next-auth.js.org/).
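As a rough sketch of the indexed-credentials scheme described above (not necessarily the app's actual implementation), the `AUTH_USER_n` / `AUTH_PASSWORD_n` pairs can be collected from the environment like this:

```ts
// Sketch only: gather AUTH_USER_n / AUTH_PASSWORD_n pairs for n = 0..99.
// Indices need not be contiguous, per the note above.
interface Credential { username: string; password: string; }

function loadCredentials(env: NodeJS.ProcessEnv = process.env): Credential[] {
  const credentials: Credential[] = [];
  for (let i = 0; i < 100; i++) {
    const username = env[`AUTH_USER_${i}`];
    const password = env[`AUTH_PASSWORD_${i}`];
    if (username && password) credentials.push({ username, password });
  }
  return credentials;
}
```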
-86
View File
@@ -1,86 +0,0 @@
## Changelog
This is a high-level changelog. It calls out some of the high-level features, batched
by release.
- For the live roadmap, please see [the GitHub project](https://github.com/users/enricoros/projects/4/views/2)
### ✨ What's New in 1.5.0 👊 - Nov 19, 2023
- **Continued Voice**: Engage with hands-free interaction for a seamless experience
- **Visualization Tool**: Create data representations with our new visualization capabilities
- **Ollama Local Models**: Leverage local models support with our comprehensive guide
- **Text Tools**: Enjoy tools including highlight differences to refine your content
- **Mermaid Diagramming**: Render complex diagrams with our Mermaid language support
- **OpenAI 1106 Chat Models**: Experience the cutting-edge capabilities of the latest OpenAI models
- **SDXL Support**: Enhance your image generation with SDXL support for Prodia
- **Cloudflare OpenAI API Gateway**: Integrate with Cloudflare for a robust API gateway
- **Helicone for Anthropic**: Utilize Helicone's tools for Anthropic models
### 1.4.0: Sept/Oct: scale OUT
- **Expanded Model Support**: Azure and [OpenRouter](https://openrouter.ai/docs#models) models, including gpt-4-32k
- **Share and clone** conversations with public links
- Removed the 20 chats hard limit ([Ashesh3](https://github.com/enricoros/big-agi/pull/158))
- Latex Rendering
- Augmented Chat modes (Labs)
### July/Aug: More Better Faster
- **Camera OCR** - real-world AI - take a picture of a text, and chat with it
- **Anthropic models** support, e.g. Claude
- **Backup/Restore** - save chats, and restore them later
- **[Local model support with Oobabooga server](../docs/config-local-oobabooga)** - run your own LLMs!
- **Flatten conversations** - conversations summarizer with 4 modes
- **Fork conversations** - create a new chat, to experiment with different endings
- New commands: /s to add a System message, and /a for an Assistant message
- New Chat modes: Write-only - just appends the message, without assistant response
- Fix STOP generation - in sync with the Vercel team to fix a long-standing NextJS issue
- Fixes on the HTML block - particularly useful to see error pages
### June: scale UP
- **[New OpenAI Models](https://openai.com/blog/function-calling-and-other-api-updates) support** - 0613 models, including 16k and 32k
- **Cleaner UI** - with rationalized Settings, Modals, and Configurators
- **Dynamic Models Configurator** - easy connection with different model vendors
- **Multiple Model Vendors Support** framework to support many LLM vendors
- **Per-model Options** (temperature, tokens, etc.) for fine-tuning AI behavior to your needs
- Support for GPT-4-32k
- Improved Dialogs and Messages
- Much Enhanced DX: TRPC integration, modularization, pluggable UI, etc
### April / May: more #big-agi-energy
- **[Google Search](../docs/pixels/feature_react_google.png)** active in ReAct - add your keys to Settings > Google
Search
- **[Reason+Act](../docs/pixels/feature_react_turn_on.png)** preview feature - activate with 2-taps on the 'Chat' button
- **[Image Generation](../docs/pixels/feature_imagine_command.png)** using Prodia (BYO Keys) - /imagine - or menu option
- **[Voice Synthesis](../docs/pixels/feature_voice_1.png)** 📣 with ElevenLabs, including selection of custom voices
- **[Precise Token Counter](../docs/pixels/feature_token_counter.png)** 📈 extra-useful to pack the context window
- **[Install Mobile APP](../docs/pixels/feature_pwa.png)** 📲 looks like native (@harlanlewis)
- **[UI language](../docs/pixels/feature_language.png)** with auto-detect, and future app language! (@tbodyston)
- **PDF Summarization** 🧩🤯 - ask questions to a PDF! (@fredliubojin)
- **Code Execution: [Codepen](https://codepen.io/)/[Replit](https://replit.com/)** 💻 (@harlanlewis)
- **[SVG Drawing](../docs/pixels/feature_svg_drawing.png)** - draw with AI 🎨
- Chats: multiple chats, AI titles, Import/Export, Selection mode
- Rendering: Markdown, SVG, improved Code blocks
- Integrations: OpenAI organization ID
- [Cloudflare deployment instructions](../docs/deploy-cloudflare.md),
[awesome-agi](https://github.com/enricoros/awesome-agi)
- [Typing Avatars](../docs/pixels/gif_typing_040123.gif) ⌨️
<!-- p><a href="../docs/pixels/gif_typing_040123.gif"><img src="../docs/pixels/gif_typing_040123.gif" width='700' alt="New Typing Avatars"/></a></p -->
### March: first release
- **[AI Personas](../docs/pixels/feature_purpose_two.png)** - including Code, Science, Corporate, and Chat 🎭
- **Privacy**: user-owned API keys 🔑 and localStorage 🛡️
- **Context** - Attach or [Drag & Drop files](../docs/pixels/feature_drop_target.png) to add them to the prompt 📁
- **Syntax highlighting** - for multiple languages 🌈
- **Code Execution: Sandpack** -
  [now on branch](https://github.com/enricoros/big-agi/commit/f678a0d463d5e9cf0733f577e11bd612b7902d89) `variant-code-execution`
- Chat with GPT-4 and 3.5 Turbo 🧠💨
- Real-time streaming of AI responses ⚡
- **Voice Input** 🎙️ - works great on Chrome / Windows
- Integration: **[Paste.gg](../docs/pixels/feature_paste_gg.png)** integration for chat sharing 📥
- Integration: **[Helicone](https://www.helicone.ai/)** integration for API observability 📊
- 🌙 Dark mode - Wide mode ⛶
-87
View File
@@ -1,87 +0,0 @@
# Configuring Azure OpenAI Service with `big-AGI`
The entire procedure takes about 5 minutes and involves creating an Azure account,
setting up the Azure OpenAI service, deploying models, and configuring `big-AGI`
to access these models.
Please note that Azure operates on a 'pay-as-you-go' pricing model and requires
credit card information tied to a 'subscription' to the Azure service.
## Configuring `big-AGI`
If you have an `API Endpoint` and `API Key`, you can configure big-AGI as follows:
1. Launch the `big-AGI` application
2. Go to the **Models** settings
3. Add a Vendor and select **Azure OpenAI**
- Enter the Endpoint (e.g., 'https://your-openai-api-1234.openai.azure.com/')
- Enter the API Key (e.g., 'fd5...........................ba')
The deployed models are now available in the application. If you don't have a configured
Azure OpenAI service instance, continue with the next section.
## Setting Up Azure
### Step 1: Azure Account & Subscription
1. Create an account on [azure.microsoft.com](https://azure.microsoft.com/en-us/)
2. Go to the [Azure Portal](https://portal.azure.com/)
3. Click on **Create a resource** in the top left corner
4. Search for **Subscription** and select **[Create Subscription](https://portal.azure.com/#create/Microsoft.Subscription)**
- Fill in the required fields and click on **Create**
- Note down the **Subscription ID** (e.g., `12345678-1234-1234-1234-123456789012`)
### Step 2: Apply for Azure OpenAI Service
We'll now be creating "OpenAI"-specific resources on Azure. This requires you to apply for access,
and acceptance should be quick (sometimes a matter of minutes).
1. Visit [Azure OpenAI Service](https://aka.ms/azure-openai)
2. Click on **Apply for access**
- Fill in the required fields (including the subscription ID) and click on **Apply**
Once your application is accepted, you can create OpenAI resources on Azure.
### Step 3: Create Azure OpenAI Resource
For more information, see [Azure: Create and deploy OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal)
1. Click on **Create a resource** in the top left corner
2. Search for **OpenAI** and select **[Create OpenAI](https://portal.azure.com/#create/Microsoft.CognitiveServicesOpenAI)**
3. Fill in the necessary fields on the **Create OpenAI** page
![Creating an OpenAI service](pixels/config-azure-openai-create.png)
- Select the subscription
- Select a resource group or create a new one
- Select the region. Note that the region determines the available models.
> For instance, **Canada East** offers GPT-4-32k models. For the full list, see [GPT-4 models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models)
- Name the service (e.g., `your-openai-api-1234`)
- Select a pricing tier (e.g., `S0` for standard)
- Select: "All networks, including the internet, can access this resource."
- Click on **Review + create** and then **Create**
After creating the resource, you can access the API Keys and Endpoints. At any point, you can go to
the OpenAI Service instance page to get this information.
- Click on **Go to resource**
- Click on **Develop**
- Copy the `Endpoint`, called "Language API", e.g. 'https://your-openai-api-1234.openai.azure.com/'
- Copy `KEY 1`
### Step 4: Deploy Models
By default, Azure OpenAI resource instances don't have models available. You need to deploy the models you want to use.
1. Click on **Model Deployments > Manage Deployments**
2. Click on **+Create New Deployment**
![Deploying a model](pixels/config-azure-openai-deploy.png)
- Select the model you want to deploy
- Optionally select a version
- Name the model, e.g., `gpt4-32k-0613`
Repeat as necessary for each model you want to deploy.
## Resources
- [Azure OpenAI Service Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/)
- [Guide: Create an Azure OpenAI Resource](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal)
- [Azure OpenAI Models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models)
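Once a model is deployed, a direct REST call is a quick way to sanity-check the Endpoint, Key, and deployment name before configuring big-AGI. A minimal sketch; the endpoint, deployment name, and `api-version` value below are placeholders:

```ts
// Sketch: verify an Azure OpenAI deployment with a direct REST call.
// The endpoint, key, deployment name, and api-version are placeholders.
async function azureChatSanityCheck(): Promise<void> {
  const endpoint = 'https://your-openai-api-1234.openai.azure.com';
  const apiKey = process.env.AZURE_OPENAI_API_KEY!;
  const deployment = 'gpt4-32k-0613'; // the deployment name chosen in Step 4
  const res = await fetch(
    `${endpoint}/openai/deployments/${deployment}/chat/completions?api-version=2023-05-15`,
    {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', 'api-key': apiKey },
      body: JSON.stringify({ messages: [{ role: 'user', content: 'Hello!' }] }),
    },
  );
  console.log(res.status, await res.json());
}
```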
-34
View File
@@ -1,34 +0,0 @@
# Local LLM integration with `localai`
Integrate local Large Language Models (LLMs) with [LocalAI](https://localai.io).
_Last updated Nov 7, 2023_
## Instructions
### LocalAI installation and configuration
Follow the guide at: https://localai.io/basics/getting_started/
For instance with [Use luna-ai-llama2 with docker compose](https://localai.io/basics/getting_started/#example-use-luna-ai-llama2-model-with-docker-compose):
- clone LocalAI
- get the model
- copy the prompt template
- start docker
- -> the server will be listening on `localhost:8080`
- verify it works by going to [http://localhost:8080/v1/models](http://localhost:8080/v1/models) on
your browser and seeing listed the model you downloaded
### Integrating LocalAI with big-AGI
- Go to Models > Add a model source of type: **LocalAI**
- Enter the address: `http://localhost:8080` (default)
- If running remotely, replace localhost with the IP of the machine. Make sure to use the **IP:Port** format
- Load the models
- Select model & Chat
> NOTE: LocalAI does not list details about the models. Every model is assumed to be
> capable of chatting, with a context window of 4096 tokens.
> Please update the [src/modules/llms/transports/server/openai/models.data.ts](../src/modules/llms/transports/server/openai/models.data.ts)
> file with the mapping information between LocalAI model IDs and names/descriptions/tokens, etc.
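As an alternative to the browser check above, a small script can confirm that LocalAI answers on the OpenAI-compatible `/v1/models` endpoint (the address is the guide's default):

```ts
// Sketch: list a LocalAI server's models via the OpenAI-compatible endpoint.
async function listLocalAiModels(host = 'http://localhost:8080'): Promise<void> {
  const res = await fetch(`${host}/v1/models`);
  if (!res.ok) throw new Error(`LocalAI not reachable: HTTP ${res.status}`);
  const { data } = (await res.json()) as { data: { id: string }[] };
  console.log('Available models:', data.map((m) => m.id).join(', '));
}
```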
-54
View File
@@ -1,54 +0,0 @@
# Local LLM Integration with `text-web-ui` :llama:
Integrate local Large Language Models (LLMs) with
[oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui),
a specialized interface that includes a custom variant of the OpenAI API for a smooth integration process.
_Last updated on Nov 7, 2023_
### Components
The implementation of local LLMs involves the following components:
* **text-generation-webui**: A Python application with a Gradio web UI for operating Large Language Models.
* **Local Large Language Models "LLMs"**: Use large language models on your personal computer with consumer-grade GPUs or CPUs.
* **big-AGI**: An LLM UI that offers features such as Personas, OCR, Voice Support, Code Execution, AGI functions, and more.
## Instructions
This guide assumes that **big-AGI** is already installed on your system. Note that the text-generation-webui IP address must be accessible from the server running **big-AGI**.
### Text-web-ui Installation & Configuration:
1. Install [text-generation-webui](https://github.com/oobabooga/text-generation-webui#Installation).
- Download the one-click installer, extract it, and double-click on "start" - ~10 minutes
- Close it afterwards as we need to modify the startup flags
2. Enable the **openai extension**
- Edit `CMD_FLAGS.txt`
- Make sure that `--listen --extensions openai` is present and uncommented
3. Restart text-generation-webui
- Double-click on "start"
- You should see something like:
```
2023-11-07 21:24:26 INFO:Loading the extension "openai"...
2023-11-07 21:24:27 INFO:OpenAI compatible API URL:
http://0.0.0.0:5000/v1
```
- The OpenAI API is now running on port 5000, on both localhost (127.0.0.1) and your network IP address
4. Load your first model
- Open the text-generation-webui at [127.0.0.1:7860](http://127.0.0.1:7860/)
- Switch to the **Model** tab
- Download, for instance, `TheBloke/Llama-2-7b-Chat-GPTQ:gptq-4bit-32g-actorder_True` - 4.3 GB
- Select the model once it's loaded
### Integrating text-web-ui with big-AGI:
1. Integrating Text-Generation-WebUI with big-AGI:
- Go to Models > Add a model source of type: **Oobabooga**
- Enter the address: `http://127.0.0.1:5000`
- If running remotely, replace 127.0.0.1 with the IP of the machine. Make sure to use the **IP:Port** format
- Load the models
- The active model must be selected and LOADED on the text-generation-webui as it doesn't support model switching or parallel requests.
- Select model & Chat
Enjoy the privacy and flexibility of local LLMs with `big-AGI` and `text-generation-webui`!
-81
View File
@@ -1,81 +0,0 @@
# `Ollama` x `big-AGI` :llama:
This guide helps you connect [Ollama](https://ollama.ai) [models](https://ollama.ai/library) to
[big-AGI](https://big-agi.com) for a professional AI/AGI operation and a good UI/Conversational
experience. The integration brings the popular big-AGI features to Ollama, including: voice chats,
editing tools, models switching, personas, and more.
![config-local-ollama-0-example.png](pixels/config-ollama-0-example.png)
## Quick Integration Guide
1. **Ensure Ollama API Server is Running**: Before starting, make sure your Ollama API server is up and running (a quick check sketch follows this list).
2. **Add Ollama as a Model Source**: In `big-AGI`, navigate to the **Models** section, select **Add a model source**, and choose **Ollama**.
3. **Enter Ollama Host URL**: Provide the Ollama Host URL where the API server is accessible (e.g., `http://localhost:11434`).
4. **Refresh Model List**: Once connected, refresh the list of available models to include the Ollama models.
5. **Start Using AI Personas**: Select an Ollama model and begin interacting with AI personas tailored to your needs.
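A quick check for step 1, as a sketch (`/api/tags` is Ollama's model-listing endpoint; the host is the default from step 3):

```ts
// Sketch: confirm the Ollama API server is reachable by listing local models.
async function checkOllama(host = 'http://localhost:11434'): Promise<void> {
  const res = await fetch(`${host}/api/tags`);
  if (!res.ok) throw new Error(`Ollama not reachable: HTTP ${res.status}`);
  const { models } = (await res.json()) as { models: { name: string }[] };
  console.log('Ollama models:', models.map((m) => m.name).join(', '));
}
```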
### Ollama: Installation and Setup
For detailed instructions on setting up the Ollama API server, please refer to the
[Ollama download page](https://ollama.ai/download) and [instructions for linux](https://github.com/jmorganca/ollama/blob/main/docs/linux.md).
### Visual Guide
* After adding the `Ollama` model vendor, entering the IP address of an Ollama server, and refreshing models:
<img src="pixels/config-ollama-1-models.png" alt="config-local-ollama-1-models.png" style="max-width: 320px;">
* The `Ollama` admin panel, with the `Pull` button highlighted, after pulling the "Yi" model:
<img src="pixels/config-ollama-2-admin-pull.png" alt="config-local-ollama-2-admin-pull.png" style="max-width: 320px;">
* You can now switch model/persona dynamically and text/voice chat with the models:
<img src="pixels/config-ollama-3-chat.png" alt="config-local-ollama-3-chat.png" style="max-width: 320px;">
### Advanced: Model parameters
For users who wish to delve deeper into advanced settings, `big-AGI` offers additional configuration options, such
as the model temperature, maximum tokens, etc.
### Advanced: Ollama under a reverse proxy
You can elegantly expose your Ollama server to the internet (and thus make it easier to use from your server-side
big-AGI deployments) by exposing it on an http/https URL, such as: `https://yourdomain.com/ollama`
On Ubuntu Servers, you will need to install `nginx` and configure it to proxy requests to Ollama.
```bash
sudo apt update
sudo apt install nginx
sudo apt install certbot python3-certbot-nginx
sudo certbot --nginx -d yourdomain.com
```
Then, edit the nginx configuration file `/etc/nginx/sites-enabled/default` and add the following block:
```nginx
location /ollama/ {
proxy_pass http://localhost:11434/; # the trailing slash strips the /ollama/ prefix before proxying
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
# Disable buffering for the streaming responses
proxy_buffering off;
}
```
Reach out to our community if you need help with this.
### Community and Support
Join our community to share your experiences, get help, and discuss best practices:
[![Official Discord](https://discordapp.com/api/guilds/1098796266906980422/widget.png?style=banner2)](https://discord.gg/MkH4qj2Jp9)
---
`big-AGI` is committed to providing a powerful, intuitive, and privacy-respecting AI experience.
We are excited for you to explore the possibilities with Ollama models. Happy creating!
-31
View File
@@ -1,31 +0,0 @@
# OpenRouter Configuration
[OpenRouter](https://openrouter.ai) is a standalone, premium service
that provides access to [exclusive AI models](https://openrouter.ai/docs#models)
such as GPT-4 32k, Claude, and more. These models are typically not available to the public.
This document details the process of integrating OpenRouter with big-AGI.
### 1. OpenRouter Account Setup and API Key Generation
1. Register for an OpenRouter account at [openrouter.ai](https://openrouter.ai) by clicking on Sign In > Continue with Google.
2. Top up your account (minimum $5) by navigating to [openrouter.ai/account](https://openrouter.ai/account) > Add Credits > Pay with Stripe.
3. Generate an API key at [openrouter.ai/keys](https://openrouter.ai/keys) > API Key > Generate API Key.
- **Remember to copy and securely store your API key** - the key will not be displayed again and will be in the format `sk-or-v1-...`.
- Keep the key confidential as it can be used to expend your credits.
### 2. Integrating OpenRouter with big-AGI
1. Launch big-AGI, and navigate to the AI **Models** settings.
2. Add a Vendor, and select **OpenRouter**.
![feature-openrouter-add.png](pixels/feature-openrouter-add.png)
3. Input the API key into the **OpenRouter API Key** field, and load the Models.
![feature-openrouter-configure.png](pixels/feature-openrouter-configure.png)
4. OpenAI GPT4-32k and other models will now be accessible and selectable in the application.
### Pricing
OpenRouter independently manages its service and pricing and is not affiliated with big-AGI.
For more detailed information, please visit [this page](https://openrouter.ai/docs#models).
Please note that running large models such as GPT-4 32k can be costly and may rapidly consume
credits - a single prompt may cost $1 or more, at the time of writing.
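To verify a key outside of big-AGI, OpenRouter exposes an OpenAI-compatible endpoint; a minimal sketch, with an illustrative model ID (see the models page above for current IDs):

```ts
// Sketch: verify an OpenRouter key with one chat completion request.
// The model ID is illustrative; check the models page for current IDs.
async function openRouterSanityCheck(apiKey: string): Promise<void> {
  const res = await fetch('https://openrouter.ai/api/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` },
    body: JSON.stringify({
      model: 'openai/gpt-3.5-turbo',
      messages: [{ role: 'user', content: 'Hello!' }],
    }),
  });
  console.log(res.status, await res.json());
}
```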
+34 -47
View File
@@ -1,68 +1,55 @@
# Deploying a Next.js App on Cloudflare Pages
# Deploying Next.js App on Cloudflare Pages
> WARNING: Cloudflare Pages does not support traditional NodeJS runtimes, but only Edge Runtime functions.
>
> In this project we use Prisma connected to serverless Postgres, which at the moment cannot run on
> edge functions, so we cannot deploy this project on Cloudflare Pages.
>
> Workaround: Step 3.4. has been added below, to DELETE the NodeJS traditional runtime - which means that some
> parts of this application will not work.
> - [Side effects](https://github.com/enricoros/big-agi/blob/main/src/apps/chat/trade/server/trade.router.ts#L19):
> Sharing to the DB, importing from ChatGPT share links, and posting to Paste.GG will not work
> - See [Issue 174](https://github.com/enricoros/big-agi/issues/174).
>
> Longer term: follow [prisma/prisma: Support Edge Function deployments](https://github.com/prisma/prisma/issues/21394)
> and convert the Node runtime to Edge runtime once Prisma supports it.
Follow these steps to deploy your Next.js app on Cloudflare Pages. This guide is based on
the [official Cloudflare developer documentation](https://developers.cloudflare.com/pages/framework-guides/deploy-a-nextjs-site/),
with a few additional steps.
This guide provides steps to deploy your Next.js app on Cloudflare Pages.
It is based on the [official Cloudflare developer documentation](https://developers.cloudflare.com/pages/framework-guides/deploy-a-nextjs-site/),
with some additional steps.
## Step 1: Fork the Repository
## Step 1: Repository Forking
Fork the repository to your own GitHub account.
Fork the repository to your personal GitHub account.
## Step 2: Connect Cloudflare Pages to Your GitHub Account
## Step 2: Linking Cloudflare Pages to Your GitHub Account
1. Go to the Cloudflare Pages section and click the `Create a project` button.
2. Click `Connect To Git` and give Cloudflare Pages either All GitHub account Repo access or selected Repo access. We
recommend using selected Repo access and selecting the forked repo from step 1.
1. Navigate to the Cloudflare Pages section and click on the `Create a project` button.
2. Click `Connect To Git` and grant Cloudflare Pages access to either all GitHub account repositories or selected repositories.
We recommend using selected Repo access and selecting the forked repository from step 1.
## Step 3: Setup Build and Deployments
## Step 3: Configuring Build and Deployments
1. Once you select the forked GitHub repo, click the `Begin Setup` button.
2. On this page, set your `Project name`, `Production branch` (e.g., main), and your Build settings.
3. Select `Next.js` from the `Framework preset` dropdown menu.
4. Leave the preset filled Build command and Build output directory as preset defaults.
5. Set `Environmental variables` (advanced) on this page to configure some variables as follows:
1. After selecting the forked GitHub repository, click the **Begin Setup** button
2. On this page, set your **Project name**, **Production branch** (e.g., main), and your Build settings
3. Choose `Next.js` from the **Framework preset** dropdown menu
4. Set a custom **Build Command**:
- `rm app/api/trpc-node/[trpc]/route.ts && npx @cloudflare/next-on-pages@1`
- see the tradeoffs for this deletion on the notice at the top
5. Keep the **Build output directory** as default
6. Click the **Save and Deploy** button
| Variable | Value |
|---------------------------|---------|
| `GO_VERSION` | `1.16` |
| `NEXT_TELEMETRY_DISABLED` | `1` |
| `NODE_VERSION` | `17` |
| `PHP_VERSION` | `7.4` |
| `PYTHON_VERSION` | `3.7` |
| `RUBY_VERSION` | `2.7.1` |
## Step 4: Monitoring the Deployment Process
6. Click the `Save and Deploy` button.
Observe the process as it initializes your build environment, clones the GitHub repository, builds the application, and deploys it
to the Cloudflare Network. Once complete, proceed to the project you created.
## Step 4: Monitor the Deployment Process
## Step 5: Required: Set the `nodejs_compat` compatibility flag
Watch the process run to initialize your build environment, clone the GitHub repo, build the application, and deploy to
the Cloudflare Network. Once that is done, proceed to the project you created.
1. Navigate to the [Settings > Functions](https://dash.cloudflare.com/?to=/:account/pages/view/:pages-project/settings/functions) page of your newly created project
2. Scroll to `Compatibility flags` and enter "`nodejs_compat`" for both **Production** and **Preview** environments.
It should look like this: ![](pixels/config-deploy-cloudflare-compat2.png)
3. Re-deploy your project for the new flags to take effect
## Step 6: (Optional) Custom Domain Configuration
## Step 5: Set up a Custom Domain
Use the `Custom domains` tab to set up your domain via CNAME.
## Step 7: (Optional) Access Policy and Web Analytics Configuration
## Step 6: Configure Access Policy and Web Analytics
Navigate to the `Settings` page and enable the following settings:
Go to the `Settings` page and enable the following settings:
1. Access Policy: Restrict [preview deployments](https://developers.cloudflare.com/pages/platform/preview-deployments/)
to members of your Cloudflare account via one-time pin and restrict primary `*.YOURPROJECT.pages.dev` domain.
Refer to [Cloudflare Pages known issues](https://developers.cloudflare.com/pages/platform/known-issues/#enabling-access-on-your-pagesdev-domain)
for more details.
See [Cloudflare Pages known issues](https://developers.cloudflare.com/pages/platform/known-issues/#enabling-access-on-your-pagesdev-domain)
for more information.
2. Enable Web Analytics.
Congratulations! You have successfully deployed your Next.js app on Cloudflare Pages.
Now you have successfully deployed your Next.js app on Cloudflare Pages.
+14 -48
View File
@@ -1,60 +1,26 @@
# Deploying `big-AGI` with Docker
# Deploy `big-AGI` with Docker 🐳
Utilize Docker containers to deploy the big-AGI application for an efficient and automated deployment process.
Docker ensures faster development cycles, easier collaboration, and seamless environment management.
Deploy the big-AGI application using Docker containers for a consistent, efficient, and automated deployment process. Enjoy faster development cycles, easier collaboration, and seamless environment management. 🚀
## Build and run your container 🔧
Docker is a platform for developing, packaging, and deploying applications as lightweight containers, ensuring consistent behavior across environments.
1. **Clone big-AGI**
```bash
git clone https://github.com/enricoros/big-agi.git
cd big-agi
```
2. **Build the Docker Image**: Build a local docker image from the provided Dockerfile:
```bash
docker build -t big-agi .
```
3. **Run the Docker Container**: start a Docker container from the newly built image,
and expose its http port 3000 to your `localhost:3000` using:
```bash
docker run -d -p 3000:3000 big-agi
```
4. Browse to [http://localhost:3000](http://localhost:3000)
## `big-AGI` Docker Components
## Documentation
The big-AGI repository includes a Dockerfile and a GitHub Actions workflow for building and publishing a
Docker image of the application.
The big-AGI repository includes a Dockerfile and a GitHub Actions workflow for building and publishing a Docker image of the application.
### Dockerfile
The [`Dockerfile`](../Dockerfile) describes how to create a Docker image. It establishes a Node.js environment,
installs dependencies, and creates a production-ready version of the application as a local container.
The [`Dockerfile`](../Dockerfile) sets up a Node.js environment, installs dependencies, and creates a production-ready version of the application.
### Official container images
### GitHub Actions Workflow
The [`.github/workflows/docker-image.yml`](../.github/workflows/docker-image.yml) file automates the
building and publishing of the Docker images to the GitHub Container Registry (ghcr) when changes are
pushed to the `main` branch.
The [`.github/workflows/docker-image.yml`](../.github/workflows/docker-image.yml) file automates building and publishing the Docker image when changes are pushed to the `main` branch.
Official pre-built containers: [ghcr.io/enricoros/big-agi](https://github.com/enricoros/big-agi/pkgs/container/big-agi)
## Deploy Steps
Run official pre-built containers:
```bash
docker run -d -p 3000:3000 ghcr.io/enricoros/big-agi
```
1. Clone the big-AGI repository
2. Navigate to the project directory
3. Build the Docker image using the provided Dockerfile
4. Run the Docker container with the built image
### Run official containers
The repository also includes a `docker-compose.yaml` file, configured to run the pre-built
'ghcr image'. This file is used to define the `big-agi` service, the ports to expose, and the command to run.
If you have Docker Compose installed, you can run the Docker container with `docker-compose up`
to pull the Docker image (if it hasn't been pulled already) and start a Docker container. If you want to
update the image to the latest version, you can run `docker-compose pull` before starting the service.
```bash
docker-compose up -d
```
Leverage Docker's capabilities for a reliable and efficient big-AGI deployment.
Embrace the benefits of Docker for a reliable and efficient big-AGI deployment. 🎉
-111
View File
@@ -1,111 +0,0 @@
# Environment Variables
This document provides an explanation of the environment variables used in the big-AGI application.
**All variables are optional**; _UI options_ take precedence over _backend environment variables_,
which take precedence over _defaults_. This file is kept in sync with [`../src/server/env.mjs`](../src/server/env.mjs).
### Setting Environment Variables
Environment variables can be set by creating a `.env` file in the root directory of the project.
> For Docker deployment, ensure all necessary environment variables are set **both during build and run**.
> If the Docker container is built without setting environment variables, the frontend UI will be unaware
> of them, despite the backend being able to use them at runtime.
The following is an example `.env` for copy-paste convenience:
```bash
# Database
POSTGRES_PRISMA_URL=
POSTGRES_URL_NON_POOLING=
# LLMs
OPENAI_API_KEY=
OPENAI_API_HOST=
OPENAI_API_ORG_ID=
AZURE_OPENAI_API_ENDPOINT=
AZURE_OPENAI_API_KEY=
ANTHROPIC_API_KEY=
ANTHROPIC_API_HOST=
OLLAMA_API_HOST=
OPENROUTER_API_KEY=
# Model Observability: Helicone
HELICONE_API_KEY=
# Text-To-Speech
ELEVENLABS_API_KEY=
ELEVENLABS_API_HOST=
ELEVENLABS_VOICE_ID=
# Text-To-Image
PRODIA_API_KEY=
# Google Custom Search
GOOGLE_CLOUD_API_KEY=
GOOGLE_CSE_ID=
```
## Variables Documentation
### Database
To enable features such as Chat Link Sharing, you need to connect the backend to a database. We require
serverless Postgres, which is available on Vercel, Neon, and more.
Also make sure that you run `npx prisma db push` to create the initial schema on the database for the
first time (or update it at a later stage).
| Variable | Description |
|----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `POSTGRES_PRISMA_URL` | The URL of the Postgres database used by Prisma - example: `postgres://USER:PASS@SOMEHOST.postgres.vercel-storage.com/SOMEDB?pgbouncer=true&connect_timeout=15` |
| `POSTGRES_URL_NON_POOLING` | The URL of the Postgres database without pooling |
### LLMs
The following variables, when set, will enable the corresponding LLMs on the server side, without
requiring the user to enter an API key.
| Variable | Description | Required |
|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------|
| `OPENAI_API_KEY` | API key for OpenAI | Recommended |
| `OPENAI_API_HOST` | Changes the backend host for the OpenAI vendor, to enable platforms such as Helicone and CloudFlare AI Gateway | Optional |
| `OPENAI_API_ORG_ID` | Sets the "OpenAI-Organization" header field to support organization users | Optional |
| `AZURE_OPENAI_API_ENDPOINT` | Azure OpenAI endpoint - host only, without the path | Optional, but if set `AZURE_OPENAI_API_KEY` must also be set |
| `AZURE_OPENAI_API_KEY` | Azure OpenAI API key, see [config-azure-openai.md](config-azure-openai.md) | Optional, but if set `AZURE_OPENAI_API_ENDPOINT` must also be set |
| `ANTHROPIC_API_KEY` | The API key for Anthropic | Optional |
| `ANTHROPIC_API_HOST` | Changes the backend host for the Anthropic vendor, to enable platforms such as [config-aws-bedrock.md](config-aws-bedrock.md) | Optional |
| `OLLAMA_API_HOST`           | Changes the backend host for the Ollama vendor. See [config-ollama.md](config-ollama.md)                                       | Optional                                                          |
| `OPENROUTER_API_KEY` | The API key for OpenRouter | Optional |
### Model Observability: Helicone
Helicone provides observability to your LLM calls. It is a paid service, with a generous free tier.
It is currently supported for:
- **Anthropic**: by setting the Helicone API key, Helicone is automatically activated
- **OpenAI**: you also need to set `OPENAI_API_HOST` to `oai.hconeai.com`, to enable routing
| Variable | Description |
|--------------------|--------------------------|
| `HELICONE_API_KEY` | The API key for Helicone |
### Specials
Enable the app to Talk, Draw, and Google things up.
| Variable | Description |
|:-------------------------|:------------------------------------------------------------------------------------------------------------------------|
| **Text-To-Speech** | [ElevenLabs](https://elevenlabs.io/) is a high quality speech synthesis service |
| `ELEVENLABS_API_KEY` | ElevenLabs API Key - used for calls, etc. |
| `ELEVENLABS_API_HOST` | Custom host for ElevenLabs |
| `ELEVENLABS_VOICE_ID` | Default voice ID for ElevenLabs |
| **Google Custom Search** | [Google Programmable Search Engine](https://programmablesearchengine.google.com/about/) produces links to pages |
| `GOOGLE_CLOUD_API_KEY` | Google Cloud API Key, used with the '/react' command - [Link to GCP](https://console.cloud.google.com/apis/credentials) |
| `GOOGLE_CSE_ID` | Google Custom/Programmable Search Engine ID - [Link to PSE](https://programmablesearchengine.google.com/) |
| **Text-To-Image** | [Prodia](https://prodia.com/) is a reliable image generation service |
| `PRODIA_API_KEY` | Prodia API Key - used with '/imagine ...' |
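For instance, the two Google variables map onto the Custom Search JSON API; a hedged sketch of such a call (big-AGI's internal usage may differ):

```ts
// Sketch: how GOOGLE_CLOUD_API_KEY and GOOGLE_CSE_ID are typically used with
// the Google Custom Search JSON API; big-AGI's internals may differ.
async function googleSearch(query: string): Promise<void> {
  const params = new URLSearchParams({
    key: process.env.GOOGLE_CLOUD_API_KEY!,
    cx: process.env.GOOGLE_CSE_ID!,
    q: query,
  });
  const res = await fetch(`https://www.googleapis.com/customsearch/v1?${params}`);
  const { items } = (await res.json()) as { items?: { title: string; link: string }[] };
  items?.forEach((item) => console.log(item.title, '->', item.link));
}
```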
---
Binary image files changed (content not shown in this diff).
+16
View File
@@ -0,0 +1,16 @@
import { withAuth } from 'next-auth/middleware';
import { authType } from '@/modules/authentication/auth.server';
// noinspection JSUnusedGlobalSymbols
// no-op when authentication is disabled; otherwise require a valid session token on every request
export const middleware = !authType ? () => null : withAuth({
  callbacks: {
    authorized({ req, token }) {
      // console.log('authorized', req, token);
      return !!token;
    },
  },
});

// apply the auth check to every route
export const config = { matcher: ['/:path*'] };
+27
View File
@@ -0,0 +1,27 @@
/** @type {import('next').NextConfig} */
const nextConfig = {
reactStrictMode: true,
env: {
// set to true when the corresponding API keys are provided at build time; these flags are read by the UI
HAS_SERVER_KEY_OPENAI: !!process.env.OPENAI_API_KEY,
HAS_SERVER_KEY_ELEVENLABS: !!process.env.ELEVENLABS_API_KEY,
HAS_SERVER_KEY_PRODIA: !!process.env.PRODIA_API_KEY,
HAS_SERVER_KEYS_GOOGLE_CSE: !!process.env.GOOGLE_CLOUD_API_KEY && !!process.env.GOOGLE_CSE_ID,
// for auth only
SERVER_AUTH_TYPE: process.env.AUTH_TYPE,
},
webpack(config, { isServer, dev }) {
// @mui/joy: anything material gets redirected to Joy
config.resolve.alias['@mui/material'] = '@mui/joy';
// @dqbd/tiktoken: enable asynchronous WebAssembly
config.experiments = {
asyncWebAssembly: true,
layers: true,
};
return config;
},
};
module.exports = nextConfig;
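Because `env` values are inlined by Next.js at build time, client components can branch on these flags directly. A minimal sketch (the helper name is hypothetical, though the chat page below uses `process.env.HAS_SERVER_KEY_OPENAI` in exactly this way):

```ts
// hypothetical helper: decide whether to prompt the user for an OpenAI API key;
// HAS_SERVER_KEY_OPENAI is inlined at build time by the next.config.js above
export function needsUserApiKey(userApiKey?: string): boolean {
  return !process.env.HAS_SERVER_KEY_OPENAI && !userApiKey;
}
```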
-36
View File
@@ -1,36 +0,0 @@
/** @type {import('next').NextConfig} */
let nextConfig = {
reactStrictMode: true,
// Note: disabled to check whether the project becomes slower with this
// modularizeImports: {
// '@mui/icons-material': {
// transform: '@mui/icons-material/{{member}}',
// },
// },
webpack: (config, _options) => {
// @mui/joy: anything material gets redirected to Joy
config.resolve.alias['@mui/material'] = '@mui/joy';
// @dqbd/tiktoken: enable asynchronous WebAssembly
config.experiments = {
asyncWebAssembly: true,
layers: true,
};
return config;
},
};
// Validate environment variables, if set at build time; they will actually be read and used at runtime.
// This is the reason both this file and the server/env.mjs file have the .mjs extension.
await import('./src/server/env.mjs');
// conditionally enable the nextjs bundle analyzer
if (process.env.ANALYZE_BUNDLE) {
const { default: withBundleAnalyzer } = await import('@next/bundle-analyzer');
nextConfig = withBundleAnalyzer({ openAnalyzer: true })(nextConfig);
}
export default nextConfig;
+1618 -2141
View File
File diff suppressed because it is too large Load Diff
+29 -53
View File
@@ -1,70 +1,46 @@
{
"name": "big-agi",
"version": "1.5.0",
"version": "0.9.1",
"private": true,
"engines": {
"node": ">=18.0.0"
},
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "next lint",
"env:pull": "npx vercel env pull .env.development.local",
"postinstall": "prisma generate",
"db:push": "prisma db push",
"db:studio": "prisma studio"
"lint": "next lint"
},
"dependencies": {
"@dqbd/tiktoken": "^1.0.7",
"@emotion/cache": "^11.11.0",
"@emotion/react": "^11.11.1",
"@emotion/server": "^11.11.0",
"@emotion/styled": "^11.11.0",
"@mui/icons-material": "^5.14.16",
"@mui/joy": "^5.0.0-beta.15",
"@next/bundle-analyzer": "^14.0.3",
"@prisma/client": "^5.6.0",
"@sanity/diff-match-patch": "^3.1.1",
"@t3-oss/env-nextjs": "^0.7.1",
"@tanstack/react-query": "^4.36.1",
"@trpc/client": "^10.43.3",
"@trpc/next": "^10.43.3",
"@trpc/react-query": "^10.43.3",
"@trpc/server": "^10.43.3",
"@vercel/analytics": "^1.1.1",
"browser-fs-access": "^0.35.0",
"eventsource-parser": "^1.1.1",
"idb-keyval": "^6.2.1",
"next": "^14.0.3",
"pdfjs-dist": "4.0.189",
"plantuml-encoder": "^1.4.0",
"@emotion/react": "^11.10.8",
"@emotion/server": "^11.10.0",
"@emotion/styled": "^11.10.8",
"@mui/icons-material": "^5.11.16",
"@mui/joy": "^5.0.0-alpha.77",
"@tanstack/react-query": "^4.29.5",
"@vercel/analytics": "^1.0.0",
"eventsource-parser": "^1.0.0",
"next": "^13.3.2",
"pdfjs-dist": "^3.5.141",
"next-auth": "^4.21.1",
"prismjs": "^1.29.0",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-katex": "^3.0.1",
"react-markdown": "^9.0.0",
"react-timeago": "^7.2.0",
"remark-gfm": "^4.0.0",
"superjson": "^2.2.1",
"tesseract.js": "^5.0.3",
"uuid": "^9.0.1",
"zod": "^3.22.4",
"zustand": "~4.3.9"
"react-markdown": "^8.0.7",
"remark-gfm": "^3.0.1",
"uuid": "^9.0.0",
"zustand": "^4.3.7"
},
"devDependencies": {
"@types/node": "^20.9.2",
"@types/plantuml-encoder": "^1.4.2",
"@types/prismjs": "^1.26.3",
"@types/react": "^18.2.37",
"@types/react-dom": "^18.2.15",
"@types/react-katex": "^3.0.3",
"@types/react-timeago": "^4.1.6",
"@types/uuid": "^9.0.7",
"eslint": "^8.54.0",
"eslint-config-next": "^14.0.3",
"prettier": "^3.1.0",
"prisma": "^5.6.0",
"typescript": "^5.2.2"
},
"engines": {
"node": "^20.0.0 || ^18.0.0"
"@types/node": "^18.16.3",
"@types/prismjs": "^1.26.0",
"@types/react": "^18.2.0",
"@types/react-dom": "^18.2.1",
"@types/uuid": "^9.0.1",
"eslint": "^8.39.0",
"eslint-config-next": "^13.3.2",
"prettier": "^2.8.8",
"typescript": "^5.0.4"
}
}
+37 -29
View File
@@ -1,39 +1,47 @@
import * as React from 'react';
import Head from 'next/head';
import { MyAppProps } from 'next/app';
import { Analytics as VercelAnalytics } from '@vercel/analytics/react';
import { AppProps } from 'next/app';
import { CacheProvider, EmotionCache } from '@emotion/react';
import { CssBaseline, CssVarsProvider } from '@mui/joy';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import { Session as NextAuthSession } from 'next-auth';
import { SessionProvider } from 'next-auth/react';
import { Brand } from '~/common/app.config';
import { apiQuery } from '~/common/util/trpc.client';
import 'katex/dist/katex.min.css';
import '~/common/styles/CodePrism.css';
import '~/common/styles/GithubMarkdown.css';
import { ProviderBackend } from '~/common/state/ProviderBackend';
import { ProviderTRPCQueryClient } from '~/common/state/ProviderTRPCQueryClient';
import { ProviderTheming } from '~/common/state/ProviderTheming';
import '@/common/styles/GithubMarkdown.css';
import { Brand } from '@/common/brand';
import { createEmotionCache, theme } from '@/common/theme';
const MyApp = ({ Component, emotionCache, pageProps }: MyAppProps) =>
<>
// Client-side cache, shared for the whole session of the user in the browser.
const clientSideEmotionCache = createEmotionCache();
<Head>
<title>{Brand.Title.Common}</title>
<meta name='viewport' content='minimum-scale=1, initial-scale=1, width=device-width, shrink-to-fit=no' />
</Head>
<ProviderTheming emotionCache={emotionCache}>
<ProviderTRPCQueryClient>
<ProviderBackend>
<Component {...pageProps} />
</ProviderBackend>
</ProviderTRPCQueryClient>
</ProviderTheming>
export interface MyAppProps extends AppProps {
emotionCache?: EmotionCache;
session?: NextAuthSession;
}
export default function MyApp({ Component, emotionCache = clientSideEmotionCache, pageProps: { session, ...pageProps } }: MyAppProps) {
const [queryClient] = React.useState(() => new QueryClient());
return <>
<CacheProvider value={emotionCache}>
<Head>
<title>{Brand.Title.Common}</title>
<meta name='viewport' content='minimum-scale=1, initial-scale=1, width=device-width, shrink-to-fit=no' />
</Head>
{/* Next-Auth provider */}
<SessionProvider session={session}>
{/* React-Query provider */}
<QueryClientProvider client={queryClient}>
{/* JoyUI/Emotion */}
<CssVarsProvider defaultMode='light' theme={theme}>
{/* CssBaseline kickstarts an elegant, consistent, and simple baseline to build upon. */}
<CssBaseline />
<Component {...pageProps} />
</CssVarsProvider>
</QueryClientProvider>
</SessionProvider>
</CacheProvider>
<VercelAnalytics debug={false} />
</>;
// enables the React Query API invocation
export default apiQuery.withTRPC(MyApp);
}
+8 -6
View File
@@ -1,15 +1,16 @@
import * as React from 'react';
import { AppType, MyAppProps } from 'next/app';
import { AppType } from 'next/app';
import { default as Document, DocumentContext, DocumentProps, Head, Html, Main, NextScript } from 'next/document';
import createEmotionServer from '@emotion/server/create-instance';
import { getInitColorSchemeScript } from '@mui/joy/styles';
import { Brand } from '~/common/app.config';
import { bodyFontClassName, createEmotionCache } from '~/common/app.theme';
import { Brand } from '@/common/brand';
import { MyAppProps } from './_app';
import { bodyFontClassName, createEmotionCache } from '@/common/theme';
interface MyDocumentProps extends DocumentProps {
emotionStyleTags: React.JSX.Element[];
emotionStyleTags: JSX.Element[];
}
export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
@@ -18,6 +19,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
<Head>
{/* Meta (missing Title, set by the App or Page) */}
<meta name='description' content={Brand.Meta.Description} />
<meta name='keywords' content={Brand.Meta.Keywords} />
<meta name='theme-color' content={Brand.Meta.ThemeColor} />
{/* Favicons & PWA */}
@@ -30,7 +32,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
<meta name='apple-mobile-web-app-status-bar-style' content='black' />
{/* Opengraph */}
<meta property='og:title' content={Brand.Title.Common} />
<meta property='og:title' content={Brand.Meta.Title} />
<meta property='og:description' content={Brand.Meta.Description} />
{Brand.URIs.CardImage && <meta property='og:image' content={Brand.URIs.CardImage} />}
<meta property='og:url' content={Brand.URIs.Home} />
@@ -40,7 +42,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
{/* Twitter */}
<meta property='twitter:card' content='summary_large_image' />
<meta property='twitter:url' content={Brand.URIs.Home} />
<meta property='twitter:title' content={Brand.Title.Common} />
<meta property='twitter:title' content={Brand.Meta.Title} />
<meta property='twitter:description' content={Brand.Meta.Description} />
{Brand.URIs.CardImage && <meta property='twitter:image' content={Brand.URIs.CardImage} />}
<meta name='twitter:site' content={Brand.Meta.TwitterSite} />
+20
View File
@@ -0,0 +1,20 @@
import { NextApiRequest, NextApiResponse } from 'next';
import { default as NextAuth } from 'next-auth';
import { authBasicUsers, authCreateProviders, authType } from '@/modules/authentication/auth.server';
const authOptions = {
secret: process.env.NEXTAUTH_SECRET,
providers: authCreateProviders(),
};
export default function handler(req: NextApiRequest, res: NextApiResponse) {
if (!authType)
return res.status(200).send('Auth not enabled');
if (Object.keys(authBasicUsers).length <= 0)
  return res.status(200).send('Auth enabled but no users have been set up');
return NextAuth(req, res, authOptions);
}
+77
View File
@@ -0,0 +1,77 @@
import { NextRequest, NextResponse } from 'next/server';
import { ElevenLabs } from '@/modules/elevenlabs/elevenlabs.types';
function parseApiParameters(apiKey?: string) {
return {
apiHost: (process.env.ELEVENLABS_API_HOST || 'api.elevenlabs.io').trim().replaceAll('https://', ''),
apiHeaders: {
'Content-Type': 'application/json',
'xi-api-key': (apiKey || process.env.ELEVENLABS_API_KEY || '').trim(),
},
};
}
async function rethrowElevenLabsError(response: Response) {
if (!response.ok) {
let errorPayload: object | null = null;
try {
errorPayload = await response.json();
} catch (e) {
// ignore
}
console.error('Error in ElevenLabs API:', errorPayload);
throw new Error('ElevenLabs error: ' + JSON.stringify(errorPayload));
}
}
export async function getFromElevenLabs<TJson extends object>(apiKey: string, apiPath: string): Promise<TJson> {
const { apiHost, apiHeaders } = parseApiParameters(apiKey);
const response = await fetch(`https://${apiHost}${apiPath}`, {
method: 'GET',
headers: apiHeaders,
});
await rethrowElevenLabsError(response);
return await response.json();
}
export async function postToElevenLabs<TBody extends object>(apiKey: string, apiPath: string, body: TBody, signal?: AbortSignal): Promise<Response> {
const { apiHost, apiHeaders } = parseApiParameters(apiKey);
const response = await fetch(`https://${apiHost}${apiPath}`, {
method: 'POST',
headers: apiHeaders,
body: JSON.stringify(body),
signal,
});
await rethrowElevenLabsError(response);
return response;
}
export default async function handler(req: NextRequest) {
try {
const { apiKey = '', text, voiceId: userVoiceId, nonEnglish } = (await req.json()) as ElevenLabs.API.TextToSpeech.RequestBody;
const voiceId = userVoiceId || process.env.ELEVENLABS_VOICE_ID || '21m00Tcm4TlvDq8ikWAM';
const requestPayload: ElevenLabs.Wire.TextToSpeech.Request = {
text: text,
...(nonEnglish ? { model_id: 'eleven_multilingual_v1' } : {}),
};
const response = await postToElevenLabs<ElevenLabs.Wire.TextToSpeech.Request>(apiKey, `/v1/text-to-speech/${voiceId}`, requestPayload);
const audioBuffer: ElevenLabs.API.TextToSpeech.Response = await response.arrayBuffer();
return new NextResponse(audioBuffer, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
} catch (error) {
console.error('Error posting to ElevenLabs', error);
return new NextResponse(JSON.stringify(`textToSpeech error: ${error?.toString() || 'Network issue'}`), { status: 500 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
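A sketch of how a client might call this text-to-speech route and play the result; the `/api/elevenlabs/speech` path and the empty `apiKey` (falling back to the server key) are assumptions based on the handler above:

```ts
// hypothetical client-side caller for the TTS edge route above;
// the body mirrors ElevenLabs.API.TextToSpeech.RequestBody as parsed by the handler
async function speak(text: string, voiceId?: string): Promise<void> {
  const response = await fetch('/api/elevenlabs/speech', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ apiKey: '', text, voiceId }),
  });
  if (!response.ok)
    throw new Error(`text-to-speech failed: ${response.status}`);
  const audioBlob = await response.blob(); // 'audio/mpeg' per the handler
  await new Audio(URL.createObjectURL(audioBlob)).play();
}
```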
+48
View File
@@ -0,0 +1,48 @@
import { NextRequest, NextResponse } from 'next/server';
import { ElevenLabs } from '@/modules/elevenlabs/elevenlabs.types';
import { getFromElevenLabs } from './speech';
export default async function handler(req: NextRequest) {
try {
const { apiKey = '' } = (await req.json()) as ElevenLabs.API.Voices.RequestBody;
const voicesList = await getFromElevenLabs<ElevenLabs.Wire.Voices.List>(apiKey, '/v1/voices');
// bring category !== 'premade' to the top
voicesList.voices.sort((a, b) => {
if (a.category === 'premade' && b.category !== 'premade') return 1;
if (a.category !== 'premade' && b.category === 'premade') return -1;
return 0;
});
// map to our own response format
const response: ElevenLabs.API.Voices.Response = {
voices: voicesList.voices.map((voice, idx) => ({
id: voice.voice_id,
name: voice.name,
description: voice.description,
previewUrl: voice.preview_url,
category: voice.category,
default: idx === 0,
})),
};
return new NextResponse(JSON.stringify(response), { status: 200, headers: { 'Content-Type': 'application/json' } });
} catch (error) {
console.error('Error fetching voices from ElevenLabs:', error);
return new NextResponse(
JSON.stringify({
type: 'error',
error: error?.toString() || error || 'Network issue',
}),
{ status: 500 },
);
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
+27
View File
@@ -0,0 +1,27 @@
import { NextRequest, NextResponse } from 'next/server';
import { OpenAI } from '@/modules/openai/openai.types';
import { openaiPost, toApiChatRequest, toWireCompletionRequest } from '@/modules/openai/openai.server';
export default async function handler(req: NextRequest) {
try {
const requestBodyJson = await req.json();
const { api, ...rest } = await toApiChatRequest(requestBodyJson);
const upstreamRequest: OpenAI.Wire.Chat.CompletionRequest = toWireCompletionRequest(rest, false);
const upstreamResponse: OpenAI.Wire.Chat.CompletionResponse = await openaiPost(api, '/v1/chat/completions', upstreamRequest);
return new NextResponse(JSON.stringify({
message: upstreamResponse.choices[0].message,
} satisfies OpenAI.API.Chat.Response));
} catch (error: any) {
console.error('Fetch request failed:', error);
return new NextResponse(`[Issue] ${error}`, { status: 400 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
+30
View File
@@ -0,0 +1,30 @@
import { NextRequest, NextResponse } from 'next/server';
import { OpenAI } from '@/modules/openai/openai.types';
import { openaiGet, toApiChatRequest } from '@/modules/openai/openai.server';
export default async function handler(req: NextRequest): Promise<NextResponse> {
try {
// FIXME: this is currently broken, the "extractOpenAIChatInputs" is expecting messages/modelId, which we don't have here
// keep working on this
const requestBodyJson = await req.json();
const { api } = await toApiChatRequest(requestBodyJson);
const wireModels = await openaiGet<OpenAI.Wire.Models.Response>(api, '/v1/models');
// flatten IDs (most recent first)
return new NextResponse(JSON.stringify({
models: wireModels.data.map((model) => ({ id: model.id, created: model.created })),
} satisfies OpenAI.API.Models.Response));
} catch (error: any) {
console.error('Fetch request failed:', error);
return new NextResponse(`[Issue] ${error}`, { status: 400 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
+117
View File
@@ -0,0 +1,117 @@
import { NextRequest, NextResponse } from 'next/server';
import { createParser } from 'eventsource-parser';
import { OpenAI } from '@/modules/openai/openai.types';
import { openaiPostResponse, toApiChatRequest, toWireCompletionRequest } from '@/modules/openai/openai.server';
async function chatStreamRepeater(input: OpenAI.API.Chat.Request, signal: AbortSignal): Promise<ReadableStream> {
// Handle the abort event when the connection is closed by the client
signal.addEventListener('abort', () => {
console.log('Client closed the connection.');
});
// begin event streaming from the OpenAI API
const encoder = new TextEncoder();
let upstreamResponse: Response;
try {
const request: OpenAI.Wire.Chat.CompletionRequest = toWireCompletionRequest(input, true);
upstreamResponse = await openaiPostResponse(input.api, '/v1/chat/completions', request, signal);
} catch (error: any) {
console.log(error);
const message = '[OpenAI Issue] ' + (error?.message || (typeof error === 'string' ? error : JSON.stringify(error))) + (error?.cause ? ' · ' + error.cause : '');
return new ReadableStream({
start: controller => {
controller.enqueue(encoder.encode(message));
controller.close();
},
});
}
// decoding and re-encoding loop
const onReadableStreamStart = async (controller: ReadableStreamDefaultController) => {
let hasBegun = false;
// stream response (SSE) from OpenAI is split into multiple chunks. this function
// will parse the event into a text stream, and re-emit it to the client
const upstreamParser = createParser(event => {
// ignore reconnect interval
if (event.type !== 'event')
return;
// https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream
if (event.data === '[DONE]') {
controller.close();
return;
}
try {
const json: OpenAI.Wire.Chat.CompletionResponseChunked = JSON.parse(event.data);
// ignore any 'role' delta update
if (json.choices[0].delta?.role)
return;
// stringify and send the first packet as a JSON object
if (!hasBegun) {
hasBegun = true;
const firstPacket: OpenAI.API.Chat.StreamingFirstResponse = {
model: json.model,
};
controller.enqueue(encoder.encode(JSON.stringify(firstPacket)));
}
// transmit the text stream
const text = json.choices[0].delta?.content || '';
controller.enqueue(encoder.encode(text));
} catch (error) {
// maybe parse error
console.error('Error parsing OpenAI response', error);
controller.error(error);
}
});
// https://web.dev/streams/#asynchronous-iteration
const decoder = new TextDecoder();
for await (const upstreamChunk of upstreamResponse.body as any)
upstreamParser.feed(decoder.decode(upstreamChunk, { stream: true }));
};
return new ReadableStream({
start: onReadableStreamStart,
cancel: (reason) => console.log('chatStreamRepeater cancelled', reason),
});
}
export default async function handler(req: NextRequest): Promise<Response> {
try {
const requestBodyJson = await req.json();
const chatRequest: OpenAI.API.Chat.Request = await toApiChatRequest(requestBodyJson);
const chatResponseStream: ReadableStream = await chatStreamRepeater(chatRequest, req.signal);
return new NextResponse(chatResponseStream);
} catch (error: any) {
if (error.name === 'AbortError') {
console.log('Fetch request aborted in handler');
return new Response('Request aborted by the user.', { status: 499 }); // Use 499 status code for client closed request
} else if (error.code === 'ECONNRESET') {
console.log('Connection reset by the client in handler');
return new Response('Connection reset by the client.', { status: 499 }); // Use 499 status code for client closed request
} else {
console.error('Fetch request failed:', error);
return new NextResponse(`[Issue] ${error}`, { status: 400 });
}
}
};
//noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
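For reference, a sketch of consuming the wire format this handler produces: a single JSON packet (`StreamingFirstResponse`, carrying the model name) followed by raw text chunks. The `/api/openai/stream-chat` path is an assumption, and on upstream errors the stream may instead begin with an '[OpenAI Issue]' message:

```ts
// hypothetical client-side reader for the repeater stream above
async function readChatStream(body: object, onText: (text: string) => void): Promise<string> {
  const response = await fetch('/api/openai/stream-chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  });
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  let model = '';
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    // the first packet is a single JSON object; wait for its closing brace
    if (!model) {
      const jsonEnd = buffer.indexOf('}');
      if (jsonEnd < 0) continue;
      model = JSON.parse(buffer.slice(0, jsonEnd + 1)).model;
      buffer = buffer.slice(jsonEnd + 1);
    }
    if (buffer) {
      onText(buffer);
      buffer = '';
    }
  }
  return model;
}
```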
+88
View File
@@ -0,0 +1,88 @@
// noinspection ExceptionCaughtLocallyJS
import { NextRequest, NextResponse } from 'next/server';
import { Prodia } from '@/modules/prodia/prodia.types';
export const prodiaHeaders = (apiKey: string): Record<string, string> => ({
'X-Prodia-Key': (apiKey || process.env.PRODIA_API_KEY || '').trim(),
});
async function createGenerationJob(apiKey: string, jobRequest: Prodia.Wire.Imagine.JobRequest): Promise<Prodia.Wire.Imagine.JobResponse> {
const response = await fetch('https://api.prodia.com/v1/job', {
method: 'POST',
headers: {
...prodiaHeaders(apiKey),
'Content-Type': 'application/json',
},
body: JSON.stringify(jobRequest),
});
if (response.status !== 200) {
console.log('Bad Prodia Response:', await response.text());
throw new Error(`Bad Prodia Response: ${response.status}`);
}
return await response.json();
}
async function getJobStatus(apiKey: string, jobId: string): Promise<Prodia.Wire.Imagine.JobResponse> {
const response = await fetch(`https://api.prodia.com/v1/job/${jobId}`, {
headers: prodiaHeaders(apiKey),
});
if (response.status !== 200)
throw new Error(`Bad Prodia Response: ${response.status}`);
return await response.json();
}
export default async function handler(req: NextRequest) {
// timeout, in seconds
const timeout = 15;
const tStart = Date.now();
try {
const { apiKey = '', prompt, prodiaModelId, negativePrompt, steps, cfgScale, seed } = (await req.json()) as Prodia.API.Imagine.RequestBody;
// create the job, getting back a job ID
const jobRequest: Prodia.Wire.Imagine.JobRequest = {
model: prodiaModelId,
prompt,
...(!!cfgScale && { cfg_scale: cfgScale }),
...(!!steps && { steps }),
...(!!negativePrompt && { negative_prompt: negativePrompt }),
...(!!seed && { seed }),
};
let job: Prodia.Wire.Imagine.JobResponse = await createGenerationJob(apiKey, jobRequest);
// poll the job status until it's done
let sleepDelay = 2000;
while (job.status !== 'succeeded' && job.status !== 'failed' && (Date.now() - tStart) < (timeout * 1000)) {
await new Promise(resolve => setTimeout(resolve, sleepDelay));
job = await getJobStatus(apiKey, job.job);
if (sleepDelay > 250)
sleepDelay /= 2;
}
// check for success
const elapsed = Math.round((Date.now() - tStart) / 100) / 10;
if (job.status !== 'succeeded' || !job.imageUrl)
throw new Error(`Prodia image generation failed within ${elapsed}s`);
// respond with the image URL
const altText = `Prodia generated "${jobRequest.prompt}". Options: ${JSON.stringify({ seed: job.params })}.`;
const response: Prodia.API.Imagine.Response = { status: 'success', imageUrl: job.imageUrl, altText, elapsed };
return new NextResponse(JSON.stringify(response));
} catch (error) {
console.error('Handler failed:', error);
const elapsed = Math.round((Date.now() - tStart) / 100) / 10;
const response: Prodia.API.Imagine.Response = { status: 'error', error: error?.toString() || 'Network issue', elapsed };
return new NextResponse(JSON.stringify(response), { status: 500 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
+48
View File
@@ -0,0 +1,48 @@
import { NextRequest, NextResponse } from 'next/server';
import { Prodia } from '@/modules/prodia/prodia.types';
// for lack of an API
const HARDCODED_MODELS: Prodia.API.Models.Response = {
models: [
{ id: 'sdv1_4.ckpt [7460a6fa]', label: 'Stable Diffusion 1.4', priority: 8 },
{ id: 'v1-5-pruned-emaonly.ckpt [81761151]', label: 'Stable Diffusion 1.5', priority: 9 },
{ id: 'anythingv3_0-pruned.ckpt [2700c435]', label: 'Anything V3.0' },
{ id: 'anything-v4.5-pruned.ckpt [65745d25]', label: 'Anything V4.5' },
{ id: 'analog-diffusion-1.0.ckpt [9ca13f02]', label: 'Analog Diffusion' },
{ id: 'theallys-mix-ii-churned.safetensors [5d9225a4]', label: `TheAlly's Mix II` },
{ id: 'elldreths-vivid-mix.safetensors [342d9d26]', label: `Elldreth's Vivid Mix` },
{ id: 'deliberate_v2.safetensors [10ec4b29]', label: 'Deliberate V2', priority: 5 },
{ id: 'openjourney_V4.ckpt [ca2f377f]', label: 'Openjourney v4' },
{ id: 'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]', label: 'Dreamlike Diffusion' },
{ id: 'dreamlike-diffusion-2.0.safetensors [fdcf65e7]', label: 'Dreamlike Diffusion 2' },
{ id: 'portrait+1.0.safetensors [1400e684]', label: 'Portrait' },
{ id: 'riffusion-model-v1.ckpt [3aafa6fe]', label: 'Riffusion' },
{ id: 'timeless-1.0.ckpt [7c4971d4]', label: 'Timeless' },
{ id: 'dreamshaper_5BakedVae.safetensors [a3fbf318]', label: 'Dreamshaper 5' },
{ id: 'revAnimated_v122.safetensors [3f4fefd9]', label: 'ReV Animated V1.2.2' },
{ id: 'meinamix_meinaV9.safetensors [2ec66ab0]', label: 'MeinaMix Meina V9' },
],
};
// sort by priority
HARDCODED_MODELS.models.sort((a, b) => (b.priority ?? 0) - (a.priority ?? 0));
export default async function handler(req: NextRequest): Promise<NextResponse> {
try {
// the API key is ignored for now, as there isn't a models API - but we still want to accept it for the future
// noinspection JSUnusedLocalSymbols
const { apiKey = '' } = (await req.json()) as Prodia.API.Models.RequestBody;
return new NextResponse(JSON.stringify(HARDCODED_MODELS));
} catch (error: any) {
console.error('Handler failed:', error);
return new NextResponse(`[Issue] ${error}`, { status: 400 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
+50
View File
@@ -0,0 +1,50 @@
// noinspection ExceptionCaughtLocallyJS
import { NextRequest, NextResponse } from 'next/server';
import { PasteGG } from '@/modules/pastegg/pastegg.types';
import { pasteGgPost } from '@/modules/pastegg/pastegg.server';
/**
* 'Proxy' that uploads a file to paste.gg.
* Called by the UI to avoid CORS issues, as the browser cannot post directly to paste.gg.
*/
export default async function handler(req: NextRequest) {
try {
const { to, title, fileContent, fileName, origin }: PasteGG.API.Publish.RequestBody = await req.json();
if (req.method !== 'POST' || to !== 'paste.gg' || !title || !fileContent || !fileName)
throw new Error('Invalid options');
const paste = await pasteGgPost(title, fileName, fileContent, origin);
console.log(`Posted to paste.gg`, paste);
if (paste?.status !== 'success')
throw new Error(`${paste?.error || 'Unknown error'}: ${paste?.message || 'Paste.gg Error'}`);
return new NextResponse(JSON.stringify({
type: 'success',
url: `https://paste.gg/${paste.result.id}`,
expires: paste.result.expires || 'never',
deletionKey: paste.result.deletion_key || 'none',
created: paste.result.created_at,
} satisfies PasteGG.API.Publish.Response));
} catch (error) {
console.error('Error posting to paste.gg', error);
return new NextResponse(JSON.stringify({
type: 'error',
error: error?.toString() || 'Network issue',
} satisfies PasteGG.API.Publish.Response), { status: 500 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
+47
View File
@@ -0,0 +1,47 @@
import { NextRequest, NextResponse } from 'next/server';
import { Search } from '@/modules/search/search.types';
import { objectToQueryString } from '@/modules/search/search.client';
export default async function handler(req: NextRequest): Promise<NextResponse> {
const { searchParams } = new URL(req.url);
const customSearchParams: Search.Wire.RequestParams = {
q: searchParams.get('query') || '',
cx: searchParams.get('cx') || process.env.GOOGLE_CSE_ID,
key: searchParams.get('key') || process.env.GOOGLE_CLOUD_API_KEY,
num: 5,
};
try {
if (!customSearchParams.key || !customSearchParams.cx) {
// noinspection ExceptionCaughtLocallyJS
throw new Error('Missing API Key or Custom Search Engine ID');
}
const wireResponse = await fetch(`https://www.googleapis.com/customsearch/v1?${objectToQueryString(customSearchParams)}`);
const data: Search.Wire.SearchResponse & { error?: { message?: string } } = await wireResponse.json();
if (data.error) {
// noinspection ExceptionCaughtLocallyJS
throw new Error(`Google Custom Search API error: ${data.error?.message}`);
}
const apiResponse: Search.API.Response = data.items?.map((result): Search.API.BriefResult => ({
title: result.title,
link: result.link,
snippet: result.snippet,
})) || [];
return new NextResponse(JSON.stringify(apiResponse));
} catch (error: any) {
console.error('Handler failed:', error);
return new NextResponse(`A search error occurred: ${error}`, { status: 500 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
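A sketch of calling this route from the UI; the `/api/search` path is an assumption, and the response shape follows `Search.API.BriefResult` as mapped above:

```ts
// hypothetical client-side caller for the Google Custom Search proxy above
async function searchWeb(query: string): Promise<{ title: string; link: string; snippet: string }[]> {
  const response = await fetch(`/api/search?query=${encodeURIComponent(query)}`);
  if (!response.ok)
    throw new Error(`search failed: ${response.status}`);
  return await response.json();
}
```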
-14
View File
@@ -1,14 +0,0 @@
import * as React from 'react';
import { AppCall } from '../src/apps/call/AppCall';
import { AppLayout } from '~/common/layout/AppLayout';
export default function CallPage() {
return (
<AppLayout>
<AppCall />
</AppLayout>
);
}
+44 -9
View File
@@ -1,18 +1,53 @@
import * as React from 'react';
import { AppChat } from '../src/apps/chat/AppChat';
import { useShowNewsOnUpdate } from '../src/apps/news/news.hooks';
import { Container, useTheme } from '@mui/joy';
import { AppLayout } from '~/common/layout/AppLayout';
import { NoSSR } from '@/common/components/NoSSR';
import { isValidOpenAIApiKey } from '@/modules/openai/openai.client';
import { useSettingsStore } from '@/common/state/store-settings';
import { Chat } from '../src/apps/chat/Chat';
import { SettingsModal } from '../src/apps/settings/SettingsModal';
export default function ChatPage() {
// show the News page on updates
useShowNewsOnUpdate();
export default function Home() {
// state
const [settingsShown, setSettingsShown] = React.useState(false);
// external state
const theme = useTheme();
const apiKey = useSettingsStore(state => state.apiKey);
const centerMode = useSettingsStore(state => state.centerMode);
// show the Settings Dialog at startup if the API key is required but not set
React.useEffect(() => {
if (!process.env.HAS_SERVER_KEY_OPENAI && !isValidOpenAIApiKey(apiKey))
setSettingsShown(true);
}, [apiKey]);
return (
<AppLayout>
<AppChat />
</AppLayout>
/**
* Note the global NoSSR wrapper
* - Even the overall container could have hydration issues when using localStorage and non-default maxWidth
*/
<NoSSR>
<Container maxWidth={centerMode === 'full' ? false : centerMode === 'narrow' ? 'md' : 'xl'} disableGutters sx={{
boxShadow: {
xs: 'none',
md: centerMode === 'narrow' ? theme.vars.shadow.md : 'none',
xl: centerMode !== 'full' ? theme.vars.shadow.lg : 'none',
},
}}>
<Chat onShowSettings={() => setSettingsShown(true)} />
<SettingsModal open={settingsShown} onClose={() => setSettingsShown(false)} />
</Container>
</NoSSR>
);
}
-14
View File
@@ -1,14 +0,0 @@
import * as React from 'react';
import { AppLabs } from '../src/apps/labs/AppLabs';
import { AppLayout } from '~/common/layout/AppLayout';
export default function LabsPage() {
return (
<AppLayout suspendAutoModelsSetup>
<AppLabs />
</AppLayout>
);
}
-141
View File
@@ -1,141 +0,0 @@
import * as React from 'react';
import { useRouter } from 'next/router';
import { Alert, Box, Button, Typography } from '@mui/joy';
import ArrowBackIcon from '@mui/icons-material/ArrowBack';
import { setComposerStartupText } from '../src/apps/chat/components/composer/store-composer';
import { AppLayout } from '~/common/layout/AppLayout';
import { LogoProgress } from '~/common/components/LogoProgress';
import { asValidURL } from '~/common/util/urlUtils';
/**
* This page will be invoked on mobile when sharing Text/URLs/Files from other APPs
* See the /public/manifest.json for how this is configured. Parameters:
* - text: the text to share
* - url: the URL to share
* - if the URL is a valid URL, it will be downloaded and the content will be shared
* - if the URL is not a valid URL, it will be shared as text
* - title: the title of the shared content
*/
function AppShareTarget() {
// state
const [errorMessage, setErrorMessage] = React.useState<string | null>(null);
const [intentText, setIntentText] = React.useState<string | null>(null);
const [intentURL, setIntentURL] = React.useState<string | null>(null);
const [isDownloading, setIsDownloading] = React.useState(false);
// external state
const { query, push: routerPush, replace: routerReplace } = useRouter();
const queueComposerTextAndLaunchApp = React.useCallback((text: string) => {
setComposerStartupText(text);
void routerReplace('/');
}, [routerReplace]);
// Detect the share Intent from the query
React.useEffect(() => {
// skip when query is not parsed yet
if (!Object.keys(query).length)
return;
// single item from the query
let queryTextItem: string[] | string | null = query.url || query.text || null;
if (Array.isArray(queryTextItem))
queryTextItem = queryTextItem[0];
// check if the item is a URL
const url = asValidURL(queryTextItem);
if (url)
setIntentURL(url);
else if (queryTextItem)
setIntentText(queryTextItem);
else
setErrorMessage('No text or url. Received: ' + JSON.stringify(query));
}, [query.url, query.text, query]);
// Text -> Composer
React.useEffect(() => {
if (intentText)
queueComposerTextAndLaunchApp(intentText);
}, [intentText, queueComposerTextAndLaunchApp]);
// URL -> download -> Composer
React.useEffect(() => {
if (intentURL) {
setIsDownloading(true);
// TEMP: until the Browse module is ready, just use the URL, verbatim
queueComposerTextAndLaunchApp(intentURL);
setIsDownloading(false);
/*callBrowseFetchSinglePage(intentURL)
.then(pageContent => {
if (pageContent)
queueComposerTextAndLaunchApp('\n\n```' + intentURL + '\n' + pageContent + '\n```\n');
else
setErrorMessage('Could not read any data');
})
.catch(error => setErrorMessage(error?.message || error || 'Unknown error'))
.finally(() => setIsDownloading(false));*/
}
}, [intentURL, queueComposerTextAndLaunchApp]);
return (
<Box sx={{
backgroundColor: 'background.level2',
display: 'flex', flexDirection: 'column', alignItems: 'center', justifyContent: 'center',
flexGrow: 1,
}}>
{/* Logo with Circular Progress */}
<LogoProgress showProgress={isDownloading} />
{/* Title */}
<Typography level='title-lg' sx={{ mt: 2, mb: 1 }}>
{isDownloading ? 'Loading...' : errorMessage ? '' : intentURL ? 'Done' : 'Receiving...'}
</Typography>
{/* Possible Error */}
{errorMessage && <>
<Alert variant='soft' color='danger' sx={{ my: 1 }}>
<Typography>{errorMessage}</Typography>
</Alert>
<Button
variant='solid' color='danger'
onClick={() => routerPush('/')}
endDecorator={<ArrowBackIcon />}
sx={{ mt: 2 }}
>
Cancel
</Button>
</>}
{/* URL under analysis */}
<Typography level='body-xs'>
{intentURL}
</Typography>
</Box>
);
}
/**
* This page will be invoked on mobile when sharing Text/URLs/Files from other APPs
* Example URL: https://get.big-agi.com/launch?title=This+Title&text=https%3A%2F%2Fexample.com%2Fapp%2Fpath
*/
export default function LaunchPage() {
return (
<AppLayout>
<AppShareTarget />
</AppLayout>
);
}
-18
View File
@@ -1,18 +0,0 @@
import * as React from 'react';
import { useRouter } from 'next/router';
import { AppChatLink } from '../../../src/apps/link/AppChatLink';
import { AppLayout } from '~/common/layout/AppLayout';
export default function ChatLinkPage() {
const { query } = useRouter();
const chatLinkId = query?.chatLinkId as string ?? '';
return (
<AppLayout suspendAutoModelsSetup>
<AppChatLink linkId={chatLinkId} />
</AppLayout>
);
}
-18
View File
@@ -1,18 +0,0 @@
import * as React from 'react';
import { AppNews } from '../src/apps/news/AppNews';
import { useMarkNewsAsSeen } from '../src/apps/news/news.hooks';
import { AppLayout } from '~/common/layout/AppLayout';
export default function NewsPage() {
// update the last seen news version
useMarkNewsAsSeen();
return (
<AppLayout suspendAutoModelsSetup>
<AppNews />
</AppLayout>
);
}
-14
View File
@@ -1,14 +0,0 @@
import * as React from 'react';
import { AppPersonas } from '../src/apps/personas/AppPersonas';
import { AppLayout } from '~/common/layout/AppLayout';
export default function PersonasPage() {
return (
<AppLayout>
<AppPersonas />
</AppLayout>
);
}
-63
View File
@@ -1,63 +0,0 @@
// Prisma is the ORM for server-side (API) access to the database
//
// This file defines the schema for the database.
// - make sure to run 'prisma generate' after making changes to this file
// - make sure to run 'prisma db push' to sync the remote database with the schema
//
// Database is optional: when the environment variables are not set, the database is not used at all,
// and the storage of data in Big-AGI is limited to client-side (browser) storage.
//
// The database is used for:
// - the 'sharing' function, to let users share the chats with each other
generator client {
provider = "prisma-client-js"
}
datasource db {
provider = "postgresql"
url = env("POSTGRES_PRISMA_URL") // uses connection pooling
directUrl = env("POSTGRES_URL_NON_POOLING") // uses a direct connection
}
//
// Storage of Linked Data
//
model LinkStorage {
id String @id @default(uuid())
ownerId String
visibility LinkStorageVisibility
dataType LinkStorageDataType
dataTitle String?
dataSize Int
data Json
upVotes Int @default(0)
downVotes Int @default(0)
flagsCount Int @default(0)
readCount Int @default(0)
writeCount Int @default(1)
// time-based expiration
expiresAt DateTime?
// manual deletion
deletionKey String
isDeleted Boolean @default(false)
deletedAt DateTime?
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
}
enum LinkStorageVisibility {
PUBLIC
UNLISTED
PRIVATE
}
enum LinkStorageDataType {
CHAT_V1
}
+4 -14
View File
@@ -1,8 +1,8 @@
{
"name": "big-AGI",
"short_name": "big-AGI",
"theme_color": "#32383E",
"background_color": "#9FA6AD",
"short_name": "AGI",
"theme_color": "#434356",
"background_color": "#B9B9C6",
"description": "Personal AGI App",
"display": "standalone",
"start_url": "/",
@@ -23,15 +23,5 @@
"sizes": "1024x1024",
"type": "image/png"
}
],
"share_target": {
"action": "/launch",
"method": "GET",
"enctype": "application/x-www-form-urlencoded",
"params": {
"title": "title",
"text": "text",
"url": "url"
}
}
]
}
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
+2 -1
View File
File diff suppressed because one or more lines are too long
-51
View File
@@ -1,51 +0,0 @@
import * as React from 'react';
import { useRouter } from 'next/router';
import { Container, Sheet } from '@mui/joy';
import { AppCallQueryParams } from '~/common/app.routes';
import { InlineError } from '~/common/components/InlineError';
import { CallUI } from './CallUI';
import { CallWizard } from './CallWizard';
export const APP_CALL_ENABLED = false;
export function AppCall() {
// external state
const { query } = useRouter();
// derived state
const { conversationId, personaId } = query as any as AppCallQueryParams;
const validInput = !!conversationId && !!personaId;
return (
<Sheet variant='solid' color='neutral' invertedColors sx={{
display: 'flex', flexDirection: 'column', justifyContent: 'center',
flexGrow: 1,
overflowY: 'auto',
minHeight: 96,
}}>
<Container maxWidth='sm' sx={{
display: 'flex', flexDirection: 'column',
alignItems: 'center',
minHeight: '80dvh', justifyContent: 'space-evenly',
gap: { xs: 2, md: 4 },
}}>
{!validInput && <InlineError error={`Something went wrong. ${JSON.stringify(query)}`} />}
{validInput && (
<CallWizard conversationId={conversationId}>
<CallUI conversationId={conversationId} personaId={personaId} />
</CallWizard>
)}
</Container>
</Sheet>
);
}
-392
View File
@@ -1,392 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { useRouter } from 'next/router';
import { Box, Card, ListItemDecorator, MenuItem, Switch, Typography } from '@mui/joy';
import ArrowBackIcon from '@mui/icons-material/ArrowBack';
import CallEndIcon from '@mui/icons-material/CallEnd';
import CallIcon from '@mui/icons-material/Call';
import ChatOutlinedIcon from '@mui/icons-material/ChatOutlined';
import MicIcon from '@mui/icons-material/Mic';
import MicNoneIcon from '@mui/icons-material/MicNone';
import RecordVoiceOverIcon from '@mui/icons-material/RecordVoiceOver';
import { useChatLLMDropdown } from '../chat/components/applayout/useLLMDropdown';
import { EXPERIMENTAL_speakTextStream } from '~/modules/elevenlabs/elevenlabs.client';
import { SystemPurposeId, SystemPurposes } from '../../data';
import { VChatMessageIn } from '~/modules/llms/transports/chatGenerate';
import { streamChat } from '~/modules/llms/transports/streamChat';
import { useElevenLabsVoiceDropdown } from '~/modules/elevenlabs/useElevenLabsVoiceDropdown';
import { Link } from '~/common/components/Link';
import { SpeechResult, useSpeechRecognition } from '~/common/components/useSpeechRecognition';
import { conversationTitle, createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
import { playSoundUrl, usePlaySoundUrl } from '~/common/util/audioUtils';
import { useLayoutPluggable } from '~/common/layout/store-applayout';
import { CallAvatar } from './components/CallAvatar';
import { CallButton } from './components/CallButton';
import { CallMessage } from './components/CallMessage';
import { CallStatus } from './components/CallStatus';
function CallMenuItems(props: {
pushToTalk: boolean,
setPushToTalk: (pushToTalk: boolean) => void,
override: boolean,
setOverride: (overridePersonaVoice: boolean) => void,
}) {
// external state
const { voicesDropdown } = useElevenLabsVoiceDropdown(false, !props.override);
const handlePushToTalkToggle = () => props.setPushToTalk(!props.pushToTalk);
const handleChangeVoiceToggle = () => props.setOverride(!props.override);
return <>
<MenuItem onClick={handlePushToTalkToggle}>
<ListItemDecorator>{props.pushToTalk ? <MicNoneIcon /> : <MicIcon />}</ListItemDecorator>
Push to talk
<Switch checked={props.pushToTalk} onChange={handlePushToTalkToggle} sx={{ ml: 'auto' }} />
</MenuItem>
<MenuItem onClick={handleChangeVoiceToggle}>
<ListItemDecorator><RecordVoiceOverIcon /></ListItemDecorator>
Change Voice
<Switch checked={props.override} onChange={handleChangeVoiceToggle} sx={{ ml: 'auto' }} />
</MenuItem>
<MenuItem>
<ListItemDecorator>{' '}</ListItemDecorator>
{voicesDropdown}
</MenuItem>
<MenuItem component={Link} href='https://github.com/enricoros/big-agi/issues/175' target='_blank'>
<ListItemDecorator><ChatOutlinedIcon /></ListItemDecorator>
Voice Calls Feedback
</MenuItem>
</>;
}
export function CallUI(props: {
conversationId: string,
personaId: string,
}) {
// state
const [avatarClickCount, setAvatarClickCount] = React.useState<number>(0);
// const [micMuted, setMicMuted] = React.useState(false);
const [callElapsedTime, setCallElapsedTime] = React.useState<string>('00:00');
const [callMessages, setCallMessages] = React.useState<DMessage[]>([]);
const [overridePersonaVoice, setOverridePersonaVoice] = React.useState<boolean>(false);
const [personaTextInterim, setPersonaTextInterim] = React.useState<string | null>(null);
const [pushToTalk, setPushToTalk] = React.useState(true);
const [stage, setStage] = React.useState<'ring' | 'declined' | 'connected' | 'ended'>('ring');
const responseAbortController = React.useRef<AbortController | null>(null);
// external state
const { push: routerPush } = useRouter();
const { chatLLMId, chatLLMDropdown } = useChatLLMDropdown();
const { chatTitle, messages } = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
return {
chatTitle: conversation ? conversationTitle(conversation) : 'no conversation',
messages: conversation ? conversation.messages : [],
};
}, shallow);
const persona = SystemPurposes[props.personaId as SystemPurposeId] ?? undefined;
const personaCallStarters = persona?.call?.starters ?? undefined;
const personaVoiceId = overridePersonaVoice ? undefined : (persona?.voices?.elevenLabs?.voiceId ?? undefined);
const personaSystemMessage = persona?.systemMessage ?? undefined;
// hooks and speech
const [speechInterim, setSpeechInterim] = React.useState<SpeechResult | null>(null);
const onSpeechResultCallback = React.useCallback((result: SpeechResult) => {
setSpeechInterim(result.done ? null : { ...result });
if (result.done) {
const transcribed = result.transcript.trim();
if (transcribed.length >= 1)
setCallMessages(messages => [...messages, createDMessage('user', transcribed)]);
}
}, []);
const { isSpeechEnabled, isRecording, isRecordingAudio, isRecordingSpeech, startRecording, stopRecording, toggleRecording } = useSpeechRecognition(onSpeechResultCallback, 1000, false);
// derived state
const isRinging = stage === 'ring';
const isConnected = stage === 'connected';
const isDeclined = stage === 'declined';
const isEnded = stage === 'ended';
/// Sounds
// pickup / hangup
React.useEffect(() => {
!isRinging && playSoundUrl(isConnected ? '/sounds/chat-begin.mp3' : '/sounds/chat-end.mp3');
}, [isRinging, isConnected]);
// ringtone
usePlaySoundUrl(isRinging ? '/sounds/chat-ringtone.mp3' : null, 300, 2800 * 2);
/// CONNECTED
const handleCallStop = () => {
stopRecording();
setStage('ended');
};
// [E] pickup -> seed message and call timer
// FIXME: Overriding the voice will reset the call - not a desired behavior
React.useEffect(() => {
if (!isConnected) return;
// show the call timer
setCallElapsedTime('00:00');
const start = Date.now();
const interval = setInterval(() => {
const elapsedSeconds = Math.floor((Date.now() - start) / 1000);
const minutes = Math.floor(elapsedSeconds / 60);
const seconds = elapsedSeconds % 60;
setCallElapsedTime(`${minutes < 10 ? '0' : ''}${minutes}:${seconds < 10 ? '0' : ''}${seconds}`);
}, 1000);
// seed the first message
const phoneMessages = personaCallStarters || ['Hello?', 'Hey!'];
const firstMessage = phoneMessages[Math.floor(Math.random() * phoneMessages.length)];
setCallMessages([createDMessage('assistant', firstMessage)]);
// fire/forget
void EXPERIMENTAL_speakTextStream(firstMessage, personaVoiceId);
return () => clearInterval(interval);
}, [isConnected, personaCallStarters, personaVoiceId]);
// [E] persona streaming response - upon new user message
React.useEffect(() => {
// only act when we have a new user message
if (!isConnected || callMessages.length < 1 || callMessages[callMessages.length - 1].role !== 'user')
return;
switch (callMessages[callMessages.length - 1].text) {
// do not respond
case 'Stop.':
return;
// command: close the call
case 'Goodbye.':
setStage('ended');
setTimeout(() => {
void routerPush('/');
}, 2000);
return;
// command: regenerate answer
case 'Retry.':
case 'Try again.':
setCallMessages(messages => messages.slice(0, messages.length - 2));
return;
// command: restart chat
case 'Restart.':
setCallMessages([]);
return;
}
// bail if no llm selected
if (!chatLLMId) return;
// temp fix: when the chat has no messages, assume only a single system message
const chatMessages: { role: VChatMessageIn['role'], text: string }[] = messages.length > 0
? messages
: personaSystemMessage
? [{ role: 'system', text: personaSystemMessage }]
: [];
// 'prompt' for a "telephone call"
// FIXME: can easily run out of tokens - if this gets traction, we'll fix it
const callPrompt: VChatMessageIn[] = [
{ role: 'system', content: 'You are having a phone call. Your response style is brief and to the point, and according to your personality, defined below.' },
...chatMessages.map(message => ({ role: message.role, content: message.text })),
{ role: 'system', content: 'You are now on the phone call related to the chat above. Respect your personality and answer with short, friendly and accurate thoughtful lines.' },
...callMessages.map(message => ({ role: message.role, content: message.text })),
];
// perform completion
responseAbortController.current = new AbortController();
let finalText = '';
let error: any | null = null;
streamChat(chatLLMId, callPrompt, responseAbortController.current.signal, (updatedMessage: Partial<DMessage>) => {
const text = updatedMessage.text?.trim();
if (text) {
finalText = text;
setPersonaTextInterim(text);
}
}).catch((err: DOMException) => {
if (err?.name !== 'AbortError')
error = err;
}).finally(() => {
setPersonaTextInterim(null);
setCallMessages(messages => [...messages, createDMessage('assistant', finalText + (error ? ` (ERROR: ${error.message || error.toString()})` : ''))]);
// fire/forget
void EXPERIMENTAL_speakTextStream(finalText, personaVoiceId);
});
return () => {
responseAbortController.current?.abort();
responseAbortController.current = null;
};
}, [isConnected, callMessages, chatLLMId, messages, personaVoiceId, personaSystemMessage, routerPush]);
// [E] Message interrupter
const abortTrigger = isConnected && isRecordingSpeech;
React.useEffect(() => {
if (abortTrigger && responseAbortController.current) {
responseAbortController.current.abort();
responseAbortController.current = null;
}
// TODO.. abort current speech
}, [abortTrigger]);
// [E] continuous speech recognition (reload)
const shouldStartRecording = isConnected && !pushToTalk && speechInterim === null && !isRecordingAudio;
React.useEffect(() => {
if (shouldStartRecording)
startRecording();
}, [shouldStartRecording, startRecording]);
// more derived state
const personaName = persona?.title ?? 'Unknown';
const isMicEnabled = isSpeechEnabled;
const isTTSEnabled = true;
const isEnabled = isMicEnabled && isTTSEnabled;
// pluggable UI
const menuItems = React.useMemo(() =>
<CallMenuItems
pushToTalk={pushToTalk} setPushToTalk={setPushToTalk}
override={overridePersonaVoice} setOverride={setOverridePersonaVoice} />
, [overridePersonaVoice, pushToTalk],
);
useLayoutPluggable(chatLLMDropdown, null, menuItems);
return <>
<Typography
level='h1'
sx={{
fontSize: { xs: '2.5rem', md: '3rem' },
textAlign: 'center',
mx: 2,
}}
>
{isConnected ? personaName : 'Hello'}
</Typography>
<CallAvatar
symbol={persona?.symbol || '?'}
imageUrl={persona?.imageUri}
isRinging={isRinging}
onClick={() => setAvatarClickCount(avatarClickCount + 1)}
/>
<CallStatus
callerName={isConnected ? undefined : personaName}
statusText={isRinging ? 'is calling you' : isDeclined ? 'call declined' : isEnded ? 'call ended' : callElapsedTime}
regardingText={chatTitle}
micError={!isMicEnabled} speakError={!isTTSEnabled}
/>
{/* Live Transcript, w/ streaming messages, audio indication, etc. */}
{(isConnected || isEnded) && (
<Card variant='soft' sx={{
flexGrow: 1,
minHeight: '15dvh', maxHeight: '24dvh',
overflow: 'auto',
width: '100%',
borderRadius: 'lg',
flexDirection: 'column-reverse',
}}>
{/* Messages in reverse order, for auto-scroll from the bottom */}
<Box sx={{ display: 'flex', flexDirection: 'column-reverse', gap: 1 }}>
{/* Listening... */}
{isRecording && (
<CallMessage
text={<>{speechInterim?.transcript ? speechInterim.transcript + ' ' : ''}<i>{speechInterim?.interimTranscript}</i></>}
variant={isRecordingSpeech ? 'solid' : 'outlined'}
role='user'
/>
)}
{/* Persona streaming text... */}
{!!personaTextInterim && (
<CallMessage
text={personaTextInterim}
variant='solid' color='neutral'
role='assistant'
/>
)}
{/* Messages (last 6 messages, in reverse order) */}
{callMessages.slice(-6).reverse().map((message) =>
<CallMessage
key={message.id}
text={message.text}
variant={message.role === 'assistant' ? 'solid' : 'soft'} color='neutral'
role={message.role} />,
)}
</Box>
</Card>
)}
{/* Call Buttons */}
<Box sx={{ width: '100%', display: 'flex', justifyContent: 'space-evenly' }}>
{/* [ringing] Decline / Accept */}
{isRinging && <CallButton Icon={CallEndIcon} text='Decline' color='danger' onClick={() => setStage('declined')} />}
{isRinging && isEnabled && <CallButton Icon={CallIcon} text='Accept' color='success' variant='soft' onClick={() => setStage('connected')} />}
{/* [Calling] Hang / PTT (mute not enabled yet) */}
{isConnected && <CallButton Icon={CallEndIcon} text='Hang up' color='danger' onClick={handleCallStop} />}
{isConnected && (pushToTalk
? <CallButton Icon={MicIcon} onClick={toggleRecording}
text={isRecordingSpeech ? 'Listening...' : isRecording ? 'Listening' : 'Push To Talk'}
variant={isRecordingSpeech ? 'solid' : isRecording ? 'soft' : 'outlined'} />
: null
// <CallButton disabled={true} Icon={MicOffIcon} onClick={() => setMicMuted(muted => !muted)}
// text={micMuted ? 'Muted' : 'Mute'}
// color={micMuted ? 'warning' : undefined} variant={micMuted ? 'solid' : 'outlined'} />
)}
{/* [ended] Back / Call Again */}
{(isEnded || isDeclined) && <Link noLinkStyle href='/'><CallButton Icon={ArrowBackIcon} text='Back' variant='soft' /></Link>}
{(isEnded || isDeclined) && <CallButton Icon={CallIcon} text='Call Again' color='success' variant='soft' onClick={() => setStage('connected')} />}
</Box>
{/* DEBUG state */}
{avatarClickCount > 10 && (avatarClickCount % 2 === 0) && (
<Card variant='outlined' sx={{ maxHeight: '25dvh', overflow: 'auto', whiteSpace: 'pre', py: 0, width: '100%' }}>
Special commands: Stop, Retry, Try Again, Restart, Goodbye.
{JSON.stringify({ isSpeechEnabled, isRecordingAudio, speechInterim }, null, 2)}
</Card>
)}
{/*{isEnded && <Card variant='solid' size='lg' color='primary'>*/}
{/* <CardContent>*/}
{/* <Typography>*/}
{/* Please rate the call quality, 1 to 5 - Just a Joke*/}
{/* </Typography>*/}
{/* </CardContent>*/}
{/*</Card>}*/}
</>;
}
-211
View File
@@ -1,211 +0,0 @@
import * as React from 'react';
import { keyframes } from '@emotion/react';
import { Box, Button, Card, CardContent, IconButton, ListItemDecorator, Typography } from '@mui/joy';
import ArrowForwardIcon from '@mui/icons-material/ArrowForward';
import ChatIcon from '@mui/icons-material/Chat';
import CheckIcon from '@mui/icons-material/Check';
import CloseIcon from '@mui/icons-material/Close';
import MicIcon from '@mui/icons-material/Mic';
import RecordVoiceOverIcon from '@mui/icons-material/RecordVoiceOver';
import WarningIcon from '@mui/icons-material/Warning';
import { navigateBack } from '~/common/app.routes';
import { openLayoutPreferences } from '~/common/layout/store-applayout';
import { useCapabilityBrowserSpeechRecognition, useCapabilityElevenLabs } from '~/common/components/useCapabilities';
import { useChatStore } from '~/common/state/store-chats';
import { useUICounter } from '~/common/state/store-ui';
const cssRainbowBackgroundKeyframes = keyframes`
100%, 0% {
background-color: rgb(128, 0, 0);
}
8% {
background-color: rgb(102, 51, 0);
}
16% {
background-color: rgb(64, 64, 0);
}
25% {
background-color: rgb(38, 76, 0);
}
33% {
background-color: rgb(0, 89, 0);
}
41% {
background-color: rgb(0, 76, 41);
}
50% {
background-color: rgb(0, 64, 64);
}
58% {
background-color: rgb(0, 51, 102);
}
66% {
background-color: rgb(0, 0, 128);
}
75% {
background-color: rgb(63, 0, 128);
}
83% {
background-color: rgb(76, 0, 76);
}
91% {
background-color: rgb(102, 0, 51);
}`;
function StatusCard(props: { icon: React.JSX.Element, hasIssue: boolean, text: string, button?: React.JSX.Element }) {
return (
<Card sx={{ width: '100%' }}>
<CardContent sx={{ flexDirection: 'row' }}>
<ListItemDecorator>
{props.icon}
</ListItemDecorator>
<Typography level='title-md' color={props.hasIssue ? 'warning' : undefined} sx={{ flexGrow: 1 }}>
{props.text}
{props.button}
</Typography>
<ListItemDecorator>
{props.hasIssue ? <WarningIcon color='warning' /> : <CheckIcon color='success' />}
</ListItemDecorator>
</CardContent>
</Card>
);
}
export function CallWizard(props: { strict?: boolean, conversationId: string, children: React.ReactNode }) {
// state
const [chatEmptyOverride, setChatEmptyOverride] = React.useState(false);
const [recognitionOverride, setRecognitionOverride] = React.useState(false);
// external state
const recognition = useCapabilityBrowserSpeechRecognition();
const synthesis = useCapabilityElevenLabs();
const chatIsEmpty = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
return !(conversation?.messages?.length);
});
const { novel, touch } = useUICounter('call-wizard');
// derived state
const overriddenEmptyChat = chatEmptyOverride || !chatIsEmpty;
const overriddenRecognition = recognitionOverride || recognition.mayWork;
const allGood = overriddenEmptyChat && overriddenRecognition && synthesis.mayWork;
const fatalGood = overriddenRecognition && synthesis.mayWork;
if (!novel && fatalGood)
return props.children;
const handleOverrideChatEmpty = () => setChatEmptyOverride(true);
const handleOverrideRecognition = () => setRecognitionOverride(true);
const handleConfigureElevenLabs = () => {
openLayoutPreferences(3);
};
const handleFinishButton = () => {
if (!allGood)
return navigateBack();
touch();
};
return <>
<Box sx={{ flexGrow: 0.5 }} />
<Typography level='title-lg' sx={{ fontSize: '3rem', fontWeight: 200, lineHeight: '1.5em', textAlign: 'center' }}>
Welcome to<br />
<Typography
component='span'
sx={{
backgroundColor: 'primary.solidActiveBg', mx: -0.5, px: 0.5,
animation: `${cssRainbowBackgroundKeyframes} 15s linear infinite`,
}}>
your first call
</Typography>
</Typography>
<Box sx={{ flexGrow: 0.5 }} />
<Typography level='body-lg'>
{/*Before you receive your first call, */}
Let&apos;s get you all set up.
</Typography>
{/* Chat Empty status */}
<StatusCard
icon={<ChatIcon />}
hasIssue={!overriddenEmptyChat}
text={overriddenEmptyChat ? 'Great! Your chat has messages.' : 'The chat is empty. Calls are effective when the caller has context.'}
button={overriddenEmptyChat ? undefined : (
<Button variant='outlined' onClick={handleOverrideChatEmpty} sx={{ mx: 1 }}>
Ignore
</Button>
)}
/>
{/* Add the speech to text feature status */}
<StatusCard
icon={<MicIcon />}
text={
((overriddenRecognition && !recognition.warnings.length) ? 'Speech recognition should be good to go.' : 'There might be a speech recognition issue.')
+ (recognition.isApiAvailable ? '' : ' Your browser does not support the speech recognition API.')
+ (recognition.isDeviceNotSupported ? ' Your device does not provide this feature.' : '')
+ (recognition.warnings.length ? ' ⚠️ ' + recognition.warnings.join(' · ') : '')
}
button={overriddenRecognition ? undefined : (
<Button variant='outlined' onClick={handleOverrideRecognition} sx={{ mx: 1 }}>
Ignore
</Button>
)}
hasIssue={!overriddenRecognition}
/>
{/* Text to Speech status */}
<StatusCard
icon={<RecordVoiceOverIcon />}
text={
(synthesis.mayWork ? 'Voice synthesis should be ready.' : 'There might be an issue with ElevenLabs voice synthesis.')
+ (synthesis.isConfiguredServerSide ? '' : (synthesis.isConfiguredClientSide ? '' : ' Please add your API key in the settings.'))
}
button={synthesis.mayWork ? undefined : (
<Button variant='outlined' onClick={handleConfigureElevenLabs} sx={{ mx: 1 }}>
Configure
</Button>
)}
hasIssue={!synthesis.mayWork}
/>
{/*<Typography>*/}
{/* 1. To start a call, click the "Accept" button when you receive an incoming call.*/}
{/* 2. If your mic is enabled, you'll see a "Push to Talk" button. Press and hold it to speak, then release it to stop speaking.*/}
{/* 3. If your mic is disabled, you can still type your messages in the chat and the assistant will respond.*/}
{/* 4. During the call, you can control the voice synthesis settings from the menu in the top right corner.*/}
{/* 5. To end the call, click the "Hang up" button.*/}
{/*</Typography>*/}
<Box sx={{ flexGrow: 2 }} />
{/* bottom: text & button */}
<Box sx={{ display: 'flex', justifyContent: 'space-around', alignItems: 'center', width: '100%', gap: 2, px: 0.5 }}>
<Typography level='body-lg'>
{allGood ? 'Ready, Set, Call' : 'Please resolve the issues above before proceeding with the call'}
</Typography>
<IconButton
size='lg' variant={allGood ? 'soft' : 'solid'} color={allGood ? 'success' : 'danger'}
onClick={handleFinishButton} sx={{ borderRadius: '50px' }}
>
{allGood ? <ArrowForwardIcon sx={{ fontSize: '1.5em' }} /> : <CloseIcon sx={{ fontSize: '1.5em' }} />}
</IconButton>
</Box>
<Box sx={{ flexGrow: 0.5 }} />
</>;
}
-48
@@ -1,48 +0,0 @@
import * as React from 'react';
import { keyframes } from '@emotion/react';
import { Avatar, Box } from '@mui/joy';
const cssScaleKeyframes = keyframes`
0% {
transform: scale(1);
}
50% {
transform: scale(1.2);
}
100% {
transform: scale(1);
}`;
export function CallAvatar(props: { symbol: string, imageUrl?: string, isRinging: boolean, onClick: () => void }) {
return (
<Avatar
variant='soft' color='neutral'
onClick={props.onClick}
src={props.imageUrl}
sx={{
'--Avatar-size': { xs: '160px', md: '200px' },
'--variant-borderWidth': '4px',
boxShadow: !props.imageUrl ? 'md' : null,
fontSize: { xs: '100px', md: '120px' },
}}
>
{/* As a fallback, show the large Persona Symbol */}
{!props.imageUrl && (
<Box
sx={{
...(props.isRinging
? { animation: `${cssScaleKeyframes} 1.4s ease-in-out infinite` }
: {}),
}}
>
{props.symbol}
</Box>
)}
</Avatar>
);
}
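A minimal usage sketch for the CallAvatar above (the symbol and handler name are illustrative, not taken from this diff):

// Hypothetical: a ringing avatar with no image, falling back to the large persona symbol
<CallAvatar symbol='👩‍🔬' isRinging={true} onClick={handleAvatarClick} />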
-43
@@ -1,43 +0,0 @@
import * as React from 'react';
import { Box, ColorPaletteProp, IconButton, Typography, VariantProp } from '@mui/joy';
/**
* Large button to operate the call, e.g.
* --------
* | 🎤 |
* | Mute |
* --------
*/
export function CallButton(props: {
Icon: React.FC, text: string,
variant?: VariantProp, color?: ColorPaletteProp, disabled?: boolean,
onClick?: () => void,
}) {
return (
<Box
onClick={() => !props.disabled && props.onClick?.()}
sx={{
display: 'flex', flexDirection: 'column', alignItems: 'center',
gap: { xs: 1, md: 2 },
}}
>
<IconButton
disabled={props.disabled} variant={props.variant || 'solid'} color={props.color}
sx={{
'--IconButton-size': { xs: '4.2rem', md: '5rem' },
borderRadius: '50%',
// boxShadow: 'lg',
}}>
<props.Icon />
</IconButton>
<Typography level='title-md' variant={props.disabled ? 'soft' : undefined}>
{props.text}
</Typography>
</Box>
);
}
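A usage sketch for CallButton, mirroring the 'Hang up' action on the call page earlier in this diff (handleCallStop is the handler defined on that page):

import CallEndIcon from '@mui/icons-material/CallEnd';

// A red 'Hang up' action, as rendered among the call buttons
<CallButton Icon={CallEndIcon} text='Hang up' color='danger' onClick={handleCallStop} />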
-33
@@ -1,33 +0,0 @@
import * as React from 'react';
import { Chip, ColorPaletteProp, VariantProp } from '@mui/joy';
import { SxProps } from '@mui/system';
import { VChatMessageIn } from '~/modules/llms/transports/chatGenerate';
export function CallMessage(props: {
text?: string | React.JSX.Element,
variant?: VariantProp, color?: ColorPaletteProp,
role: VChatMessageIn['role'],
sx?: SxProps,
}) {
return (
<Chip
color={props.color} variant={props.variant}
sx={{
alignSelf: props.role === 'user' ? 'end' : 'start',
whiteSpace: 'break-spaces',
borderRadius: 'lg',
mt: 'auto',
// boxShadow: 'md',
py: 1,
...(props.sx || {}),
}}
>
{props.text}
</Chip>
);
}
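A usage sketch for CallMessage, mirroring the persona's streaming chip on the call page earlier in this diff (personaTextInterim comes from that page's state):

// The persona's in-progress reply; only 'user' messages align to the end per the sx above
<CallMessage text={personaTextInterim} variant='solid' color='neutral' role='assistant' />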
-47
@@ -1,47 +0,0 @@
import * as React from 'react';
import { Box, Typography } from '@mui/joy';
import { InlineError } from '~/common/components/InlineError';
/**
* A status message for the call, such as:
*
* $Name
* "Connecting..." or "Call ended",
* re: $Regarding
*/
export function CallStatus(props: {
callerName?: string,
statusText: string,
regardingText?: string,
micError: boolean, speakError: boolean,
// llmComponent?: React.JSX.Element,
}) {
return (
<Box sx={{ display: 'flex', flexDirection: 'column' }}>
{!!props.callerName && <Typography level='h3' sx={{ textAlign: 'center' }}>
<b>{props.callerName}</b>
</Typography>}
{/*{props.llmComponent}*/}
<Typography level='body-md' sx={{ textAlign: 'center' }}>
{props.statusText}
</Typography>
{!!props.regardingText && <Typography level='body-md' sx={{ textAlign: 'center', mt: 0 }}>
re: {props.regardingText}
</Typography>}
{props.micError && <InlineError
severity='danger' error='But this browser does not support speech recognition... 🤦‍♀️ - Try Chrome on Windows?' />}
{props.speakError && <InlineError
severity='danger' error='And text-to-speech is not configured... 🤦‍♀️ - Configure it in Settings?' />}
</Box>
);
}
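A usage sketch for CallStatus following the docstring's template (all values here are illustrative):

// Hypothetical: an outgoing call being established, with no mic or speech issues
<CallStatus
  callerName='Scientist'
  statusText='Connecting...'
  regardingText='the current conversation'
  micError={false} speakError={false}
/>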
-293
@@ -1,293 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { CmdRunProdia } from '~/modules/prodia/prodia.client';
import { CmdRunReact } from '~/modules/aifn/react/react';
import { DiagramConfig, DiagramsModal } from '~/modules/aifn/digrams/DiagramsModal';
import { FlattenerModal } from '~/modules/aifn/flatten/FlattenerModal';
import { imaginePromptFromText } from '~/modules/aifn/imagine/imaginePromptFromText';
import { useModelsStore } from '~/modules/llms/store-llms';
import { ConfirmationModal } from '~/common/components/ConfirmationModal';
import { createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
import { useGlobalShortcut } from '~/common/components/useGlobalShortcut';
import { useLayoutPluggable } from '~/common/layout/store-applayout';
import { ChatDrawerItems } from './components/applayout/ChatDrawerItems';
import { ChatDropdowns } from './components/applayout/ChatDropdowns';
import { ChatMenuItems } from './components/applayout/ChatMenuItems';
import { ChatMessageList } from './components/ChatMessageList';
import { ChatModeId } from './components/composer/store-composer';
import { CmdAddRoleMessage, extractCommands } from './commands';
import { Composer } from './components/composer/Composer';
import { Ephemerals } from './components/Ephemerals';
import { TradeConfig, TradeModal } from './trade/TradeModal';
import { runAssistantUpdatingState } from './editors/chat-stream';
import { runImageGenerationUpdatingState } from './editors/image-generate';
import { runReActUpdatingState } from './editors/react-tangent';
const SPECIAL_ID_ALL_CHATS = 'all-chats';
export function AppChat() {
// state
const [isMessageSelectionMode, setIsMessageSelectionMode] = React.useState(false);
const [diagramConfig, setDiagramConfig] = React.useState<DiagramConfig | null>(null);
const [tradeConfig, setTradeConfig] = React.useState<TradeConfig | null>(null);
const [clearConfirmationId, setClearConfirmationId] = React.useState<string | null>(null);
const [deleteConfirmationId, setDeleteConfirmationId] = React.useState<string | null>(null);
const [flattenConversationId, setFlattenConversationId] = React.useState<string | null>(null);
const composerTextAreaRef = React.useRef<HTMLTextAreaElement>(null);
// external state
const { activeConversationId, isConversationEmpty, hasAnyContent, newConversation, duplicateConversation, deleteAllConversations, setMessages, systemPurposeId, setAutoTitle } = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === state.activeConversationId);
const isConversationEmpty = conversation ? !conversation.messages.length : true;
const hasAnyContent = state.conversations.length > 1 || !isConversationEmpty;
return {
activeConversationId: state.activeConversationId,
isConversationEmpty,
hasAnyContent,
newConversation: state.createConversationOrSwitch,
duplicateConversation: state.duplicateConversation,
deleteAllConversations: state.deleteAllConversations,
setMessages: state.setMessages,
systemPurposeId: conversation?.systemPurposeId ?? null,
setAutoTitle: state.setAutoTitle,
};
}, shallow);
const handleExecuteConversation = async (chatModeId: ChatModeId, conversationId: string, history: DMessage[]) => {
const { chatLLMId } = useModelsStore.getState();
if (!chatModeId || !conversationId || !chatLLMId) return;
// "/command ...": overrides the chat mode
const lastMessage = history.length > 0 ? history[history.length - 1] : null;
if (lastMessage?.role === 'user') {
const pieces = extractCommands(lastMessage.text);
if (pieces.length == 2 && pieces[0].type === 'cmd' && pieces[1].type === 'text') {
const [command, prompt] = [pieces[0].value, pieces[1].value];
if (CmdRunProdia.includes(command)) {
setMessages(conversationId, history);
return await runImageGenerationUpdatingState(conversationId, prompt);
}
if (CmdRunReact.includes(command) && chatLLMId) {
setMessages(conversationId, history);
return await runReActUpdatingState(conversationId, prompt, chatLLMId);
}
if (CmdAddRoleMessage.includes(command)) {
lastMessage.role = command.startsWith('/s') ? 'system' : command.startsWith('/a') ? 'assistant' : 'user';
lastMessage.sender = 'Bot';
lastMessage.text = prompt;
return setMessages(conversationId, history);
}
}
}
// synchronous long-duration tasks, which update the state as they go
if (chatLLMId && systemPurposeId) {
switch (chatModeId) {
case 'immediate':
return await runAssistantUpdatingState(conversationId, history, chatLLMId, systemPurposeId);
case 'write-user':
return setMessages(conversationId, history);
case 'react':
if (!lastMessage?.text)
break;
setMessages(conversationId, history);
return await runReActUpdatingState(conversationId, lastMessage.text, chatLLMId);
case 'draw-imagine':
case 'draw-imagine-plus':
if (!lastMessage?.text)
break;
const imagePrompt = chatModeId == 'draw-imagine-plus'
? await imaginePromptFromText(lastMessage.text) || 'An error sign.'
: lastMessage.text;
setMessages(conversationId, history.map(message => message.id !== lastMessage.id ? message : {
...message,
text: `${CmdRunProdia[0]} ${imagePrompt}`,
}));
return await runImageGenerationUpdatingState(conversationId, imagePrompt);
}
}
// ISSUE: if we reach this point, the request could not be handled; at least sync the history
console.log('handleExecuteConversation: issue running', chatModeId, conversationId, lastMessage);
setMessages(conversationId, history);
};
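// The command branch above assumes extractCommands() splits the message into typed
// pieces; a sketch of the assumed shape (not verified against the extractCommands
// source) for a message like '/imagine a red panda':
//   [ { type: 'cmd', value: '/imagine' },     // matched against CmdRunProdia / CmdRunReact / CmdAddRoleMessage
//     { type: 'text', value: 'a red panda' } ] // used as the prompt for the matched command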
const _findConversation = (conversationId: string) =>
conversationId ? useChatStore.getState().conversations.find(c => c.id === conversationId) ?? null : null;
const handleExecuteChatHistory = async (conversationId: string, history: DMessage[]) =>
await handleExecuteConversation('immediate', conversationId, history);
const handleDiagramFromText = async (diagramConfig: DiagramConfig | null) => setDiagramConfig(diagramConfig);
const handleImagineFromText = async (conversationId: string, messageText: string) => {
const conversation = _findConversation(conversationId);
if (conversation)
return await handleExecuteConversation('draw-imagine-plus', conversationId, [...conversation.messages, createDMessage('user', messageText)]);
};
const handleComposerNewMessage = async (chatModeId: ChatModeId, conversationId: string, userText: string) => {
const conversation = _findConversation(conversationId);
if (conversation)
return await handleExecuteConversation(chatModeId, conversationId, [...conversation.messages, createDMessage('user', userText)]);
};
const handleRegenerateAssistant = async () => {
const conversation = activeConversationId ? _findConversation(activeConversationId) : null;
if (conversation?.messages?.length) {
const lastMessage = conversation.messages[conversation.messages.length - 1];
if (lastMessage.role === 'assistant') {
const newMessages = [...conversation.messages];
newMessages.pop();
return await handleExecuteConversation('immediate', conversation.id, newMessages);
}
}
};
useGlobalShortcut('r', true, true, false, handleRegenerateAssistant);
const handleImportConversation = () => setTradeConfig({ dir: 'import' });
const handleExportConversation = (conversationId: string | null) => setTradeConfig({ dir: 'export', conversationId });
const handleFlattenConversation = (conversationId: string) => setFlattenConversationId(conversationId);
useGlobalShortcut('n', true, false, true, () => {
newConversation();
composerTextAreaRef.current?.focus();
});
const handleCloneConversation = (conversationId: string) => duplicateConversation(conversationId);
useGlobalShortcut('f', true, false, true, () => isConversationEmpty || activeConversationId && handleCloneConversation(activeConversationId));
const handleClearConversation = (conversationId: string) => setClearConfirmationId(conversationId);
useGlobalShortcut('x', true, false, true, () => isConversationEmpty || setClearConfirmationId(activeConversationId));
const handleConfirmedClearConversation = () => {
if (clearConfirmationId) {
setMessages(clearConfirmationId, []);
setAutoTitle(clearConfirmationId, '');
setClearConfirmationId(null);
}
};
const handleDeleteAllConversations = () => setDeleteConfirmationId(SPECIAL_ID_ALL_CHATS);
const handleConfirmedDeleteConversation = () => {
if (deleteConfirmationId) {
if (deleteConfirmationId === SPECIAL_ID_ALL_CHATS) {
deleteAllConversations();
}// else
// deleteConversation(deleteConfirmationId);
setDeleteConfirmationId(null);
}
};
useGlobalShortcut('d', true, false, true, () => isConversationEmpty || setDeleteConfirmationId(activeConversationId));
// Pluggable ApplicationBar components
const centerItems = React.useMemo(() =>
<ChatDropdowns conversationId={activeConversationId} />,
[activeConversationId],
);
const drawerItems = React.useMemo(() =>
<ChatDrawerItems
conversationId={activeConversationId}
onImportConversation={handleImportConversation}
onDeleteAllConversations={handleDeleteAllConversations}
/>,
[activeConversationId],
);
const menuItems = React.useMemo(() =>
<ChatMenuItems
conversationId={activeConversationId} isConversationEmpty={isConversationEmpty} hasConversations={hasAnyContent}
isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
onClearConversation={handleClearConversation}
onDuplicateConversation={duplicateConversation}
onExportConversation={handleExportConversation}
onFlattenConversation={handleFlattenConversation}
/>,
[activeConversationId, duplicateConversation, hasAnyContent, isConversationEmpty, isMessageSelectionMode],
);
useLayoutPluggable(centerItems, drawerItems, menuItems);
return <>
<ChatMessageList
conversationId={activeConversationId}
isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
onExecuteChatHistory={handleExecuteChatHistory}
onDiagramFromText={handleDiagramFromText}
onImagineFromText={handleImagineFromText}
sx={{
flexGrow: 1,
backgroundColor: 'background.level1',
overflowY: 'auto', // overflowY: 'hidden'
minHeight: 96,
}} />
<Ephemerals
conversationId={activeConversationId}
sx={{
// flexGrow: 0.1,
flexShrink: 0.5,
overflowY: 'auto',
minHeight: 64,
}} />
<Composer
conversationId={activeConversationId} messageId={null}
isDeveloperMode={systemPurposeId === 'Developer'}
composerTextAreaRef={composerTextAreaRef}
onNewMessage={handleComposerNewMessage}
sx={{
zIndex: 21, // position: 'sticky', bottom: 0,
backgroundColor: 'background.surface',
borderTop: `1px solid`,
borderTopColor: 'divider',
p: { xs: 1, md: 2 },
}} />
{/* Diagrams */}
{!!diagramConfig && <DiagramsModal config={diagramConfig} onClose={() => setDiagramConfig(null)} />}
{/* Flatten */}
{!!flattenConversationId && <FlattenerModal conversationId={flattenConversationId} onClose={() => setFlattenConversationId(null)} />}
{/* Import / Export */}
{!!tradeConfig && <TradeModal config={tradeConfig} onClose={() => setTradeConfig(null)} />}
{/* [confirmation] Reset Conversation */}
{!!clearConfirmationId && <ConfirmationModal
open onClose={() => setClearConfirmationId(null)} onPositive={handleConfirmedClearConversation}
confirmationText={'Are you sure you want to discard all the messages?'} positiveActionText={'Clear conversation'}
/>}
{/* [confirmation] Delete All */}
{!!deleteConfirmationId && <ConfirmationModal
open onClose={() => setDeleteConfirmationId(null)} onPositive={handleConfirmedDeleteConversation}
confirmationText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
? 'Are you absolutely sure you want to delete ALL conversations? This action cannot be undone.'
: 'Are you sure you want to delete this conversation?'}
positiveActionText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
? 'Yes, delete all'
: 'Delete conversation'}
/>}
</>;
}
+193
@@ -0,0 +1,193 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, useTheme } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import { CmdRunProdia } from '@/modules/prodia/prodia.client';
import { CmdRunReact } from '@/modules/search/search.client';
import { PasteGG } from '@/modules/pastegg/pastegg.types';
import { PublishedModal } from '@/modules/pastegg/PublishedModal';
import { callPublish } from '@/modules/pastegg/pastegg.client';
import { ConfirmationModal } from '@/common/components/ConfirmationModal';
import { Link } from '@/common/components/Link';
import { conversationToMarkdown } from '@/common/util/conversationToMarkdown';
import { createDMessage, DMessage, useChatStore } from '@/common/state/store-chats';
import { extractCommands } from '@/common/util/extractCommands';
import { useComposerStore } from '@/common/state/store-composer';
import { useSettingsStore } from '@/common/state/store-settings';
import { ApplicationBar } from './components/appbar/ApplicationBar';
import { ChatMessageList } from './components/ChatMessageList';
import { Composer } from './components/composer/Composer';
import { Ephemerals } from './components/ephemerals/Ephemerals';
import { imaginePromptFromText } from './util/ai-functions';
import { runAssistantUpdatingState } from './util/agi-immediate';
import { runImageGenerationUpdatingState } from './util/imagine';
import { runReActUpdatingState } from './util/agi-react';
export function Chat(props: { onShowSettings: () => void, sx?: SxProps }) {
// state
const [isMessageSelectionMode, setIsMessageSelectionMode] = React.useState(false);
const [publishConversationId, setPublishConversationId] = React.useState<string | null>(null);
const [publishResponse, setPublishResponse] = React.useState<PasteGG.API.Publish.Response | null>(null);
// external state
const theme = useTheme();
const { sendModeId } = useComposerStore(state => ({ sendModeId: state.sendModeId }), shallow);
const { activeConversationId, setMessages, chatModelId, systemPurposeId } = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === state.activeConversationId);
return {
activeConversationId: state.activeConversationId,
setMessages: state.setMessages,
chatModelId: conversation?.chatModelId ?? null,
systemPurposeId: conversation?.systemPurposeId ?? null,
};
}, shallow);
const handleExecuteConversation = async (conversationId: string, history: DMessage[]) => {
if (!conversationId) return;
// Command: the last user message is a slash command
const lastMessage = history.length > 0 ? history[history.length - 1] : null;
if (lastMessage?.role === 'user') {
const pieces = extractCommands(lastMessage.text);
if (pieces.length == 2 && pieces[0].type === 'cmd' && pieces[1].type === 'text') {
const command = pieces[0].value;
const prompt = pieces[1].value;
if (CmdRunProdia.includes(command)) {
setMessages(conversationId, history);
return await runImageGenerationUpdatingState(conversationId, prompt);
}
if (CmdRunReact.includes(command) && chatModelId) {
setMessages(conversationId, history);
return await runReActUpdatingState(conversationId, prompt, chatModelId);
}
// if (CmdRunSearch.includes(command))
// return await run...
}
}
// synchronous long-duration tasks, which update the state as they go
if (sendModeId && chatModelId && systemPurposeId) {
switch (sendModeId) {
case 'immediate':
return await runAssistantUpdatingState(conversationId, history, chatModelId, systemPurposeId);
case 'react':
if (lastMessage?.text) {
setMessages(conversationId, history);
return await runReActUpdatingState(conversationId, lastMessage.text, chatModelId);
}
}
}
// ISSUE: if we reach this point, the request could not be handled; at least sync the history
setMessages(conversationId, history);
};
const _findConversation = (conversationId: string) =>
conversationId ? useChatStore.getState().conversations.find(c => c.id === conversationId) ?? null : null;
const handleSendUserMessage = async (conversationId: string, userText: string) => {
const conversation = _findConversation(conversationId);
if (conversation)
return await handleExecuteConversation(conversationId, [...conversation.messages, createDMessage('user', userText)]);
};
const handleImagineFromText = async (conversationId: string, messageText: string) => {
const conversation = _findConversation(conversationId);
if (conversation && chatModelId) {
const prompt = await imaginePromptFromText(messageText, chatModelId);
if (prompt)
return await handleExecuteConversation(conversationId, [...conversation.messages, createDMessage('user', `${CmdRunProdia[0]} ${prompt}`)]);
}
};
const handlePublishConversation = (conversationId: string) => setPublishConversationId(conversationId);
const handleConfirmedPublishConversation = async () => {
if (publishConversationId) {
const conversation = _findConversation(publishConversationId);
setPublishConversationId(null);
if (conversation) {
const markdownContent = conversationToMarkdown(conversation, !useSettingsStore.getState().showSystemMessages);
const publishResponse = await callPublish('paste.gg', markdownContent);
setPublishResponse(publishResponse);
}
}
};
return (
<Box
sx={{
display: 'flex', flexDirection: 'column', height: '100vh',
...(props.sx || {}),
}}>
<ApplicationBar
conversationId={activeConversationId}
isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
onPublishConversation={handlePublishConversation}
onShowSettings={props.onShowSettings}
sx={{
zIndex: 20, // position: 'sticky', top: 0,
// ...(process.env.NODE_ENV === 'development' ? { background: theme.vars.palette.danger.solidBg } : {}),
}} />
<ChatMessageList
conversationId={activeConversationId}
isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
onExecuteConversation={handleExecuteConversation}
onImagineFromText={handleImagineFromText}
sx={{
flexGrow: 1,
background: theme.vars.palette.background.level2,
overflowY: 'auto', // overflowY: 'hidden'
minHeight: 96,
}} />
<Ephemerals
conversationId={activeConversationId}
sx={{
// flexGrow: 0.1,
flexShrink: 0.5,
overflowY: 'auto',
minHeight: 64,
}} />
<Composer
conversationId={activeConversationId} messageId={null}
isDeveloperMode={systemPurposeId === 'Developer'}
onSendMessage={handleSendUserMessage}
sx={{
zIndex: 21, // position: 'sticky', bottom: 0,
background: theme.vars.palette.background.surface,
borderTop: `1px solid ${theme.vars.palette.divider}`,
p: { xs: 1, md: 2 },
}} />
{/* Confirmation for Publishing */}
<ConfirmationModal
open={!!publishConversationId} onClose={() => setPublishConversationId(null)} onPositive={handleConfirmedPublishConversation}
confirmationText={<>
Share your conversation anonymously on <Link href='https://paste.gg' target='_blank'>paste.gg</Link>?
It will be unlisted and available to share and read for 30 days. Keep in mind, deletion may not be possible.
Are you sure you want to proceed?
</>} positiveActionText={'Understood, upload to paste.gg'}
/>
{/* Show the Published details */}
{!!publishResponse && (
<PublishedModal open onClose={() => setPublishResponse(null)} response={publishResponse} />
)}
</Box>
);
}
+72 -139
@@ -4,156 +4,100 @@ import { shallow } from 'zustand/shallow';
import { Box, List } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import { DiagramConfig } from '~/modules/aifn/digrams/DiagramsModal';
import { speakText } from '~/modules/elevenlabs/elevenlabs.client';
import { useChatLLM } from '~/modules/llms/store-llms';
import { GlobalShortcut, useGlobalShortcut } from '~/common/components/useGlobalShortcut';
import { createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
import { openLayoutPreferences } from '~/common/layout/store-applayout';
import { useCapabilityElevenLabs, useCapabilityProdia } from '~/common/components/useCapabilities';
import { createDMessage, DMessage, useChatStore } from '@/common/state/store-chats';
import { useSettingsStore } from '@/common/state/store-settings';
import { ChatMessage } from './message/ChatMessage';
import { CleanerMessage, MessagesSelectionHeader } from './message/CleanerMessage';
import { PersonaSelector } from './persona-selector/PersonaSelector';
import { useChatShowSystemMessages } from '../store-app-chat';
import { ChatMessageSelectable, MessagesSelectionHeader } from './message/ChatMessageSelectable';
import { PurposeSelector } from './PurposeSelector';
/**
* A list of ChatMessages
*/
export function ChatMessageList(props: {
conversationId: string | null,
isMessageSelectionMode: boolean, setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void,
onExecuteChatHistory: (conversationId: string, history: DMessage[]) => void,
onDiagramFromText: (diagramConfig: DiagramConfig | null) => Promise<any>,
onImagineFromText: (conversationId: string, selectedText: string) => Promise<any>,
sx?: SxProps
}) {
export function ChatMessageList(props: { conversationId: string | null, isMessageSelectionMode: boolean, setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void, onExecuteConversation: (conversationId: string, history: DMessage[]) => void, onImagineFromText: (conversationId: string, userText: string) => void, sx?: SxProps }) {
// state
const [isImagining, setIsImagining] = React.useState(false);
const [isSpeaking, setIsSpeaking] = React.useState(false);
const [selectedMessages, setSelectedMessages] = React.useState<Set<string>>(new Set());
// external state
const [showSystemMessages] = useChatShowSystemMessages();
const { messages, editMessage, deleteMessage, historyTokenCount } = useChatStore(state => {
const showSystemMessages = useSettingsStore(state => state.showSystemMessages);
const { editMessage, deleteMessage } = useChatStore(state => ({ editMessage: state.editMessage, deleteMessage: state.deleteMessage }), shallow);
const messages = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
return {
messages: conversation ? conversation.messages : [],
editMessage: state.editMessage, deleteMessage: state.deleteMessage,
historyTokenCount: conversation ? conversation.tokenCount : 0,
};
return conversation ? conversation.messages : [];
}, shallow);
const { chatLLM } = useChatLLM();
const { mayWork: isImaginable } = useCapabilityProdia();
const { mayWork: isSpeakable } = useCapabilityElevenLabs();
// text actions
const handleAppendMessage = (text: string) =>
props.conversationId && props.onExecuteChatHistory(props.conversationId, [...messages, createDMessage('user', text)]);
const handleTextDiagram = async (messageId: string, text: string) => {
if (props.conversationId) {
await props.onDiagramFromText({ conversationId: props.conversationId, messageId, text });
} else
return Promise.reject('No conversation');
};
const handleTextImagine = async (text: string) => {
if (!isImaginable) {
openLayoutPreferences(2);
} else if (props.conversationId) {
setIsImagining(true);
await props.onImagineFromText(props.conversationId, text);
setIsImagining(false);
} else
return Promise.reject('No conversation');
};
const handleTextSpeak = async (text: string) => {
if (!isSpeakable) {
openLayoutPreferences(3);
} else {
setIsSpeaking(true);
await speakText(text);
setIsSpeaking(false);
}
};
// message menu methods proxy
const handleMessageDelete = (messageId: string) =>
props.conversationId && deleteMessage(props.conversationId, messageId);
const handleMessageEdit = (messageId: string, newText: string) =>
props.conversationId && editMessage(props.conversationId, messageId, { text: newText }, true);
const handleMessageRestartFrom = (messageId: string, offset: number) => {
const handleImagineFromText = (messageText: string) =>
props.conversationId && props.onImagineFromText(props.conversationId, messageText);
const handleRestartFromMessage = (messageId: string, offset: number) => {
const truncatedHistory = messages.slice(0, messages.findIndex(m => m.id === messageId) + offset + 1);
props.conversationId && props.onExecuteChatHistory(props.conversationId, truncatedHistory);
props.conversationId && props.onExecuteConversation(props.conversationId, truncatedHistory);
};
// operate on the local selection set
const handleSelectAll = (selected: boolean) => {
const newSelected = new Set<string>();
if (selected)
for (const message of messages)
newSelected.add(message.id);
setSelectedMessages(newSelected);
};
const handleSelectMessage = (messageId: string, selected: boolean) => {
const newSelected = new Set(selectedMessages);
selected ? newSelected.add(messageId) : newSelected.delete(messageId);
setSelectedMessages(newSelected);
};
const handleSelectionDelete = () => {
if (props.conversationId)
for (const selectedMessage of selectedMessages)
deleteMessage(props.conversationId, selectedMessage);
setSelectedMessages(new Set());
};
useGlobalShortcut(props.isMessageSelectionMode && GlobalShortcut.Esc, false, false, false, () => {
props.setIsMessageSelectionMode(false);
});
const handleRunExample = (text: string) =>
props.conversationId && props.onExecuteConversation(props.conversationId, [...messages, createDMessage('user', text)]);
// text-diff functionality: find the pair of assistant messages to diff
const { diffMessage, diffText } = React.useMemo(() => {
const [msgB, msgA] = messages.filter(m => m.role === 'assistant').reverse();
if (msgB?.text && msgA?.text && !msgB?.typing) {
const textA = msgA.text, textB = msgB.text;
const lenA = textA.length, lenB = textB.length;
if (lenA > 80 && lenB > 80 && lenA > lenB / 3 && lenB > lenA / 3)
return { diffMessage: msgB, diffText: textA };
}
return { diffMessage: undefined, diffText: undefined };
}, [messages]);
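// The heuristic above only offers a text-diff between the last two assistant
// messages when both exceed 80 chars and their lengths are within 3x of each
// other: e.g. a 300-char regeneration vs a 400-char original qualifies, while
// a 90-char reply vs a 500-char one does not.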
// no content: show the persona selector
const filteredMessages = messages
.filter(m => m.role !== 'system' || showSystemMessages) // hide the System message if the user chooses to
.reverse(); // 'reverse' is because flexDirection: 'column-reverse' to auto-snap-to-bottom
// hide system messages unless the user chose to show them
// NOTE: reverse is because we'll use flexDirection: 'column-reverse' to auto-snap-to-bottom
const filteredMessages = messages.filter(m => m.role !== 'system' || showSystemMessages).reverse();
// when there are no messages, show the purpose selector
if (!filteredMessages.length)
return props.conversationId ? (
<Box sx={props.sx || {}}>
<PersonaSelector conversationId={props.conversationId} runExample={handleAppendMessage} />
<PurposeSelector conversationId={props.conversationId} runExample={handleRunExample} />
</Box>
) : null;
const handleToggleSelected = (messageId: string, selected: boolean) => {
const newSelected = new Set(selectedMessages);
selected ? newSelected.add(messageId) : newSelected.delete(messageId);
setSelectedMessages(newSelected);
};
const handleSelectAllMessages = (selected: boolean) => {
const newSelected = new Set<string>();
if (selected)
for (let message of messages)
newSelected.add(message.id);
setSelectedMessages(newSelected);
};
const handleDeleteSelectedMessages = () => {
if (props.conversationId)
for (let selectedMessage of selectedMessages)
deleteMessage(props.conversationId, selectedMessage);
setSelectedMessages(new Set());
};
// scrollbar style
// const scrollbarStyle: SxProps = {
// '&::-webkit-scrollbar': {
// md: {
// width: 8,
// background: theme.vars.palette.neutral.plainHoverBg,
// },
// },
// '&::-webkit-scrollbar-thumb': {
// background: theme.vars.palette.neutral.solidBg,
// borderRadius: 6,
// },
// '&::-webkit-scrollbar-thumb:hover': {
// background: theme.vars.palette.neutral.solidHoverBg,
// },
// };
return (
<List sx={{
p: 0, ...(props.sx || {}),
@@ -165,29 +109,19 @@ export function ChatMessageList(props: {
{filteredMessages.map((message, idx) =>
props.isMessageSelectionMode ? (
<CleanerMessage
key={'sel-' + message.id}
message={message}
isBottom={idx === 0} remainingTokens={(chatLLM ? chatLLM.contextTokens : 0) - historyTokenCount}
selected={selectedMessages.has(message.id)} onToggleSelected={handleSelectMessage}
/>
) : (
<ChatMessage
key={'msg-' + message.id}
message={message}
diffPreviousText={message === diffMessage ? diffText : undefined}
<ChatMessageSelectable
key={'sel-' + message.id} message={message}
isBottom={idx === 0}
selected={selectedMessages.has(message.id)} onToggleSelected={handleToggleSelected}
/>
) : (
<ChatMessage
key={'msg-' + message.id} message={message}
isBottom={idx === 0}
isImagining={isImagining} isSpeaking={isSpeaking}
onMessageDelete={() => handleMessageDelete(message.id)}
onMessageEdit={newText => handleMessageEdit(message.id, newText)}
onMessageRunFrom={(offset: number) => handleMessageRestartFrom(message.id, offset)}
onTextDiagram={(text: string) => handleTextDiagram(message.id, text)}
onTextImagine={handleTextImagine} onTextSpeak={handleTextSpeak}
/>
onMessageRunFrom={(offset: number) => handleRestartFromMessage(message.id, offset)}
onImagine={handleImagineFromText} />
),
)}
@@ -196,10 +130,9 @@ export function ChatMessageList(props: {
<MessagesSelectionHeader
hasSelected={selectedMessages.size > 0}
isBottom={filteredMessages.length === 0}
sumTokens={historyTokenCount}
onClose={() => props.setIsMessageSelectionMode(false)}
onSelectAll={handleSelectAll}
onDeleteMessages={handleSelectionDelete}
onSelectAll={handleSelectAllMessages}
onDeleteMessages={handleDeleteSelectedMessages}
/>
)}
@@ -1,18 +1,14 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, Button, Checkbox, Grid, IconButton, Input, Stack, Textarea, Typography } from '@mui/joy';
import { Box, Button, Checkbox, Grid, IconButton, Input, Stack, Textarea, Typography, useTheme } from '@mui/joy';
import ClearIcon from '@mui/icons-material/Clear';
import ScienceIcon from '@mui/icons-material/Science';
import SearchIcon from '@mui/icons-material/Search';
import TelegramIcon from '@mui/icons-material/Telegram';
import { Link } from '~/common/components/Link';
import { useChatStore } from '~/common/state/store-chats';
import { useUIPreferencesStore } from '~/common/state/store-ui';
import { SystemPurposeId, SystemPurposes } from '../../../../data';
import { usePurposeStore } from './store-purposes';
import { SystemPurposeId, SystemPurposes } from '../../../data';
import { useChatStore } from '@/common/state/store-chats';
import { usePurposeStore } from '@/common/state/store-purposes';
import { useSettingsStore } from '@/common/state/store-settings';
// Constants for tile sizes / grid width - breakpoints need to be computed here to work around
@@ -27,25 +23,26 @@ const bpMaxWidth = Object.entries(bpTileSize).reduce((acc, [key, value], index)
acc[key] = tileCols[index] * (value + 8 * tileSpacing) - 8 * tileSpacing;
return acc;
}, {} as Record<string, number>);
const bpTileGap = { xs: 0.5, md: 1 };
const bpTileGap = { xs: 2, md: 3 };
// Utility function to get a random array element
const getRandomElement = <T, >(array: T[]): T | undefined =>
const getRandomElement = <T extends any>(array: T[]): T | undefined =>
array.length > 0 ? array[Math.floor(Math.random() * array.length)] : undefined;
/**
* Purpose selector for the current chat. Clicking on any item activates it for the current chat.
*/
export function PersonaSelector(props: { conversationId: string, runExample: (example: string) => void }) {
export function PurposeSelector(props: { conversationId: string, runExample: (example: string) => void }) {
// state
const [searchQuery, setSearchQuery] = React.useState('');
const [filteredIDs, setFilteredIDs] = React.useState<SystemPurposeId[] | null>(null);
const [editMode, setEditMode] = React.useState(false);
// external state
const showFinder = useUIPreferencesStore(state => state.showPurposeFinder);
const theme = useTheme();
const showPurposeFinder = useSettingsStore(state => state.showPurposeFinder);
const { systemPurposeId, setSystemPurposeId } = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
return {
@@ -108,7 +105,7 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
// we show them all if the filter is clear (null)
const unfilteredPurposeIDs = (filteredIDs && showFinder) ? filteredIDs : Object.keys(SystemPurposes);
const unfilteredPurposeIDs = (filteredIDs && showPurposeFinder) ? filteredIDs : Object.keys(SystemPurposes);
const purposeIDs = editMode ? unfilteredPurposeIDs : unfilteredPurposeIDs.filter(id => !hiddenPurposeIDs.includes(id));
const selectedPurpose = purposeIDs.length ? (SystemPurposes[systemPurposeId] ?? null) : null;
@@ -116,7 +113,7 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
return <>
{showFinder && <Box sx={{ p: 2 * tileSpacing }}>
{showPurposeFinder && <Box sx={{ p: 2 * tileSpacing }}>
<Input
fullWidth
variant='outlined' color='neutral'
@@ -130,7 +127,7 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
</IconButton>
)}
sx={{
boxShadow: 'sm',
boxShadow: theme.vars.shadow.sm,
}}
/>
</Box>}
@@ -140,8 +137,8 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
<Box sx={{ maxWidth: bpMaxWidth }}>
<Box sx={{ display: 'flex', flexDirection: 'row', alignItems: 'baseline', justifyContent: 'space-between', gap: 2, mb: 1 }}>
<Typography level='title-sm'>
AI Persona
<Typography level='body2' color='neutral'>
Select an AI purpose
</Typography>
<Button variant='plain' color='neutral' size='sm' onClick={toggleEditMode}>
{editMode ? 'Done' : 'Edit'}
@@ -162,14 +159,14 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
height: bpTileSize,
width: bpTileSize,
...((editMode || systemPurposeId !== spId) ? {
boxShadow: 'md',
...(SystemPurposes[spId as SystemPurposeId]?.highlighted ? {} : { backgroundColor: 'background.surface' }),
boxShadow: theme.vars.shadow.md,
...(SystemPurposes[spId as SystemPurposeId]?.highlighted ? {} : { background: theme.vars.palette.background.level1 }),
} : {}),
}}
>
{editMode && (
<Checkbox
label={<Typography level='body-sm'>show</Typography>}
label={<Typography level='body2'>show</Typography>}
checked={!hiddenPurposeIDs.includes(spId)} onChange={() => toggleHiddenPurposeId(spId)}
sx={{ alignSelf: 'flex-start' }}
/>
@@ -183,35 +180,10 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
</Button>
</Grid>
))}
{/* Button to start the YouTube persona creator */}
<Grid>
<Button
variant='soft' color='neutral'
component={Link} noLinkStyle href='/personas'
sx={{
'--Icon-fontSize': '2rem',
flexDirection: 'column',
fontWeight: 500,
// gap: bpTileGap,
height: bpTileSize,
width: bpTileSize,
border: `1px dashed`,
boxShadow: 'md',
backgroundColor: 'background.surface',
}}
>
<div>
<ScienceIcon />
</div>
<div>
YouTube persona creator
</div>
</Button>
</Grid>
</Grid>
<Typography
level='body-sm'
level='body2'
sx={{
mt: selectedExample ? 1 : 3,
display: 'flex', alignItems: 'center', gap: 1,
@@ -219,16 +191,16 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
'&:hover > button': { opacity: 1 },
}}>
{!selectedPurpose
? 'Oops! No AI persona found for your search.'
? 'Oops! No AI purposes found for your search.'
: (selectedExample
? <>
Example: {selectedExample}
<i>{selectedExample}</i>
<IconButton
variant='plain' color='primary' size='md'
variant='plain' color='neutral' size='md'
onClick={() => props.runExample(selectedExample)}
sx={{ opacity: 0, transition: 'opacity 0.3s' }}
>
<TelegramIcon />
💬
</IconButton>
</>
: selectedPurpose.description
@@ -241,10 +213,7 @@ export function PersonaSelector(props: { conversationId: string, runExample: (ex
minRows={3}
defaultValue={SystemPurposes['Custom']?.systemMessage} onChange={handleCustomSystemMessageChange}
sx={{
backgroundColor: 'background.level1',
'&:focus-within': {
backgroundColor: 'background.popup',
},
background: theme.vars.palette.background.level1,
lineHeight: 1.75,
mt: 1,
}} />
@@ -0,0 +1,47 @@
import * as React from 'react';
import { Option, Select } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import KeyboardArrowDownIcon from '@mui/icons-material/KeyboardArrowDown';
/**
* A Select component that blends in nicely (cleaner, easier on the eyes)
*/
export const AppBarDropdown = <TValue extends string>(props: { value: TValue, items: Record<string, { title: string }>, onChange: (event: any, value: TValue | null) => void, sx?: SxProps }) =>
<Select
variant='solid' color='neutral' size='md'
value={props.value} onChange={props.onChange}
indicator={<KeyboardArrowDownIcon />}
slotProps={{
root: {
sx: {
backgroundColor: 'transparent',
},
},
listbox: {
variant: 'plain', color: 'neutral', size: 'lg',
disablePortal: false,
sx: {
minWidth: 160,
},
},
indicator: {
sx: {
opacity: 0.5,
},
},
}}
sx={{
mx: 0,
/*fontFamily: theme.vars.fontFamily.code,*/
fontWeight: 500,
...(props.sx || {}),
}}
>
{Object.keys(props.items).map((key: string) => (
<Option key={key} value={key}>
{props.items[key].title}
</Option>
))}
</Select>;
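A usage sketch for the AppBarDropdown above (the item map and setModelId setter are hypothetical):

// Hypothetical: a model picker rendered in the application bar
<AppBarDropdown
  value={'gpt-4'}
  items={{ 'gpt-4': { title: 'GPT-4' }, 'gpt-3.5-turbo': { title: 'GPT-3.5 Turbo' } }}
  onChange={(_event, value) => value && setModelId(value)}
/>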
@@ -0,0 +1,31 @@
import * as React from 'react';
import { AppBarDropdown } from './AppBarDropdown';
import { SxProps } from '@mui/joy/styles/types';
/**
* Wrapper for AppBarDropdown that adds a symbol in front of the title
*/
type Props<TValue extends string> = {
value: TValue;
items: Record<string, { title: string, symbol: string }>;
onChange: (event: any, value: TValue | null) => void;
sx?: SxProps;
};
export const AppBarDropdownWithSymbol = <TValue extends string>({ value, items, onChange, sx }: Props<TValue>) => {
const itemsWithSymbol = Object.keys(items).map((key: string) => ({
key,
value: (!!items[key].symbol ? items[key].symbol + ' ' : '') + items[key].title,
}));
return (
<AppBarDropdown
value={value}
items={Object.fromEntries(itemsWithSymbol.map(({ key, value }) => [key, { title: value }]))}
onChange={onChange}
sx={sx}
/>
);
};
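And a matching sketch for AppBarDropdownWithSymbol, where each item's symbol is prepended to its title (the purposes and setter shown here are illustrative):

// Hypothetical: a persona/purpose picker with emoji symbols
<AppBarDropdownWithSymbol
  value={'Developer'}
  items={{ Developer: { title: 'Developer', symbol: '👨‍💻' }, Scientist: { title: 'Scientist', symbol: '🔬' } }}
  onChange={(_event, value) => value && setSystemPurposeId(value)}
/>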

Some files were not shown because too many files have changed in this diff.