Compare commits

..

4 Commits

Author SHA1 Message Date
Enrico Ros 5c44804d50 Desktop: innovate 2024-07-08 15:48:26 -07:00
Enrico Ros 8589376c66 Desktop: improve looks 2024-07-08 15:33:31 -07:00
Enrico Ros d53a8b4941 Desktop: move files around 2024-07-08 15:08:16 -07:00
Enrico Ros af819da623 Desktop: electron build 2024-07-08 15:02:02 -07:00
661 changed files with 24398 additions and 44957 deletions
@@ -51,7 +51,7 @@ To familiarize yourself with the application, the following are the Website and
```
- paste the URL: https://big-agi.com
- drag & drop: [README.md](https://raw.githubusercontent.com/enricoros/big-AGI/v2-dev/README.md)
- drag & drop: [README.md](https://raw.githubusercontent.com/enricoros/big-AGI/main/README.md)
```markdown
I am announcing a new version, 1.2.3.
+9 -25
View File
@@ -12,9 +12,8 @@ name: Create and publish Docker images
on:
push:
branches:
- v2-dev
#- v1-dev # Disabled because this is not needed anymore
#- v1-stable # Disabled as the v* tag is used for stable releases
- main
#- main-stable # Disabled as the v* tag is used for stable releases
tags:
- 'v*' # Trigger on version tags (e.g., v1.7.0)
@@ -28,13 +27,10 @@ jobs:
permissions:
contents: read
packages: write
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -43,7 +39,7 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Log in to the Container registry
uses: docker/login-action@v3
uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
@@ -51,23 +47,18 @@ jobs:
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=raw,value=development,enable=${{ github.ref == 'refs/heads/v2-dev' }} # For v2-dev branch
type=raw,value=stable,enable=${{ github.ref == 'refs/heads/v1-stable' }}
type=raw,value=development,enable=${{ github.ref == 'refs/heads/main' }}
type=raw,value=stable,enable=${{ github.ref == 'refs/heads/main-stable' }}
type=ref,event=tag # Use the tag name as a tag for tag builds
type=semver,pattern={{version}} # Generate semantic versioning tags for tag builds
type=sha,format=short,prefix=sha- # Just in case none of the above applies
labels: |
org.opencontainers.image.title=Big-AGI
org.opencontainers.image.description=Generative AI suite powered by state-of-the-art models
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.documentation=https://big-agi.com
type=sha # Just in case none of the above applies
- name: Build and push Docker image
uses: docker/build-push-action@v6
uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
with:
context: .
file: Dockerfile
@@ -75,11 +66,4 @@ jobs:
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
NEXT_PUBLIC_GA4_MEASUREMENT_ID=${{ secrets.GA4_MEASUREMENT_ID }}
# Enable build cache (future)
#cache-from: type=gha
#cache-to: type=gha,mode=max
# Enable provenance and SBOM (future)
#provenance: true
#sbom: true
build-args: NEXT_PUBLIC_GA4_MEASUREMENT_ID=${{ secrets.GA4_MEASUREMENT_ID }}
+1 -12
View File
@@ -3,10 +3,6 @@
# Frontend Build: ignore API files disabled for this build
/app/**/*.backup
# Supabase - ignored for now
/supabase/
/*.sql
# dependencies
/node_modules
/.pnp
@@ -45,11 +41,4 @@ yarn-error.log*
next-env.d.ts
# other
.idea/
# Ignore k8s/env-secret.yaml
./k8s/env-secret.yaml
/certificates
.env*.local
/.run/dev (ENV).run.xml
/src/modules/3rdparty/aider/scratch*
.idea/
+3 -5
View File
@@ -1,7 +1,8 @@
# Base
FROM node:22-alpine AS base
FROM node:18-alpine AS base
ENV NEXT_TELEMETRY_DISABLED 1
# Dependencies
FROM base AS deps
WORKDIR /app
@@ -10,9 +11,6 @@ WORKDIR /app
COPY package*.json ./
COPY src/server/prisma ./src/server/prisma
# link ssl3 for latest Alpine
RUN sh -c '[ ! -e /lib/libssl.so.3 ] && ln -s /usr/lib/libssl.so.3 /lib/libssl.so.3 || echo "Link already exists"'
# Install dependencies, including dev (release builds should use npm ci)
ENV NODE_ENV development
RUN npm ci
@@ -63,4 +61,4 @@ USER nextjs
EXPOSE 3000
# Start the application
CMD ["next", "start"]
CMD ["next", "start"]
+7 -31
View File
@@ -11,37 +11,20 @@ Stay ahead of the curve with big-AGI. 🚀 Pros & Devs love big-AGI. 🤖
[![Official Website](https://img.shields.io/badge/BIG--AGI.com-%23096bde?style=for-the-badge&logo=vercel&label=launch)](https://big-agi.com)
> 🚀 Big-AGI 2 is launching Q4 2024. Be the first to experience it before the public release.
>
> 👉 [Apply for Early Access](https://y2rjg0zillz.typeform.com/to/ZSADpr5u?utm_source=gh-2&utm_medium=readme&utm_campaign=ea2)
Or fork & run on Vercel
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-AGI&env=OPENAI_API_KEY&envDescription=Backend%20API%20keys%2C%20optional%20and%20may%20be%20overridden%20by%20the%20UI.&envLink=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-AGI%2Fblob%2Fmain%2Fdocs%2Fenvironment-variables.md&project-name=big-AGI)
### New Version
## 👉 [roadmap](https://github.com/users/enricoros/projects/4/views/2) 👉 [installation](docs/installation.md) 👉 [documentation](docs/README.md)
This repository contains two main versions:
> Note: bigger better features (incl. Beam-2) are being cooked outside of `main`.
- Big-AGI 2: next-generation, bringing the most advanced AI experience
- `v2-dev`: V2 development branch, the exciting one, future default
- Big-AGI Stable: as deployed on big-agi.com
- `v1-dev`: V1 development branch (this branch)
- `v1-stable`: Current stable version
[//]: # (big-AGI is an open book; see the **[ready-to-ship and future ideas](https://github.com/users/enricoros/projects/4/views/2)** in our open roadmap)
Note: After the V2 release in Q4, `v2-dev` will become the default branch and `v1-dev` will reach EOL.
### What's New in 1.16.1...1.16.3 · Jun 20, 2024 (patch releases)
### Quick links: 👉 [roadmap](https://github.com/users/enricoros/projects/4/views/2) 👉 [installation](docs/installation.md) 👉 [documentation](docs/README.md)
### What's New in 1.16.1...1.16.8 · Sep 13, 2024 (patch releases)
- 1.16.8: OpenAI ChatGPT-4o Latest (o1-preview and o1-mini are supported in Big-AGI 2)
- 1.16.7: OpenAI support for GPT-4o 2024-08-06
- 1.16.6: Groq support for Llama 3.1 models
- 1.16.5: GPT-4o Mini support
- 1.16.4: 8192 tokens support for Claude 3.5 Sonnet
- 1.16.3: Anthropic Claude 3.5 Sonnet model support
- 1.16.2: Improve web downloads, as text, markdown, or HTML
- 1.16.2: Improve web downloads, as text, markdown, or HTML
- 1.16.2: Proper support for Gemini models
- 1.16.2: Added the latest Mistral model
- 1.16.2: Tokenizer support for gpt-4o
@@ -160,7 +143,7 @@ You can easily configure 100s of AI models in big-AGI:
| **AI models** | _supported vendors_ |
|:--------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Opensource Servers | [LocalAI](https://localai.io/) (multimodal) · [Ollama](https://ollama.com/) |
| Opensource Servers | [LocalAI](https://localai.io/) (multimodal) · [Ollama](https://ollama.com/) · [Oobabooga](https://github.com/oobabooga/text-generation-webui) |
| Local Servers | [LM Studio](https://lmstudio.ai/) |
| Multimodal services | [Azure](https://azure.microsoft.com/en-us/products/ai-services/openai-service) · [Google Gemini](https://ai.google.dev/) · [OpenAI](https://platform.openai.com/docs/overview) |
| Language services | [Anthropic](https://anthropic.com) · [Groq](https://wow.groq.com/) · [Mistral](https://mistral.ai/) · [OpenRouter](https://openrouter.ai/) · [Perplexity](https://www.perplexity.ai/) · [Together AI](https://www.together.ai/) |
@@ -232,13 +215,6 @@ Or bring your API keys and jump straight into our free instance on [big-AGI.com]
[//]: # ([![License](https://img.shields.io/github/license/enricoros/big-agi)](https://github.com/enricoros/big-agi/LICENSE))
## 📜 Licensing
Big-AGI incorporates third-party software components that are subject
to separate license terms. For detailed information about these
components and their respective licenses, please refer to
the [Third-Party Notices](src/modules/3rdparty/THIRD_PARTY_NOTICES.md).
---
2023-2024 · Enrico Ros x [Big-AGI](https://big-agi.com) · Like this project? Leave a star! 💫⭐
2023-2024 · Enrico Ros x [big-AGI](https://big-agi.com) · License: [MIT](LICENSE) · Made with 💙
-24
View File
@@ -1,24 +0,0 @@
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
import { appRouterCloud } from '~/server/trpc/trpc.router-cloud';
import { createTRPCFetchContext } from '~/server/trpc/trpc.server';
const handlerNodeRoutes = (req: Request) => fetchRequestHandler({
endpoint: '/api/cloud',
router: appRouterCloud,
req,
createContext: createTRPCFetchContext,
onError:
process.env.NODE_ENV === 'development'
? ({ path, error }) => console.error(`❌ tRPC-cloud failed on ${path ?? 'unk-path'}: ${error.message}`)
: undefined,
});
// NOTE: the following statement breaks the build on non-pro deployments, and conditionals don't work either
// so we resorted to raising the timeout from 10s to 25s in the vercel.json file instead
// export const maxDuration = 25;
export const runtime = 'nodejs';
export const dynamic = 'force-dynamic';
export { handlerNodeRoutes as GET, handlerNodeRoutes as POST };
-18
View File
@@ -1,18 +0,0 @@
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
import { appRouterEdge } from '~/server/trpc/trpc.router-edge';
import { createTRPCFetchContext } from '~/server/trpc/trpc.server';
const handlerEdgeRoutes = (req: Request) => fetchRequestHandler({
endpoint: '/api/edge',
router: appRouterEdge,
req,
createContext: createTRPCFetchContext,
onError:
process.env.NODE_ENV === 'development'
? ({ path, error }) => console.error(`❌ tRPC-edge failed on ${path ?? 'unk-path'}: ${error.message}`)
: undefined,
});
export const runtime = 'edge';
export { handlerEdgeRoutes as GET, handlerEdgeRoutes as POST };
+2
View File
@@ -0,0 +1,2 @@
export const runtime = 'edge';
export { elevenLabsHandler as POST } from '~/modules/elevenlabs/elevenlabs.server';
+2
View File
@@ -0,0 +1,2 @@
export const runtime = 'edge';
export { llmStreamingRelayHandler as POST } from '~/modules/llms/server/llm.server.streaming';
+19
View File
@@ -0,0 +1,19 @@
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
import { appRouterEdge } from '~/server/api/trpc.router-edge';
import { createTRPCFetchContext } from '~/server/api/trpc.server';
const handlerEdgeRoutes = (req: Request) =>
fetchRequestHandler({
endpoint: '/api/trpc-edge',
router: appRouterEdge,
req,
createContext: createTRPCFetchContext,
onError:
process.env.NODE_ENV === 'development'
? ({ path, error }) => console.error(`❌ tRPC-edge failed on ${path ?? 'unk-path'}: ${error.message}`)
: undefined,
});
export const runtime = 'edge';
export { handlerEdgeRoutes as GET, handlerEdgeRoutes as POST };
+25
View File
@@ -0,0 +1,25 @@
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
import { appRouterNode } from '~/server/api/trpc.router-node';
import { createTRPCFetchContext } from '~/server/api/trpc.server';
const handlerNodeRoutes = (req: Request) =>
fetchRequestHandler({
endpoint: '/api/trpc-node',
router: appRouterNode,
req,
createContext: createTRPCFetchContext,
onError:
process.env.NODE_ENV === 'development'
? ({ path, error }) => console.error(`❌ tRPC-node failed on ${path ?? 'unk-path'}: ${error.message}`)
: undefined,
});
// NOTE: the following statement breaks the build on non-pro deployments, and conditionals don't work either
// so we resorted to raising the timeout from 10s to 25s in the vercel.json file instead
// export const maxDuration = 25;
export const runtime = 'nodejs';
export const dynamic = 'force-dynamic';
export { handlerNodeRoutes as GET, handlerNodeRoutes as POST };
-70
View File
@@ -1,70 +0,0 @@
# AIX dispatch server - API features comparison
This is updated as of 2024-07-09, and includes the latest features and capabilities of the three major AI APIs: Anthropic, Gemini, and OpenAI.
The comparison covers a wide range of features, including function calling, vision, system instructions, etc.
| Feature Category | Specific Feature | Anthropic | Gemini | OpenAI |
|------------------------------------------|-------------------------------|--------------------------------------------------------------------|------------------------------------------------------------------|---------------------------------------------------------------------|
| **Message Structure** |
| | Role types | user, assistant | user, model | user, assistant, system, tool |
| | Named participants | No | No | Yes |
| | Content array | Yes | Yes | Yes |
| **Content Types and Multimodal Support** |
| | Text generation | Yes | Yes | Yes |
| | Image understanding | Yes | Yes | Yes |
| | Audio processing | No | **Yes** | No |
| | Video processing | No | **Yes** | No |
| **Image Handling** |
| | Supported formats | JPEG, PNG, GIF, WebP | JPEG, PNG, WebP, HEIC, HEIF | PNG, JPEG, WebP, non-animated GIF |
| | Max image size | 5MB per image | (20MB per prompt) | 20MB per image |
| | Image detail level | N/A | N/A | **Low, high, auto** |
| | Image resolution | max: 1568x1568 | min: 768x768, max: 3072x3072 | min: 512x512, max: 2048 x 2048 |
| | Token calculation for images | (width * height)/750; max 1,600 | 258 tokens | 85 + 170 * {patches} |
| | Image retention | Deleted after processing | Not specified | Deleted after processing |
| **Audio and Video Handling** |
| | Audio formats | N/A | WAV, MP3, AIFF, AAC, OGG, FLAC | N/A |
| | Video formats | N/A | MP4, MPEG, MOV, AVI, MPG, WebM, WMV, 3GPP | N/A |
| **System Instructions and Tool Use** |
| | System instructions | Yes (array of text blocks) | Yes (parts array) | Yes (as system message) |
| **Function/Tool Handling** |
| | Parallel tool calls | No | No | **Yes** |
| | Tool Declaration | Defined in `tools` array | Defined in `tools` array | Defined in `tools` array |
| | FC name restrictions | Yes | Yes (max 63 chars) | Yes (max 64 chars) |
| | FC declaration | name, description, input_schema | name, description, parameters | name, description, parameters |
| | FC options structure | JSON Schema for input | Object with properties | JSON Schema for parameters |
| | FC Force invocation | Via `tool_choice` parameter | Via `toolConfig` parameter | Via `tool_choice` parameter |
| | FC Model invocation | Model generates a `tool_use` block with predicted parameters | Generates a `functionCall` part with predicted parameters | Generates a message.`tool_calls` item with predicted arguments |
| | FC Execution | Client-side | Client-side | Client-side |
| | FC Result injection | Client appends a `user` message with a `tool_result` content block | Client appends a `function` message with `functionResponse` part | Client sends a new `tool` message with `tool_call_id` and `content` |
| | Built-in Code execution | No | **Yes** | No |
| | Tool use with vision | Yes | Yes | Yes |
| **Generation Configuration** |
| | temperature | Yes | Yes | Yes |
| | max_tokens | Yes | Yes | Yes |
| | stop_sequences | Yes | Yes | Yes |
| | top_k | Yes | Yes | **No** |
| | top_p | Yes | Yes | Yes |
| | seed | No | No | **Yes** |
| | Multiple candidates | No | No | Yes (with 'n' parameter, breaks streaming?) |
| **Streaming and Response Structure** |
| | Streaming support | Yes | Yes | Yes |
| | Streaming initiation | stream=true | streamGenerateContent path | stream=true |
| | Streaming event types | **Multiple specific types** | Not specified | Single delta type |
| | Response container | content (array) | candidates (array) | choices (array) |
| **Usage Metrics and Error Handling** |
| | Token counts | Yes | Yes | Yes |
| | Detailed token breakdown | input, output | prompt, cached, candidates, total | prompt, completion, total |
| | Usage in stream | No | No | **Optional** |
| | Error handling in response | Not specified | Not specified | **Yes (undocumented)** |
| | Error handling in stream | Not specified | Not specified | **Yes (undocumented)** |
| **Advanced Features** |
| | JSON mode | **Partial (via structured prompts)** | **Yes (responseMimeType)** | **Yes** |
| | Output consistency techniques | **Yes (multiple methods)** | Not specified | Not specified |
| | Logprobs | No | No | **Yes (disabled in schema)** |
| | System fingerprint | No | No | **Yes** |
| | Semantic caching | No | **Yes** | No |
| | Assistant prefill | **Yes** | No | No |
| | Preferred formatting | **XML tags, JSON** | Not specified | Markdown |
| **Safety and Compliance** |
| | Safety settings in request | **Stop sequences** | **Detailed category-based** | **Moderation API** |
| | Safety feedback in response | Yes | Yes | Not specified |
+29 -32
View File
@@ -1,62 +1,59 @@
# Big-AGI Documentation
# big-AGI Documentation
Information you need to get started, configure, and use big-AGI productively.
Find all the information you need to get started, configure, and effectively use big-AGI.
## Getting Started
[//]: # (## Quick Start)
Guides for basic big-AGI features:
[//]: # (- **[Introduction](big-agi.md)**: Overview of big-AGI's features.)
- **[Enabling Microphone for Speech Recognition](help-feature-microphone.md)**: Instructions to
allow speech recognition in browsers and apps.
## Configuration Guides
## AI Model Configuration
Detailed guides to configure your big-AGI interface and models.
Detailed guides to configure AI models and advanced features in big-AGI.
👉 The following applies to the users of big-AGI.com, as the public instance is empty and to be configured by the user.
> 👉 The following applies to users of big-AGI.com, as the public instance is empty and requires user configuration.
- **Cloud AI Services**:
- **Cloud Model Services**:
- **[Azure OpenAI](config-azure-openai.md)**
- **[OpenRouter](config-openrouter.md)**
- Easy API key setup: **Anthropic**, **Deepseek**, **Google AI**, **Groq**, **Mistral**, **OpenAI**, **OpenPipe**, **Perplexity**, **TogetherAI**, **xAI**
- easy API key: **Anthropic**, **Google AI**, **Groq**, **Mistral**, **OpenAI**, **Perplexity**, **TogetherAI**
- **Local AI Integrations**:
- **Local Model Servers**:
- **[LocalAI](config-local-localai.md)**
- **[LM Studio](config-local-lmstudio.md)**
- **[Ollama](config-local-ollama.md)**
- **[Oobabooga](config-local-oobabooga.md)**
- **Enhanced AI Features**:
- **[Web Browsing](config-feature-browse.md)**: Enable web page download through third-party services or your own cloud (advanced)
- **Web Search**: Google Search API (see '[Environment Variables](environment-variables.md)')
- **Image Generation**: DALL·E 3 and 2, or Prodia API for Stable Diffusion XL
- **Voice Synthesis**: ElevenLabs API for voice generation
- **Advanced Feature Configuration**:
- **[Browse](config-feature-browse.md)**: Enable web page download through third-party services or your own cloud (advanced)
- **ElevenLabs API**: Voice and custom voice generation, only requires their API key
- **Google Search API**: guide not yet available, see the Google options in '[Environment Variables](environment-variables.md)'
- **Prodia API**: Stable Diffusion XL image generation, only requires their API key, alternative to DALL·E
## Deployment & Customization
## Deployment
> 👉 The following applies to developers and experts who deploy their own big-AGI instance.
System integrators, administrators, whitelabelers: instead of using the public big-AGI instance on get.big-agi.com, you can deploy your own instance.
For deploying a custom big-AGI instance:
Step-by-step deployment and system configuration instructions.
- **[Installation Guide](installation.md)**: Set up your own big-AGI instance
- Source build or pre-built options
- Local, cloud, or on-premises deployment
- **[Installation](installation.md)**: Set up your own instance of big-AGI and related products
- build from source or use pre-built
- locally, in the public cloud, or on your own servers
- **Advanced Setup**:
- **[Source Code Customization Guide](customizations.md)**: Modify the source code
- **[Access Control](deploy-authentication.md)**: Optional, add basic user authentication
- **Advanced Customizations**:
- **[Source code alterations guide](customizations.md)**: source code primer and alterations guidelines
- **[Basic Authentication](deploy-authentication.md)**: Optional, adds a username and password wall
- **[Database Setup](deploy-database.md)**: Optional, enables "Chat Link Sharing"
- **[Reverse Proxy](deploy-reverse-proxy.md)**: Optional, enables custom domains and SSL
- **[Environment Variables](environment-variables.md)**: Pre-configures models and services
- **[Environment Variables](environment-variables.md)**: 📌 Pre-configures models and services
## Community & Support
## Support and Community
Connect with the growing big-AGI community:
Join our community or get support:
- Visit our [GitHub repository](https://github.com/enricoros/big-AGI) for source code and issue tracking
- Check the latest updates and features on [Changelog](changelog.md) or the in-app [News](https://get.big-agi.com/news)
- Connect with us and other users on [Discord](https://discord.gg/MkH4qj2Jp9) for discussions, help, and sharing your experiences with big-AGI
Thank you for choosing big-AGI. We're excited to give you the best tools to amplify yourself.
Thank you for choosing big-AGI. We're excited to see what you'll build.
+5 -9
View File
@@ -10,15 +10,10 @@ by release.
- milestone: [1.17.0](https://github.com/enricoros/big-agi/milestone/17)
- work in progress: [big-AGI open roadmap](https://github.com/users/enricoros/projects/4/views/2), [help here](https://github.com/users/enricoros/projects/4/views/4)
### What's New in 1.16.1...1.16.8 · Sep 13, 2024 (patch releases)
### What's New in 1.16.1...1.16.3 · Jun 20, 2024 (patch releases)
- 1.16.8: OpenAI ChatGPT-4o Latest (o1-preview and o1-mini are supported in Big-AGI 2)
- 1.16.7: OpenAI support for GPT-4o 2024-08-06
- 1.16.6: Groq support for Llama 3.1 models
- 1.16.5: GPT-4o Mini support
- 1.16.4: 8192 tokens support for Claude 3.5 Sonnet
- 1.16.3: Anthropic Claude 3.5 Sonnet model support
- 1.16.2: Improve web downloads, as text, markdown, or HTML
- 1.16.2: Improve web downloads, as text, markdown, or HTML
- 1.16.2: Proper support for Gemini models
- 1.16.2: Added the latest Mistral model
- 1.16.2: Tokenizer support for gpt-4o
@@ -138,7 +133,7 @@ https://github.com/enricoros/big-AGI/assets/1590910/a6b8e172-0726-4b03-a5e5-10cf
- **Overheat LLMs**: Push the creativity with higher LLM temperatures. [#256](https://github.com/enricoros/big-agi/issues/256)
- **Model Options Shortcut**: Quick adjust with `Ctrl+Shift+O`
- Optimized Voice Input and Performance
- Latest Ollama models
- Latest Ollama and Oobabooga models
- For developers: **Password Protection**: HTTP Basic Auth. [Learn How](https://github.com/enricoros/big-agi/blob/main/docs/deploy-authentication.md)
### What's New in 1.6.0 - Nov 28, 2023 · Surf's Up
@@ -170,7 +165,7 @@ For Developers:
first request to get the configuration. See
https://github.com/enricoros/big-agi/blob/main/src/modules/backend/backend.router.ts.
- CloudFlare developers: please change the deployment command to
`rm app/api/cloud/[trpc]/route.ts && npx @cloudflare/next-on-pages@1`,
`rm app/api/trpc-node/[trpc]/route.ts && npx @cloudflare/next-on-pages@1`,
as we transitioned to the App router in NextJS 14. The documentation in
[docs/deploy-cloudflare.md](../docs/deploy-cloudflare.md) is updated
@@ -187,6 +182,7 @@ For Developers:
- **Camera OCR** - real-world AI - take a picture of a text, and chat with it
- **Anthropic models** support, e.g. Claude
- **Backup/Restore** - save chats, and restore them later
- **[Local model support with Oobabooga server](../docs/config-local-oobabooga)** - run your own LLMs!
- **Flatten conversations** - conversations summarizer with 4 modes
- **Fork conversations** - create a new chat, to try with different endings
- New commands: /s to add a System message, and /a for an Assistant message
+1 -1
View File
@@ -54,7 +54,7 @@ If the running LocalAI instance is configured with a [Model Gallery](https://loc
At the time of writing, LocalAI does not publish the model `context window size`.
Every model is assumed to be capable of chatting, and with a context window of 4096 tokens.
Please update the [src/modules/llms/transports/server/openai/models/models.data.ts](../src/modules/llms/server/openai/models/models.data.ts)
Please update the [src/modules/llms/transports/server/openai/models.data.ts](../src/modules/llms/server/openai/models.data.ts)
file with the mapping information between LocalAI model IDs and names/descriptions/tokens, etc.
# 🤝 Support
+1 -2
View File
@@ -81,8 +81,7 @@ Then, edit the nginx configuration file `/etc/nginx/sites-enabled/default` and a
proxy_buffering off;
proxy_cache off;
# Longer timeouts (1hr)
keepalive_timeout 3600;
# Longer timeouts
proxy_read_timeout 3600;
proxy_connect_timeout 3600;
proxy_send_timeout 3600;
+61
View File
@@ -0,0 +1,61 @@
# Local LLM Integration with `text-web-ui` :llama:
Integrate local Large Language Models (LLMs) with
[oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui),
a specialized interface that includes a custom variant of the OpenAI API for a smooth integration process.
_Last updated on Dec 7, 2023_
### Components
The implementation of local LLMs involves the following components:
* **text-generation-webui**: A Python application with a Gradio web UI for operating Large Language Models.
* **Local Large Language Models "LLMs"**: Use large language models on your personal computer with consumer-grade GPUs or CPUs.
* **big-AGI**: An LLM UI that offers features such as Personas, OCR, Voice Support, Code Execution, AGI functions, and more.
## Instructions
This guide assumes that **big-AGI** is already installed on your system. Note that the text-generation-webui IP address must be accessible from the server running **big-AGI**.
### Text-web-ui Installation & Configuration:
1. Install [text-generation-webui](https://github.com/oobabooga/text-generation-webui#Installation):
- Follow the instructions in the official page (basically clone the repo and run a script) [~10 minutes]
- Stop the Web UI as we need to modify the startup flags to enable the OpenAI API
2. Enable the **openai extension**
- Edit `CMD_FLAGS.txt`
- Make sure that `--listen --api` is present and uncommented
3. Restart text-generation-webui
- Double-click on "start"
- You should see something like:
```
2023-12-07 21:51:21 INFO:Loading the extension "openai"...
2023-12-07 21:51:21 INFO:OpenAI-compatible API URL:
http://0.0.0.0:5000
...
INFO: Uvicorn running on http://0.0.0.0:5000 (Press CTRL+C to quit)
Running on local URL: http://0.0.0.0:7860
```
- This shows that:
- The Web UI is running on port 7860: http://127.0.0.1:7860
- **The OpenAI API is running on port 5000: http://127.0.0.1:5000**
4. Load your first model
- Open the text-generation-webui at [127.0.0.1:7860](http://127.0.0.1:7860/)
- Switch to the **Model** tab
- Download, for instance, `TheBloke/Llama-2-7B-Chat-GPTQ`
- Select the model once it's loaded
### Integrating text-web-ui with big-AGI:
1. Integrating Text-Generation-WebUI with big-AGI:
- Go to Models > Add a model source of type: **Oobabooga**
- Enter the address: `http://127.0.0.1:5000`
- If running remotely, replace 127.0.0.1 with the IP of the machine. Make sure to use the **IP:Port** format
- Load the models
- The active model must be selected and LOADED on the text-generation-webui as it doesn't support model switching or parallel requests.
- Select model & Chat
![config-oobabooga-0.png](pixels/config-oobabooga-0.png)
Enjoy the privacy and flexibility of local LLMs with `big-AGI` and `text-generation-webui`!
-1
View File
@@ -61,7 +61,6 @@ Test your application thoroughly using local development (refer to README.md for
- [deploy-cloudflare.md](deploy-cloudflare.md): for Cloudflare Workers deployment
- [deploy-docker.md](deploy-docker.md): for Docker deployment instructions and examples
- [deploy-k8s.md](deploy-k8s.md): for Kubernetes deployment instructions and examples
## Debugging
+1 -1
View File
@@ -19,7 +19,7 @@ To enable it in `big-AGI`, you **must manually build the application**:
- Build `big-AGI` with HTTP authentication enabled:
- Clone the repository
- Rename `middleware_BASIC_AUTH.ts` to `middleware.ts`
- Build: usual simple build procedure (e.g. [Deploy manually](installation.md#Local-Production-build) or [Deploying with Docker](deploy-docker.md))
- Build: usual simple build procedure (e.g. [Deploy manually](../README.md#-deploy-manually) or [Deploying with Docker](deploy-docker.md))
- Configure the following [environment variables](environment-variables.md) before launching `big-AGI`:
```dotenv
+1 -1
View File
@@ -34,7 +34,7 @@ Fork the repository to your personal GitHub account.
2. On this page, set your **Project name**, **Production branch** (e.g., main), and your Build settings
3. Choose `Next.js` from the **Framework preset** dropdown menu
4. Set a custom **Build Command**:
- `rm app/api/cloud/[trpc]/route.ts && npx @cloudflare/next-on-pages@1`
- `rm app/api/trpc-node/[trpc]/route.ts && npx @cloudflare/next-on-pages@1`
- see the tradeoffs for this deletion on the notice at the top
5. Keep the **Build output directory** as default
6. Click the **Save and Deploy** button
-11
View File
@@ -59,17 +59,6 @@ To make local services running on your host machine accessible to a Docker conta
<br/>
### Reverse Proxy Configuration
A reverse proxy is a server that sits in front of big-AGI's container and forwards web
requests to it. It is often used to run multiple web applications, expose them to the internet,
and increase security.
If you're deploying big-AGI behind a reverse proxy, you may want to see
our [Reverse Proxy Deployment Guide](deploy-reverse-proxy.md) for more information.
<br/>
### More Information
The [`Dockerfile`](../Dockerfile) describes how to create a Docker image. It establishes a Node.js environment,
-85
View File
@@ -1,85 +0,0 @@
# Deploy `big-AGI` with Kubernetes ☸️
In this tutorial, we will guide you through the process of deploying big-AGI
in a Kubernetes environment using the kubectl command-line tool.
## First Deployment
### Step 1: Clone the big-AGI repository
```bash
$ git clone https://github.com/enricoros/big-agi
$ cd ./big-agi/docs/k8s
```
### Step 2: Create the namespace
```bash
$ kubectl create namespace ns-big-agi
```
### Step 3: Fill in the key information into env-secret.yaml
All variables are optional. By default, Kubernetes Secrets use Base64 for
encoding/decoding, so please don't do a git commit after filling in the keys,
to avoid leaking sensitive information.
We provide an empty `env-secret.yaml` file as a template.
You can fill in the necessary information using a text editor.
```bash
$ nano env-secret.yaml
```
### Step 4: Deploying Kubernetes Resources
```bash
$ kubectl apply -f big-agi-deployment.yaml -f env-secret.yaml
```
### Step 5: Verifying the Resource Statuses
```bash
$ kubectl -n ns-big-agi get svc,pod,deployment
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/svc-big-agi ClusterIP 10.0.198.118 <none> 3000/TCP 63m
NAME READY STATUS RESTARTS AGE
pod/deployment-big-agi-xxxxxxxx-yyyyy 1/1 Running 0 39m
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/deployment-big-agi 1/1 1 1 63m
```
### Step 6: Testing the Service
You can test the service by port-forwarding the service to your local machine:
```bash
$ kubectl -n ns-big-agi port-forward service/svc-big-agi 3000
Forwarding from 127.0.0.1:3000 -> 3000
Forwarding from [::1]:3000 -> 3000
```
Now you can access the service at `http://localhost:3000`, and you should see the big-AGI homepage.
## Updating big-AGI
To update big-AGI to the latest version:
1. Pull the latest changes from the repository:
```bash
$ git pull origin main
```
2. Apply the updated deployment:
```bash
$ kubectl apply -f big-agi-deployment.yaml
```
This will trigger a rolling update of the deployment with the latest image.
**Note**: If you're deploying big-AGI behind a reverse proxy, you may need to configure
your proxy to support streaming. See our [Reverse Proxy Deployment Guide](deploy-reverse-proxy.md) for more information.
Note: For production use, consider setting up an Ingress Controller or Load Balancer instead of using port-forward.
-58
View File
@@ -1,58 +0,0 @@
# Advanced: Deploying big-AGI behind a Reverse Proxy
Note: if you don't have a reverse proxy set up, you can skip this guide.
If you're deploying big-AGI behind a reverse proxy, you may want to configure your proxy to support streaming output.
This guide provides instructions on how to configure your reverse proxy to support streaming output from big-AGI.
This is for advanced deployments, and you should have a basic understanding of how reverse proxies work.
## Nginx Configuration
If you're using Nginx as your reverse proxy, add the following configuration to your server block:
```nginx
server {
listen 80;
server_name your-domain.com;
location / {
# ...your specific proxy_pass configuration, example below...
proxy_pass http://localhost:3000; # Assuming big-AGI is running on port 3000
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
# ...
# Important: Disable buffering for the streaming responses (SSE)
chunked_transfer_encoding on; # Turn on chunked transfer encoding
proxy_buffering off; # Turn off proxy buffering
proxy_cache off; # Turn off caching
tcp_nodelay on; # Turn on TCP NODELAY option, disable delay ACK algorithm
tcp_nopush on; # Turn on TCP NOPUSH option, disable Nagle algorithm
# Important: Longer timeouts (5 min)
keepalive_timeout 300;
proxy_connect_timeout 300;
proxy_read_timeout 300;
proxy_send_timeout 300;
}
}
```
This configuration disables caching and buffering, enables chunked transfer encoding, and adjusts TCP settings to optimize for streaming content.
## Troubleshooting
If you're experiencing issues with streaming not working, especially when deploying behind a reverse proxy,
ensure that your proxy is configured to support streaming output as described above.
## Additional Resources
- For Docker deployments, see our [Docker Deployment Guide](deploy-docker.md)
- For Kubernetes deployments, see our [Kubernetes Deployment Guide](deploy-k8s.md)
- For general installation instructions, see our [Installation Guide](installation.md)
If you continue to experience issues, please reach out to our [community support channels](../README.md#-get-involved).
+29 -34
View File
@@ -27,41 +27,38 @@ AZURE_OPENAI_API_ENDPOINT=
AZURE_OPENAI_API_KEY=
ANTHROPIC_API_KEY=
ANTHROPIC_API_HOST=
DEEPSEEK_API_KEY=
GEMINI_API_KEY=
GROQ_API_KEY=
LOCALAI_API_HOST=
LOCALAI_API_KEY=
MISTRAL_API_KEY=
OLLAMA_API_HOST=
OPENPIPE_API_KEY=
OPENROUTER_API_KEY=
PERPLEXITY_API_KEY=
TOGETHERAI_API_KEY=
XAI_API_KEY=
# Model Observability: Helicone
HELICONE_API_KEY=
# Browse
PUPPETEER_WSS_ENDPOINT=
# Search
GOOGLE_CLOUD_API_KEY=
GOOGLE_CSE_ID=
# Text-To-Speech: ElevenLabs
# Text-To-Speech
ELEVENLABS_API_KEY=
ELEVENLABS_API_HOST=
ELEVENLABS_VOICE_ID=
# Text-To-Image: Prodia
# Text-To-Image
PRODIA_API_KEY=
# Google Custom Search
GOOGLE_CLOUD_API_KEY=
GOOGLE_CSE_ID=
# Browse
PUPPETEER_WSS_ENDPOINT=
# Backend Analytics
BACKEND_ANALYTICS=
# Backend HTTP Basic Authentication (see `deploy-authentication.md` for turning on authentication)
HTTP_BASIC_AUTH_USERNAME=
HTTP_BASIC_AUTH_PASSWORD=
# Frontend variables
NEXT_PUBLIC_GA4_MEASUREMENT_ID=
NEXT_PUBLIC_PLANTUML_SERVER_URL=
@@ -83,27 +80,24 @@ For Database configuration see [deploy-database.md](deploy-database.md).
The following variables when set will enable the corresponding LLMs on the server-side, without
requiring the user to enter an API key
| Variable | Description | Required |
|-----------------------------|----------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------|
| `OPENAI_API_KEY` | API key for OpenAI | Recommended |
| `OPENAI_API_HOST` | Changes the backend host for the OpenAI vendor, to enable platforms such as Helicone and CloudFlare AI Gateway | Optional |
| `OPENAI_API_ORG_ID` | Sets the "OpenAI-Organization" header field to support organization users | Optional |
| `AZURE_OPENAI_API_ENDPOINT` | Azure OpenAI endpoint - host only, without the path | Optional, but if set `AZURE_OPENAI_API_KEY` must also be set |
| `AZURE_OPENAI_API_KEY` | Azure OpenAI API key, see [config-azure-openai.md](config-azure-openai.md) | Optional, but if set `AZURE_OPENAI_API_ENDPOINT` must also be set |
| `ANTHROPIC_API_KEY` | The API key for Anthropic | Optional |
| `ANTHROPIC_API_HOST` | Changes the backend host for the Anthropic vendor, to enable platforms such as AWS Bedrock | Optional |
| `DEEPSEEK_API_KEY` | The API key for Deepseek AI | Optional |
| `GEMINI_API_KEY` | The API key for Google AI's Gemini | Optional |
| `GROQ_API_KEY` | The API key for Groq Cloud | Optional |
| `LOCALAI_API_HOST` | Sets the URL of the LocalAI server, or defaults to http://127.0.0.1:8080 | Optional |
| `LOCALAI_API_KEY` | The (Optional) API key for LocalAI | Optional |
| `MISTRAL_API_KEY` | The API key for Mistral | Optional |
| `OLLAMA_API_HOST` | Changes the backend host for the Ollama vendor. See [config-local-ollama.md](config-local-ollama.md) | |
| `OPENPIPE_API_KEY` | The API key for OpenPipe | Optional |
| `OPENROUTER_API_KEY` | The API key for OpenRouter | Optional |
| `PERPLEXITY_API_KEY` | The API key for Perplexity | Optional |
| `TOGETHERAI_API_KEY` | The API key for Together AI | Optional |
| `XAI_API_KEY` | The API key for xAI | Optional |
| Variable | Description | Required |
|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------|
| `OPENAI_API_KEY` | API key for OpenAI | Recommended |
| `OPENAI_API_HOST` | Changes the backend host for the OpenAI vendor, to enable platforms such as Helicone and CloudFlare AI Gateway | Optional |
| `OPENAI_API_ORG_ID` | Sets the "OpenAI-Organization" header field to support organization users | Optional |
| `AZURE_OPENAI_API_ENDPOINT` | Azure OpenAI endpoint - host only, without the path | Optional, but if set `AZURE_OPENAI_API_KEY` must also be set |
| `AZURE_OPENAI_API_KEY` | Azure OpenAI API key, see [config-azure-openai.md](config-azure-openai.md) | Optional, but if set `AZURE_OPENAI_API_ENDPOINT` must also be set |
| `ANTHROPIC_API_KEY` | The API key for Anthropic | Optional |
| `ANTHROPIC_API_HOST` | Changes the backend host for the Anthropic vendor, to enable platforms such as [config-aws-bedrock.md](config-aws-bedrock.md) | Optional |
| `GEMINI_API_KEY` | The API key for Google AI's Gemini | Optional |
| `GROQ_API_KEY` | The API key for Groq Cloud | Optional |
| `LOCALAI_API_HOST` | Sets the URL of the LocalAI server, or defaults to http://127.0.0.1:8080 | Optional |
| `LOCALAI_API_KEY` | The (Optional) API key for LocalAI | Optional |
| `MISTRAL_API_KEY` | The API key for Mistral | Optional |
| `OLLAMA_API_HOST`           | Changes the backend host for the Ollama vendor. See [config-local-ollama.md](config-local-ollama.md)                            |                                                                   |
| `OPENROUTER_API_KEY` | The API key for OpenRouter | Optional |
| `PERPLEXITY_API_KEY` | The API key for Perplexity | Optional |
| `TOGETHERAI_API_KEY` | The API key for Together AI | Optional |
### LLM Observability: Helicone
@@ -135,6 +129,7 @@ Enable the app to Talk, Draw, and Google things up.
| **Browse** | |
| `PUPPETEER_WSS_ENDPOINT`    | Puppeteer WebSocket endpoint - used for browsing (page downloading), etc.                                           |
| **Backend** | |
| `BACKEND_ANALYTICS` | Semicolon-separated list of analytics flags (see backend.analytics.ts). Flags: `domain` logs the responding domain. |
| `HTTP_BASIC_AUTH_USERNAME` | See the [Authentication](deploy-authentication.md) guide. Username for HTTP Basic Authentication. |
| `HTTP_BASIC_AUTH_PASSWORD` | Password for HTTP Basic Authentication. |
-42
View File
@@ -1,42 +0,0 @@
# Big-AGI Advanced Tips & Tricks
> 🚨 This file is not meant for publication, and it's just been created as a handbook with tips
> and tricks to make Big-AGI more efficient and productive. 🚨
Welcome to the advanced tips and tricks guide for Big-AGI. This document will help you make the most of the platform's existing features.
---
## Hidden Gems
- **Shift + Double-Click** on a chat message to **edit** it.
- **Shift + Trash Icon** to **delete** chats and messages without confirmation.
- also applies elsewhere: delete Attachments, etc.
- **Shift + Click** on **New Chat** to create an incognito chat.
- Drag a big-AGI saved chat into Big-AGI to load (or attach) it.
## Not-so-obvious Shortcuts
- When sending a message:
- Enter is for newlines
- **Shift + Enter** to send the message.
- **Ctrl + Enter** to **Beam** the message.
- **Alt/Option + Enter** to send the message without an answer.
- When editing a message:
- **Ctrl + Enter** to **Save** the changes.
- **Shift + Ctrl + Enter** to **Save & Regenerate**.
- Scroll between messages:
- **Ctrl + Up/Down** to scroll between **messages** and/or **Beams**.
## Worth the Effort:
- [LiveFile](help-feature-livefile.md) works on **Chrome**: Pair and synchronize your documents and code blocks with files on your local system: refresh, save, update them.
## Best User Hacks:
-
---
Note: this document is just at the beginning. It's here so we can capture
the best tips over time.
-167
View File
@@ -1,167 +0,0 @@
# LiveFile: Synchronize Your Documents with Local Files
## Introduction
**LiveFile** is a powerful feature in big-AGI that allows you to **pair and synchronize
your documents and code blocks** with files on your local system.
This feature enables a **two-way connection between big-AGI and your local files on disk**,
saving you time and effort.
With LiveFile, you can:
- **Pair** documents and code blocks with local files.
- **Monitor** changes in local files and update content in big-AGI.
- **Refresh** chat attachments with the latest content.
- **Save** edits made in big-AGI back to your local files.
- **Store** AI-generated code and content.
---
## Requirements
- **Supported Browsers:**
- **Google Chrome** (desktop)
- **Microsoft Edge** (desktop)
- **Operating Systems:**
- **Desktop platforms only**
- **Note:** Mobile devices (iOS and Android) are **not supported** due to browser limitations.
- **File Types:**
- Designed for **text-based files** (e.g., `.txt`, `.md`, `.js`, `.py`).
- **Performance:**
- Can handle **dozens of files efficiently**.
- **Limitations:**
- **File Size Limit**:
- Supports text files up to **10 MB**.
- **Pairing Persistence:**
- LiveFile connections **do not persist across sessions**.
- After reloading the page, you will need to re-pair your files.
- **Saving Overwrites:**
- Saving changes in big-AGI will **overwrite the entire file**.
- Use external tools for version control or incremental backups.
---
## Enabling LiveFile
LiveFile can be enabled automatically or manually in your Big-AGI workflow.
### Automatic Pairing
When you:
- **Attach**, **drop**, or **paste** a file into a chat message,
LiveFile is **automatically enabled** for that attachment. This means you can start
monitoring and reloading changes without any additional setup.
### Manual Pairing
For existing attachments or code blocks that:
- **Do not have LiveFile enabled** (e.g., created on other devices),
- **Are AI-generated code snippets without an associated file**,
You can manually pair them with a local file.
#### Pairing Attachments
1. **Select the Attachment:**
- Click on the attachment in the chat to view it in the previewer.
2. **Initiate Pairing:**
- Click on **"Pair File"** (🔗).
- If you have open LiveFiles, they will be listed for easy selection.
- Alternatively, you can select a new file from your local system.
3. **Grant Permissions**
- When prompted, allow big-AGI to access the file.
#### Pairing Code Blocks
1. **Access Code Block Options:**
- Click on the code block to reveal the header with options.
2. **Initiate Pairing:**
- Click the **"Pair File"** button (🔗).
- Select from your open LiveFiles or choose a new file.
3. **Confirm Pairing:**
- Grant permission when prompted.
---
## Using LiveFile
### Monitoring Changes
- **Automatic Monitoring:**
- LiveFile watches for changes in your paired local files.
- If the file is modified outside of big-AGI, you'll be shown the changes in the LiveFile bar.
- There is also a **"Replace with File"** option to manually load the latest content and see the changes.
- **Refreshing Content:**
- Click **"Replace with File"** (🔄) to load the latest content from the paired file into big-AGI.
### Saving Edits Back to Paired Files
- **Editing Attachments or Code Blocks:**
- Modify the content directly within big-AGI.
- Attachments: Click on the attachment to open the previewer and click on "Edit" to make changes.
- Code Blocks: Select "Edit" on the chat message to update code blocks.
- **Saving Changes:**
- Click **"Save to File"** (💾) to overwrite the local file with your changes.
- **Note:** This action overwrites the entire file. Ensure this is what you want before proceeding.
---
## Best Practices
- **Monitor External Changes:**
- Refresh content in big-AGI if the local file has been modified outside the application.
- **Use a Version Control System:**
- For critical files, consider using Git or other version control systems to track and monitor changes, authorship, and history.
---
## Troubleshooting
- **LiveFile Options Not Visible:**
- Ensure you are using a **supported desktop browser**.
- Check that you have the latest version of big-AGI.
- **Permission Issues:**
- Confirm that you granted big-AGI permission to access your files.
- Check your browser's settings to ensure file access is allowed.
---
## Technical Details
LiveFile uses the [File System Access API](https://developer.mozilla.org/en-US/docs/Web/API/File_System_Access_API) to
interact with your local files securely. It leverages the [browser-fs-access](https://github.com/GoogleChromeLabs/browser-fs-access) library,
an open-source project by Google Chrome Labs, which provides an easy interface to the File System Access API with fallbacks for broader browser support.
- **Security:**
- Access to files requires explicit user permission.
- **Performance:**
- Designed to handle dozens of files efficiently (tested on hundreds).
- Works with the Big-AGI attachment system to recursively add directories.
- **Browser Support:**
- Fully supported on **Google Chrome** and **Microsoft Edge** desktop versions.
---
## Another Big-AGI First!
You can significantly boost your productivity and streamline your workflow within big-AGI
by understanding how to utilize LiveFile's features fully.
This Feature is in Beta as there are a few limitations and improvements to be made.
Join us in enjoying and enhancing this feature on [big-AGI.com](https://big-agi.com), or
[GitHub](https://github.com/enricoros/big-AGI) for support and [Discord](https://discord.gg/MkH4qj2Jp9)
to share the love.
-141
View File
@@ -1,141 +0,0 @@
# Enabling Microphone Access for Speech Recognition
This guide explains how to enable microphone access for speech recognition in various browsers and mobile devices.
Ensuring microphone access is essential for using voice features in applications like big-AGI.
## Desktop Browsers
### Google Chrome (All Platforms, recommended)
1. Open the website (e.g., big-AGI) in Chrome.
2. Click the **lock icon** in the address bar.
3. In the dropdown, find **"Microphone"**.
- Set it to **"Allow"**.
4. If "Microphone" isn't listed:
- Click on **"Site settings"**.
- Find **"Microphone"** in the permissions list.
- Change the setting to **"Allow"**.
5. **Refresh** the page.
### Safari (macOS)
**[Watch the video tutorial: How to enable Speech Recognition in Safari](https://vimeo.com/1010342201)**
If you're seeing a "Speech Recognition permission denied" error, follow these steps:
1. Open **System Settings**.
- Go to **Privacy & Security** > **Speech Recognition**.
- Enable Safari in the list of allowed applications.
- Quit & Open Safari.
2. Click **Safari** in the top menu bar.
- Select **Settings**.
- Go to the **Websites** tab.
- Select **Microphone** from the sidebar.
- Find big-AGI (or localhost for developers) in the list and set it to **Allow**.
- Close the Settings window.
3. **Refresh** the page.
This quick and simple fix should get essential voice input working in big-AGI on your Mac.
### Microsoft Edge (Windows)
1. Open the website in Edge.
2. Click the **lock icon** in the address bar.
3. Click **"Permissions for this site"**.
4. Find **"Microphone"**.
- Set it to **"Allow"**.
5. **Refresh** the page.
### Firefox (All Platforms)
> **Note:** The Speech Recognition API is **not supported** in Firefox. If you're using Firefox, please switch to a supported browser to use speech recognition
> features.
## Mobile Devices
### Android (Chrome)
1. Open the website in Chrome.
2. Tap the **lock icon** in the address bar.
3. Tap **"Permissions"**.
4. Find **"Microphone"**.
- Set it to **"Allow"**.
5. **Refresh** the page.
### iOS (Safari)
1. Open the **Settings** app on your device.
2. Scroll down and tap **"Safari"**.
3. Tap **"Microphone"**.
4. Ensure **"Ask"** or **"Allow"** is selected.
5. Return to Safari and open the website.
6. If prompted, allow microphone access.
7. **Refresh** the page.
### iOS (Chrome)
> **Note:** Chrome on iOS uses Safari's engine due to system limitations. Microphone permissions are managed through iOS settings.
1. Open the **Settings** app.
2. Scroll down and tap **"Chrome"**.
3. Ensure **"Microphone"** is toggled **on**.
4. Open Chrome and navigate to the website.
5. If prompted, allow microphone access.
6. **Refresh** the page.
## Troubleshooting
If you're still experiencing issues after enabling microphone access:
**Check System Permissions (macOS):**
- Open **System Settings**.
- Go to **"Privacy & Security"**.
- Select the **"Privacy"** tab.
- Click **"Microphone"** in the sidebar.
- Ensure your browser (e.g., Chrome, Safari) is checked.
- You may need to unlock the settings by clicking the lock icon at the bottom.
**Check Microphone Access (Windows):**
- Open **Settings**.
- Go to **"Privacy"** > **"Microphone"**.
- Ensure **"Allow apps to access your microphone"** is **on**.
- Scroll down and make sure your browser is allowed.
**Close Other Applications:**
- Close any applications that might be using the microphone.
**Restart the Browser:**
- Close all browser windows and reopen.
**Update Your Browser:**
- Ensure you're using the latest version.
**Check for Browser Extensions:**
- Disable extensions that might block access to the microphone.
For persistent issues, consult your browser's official support resources or contact big-AGI support.
## Technical Details
Big-AGI uses the [Web Speech API (SpeechRecognition)](https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition)
to transcribe spoken words into text. This API provides real-time transcription with live previews and works on most
modern mobile and desktop browsers.
**Note on Browser Support:**
| Browser | Support Level | Notes |
|----------------|-----------------|------------------------------------------------------------------------|
| Google Chrome | ✅ Recommended | Fully supported on desktop and Android. Preferred for best experience. |
| Safari | ✅ Supported | Requires macOS/iOS 14 or later. |
| Microsoft Edge | ✅ Supported | Fully supported on desktop. |
| Firefox | ❌ Not Supported | SpeechRecognition API not available. |
**Recommendation:**
For the best experience with speech recognition features, we strongly recommend using Google Chrome.
Ensure your browser is up to date to benefit from the latest features and security updates.
-37
View File
@@ -99,43 +99,6 @@ or follow the steps below for a quick start.
```
Access your big-AGI instance at `http://localhost:3000`.
If you deploy big-AGI behind a reverse proxy, you may want to check out the [Reverse Proxy Configuration Guide](deploy-reverse-proxy.md).
### Kubernetes Deployment
Deploy big-AGI on a Kubernetes cluster for enhanced scalability and management. Follow these steps for a Kubernetes deployment:
1. Clone the big-AGI repository:
```bash
git clone https://github.com/enricoros/big-AGI.git
cd big-AGI
```
2. Configure the environment variables:
```bash
cp docs/k8s/env-secret.yaml env-secret.yaml
vim env-secret.yaml # Edit the file to set your environment variables
```
3. Apply the Kubernetes configurations:
```bash
kubectl create namespace ns-big-agi
kubectl apply -f docs/k8s/big-agi-deployment.yaml -f env-secret.yaml
```
4. Verify the deployment:
```bash
kubectl -n ns-big-agi get svc,pod,deployment
```
5. Access the big-AGI application:
```bash
kubectl -n ns-big-agi port-forward service/svc-big-agi 3000:3000
```
Your big-AGI instance is now accessible at `http://localhost:3000`.
For more detailed instructions on Kubernetes deployment, including updating and troubleshooting, refer to our [Kubernetes Deployment Guide](deploy-k8s.md).
### Midori AI Subsystem for Docker Deployment
Follow the instructions found on [Midori AI Subsystem Site](https://io.midori-ai.xyz/subsystem/manager/)
-52
View File
@@ -1,52 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: ns-big-agi
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: big-agi
name: deployment-big-agi
namespace: ns-big-agi
spec:
replicas: 1
selector:
matchLabels:
app: big-agi
strategy: {}
template:
metadata:
labels:
app: big-agi
spec:
containers:
- image: ghcr.io/enricoros/big-agi:latest
name: big-agi
ports:
- containerPort: 3000
args:
- next
- start
- -p
- "3000"
envFrom:
- secretRef:
name: env
---
apiVersion: v1
kind: Service
metadata:
labels:
app: big-agi
name: svc-big-agi
namespace: ns-big-agi
spec:
ports:
- name: "http"
port: 3000
targetPort: 3000
selector:
app: big-agi
-49
View File
@@ -1,49 +0,0 @@
---
apiVersion: v1
kind: Secret
metadata:
name: env
namespace: ns-big-agi
type: Opaque
stringData:
# IMPORTANT: This file contains sensitive information. Do not commit changes to version control.
# All variables are optional. Fill in only the ones you need.
#
# For the latest information on all the environment variables, see /docs/environment-variables.md
#
# LLMs
OPENAI_API_KEY: ""
OPENAI_API_HOST: ""
OPENAI_API_ORG_ID: ""
AZURE_OPENAI_API_ENDPOINT: ""
AZURE_OPENAI_API_KEY: ""
ANTHROPIC_API_KEY: ""
ANTHROPIC_API_HOST: ""
DEEPSEEK_API_KEY: ""
GEMINI_API_KEY: ""
GROQ_API_KEY: ""
LOCALAI_API_HOST: ""
LOCALAI_API_KEY: ""
MISTRAL_API_KEY: ""
OLLAMA_API_HOST: ""
OPENPIPE_API_KEY: ""
OPENROUTER_API_KEY: ""
PERPLEXITY_API_KEY: ""
TOGETHERAI_API_KEY: ""
XAI_API_KEY: ""
# Browse
PUPPETEER_WSS_ENDPOINT: ""
# Search
GOOGLE_CLOUD_API_KEY: ""
GOOGLE_CSE_ID: ""
# Text-To-Speech: Eleven Labs
ELEVENLABS_API_KEY: ""
ELEVENLABS_API_HOST: ""
ELEVENLABS_VOICE_ID: ""
# Text-To-Image: Prodia
PRODIA_API_KEY: ""
-43
View File
@@ -1,43 +0,0 @@
# ReAct: question answering with Reasoning and Actions
## What is ReAct?
[ReAct](https://arxiv.org/abs/2210.03629) (Reason+Act) is a classic AI question-answering technique
that combines reasoning with actions to provide informed answers.
Within Big-AGI, users can invoke ReAct to ask complex questions that require multiple steps to answer.
| Mode | Activation | Information Sources | Reasoning Visibility | When to Use |
|-------|-----------------------------------|------------------------------------------------------|------------------------------------|--------------------------------------------------|
| Chat | Just type and send | **Pre-trained knowledge only** | Only shows final response | Quick answers, general knowledge queries |
| ReAct | Type "/react" before the question | **Web loads, Web searches, Wikipedia, calculations** | Shows step-by-step thought process | Complex, multi-step, or research-based questions |
Example of ReAct in action, taking a question about current events, googling results, opening a page, and summarizing the information:
https://github.com/user-attachments/assets/c3480428-9ab8-4257-a869-2541bf44a062
The following tools are implemented in Big-AGI:
- **browse**: loads web pages (URLs) and extracts information, using a correctly configured `Tools > Browsing` API
- **search**: searches the web to produce page URLs, using a correctly configured `Tools > Google Search` ([Google Programmable Search Engine](https://programmablesearchengine.google.com/about/)) API
- **wikipedia**: looks up information on Wikipedia pages
- **calculate**: performs mathematical calculations by executing typescript code
- warning: (!) unsafe and dangerous, do not use for untrusted code/LLMs
## How to Use ReAct in Big-AGI
1. **Invoking ReAct**: Type "/react" followed by your question in the chat.
2. **What to Expect**:
- An ephemeral space will show the AI's thought process and actions, showing all the steps taken.
- The final answer will appear in the main chat.
3. **Available Actions**: Web searches, Wikipedia lookups, calculations, and optionally web browsing.
## Good to know:
- **ReAct operates in isolation** from the main chat history.
- It **will take longer than standard responses** due to multiple steps.
- Web searches and browsing may have privacy implications, and require **tool configuration** in the UI.
- Errors or limitations in accessing external resources may affect results.
- ReAct does not use the [Tool or Function Calling](https://platform.openai.com/docs/guides/function-calling) feature of AI models, rather uses the old school approach of parsing and executing actions.
+5
View File
@@ -0,0 +1,5 @@
From root:
```bash
BIG_AGI_BUILD=standalone next build
electron . --enable-logging
```
+61
View File
@@ -0,0 +1,61 @@
<!DOCTYPE html>
<!-- Loading/splash page for the Electron shell: shows the app logo
     counter-rotating inside a spinner ring while the local server boots.
     (Loaded by the commented-out loadingScreen in main.js.) -->
<html>
<head>
<style>
  body {
    background: #2e2c29; /* same dark tone as the loadingScreen backgroundColor in main.js */
    display: flex;
    justify-content: center;
    align-items: center;
    height: 100vh;
    margin: 0;
    font-family: Arial, sans-serif;
  }
  /* Fixed-size stage; spinner and logo are absolutely positioned inside it. */
  .loader-container {
    position: relative;
    width: 100px;
    height: 100px;
  }
  /* Rotating ring: translucent border with one highlighted (blue) edge. */
  .spinner {
    position: absolute;
    top: 0;
    left: 0;
    border: 5px solid rgba(255, 255, 255, 0.3);
    border-top: 5px solid #3498db;
    border-radius: 50%;
    width: 100px;
    height: 100px;
    animation: spin 2s linear infinite;
  }
  /* App logo centered inside the ring, spinning the opposite way. */
  .logo {
    position: absolute;
    top: 15px;
    left: 15px;
    width: 80px;
    height: 80px;
    background: url('tray-icon.png') no-repeat center center;
    background-size: contain;
    animation: counter-spin 3.33s linear infinite;
  }
  @keyframes spin {
    0% { transform: rotate(0deg); }
    100% { transform: rotate(360deg); }
  }
  /* Reverse rotation at a different period, so the two never appear in sync. */
  @keyframes counter-spin {
    0% { transform: rotate(360deg); }
    100% { transform: rotate(0deg); }
  }
</style>
</head>
<body>
<div class="loader-container">
  <div class="spinner"></div>
  <div class="logo"></div>
</div>
</body>
</html>
+178
View File
@@ -0,0 +1,178 @@
const { app, BrowserWindow, Tray, Menu, ipcMain, screen, nativeTheme, shell } = require('electron');
const path = require('path');
const startServer = require('./server.js');
const { autoUpdater } = require('electron-updater');
// Module-level handles, assigned in createWindow() / createTray().
// mainWindow may be undefined before createWindow() completes.
let mainWindow;
let tray;
// Local port the embedded Next.js server listens on (see server.js).
const port = 3000;
// Boots the local Next.js server, then creates and wires up the main
// application window. Called once from app.whenReady(); quits the app
// if any step in the sequence fails.
async function createWindow() {
  try {
    // Start the embedded web server first: the window loads from it below.
    console.log('Starting server...');
    await startServer(port);
    console.log('Server started successfully');

    // Size the window relative to the primary display's usable area.
    const { width, height } = screen.getPrimaryDisplay().workAreaSize;

    // // Set up a loading screen
    // loadingScreen = new BrowserWindow({
    //   // width: 150,
    //   // height: 150,
    //   frame: false,
    //   transparent: false,
    //   alwaysOnTop: true,
    //   webPreferences: {
    //     nodeIntegration: true,
    //   },
    //   backgroundColor: '#2e2c29',
    // });
    //
    // loadingScreen.loadFile(path.join(__dirname, 'loading.html'));
    // loadingScreen.center();
    // console.log('Loading screen created');

    console.log('Preload script path:', path.join(__dirname, 'preload.js'));
    mainWindow = new BrowserWindow({
      width: Math.min(1280, width * 0.8),
      height: Math.min(800, height * 0.8),
      minWidth: 430,
      minHeight: 600,
      webPreferences: {
        // Keep the renderer isolated; the preload script is the only bridge.
        nodeIntegration: false,
        contextIsolation: true,
        preload: path.join(__dirname, 'preload.js'),
        sandbox: false, // preload requires fs/path (see preload.js), which needs the sandbox off
        devTools: false,
      },
      // Match the system theme to avoid a white flash in dark mode.
      backgroundColor: nativeTheme.shouldUseDarkColors ? '#1a1a1a' : '#ffffff',
      show: true,
      frame: false, // frameless: window controls are driven over IPC (see handlers below)
      titleBarStyle: 'hidden',
      icon: path.join(__dirname, 'tray-icon.png'),
      // New "insane" features:
      // transparent: true, // Enable window transparency
      vibrancy: 'under-window', // Add vibrancy effect (macOS only)
      visualEffectState: 'active', // Keep vibrancy active even when not focused (macOS only)
      roundedCorners: true, // Enable rounded corners (macOS only)
      // thickFrame: false, // Use a thinner frame on Windows
      autoHideMenuBar: true, // Auto-hide the menu bar, press Alt to show it
      scrollBounce: true, // Enable bounce effect when scrolling (macOS only)
    });

    mainWindow.removeMenu();
    mainWindow.setTitle('Your Professional App Name');

    console.log('Attempting to load main window URL...');
    await mainWindow.loadURL(`http://localhost:${port}`);
    console.log('Main window URL loaded successfully');

    // NOTE(review): loadURL is awaited above, so 'ready-to-show' may already
    // have fired by the time this listener is attached; the window is still
    // visible regardless because show: true is set in the constructor.
    mainWindow.once('ready-to-show', () => {
      console.log('Main window ready to show');
      // if (loadingScreen) {
      //   loadingScreen.close();
      // }
      mainWindow.show();
      mainWindow.focus();
    });

    createTray();
    autoUpdater.checkForUpdatesAndNotify();

    // Handle window state: closing hides to the tray instead of quitting,
    // until a real quit ('before-quit') is requested.
    let isQuitting = false;
    mainWindow.on('close', (event) => {
      if (!isQuitting) {
        event.preventDefault();
        mainWindow.hide();
      }
    });
    app.on('before-quit', () => {
      isQuitting = true;
    });

    // Adjust window behavior: notify the renderer of maximize state changes.
    mainWindow.on('maximize', () => {
      mainWindow.webContents.send('window-maximized');
    });
    mainWindow.on('unmaximize', () => {
      mainWindow.webContents.send('window-unmaximized');
    });

    // Warn if preloads fail
    mainWindow.webContents.on('preload-error', (event, preloadPath, error) => {
      console.error('Preload error:', preloadPath, error);
    });
    mainWindow.webContents.on('did-fail-load', (event, errorCode, errorDescription) => {
      console.error('Failed to load:', errorCode, errorDescription);
    });

    // Handle external links: open them in the system browser, never in-app.
    mainWindow.webContents.setWindowOpenHandler(({ url }) => {
      shell.openExternal(url);
      return { action: 'deny' };
    });
  } catch (err) {
    console.error('Error in createWindow:', err);
    app.quit();
  }
}
// Creates the system tray icon with a minimal show/quit menu.
// A left-click on the tray icon toggles the main window's visibility.
function createTray() {
  const iconPath = path.join(__dirname, 'tray-icon.png');
  tray = new Tray(iconPath);

  // Context menu: bring the window back, or quit the app entirely.
  const menuTemplate = [
    { label: 'Show App', click: () => mainWindow.show() },
    { type: 'separator' },
    { label: 'Quit', click: () => app.quit() },
  ];
  tray.setToolTip('Your Professional App Name');
  tray.setContextMenu(Menu.buildFromTemplate(menuTemplate));

  // Toggle visibility: a hidden window keeps running in the background.
  tray.on('click', () => {
    if (mainWindow.isVisible()) {
      mainWindow.hide();
    } else {
      mainWindow.show();
    }
  });
}
// App lifecycle wiring: create the window once Electron is initialized.
app.whenReady().then(() => {
  console.log('App is ready, creating window...');
  createWindow().catch((error) => {
    console.error('Failed to create window:', error);
    app.quit();
  });

  // macOS: re-create the window when the dock icon is activated and none exist.
  app.on('activate', () => {
    if (BrowserWindow.getAllWindows().length === 0) {
      createWindow();
    }
  });
});

// Quit when all windows are closed, except on macOS where apps stay active.
app.on('window-all-closed', () => {
  if (process.platform !== 'darwin') {
    app.quit();
  }
});
// IPC handlers for the custom (frameless) window controls.
// Guard against a missing or destroyed window: these events can arrive before
// createWindow() has assigned mainWindow, or after the window was torn down,
// and an unguarded dereference would crash the main process.
const withMainWindow = (fn) => {
  if (mainWindow && !mainWindow.isDestroyed()) fn(mainWindow);
};

ipcMain.on('minimize-window', () => withMainWindow((win) => win.minimize()));
ipcMain.on('maximize-window', () => withMainWindow((win) => {
  // Toggle between maximized and restored states.
  if (win.isMaximized()) {
    win.unmaximize();
  } else {
    win.maximize();
  }
}));
ipcMain.on('close-window', () => withMainWindow((win) => win.close()));

// Auto-updater events: forward update lifecycle notifications to the renderer,
// which receives them through the preload bridge (see preload.js).
autoUpdater.on('update-available', () => {
  withMainWindow((win) => win.webContents.send('update_available'));
});
autoUpdater.on('update-downloaded', () => {
  withMainWindow((win) => win.webContents.send('update_downloaded'));
});
+36
View File
@@ -0,0 +1,36 @@
const { contextBridge, desktopCapturer, ipcRenderer } = require('electron');
const { readFileSync } = require('fs');
const { join } = require('path');

// Main bridge: expose a minimal API to the page as window.electron.
// Only event forwarding and auto-update notifications cross the boundary.
contextBridge.exposeInMainWorld('electron', {
  // Fire-and-forget events from the UI to the main process.
  sendEvent: (event) => ipcRenderer.send('app-event', event),
  // Subscribe to auto-updater lifecycle notifications (sent from main.js).
  onUpdateAvailable: (callback) => ipcRenderer.on('update_available', callback),
  onUpdateDownloaded: (callback) => ipcRenderer.on('update_downloaded', callback),
});

// Screen Capture: inject renderer.js into the web page
// (renderer.js overrides navigator.mediaDevices.getDisplayMedia — see that file).
window.addEventListener('DOMContentLoaded', () => {
  console.log('Screen Capture: Injecting renderer.js into the web page');
  const rendererScript = document.createElement('script');
  // Inline the script source so it executes in the page's main world.
  rendererScript.text = readFileSync(join(__dirname, 'renderer.js'), 'utf8');
  document.body.appendChild(rendererScript);
});

// Screen Capture: expose desktopCapturer to the web page
contextBridge.exposeInMainWorld('myCustomGetDisplayMedia', async () => {
  console.log('Screen Capture: Calling desktopCapturer.getSources');
  const sources = await desktopCapturer.getSources({
    types: ['window', 'screen'],
  });
  console.log('Available sources:', sources);
  // you should create some kind of UI to prompt the user
  // to select the correct source like Google Chrome does
  // this is just for testing purposes
  return sources[0];
});

console.log('Preload script loaded');
+30
View File
@@ -0,0 +1,30 @@
// https://github.com/aabuhijleh/override-getDisplayMedia/blob/main/renderer.js
// Injected into the web page by preload.js; runs in the renderer process, where
// no Node.js APIs are available because `nodeIntegration` is turned off — any
// privileged feature must come through the preload bridge.
//
// Override getDisplayMedia: source selection is delegated to the
// `myCustomGetDisplayMedia` function the preload script exposed on globalThis.
navigator.mediaDevices.getDisplayMedia = async () => {
  // ask the preload bridge which window/screen to capture
  const selectedSource = await globalThis.myCustomGetDisplayMedia();

  // Chromium-specific 'mandatory' constraints: capture the chosen desktop
  // source at a fixed 1280x720 resolution
  const desktopVideoConstraints = {
    mandatory: {
      chromeMediaSource: 'desktop',
      chromeMediaSourceId: selectedSource.id,
      minWidth: 1280,
      maxWidth: 1280,
      minHeight: 720,
      maxHeight: 720,
    },
  };

  // build and return the MediaStream for the selected source
  return navigator.mediaDevices.getUserMedia({
    audio: false,
    video: desktopVideoConstraints,
  });
};
+71
View File
@@ -0,0 +1,71 @@
// Minimal HTTP server wrapper that serves the pre-built Next.js app
// (used by the Electron build instead of `next start`).
const { createServer } = require('http');
const { parse } = require('url');
const next = require('next');
const path = require('path');

// const dev = process.env.NODE_ENV !== 'production';
const dir = path.join(__dirname, '..'); // This points to the root of your project
// Always run Next.js in production mode against the built app in `dir`
const app = next({ dev: false, dir });
const handle = app.getRequestHandler();
/**
 * Prepares the Next.js app and starts the HTTP server on the given port.
 *
 * @param port - TCP port to listen on
 * @returns Promise resolving with the http.Server once it is listening;
 *          rejects if Next.js preparation fails or the server cannot bind
 *          (e.g. EADDRINUSE).
 */
function startServer(port) {
  return new Promise((resolve, reject) => {
    app.prepare()
      .then(() => {
        const server = createServer((req, res) => {
          // Basic request logging
          console.log(`${new Date().toISOString()} - ${req.method} ${req.url}`);

          // Simple rate limiting
          if (rateLimiter(req)) {
            res.statusCode = 429;
            res.end('Too Many Requests');
            return;
          }

          // Hand the request off to Next.js
          const parsedUrl = parse(req.url, true);
          handle(req, res, parsedUrl);
        });

        // server.listen() callbacks receive NO error argument; bind failures are
        // emitted as 'error' events, so the promise must be rejected from there
        // (the previous `(err) => { if (err) reject(err); ... }` was dead code and
        // would also have fallen through to resolve).
        server.on('error', reject);
        server.listen(port, () => {
          console.log(`> Ready on http://localhost:${port}`);
          resolve(server);
        });

        // Graceful shutdown: stop accepting connections on SIGTERM
        process.on('SIGTERM', () => {
          console.log('SIGTERM signal received: closing HTTP server');
          server.close(() => {
            console.log('HTTP server closed');
          });
        });
      })
      .catch(err => reject(err));
  });
}
// Simple in-memory rate limiter
const MAX_REQUESTS_PER_MINUTE = 100;
const requestCounts = new Map();
function rateLimiter(req) {
const ip = req.socket.remoteAddress;
const now = Date.now();
const windowStart = now - 60000; // 1 minute ago
const requestTimestamps = requestCounts.get(ip) || [];
const requestsInWindow = requestTimestamps.filter(timestamp => timestamp > windowStart);
if (requestsInWindow.length >= MAX_REQUESTS_PER_MINUTE) {
return true; // Rate limit exceeded
}
requestTimestamps.push(now);
requestCounts.set(ip, requestTimestamps);
return false; // Rate limit not exceeded
}
// CommonJS entry point: callers require this module and invoke startServer(port)
module.exports = startServer;
Binary file not shown.

After

Width:  |  Height:  |  Size: 993 B

+5 -12
View File
@@ -1,18 +1,10 @@
import { readFile } from 'node:fs/promises';
// Build information
process.env.NEXT_PUBLIC_BUILD_HASH = 'big-agi-2-dev';
process.env.NEXT_PUBLIC_BUILD_PKGVER = JSON.parse('' + await readFile(new URL('./package.json', import.meta.url))).version;
process.env.NEXT_PUBLIC_BUILD_TIMESTAMP = new Date().toISOString();
console.log(` 🧠 \x1b[1mbig-AGI\x1b[0m v${process.env.NEXT_PUBLIC_BUILD_PKGVER} (@${process.env.NEXT_PUBLIC_BUILD_HASH})`);
// Non-default build types
const buildType =
process.env.BIG_AGI_BUILD === 'standalone' ? 'standalone'
: process.env.BIG_AGI_BUILD === 'static' ? 'export'
: undefined;
buildType && console.log(` 🧠 big-AGI: building for ${buildType}...\n`);
buildType && console.log(` 🧠 big-AGI: building for ${buildType}...\n`);
/** @type {import('next').NextConfig} */
let nextConfig = {
@@ -21,7 +13,7 @@ let nextConfig = {
// [exports] https://nextjs.org/docs/advanced-features/static-html-export
...buildType && {
output: buildType,
distDir: 'dist',
// distDir: 'dist',
// disable image optimization for exports
images: { unoptimized: true },
@@ -31,8 +23,9 @@ let nextConfig = {
},
// [puppeteer] https://github.com/puppeteer/puppeteer/issues/11052
// NOTE: we may not be needing this anymore, as we use '@cloudflare/puppeteer'
serverExternalPackages: ['puppeteer-core'],
experimental: {
serverComponentsExternalPackages: ['puppeteer-core'],
},
webpack: (config, { isServer }) => {
// @mui/joy: anything material gets redirected to Joy
+6244 -3887
View File
File diff suppressed because it is too large Load Diff
+76 -68
View File
@@ -1,111 +1,119 @@
{
"name": "big-agi",
"version": "1.91.0",
"version": "1.16.0",
"private": true,
"author": "Enrico Ros <enrico.ros@gmail.com>",
"repository": "https://github.com/enricoros/big-agi",
"main": "electron/main.js",
"scripts": {
"dev": "next dev --turbopack",
"dev-debug": "cross-env NODE_OPTIONS='--inspect' next dev",
"dev-https": "next dev --experimental-https",
"dev": "node electron/server.js",
"build": "next build",
"start": "next start",
"start": "NODE_ENV=production node electron/server.js",
"lint": "next lint",
"postinstall": "prisma generate --no-hints",
"postinstall": "prisma generate",
"db:push": "prisma db push",
"db:studio": "prisma studio",
"vercel:env:pull": "npx vercel env pull .env.development.local"
"vercel:env:pull": "npx vercel env pull .env.development.local",
"electron": "electron .",
"electron-dev": "concurrently \"npm run dev\" \"electron .\"",
"electron-build": "next build && electron-builder",
"electron-start": "npm run build && electron ."
},
"prisma": {
"schema": "src/server/prisma/schema.prisma"
},
"dependencies": {
"@dnd-kit/core": "^6.3.1",
"@dnd-kit/modifiers": "^9.0.0",
"@dnd-kit/sortable": "^10.0.0",
"@dnd-kit/utilities": "^3.2.2",
"@emotion/cache": "^11.14.0",
"@emotion/react": "^11.14.0",
"@emotion/cache": "^11.11.0",
"@emotion/react": "^11.11.4",
"@emotion/server": "^11.11.0",
"@emotion/styled": "^11.14.0",
"@mui/icons-material": "^5.16.14",
"@mui/joy": "^5.0.0-beta.51",
"@mui/material": "^5.16.14",
"@next/bundle-analyzer": "^15.1.4",
"@next/third-parties": "^15.1.4",
"@prisma/client": "~5.22.0",
"@t3-oss/env-nextjs": "^0.11.1",
"@tanstack/react-query": "^5.63.0",
"@tanstack/react-virtual": "^3.11.2",
"@trpc/client": "11.0.0-rc.688",
"@trpc/next": "11.0.0-rc.688",
"@trpc/react-query": "11.0.0-rc.688",
"@trpc/server": "11.0.0-rc.688",
"@vercel/analytics": "^1.4.1",
"@vercel/speed-insights": "^1.1.0",
"@emotion/styled": "^11.11.5",
"@mui/icons-material": "^5.16.0",
"@mui/joy": "^5.0.0-beta.47",
"@mui/material": "^5.16.0",
"@next/bundle-analyzer": "^14.2.4",
"@next/third-parties": "^14.2.4",
"@prisma/client": "^5.16.1",
"@sanity/diff-match-patch": "^3.1.1",
"@t3-oss/env-nextjs": "^0.10.1",
"@tanstack/react-query": "^5.50.1",
"@trpc/client": "11.0.0-alpha-tmp-issues-5851-take-two.496",
"@trpc/next": "11.0.0-alpha-tmp-issues-5851-take-two.496",
"@trpc/react-query": "11.0.0-alpha-tmp-issues-5851-take-two.496",
"@trpc/server": "11.0.0-alpha-tmp-issues-5851-take-two.496",
"@vercel/analytics": "^1.3.1",
"@vercel/speed-insights": "^1.0.12",
"browser-fs-access": "^0.35.0",
"cheerio": "^1.0.0",
"dexie": "^4.0.10",
"cheerio": "^1.0.0-rc.12",
"dexie": "^4.0.7",
"dexie-react-hooks": "^1.1.7",
"diff": "^7.0.0",
"eventsource-parser": "^3.0.0",
"electron-updater": "^6.2.1",
"eventsource-parser": "^1.1.2",
"idb-keyval": "^6.2.1",
"mammoth": "^1.9.0",
"nanoid": "^5.0.9",
"next": "^15.1.4",
"nanoid": "^5.0.7",
"next": "~14.2.4",
"nprogress": "^0.2.0",
"pdfjs-dist": "4.10.38",
"pdfjs-dist": "4.4.168",
"plantuml-encoder": "^1.4.0",
"prismjs": "^1.29.0",
"react": "^18.3.1",
"react-beautiful-dnd": "^13.1.1",
"react-csv": "^2.2.2",
"react-dom": "^18.3.1",
"react-hook-form": "^7.54.2",
"react-katex": "^3.0.1",
"react-markdown": "^9.0.3",
"react-markdown": "^9.0.1",
"react-player": "^2.16.0",
"react-resizable-panels": "^2.1.7",
"react-resizable-panels": "^2.0.20",
"react-timeago": "^7.2.0",
"rehype-katex": "^7.0.1",
"rehype-katex": "^7.0.0",
"remark-gfm": "^4.0.0",
"remark-mark-highlight": "^0.1.1",
"remark-math": "^6.0.0",
"sharp": "^0.33.5",
"superjson": "^2.2.2",
"tesseract.js": "^6.0.0",
"tiktoken": "^1.0.18",
"sharp": "^0.33.4",
"superjson": "^2.2.1",
"tesseract.js": "^5.1.0",
"tiktoken": "^1.0.15",
"turndown": "^7.2.0",
"zod": "^3.24.1",
"zod-to-json-schema": "^3.24.1",
"zustand": "^5.0.3"
"zod": "^3.23.8",
"zustand": "^4.5.4"
},
"devDependencies": {
"@types/diff": "^7.0.0",
"@types/node": "^22.10.5",
"@cloudflare/puppeteer": "0.0.11",
"@types/node": "^20.14.10",
"@types/nprogress": "^0.2.3",
"@types/plantuml-encoder": "^1.4.2",
"@types/prismjs": "^1.26.5",
"@types/react": "^18.3.18",
"@types/prismjs": "^1.26.4",
"@types/react": "^18.3.3",
"@types/react-beautiful-dnd": "^13.1.8",
"@types/react-csv": "^1.1.10",
"@types/react-dom": "^18.3.5",
"@types/react-dom": "^18.3.0",
"@types/react-katex": "^3.0.4",
"@types/react-timeago": "^4.1.7",
"@types/turndown": "^5.0.5",
"cross-env": "^7.0.3",
"eslint": "^9.17.0",
"eslint-config-next": "^15.1.4",
"prettier": "^3.4.2",
"prisma": "~5.22.0",
"puppeteer-core": "^23.11.1",
"typescript": "^5.7.3"
"@types/turndown": "^5.0.4",
"concurrently": "^8.2.2",
"electron": "^31.1.0",
"electron-builder": "^24.13.3",
"eslint": "^8.57.0",
"eslint-config-next": "^14.2.4",
"prettier": "^3.3.2",
"prisma": "^5.16.1",
"typescript": "^5.5.3"
},
"engines": {
"node": "^22.0.0 || ^20.0.0"
"node": "^20.0.0 || ^18.0.0"
},
"overrides": {
"@types/react": "^18.3.18",
"@types/react-dom": "^18.3.5",
"uri-js": "npm:uri-js-replace"
"build": {
"appId": "com.yourcompany.yourappname",
"productName": "Your App Name",
"files": [
"electron/**/*",
".next/**/*",
"public/**/*",
"next.config.js"
],
"directories": {
"buildResources": "electron"
},
"extraMetadata": {
"main": "electron/main.js"
}
}
}
}
+19 -24
View File
@@ -14,24 +14,18 @@ import '~/common/styles/NProgress.css';
import '~/common/styles/agi.effects.css';
import '~/common/styles/app.styles.css';
import { Is } from '~/common/util/pwaUtils';
import { OverlaysInsert } from '~/common/layout/overlays/OverlaysInsert';
import { ProviderBackendCapabilities } from '~/common/providers/ProviderBackendCapabilities';
import { ProviderBootstrapLogic } from '~/common/providers/ProviderBootstrapLogic';
import { ProviderSingleTab } from '~/common/providers/ProviderSingleTab';
import { ProviderSnacks } from '~/common/providers/ProviderSnacks';
import { ProviderTRPCQuerySettings } from '~/common/providers/ProviderTRPCQuerySettings';
import { ProviderTheming } from '~/common/providers/ProviderTheming';
import { SnackbarInsert } from '~/common/components/snackbar/SnackbarInsert';
import { hasGoogleAnalytics, OptionalGoogleAnalytics } from '~/common/components/GoogleAnalytics';
import { isVercelFromFrontend } from '~/common/util/pwaUtils';
const Big_AGI_App = ({ Component, emotionCache, pageProps }: MyAppProps) => {
// We are using a nextjs per-page layout pattern to bring the (Optima) layout creation to a shared place
// This reduces the flicker and the time switching between apps, and seems to not have impact on
// the build. This is a good trade-off for now.
const getLayout = Component.getLayout ?? ((page: any) => page);
return <>
const MyApp = ({ Component, emotionCache, pageProps }: MyAppProps) =>
<>
<Head>
<title>{Brand.Title.Common}</title>
@@ -40,23 +34,24 @@ const Big_AGI_App = ({ Component, emotionCache, pageProps }: MyAppProps) => {
<ProviderTheming emotionCache={emotionCache}>
<ProviderSingleTab>
<ProviderBackendCapabilities>
{/* ^ Backend capabilities & SSR boundary */}
<ProviderBootstrapLogic>
<SnackbarInsert />
{getLayout(<Component {...pageProps} />)}
<OverlaysInsert />
</ProviderBootstrapLogic>
</ProviderBackendCapabilities>
<ProviderTRPCQuerySettings>
<ProviderBackendCapabilities>
{/* ^ SSR boundary */}
<ProviderBootstrapLogic>
<ProviderSnacks>
<Component {...pageProps} />
</ProviderSnacks>
</ProviderBootstrapLogic>
</ProviderBackendCapabilities>
</ProviderTRPCQuerySettings>
</ProviderSingleTab>
</ProviderTheming>
{Is.Deployment.VercelFromFrontend && <VercelAnalytics debug={false} />}
{Is.Deployment.VercelFromFrontend && <VercelSpeedInsights debug={false} sampleRate={1 / 2} />}
{isVercelFromFrontend && <VercelAnalytics debug={false} />}
{isVercelFromFrontend && <VercelSpeedInsights debug={false} sampleRate={1 / 2} />}
{hasGoogleAnalytics && <OptionalGoogleAnalytics />}
</>;
};
// Initializes React Query and tRPC, and enables the tRPC React Query hooks (apiQuery).
export default apiQuery.withTRPC(Big_AGI_App);
// enables the React Query API invocation
export default apiQuery.withTRPC(MyApp);
+2 -2
View File
@@ -2,7 +2,7 @@ import * as React from 'react';
import { AppType, MyAppProps } from 'next/app';
import { default as Document, DocumentContext, DocumentProps, Head, Html, Main, NextScript } from 'next/document';
import createEmotionServer from '@emotion/server/create-instance';
import InitColorSchemeScript from '@mui/joy/InitColorSchemeScript';
import { getInitColorSchemeScript } from '@mui/joy/styles';
import { Brand } from '~/common/app.config';
import { createEmotionCache } from '~/common/app.theme';
@@ -51,7 +51,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
{emotionStyleTags}
</Head>
<body>
<InitColorSchemeScript />
{getInitColorSchemeScript()}
<Main />
<NextScript />
</body>
+4 -2
View File
@@ -2,7 +2,9 @@ import * as React from 'react';
import { AppCall } from '../src/apps/call/AppCall';
import { withNextJSPerPageLayout } from '~/common/layout/withLayout';
import { withLayout } from '~/common/layout/withLayout';
export default withNextJSPerPageLayout({ type: 'optima' }, () => <AppCall />);
export default function CallPage() {
return withLayout({ type: 'optima' }, <AppCall />);
}
+4 -2
View File
@@ -2,7 +2,9 @@ import * as React from 'react';
import { AppBeam } from '../../src/apps/beam/AppBeam';
import { withNextJSPerPageLayout } from '~/common/layout/withLayout';
import { withLayout } from '~/common/layout/withLayout';
export default withNextJSPerPageLayout({ type: 'optima' }, () => <AppBeam />);
export default function BeamPage() {
return withLayout({ type: 'optima' }, <AppBeam />);
}
-8
View File
@@ -1,8 +0,0 @@
import * as React from 'react';
import { AppDiff } from '../src/apps/diff/AppDiff';
import { withNextJSPerPageLayout } from '~/common/layout/withLayout';
export default withNextJSPerPageLayout({ type: 'optima' }, () => <AppDiff />);
+4 -2
View File
@@ -2,7 +2,9 @@ import * as React from 'react';
import { AppDraw } from '../src/apps/draw/AppDraw';
import { withNextJSPerPageLayout } from '~/common/layout/withLayout';
import { withLayout } from '~/common/layout/withLayout';
export default withNextJSPerPageLayout({ type: 'optima' }, () => <AppDraw />);
export default function DrawPage() {
return withLayout({ type: 'optima' }, <AppDraw />);
}
+4 -4
View File
@@ -2,13 +2,13 @@ import * as React from 'react';
import { AppChat } from '../src/apps/chat/AppChat';
import { withNextJSPerPageLayout } from '~/common/layout/withLayout';
import { withLayout } from '~/common/layout/withLayout';
export default withNextJSPerPageLayout({ type: 'optima' }, () => {
export default function IndexPage() {
// TODO: This Index page will point to the Dashboard (or a landing page)
// For now it offers the chat experience, but this will change. #299
return <AppChat />;
});
return withLayout({ type: 'optima' }, <AppChat />);
}
+23 -20
View File
@@ -7,28 +7,30 @@ import DownloadIcon from '@mui/icons-material/Download';
import { AppPlaceholder } from '../../src/apps/AppPlaceholder';
import { getBackendCapabilities } from '~/modules/backend/store-backend-capabilities';
import { getPlantUmlServerUrl } from '~/modules/blocks/code/code-renderers/RenderCodePlantUML';
import { getPlantUmlServerUrl } from '~/modules/blocks/code/RenderCode';
import { withNextJSPerPageLayout } from '~/common/layout/withLayout';
import { withLayout } from '~/common/layout/withLayout';
// basics
// app config
import { Brand } from '~/common/app.config';
import { ROUTE_APP_CHAT, ROUTE_INDEX } from '~/common/app.routes';
import { Release } from '~/common/app.release';
// apps access
import { incrementalNewsVersion, useAppNewsStateStore } from '../../src/apps/news/news.version';
// capabilities access
import { useCapabilityBrowserSpeechRecognition, useCapabilityElevenLabs, useCapabilityTextToImage } from '~/common/components/useCapabilities';
// stores access
import { getLLMsDebugInfo } from '~/common/stores/llms/store-llms';
import { getLLMsDebugInfo } from '~/modules/llms/store-llms';
import { useAppStateStore } from '~/common/state/store-appstate';
import { useChatStore } from '~/common/stores/chat/store-chats';
import { useFolderStore } from '~/common/stores/folders/store-chat-folders';
import { useLogicSherpaStore } from '~/common/logic/store-logic-sherpa';
import { useFolderStore } from '~/common/state/store-folders';
import { useUXLabsStore } from '~/common/state/store-ux-labs';
// utils access
import { BrowserLang, clientHostName, Is, isPwa } from '~/common/util/pwaUtils';
import { clientHostName, isChromeDesktop, isFirefox, isIPhoneUser, isMacUser, isPwa, isVercelFromFrontend } from '~/common/util/pwaUtils';
import { getGA4MeasurementId } from '~/common/components/GoogleAnalytics';
import { prettyTimestampForFilenames } from '~/common/util/timeUtils';
import { supportsClipboardRead } from '~/common/util/clipboardUtils';
@@ -69,8 +71,6 @@ function DebugJsonCard(props: { title: string, data: any }) {
}
const frontendBuild = Release.buildInfo('frontend');
function AppDebug() {
// state
@@ -81,15 +81,19 @@ function AppDebug() {
const chatsCount = useChatStore.getState().conversations?.length;
const uxLabsExperiments = Object.entries(useUXLabsStore.getState()).filter(([_k, v]) => v === true).map(([k, _]) => k).join(', ');
const { folders, enableFolders } = useFolderStore.getState();
const { lastSeenNewsVersion, usageCount } = useLogicSherpaStore.getState();
const { lastSeenNewsVersion } = useAppNewsStateStore.getState();
const { usageCount } = useAppStateStore.getState();
// derived state
const cClient = {
// isBrowser,
Is,
BrowserLang,
isChromeDesktop,
isFirefox,
isIPhone: isIPhoneUser,
isMac: isMacUser,
isPWA: isPwa(),
supportsClipboardPaste: supportsClipboardRead(),
supportsClipboardPaste: supportsClipboardRead,
supportsScreenCapture,
};
const cProduct = {
@@ -103,21 +107,18 @@ function AppDebug() {
chatsCount,
foldersCount: folders?.length,
foldersEnabled: enableFolders,
newsCurrent: Release.Monotonics.NewsVersion,
newsCurrent: incrementalNewsVersion,
newsSeen: lastSeenNewsVersion,
labsActive: uxLabsExperiments,
reloads: usageCount,
},
release: {
app: Release.App,
build: frontendBuild,
},
};
const cBackend = {
configuration: backendCaps,
deployment: {
home: Brand.URIs.Home,
hostName: clientHostName(),
isVercelFromFrontend,
measurementId: getGA4MeasurementId(),
plantUmlServerUrl: getPlantUmlServerUrl(),
routeIndex: ROUTE_INDEX,
@@ -163,4 +164,6 @@ function AppDebug() {
}
export default withNextJSPerPageLayout({ type: 'container' }, () => <AppDebug />);
export default function DebugPage() {
return withLayout({ type: 'plain' }, <AppDebug />);
};
+7 -7
View File
@@ -2,12 +2,12 @@ import * as React from 'react';
import { Box, Typography } from '@mui/joy';
import { llmsStoreActions } from '~/common/stores/llms/store-llms';
import { useModelsStore } from '~/modules/llms/store-llms';
import { InlineError } from '~/common/components/InlineError';
import { apiQuery } from '~/common/util/trpc.client';
import { navigateToIndex, useRouterQuery } from '~/common/app.routes';
import { withNextJSPerPageLayout } from '~/common/layout/withLayout';
import { withLayout } from '~/common/layout/withLayout';
function CallbackOpenRouterPage(props: { openRouterCode: string | undefined }) {
@@ -15,6 +15,7 @@ function CallbackOpenRouterPage(props: { openRouterCode: string | undefined }) {
// external state
const { data, isError, error, isPending } = apiQuery.backend.exchangeOpenRouterKey.useQuery({ code: props.openRouterCode || '' }, {
enabled: !!props.openRouterCode,
refetchOnWindowFocus: false,
staleTime: Infinity,
});
@@ -30,7 +31,7 @@ function CallbackOpenRouterPage(props: { openRouterCode: string | undefined }) {
return;
// 1. Save the key as the client key
llmsStoreActions().setOpenRouterKey(openRouterKey);
useModelsStore.getState().setOpenRoutersKey(openRouterKey);
// 2. Navigate to the chat app
void navigateToIndex(true); //.then(openModelsSetup);
@@ -80,11 +81,10 @@ function CallbackOpenRouterPage(props: { openRouterCode: string | undefined }) {
* Docs: https://openrouter.ai/docs#oauth
* Example URL: https://localhost:3000/link/callback_openrouter?code=SomeCode
*/
export default withNextJSPerPageLayout({ type: 'container' }, () => {
export default function CallbackPage() {
// external state - get the 'code=...' from the URL
const { code } = useRouterQuery<{ code: string | undefined }>();
return <CallbackOpenRouterPage openRouterCode={code} />;
});
return withLayout({ type: 'plain' }, <CallbackOpenRouterPage openRouterCode={code} />);
}
+4 -5
View File
@@ -3,14 +3,13 @@ import * as React from 'react';
import { AppLinkChat } from '../../../src/apps/link-chat/AppLinkChat';
import { useRouterQuery } from '~/common/app.routes';
import { withNextJSPerPageLayout } from '~/common/layout/withLayout';
import { withLayout } from '~/common/layout/withLayout';
export default withNextJSPerPageLayout({ type: 'optima', suspendAutoModelsSetup: true }, () => {
export default function ChatLinkPage() {
// external state
const { chatLinkId } = useRouterQuery<{ chatLinkId: string | undefined }>();
return <AppLinkChat chatLinkId={chatLinkId || null} />;
});
return withLayout({ type: 'optima', suspendAutoModelsSetup: true }, <AppLinkChat chatLinkId={chatLinkId || null} />);
}
+7 -9
View File
@@ -3,14 +3,14 @@ import * as React from 'react';
import { Alert, Box, Button, Typography } from '@mui/joy';
import ArrowBackIcon from '@mui/icons-material/ArrowBack';
import { setComposerStartupText } from '~/common/logic/store-logic-sherpa';
import { setComposerStartupText } from '../../src/apps/chat/components/composer/store-composer';
import { callBrowseFetchPageOrThrow } from '~/modules/browse/browse.client';
import { callBrowseFetchPage } from '~/modules/browse/browse.client';
import { LogoProgress } from '~/common/components/LogoProgress';
import { asValidURL } from '~/common/util/urlUtils';
import { navigateToIndex, useRouterQuery } from '~/common/app.routes';
import { withNextJSPerPageLayout } from '~/common/layout/withLayout';
import { withLayout } from '~/common/layout/withLayout';
/**
@@ -75,13 +75,9 @@ function AppShareTarget() {
React.useEffect(() => {
if (intentURL) {
setIsDownloading(true);
callBrowseFetchPageOrThrow(intentURL)
callBrowseFetchPage(intentURL)
.then(page => {
if (page.stopReason !== 'error') {
if (!page.content) {
setErrorMessage(page.file ? 'No web page found, and we do not support files at the moment.' : 'No content found');
return;
}
let pageContent = page.content.markdown || page.content.text || page.content.html || '';
if (pageContent)
pageContent = '\n\n```' + intentURL + '\n' + pageContent + '\n```\n';
@@ -139,4 +135,6 @@ function AppShareTarget() {
* This page will be invoked on mobile when sharing Text/URLs/Files from other APPs
* Example URL: https://localhost:3000/link/share_target?title=This+Title&text=https%3A%2F%2Fexample.com%2Fapp%2Fpath
*/
export default withNextJSPerPageLayout({ type: 'container' }, () => <AppShareTarget />);
export default function ShareTargetPage() {
return withLayout({ type: 'plain' }, <AppShareTarget />);
}
+5 -6
View File
@@ -1,15 +1,14 @@
import * as React from 'react';
import { AppNews } from '../src/apps/news/AppNews';
import { markNewsAsSeen } from '../src/apps/news/news.version';
import { markNewsAsSeen } from '~/common/logic/store-logic-sherpa';
import { withNextJSPerPageLayout } from '~/common/layout/withLayout';
import { withLayout } from '~/common/layout/withLayout';
export default withNextJSPerPageLayout({ type: 'optima', suspendAutoModelsSetup: true }, () => {
export default function NewsPage() {
// 'touch' the last seen news version
React.useEffect(() => markNewsAsSeen(), []);
return <AppNews />;
});
return withLayout({ type: 'optima', suspendAutoModelsSetup: true }, <AppNews />);
}
+4 -2
View File
@@ -2,7 +2,9 @@ import * as React from 'react';
import { AppPersonas } from '../src/apps/personas/AppPersonas';
import { withNextJSPerPageLayout } from '~/common/layout/withLayout';
import { withLayout } from '~/common/layout/withLayout';
export default withNextJSPerPageLayout({ type: 'optima' }, () => <AppPersonas />);
export default function PersonasPage() {
return withLayout({ type: 'optima' }, <AppPersonas />);
}
+4 -2
View File
@@ -2,7 +2,9 @@ import * as React from 'react';
import { AppTokens } from '../src/apps/tokens/AppTokens';
import { withNextJSPerPageLayout } from '~/common/layout/withLayout';
import { withLayout } from '~/common/layout/withLayout';
export default withNextJSPerPageLayout({ type: 'optima' }, () => <AppTokens />);
export default function PersonasPage() {
return withLayout({ type: 'optima' }, <AppTokens />);
}
+7 -3
View File
@@ -1,8 +1,12 @@
import * as React from 'react';
import { AppPlaceholder } from '../src/apps/AppPlaceholder';
import { Box } from '@mui/joy';
import { withNextJSPerPageLayout } from '~/common/layout/withLayout';
// import { AppWorkspace } from '../src/apps/personas/AppWorkspace';
import { withLayout } from '~/common/layout/withLayout';
export default withNextJSPerPageLayout({ type: 'optima' }, () => <AppPlaceholder />);
export default function PersonasPage() {
return withLayout({ type: 'optima' }, <Box />);
}
Binary file not shown.

Before

Width:  |  Height:  |  Size: 19 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.5 KiB

+8 -29
View File
@@ -15,22 +15,20 @@
"scope": "/",
"icons": [
{
"src": "/icons/icon-1024x1024.png",
"sizes": "1024x1024",
"src": "/icons/icon-192x192.png",
"sizes": "192x192",
"type": "image/png",
"purpose": "any maskable"
"purpose": "maskable"
},
{
"src": "/icons/icon-512x512.png",
"sizes": "512x512",
"type": "image/png",
"purpose": "any"
"type": "image/png"
},
{
"src": "/icons/icon-192x192.png",
"sizes": "192x192",
"type": "image/png",
"purpose": "any"
"src": "/icons/icon-1024x1024.png",
"sizes": "1024x1024",
"type": "image/png"
}
],
"file_handlers": [
@@ -58,26 +56,7 @@
{
"name": "Call",
"url": "/call",
"description": "Call a Persona",
"icons": [
{
"src": "/icons/icon-call-96x96.png",
"sizes": "96x96",
"type": "image/png"
}
]
},
{
"name": "New Voice Chat",
"url": "/?newChat=voiceInput",
"description": "Start a new chat with voice input",
"icons": [
{
"src": "/icons/icon-voicechat-96x96.png",
"sizes": "96x96",
"type": "image/png"
}
]
"description": "Call a Persona"
}
]
}
File diff suppressed because one or more lines are too long
-27
View File
@@ -1,27 +0,0 @@
import * as React from 'react';
import { Box, Container, Typography } from '@mui/joy';
export function AppSmallContainer({ title, description, children }: {
title: string;
description: React.ReactNode;
children: React.ReactNode;
}) {
return (
<Box sx={{ flexGrow: 1, overflowY: 'auto', p: { xs: 3, md: 6 } }}>
<Container disableGutters maxWidth='md' sx={{ display: 'flex', flexDirection: 'column', gap: 3 }}>
<Box sx={{ mb: 2 }}>
<Typography level='h1' sx={{ mb: 1 }}>{title}</Typography>
<Typography>{description}</Typography>
</Box>
{children}
</Container>
</Box>
);
}
+9 -11
View File
@@ -5,13 +5,13 @@ import { Box, Button, Typography } from '@mui/joy';
import { BeamStoreApi, useBeamStore } from '~/modules/beam/store-beam.hooks';
import { BeamView } from '~/modules/beam/BeamView';
import { createBeamVanillaStore } from '~/modules/beam/store-beam_vanilla';
import { createBeamVanillaStore } from '~/modules/beam/store-beam-vanilla';
import { useModelsStore } from '~/modules/llms/store-llms';
import { OptimaToolbarIn } from '~/common/layout/optima/portals/OptimaPortalsIn';
import { createDConversation, DConversation } from '~/common/stores/chat/chat.conversation';
import { createDMessageTextContent, DMessage } from '~/common/stores/chat/chat.message';
import { getChatLLMId } from '~/common/stores/llms/store-llms';
import { useIsMobile } from '~/common/components/useMatchMedia';
import { usePluggableOptimaLayout } from '~/common/layout/optima/useOptimaLayout';
function initTestConversation(): DConversation {
@@ -22,7 +22,7 @@ function initTestConversation(): DConversation {
}
function initTestBeamStore(messages: DMessage[], beamStore: BeamStoreApi = createBeamVanillaStore()): BeamStoreApi {
beamStore.getState().open(messages, getChatLLMId(), false, (content) => alert(content));
beamStore.getState().open(messages, useModelsStore.getState().chatLLMId, (content) => alert(content));
return beamStore;
}
@@ -57,7 +57,8 @@ export function AppBeam() {
}, [beamStoreApi]);
const toolbarItems = React.useMemo(() => <>
// layout
usePluggableOptimaLayout(null, React.useMemo(() => <>
{/* button to toggle debug info */}
<Button size='sm' variant='plain' color='neutral' onClick={() => setShowDebug(on => !on)}>
{showDebug ? 'Hide' : 'Show'} debug
@@ -72,12 +73,10 @@ export function AppBeam() {
<Button size='sm' variant='plain' color='neutral' onClick={handleClose}>
.close
</Button>
</>, [handleClose, showDebug]);
</>, [handleClose, showDebug]), null, 'AppBeam');
return <>
<OptimaToolbarIn>{toolbarItems}</OptimaToolbarIn>
return (
<Box sx={{ flexGrow: 1, overflowY: 'auto', position: 'relative' }}>
{isOpen && (
@@ -103,6 +102,5 @@ export function AppBeam() {
)}
</Box>
</>;
);
}
+11 -10
View File
@@ -9,9 +9,9 @@ import MicIcon from '@mui/icons-material/Mic';
import RecordVoiceOverTwoToneIcon from '@mui/icons-material/RecordVoiceOverTwoTone';
import WarningRoundedIcon from '@mui/icons-material/WarningRounded';
import { PreferencesTab, useOptimaLayout } from '~/common/layout/optima/useOptimaLayout';
import { animationColorRainbow } from '~/common/util/animUtils';
import { navigateBack } from '~/common/app.routes';
import { optimaOpenPreferences } from '~/common/layout/optima/useOptima';
import { useCapabilityBrowserSpeechRecognition, useCapabilityElevenLabs } from '~/common/components/useCapabilities';
import { useChatStore } from '~/common/stores/chat/store-chats';
import { useUICounter } from '~/common/state/store-ui';
@@ -44,6 +44,7 @@ export function CallWizard(props: { strict?: boolean, conversationId: string | n
const [recognitionOverride, setRecognitionOverride] = React.useState(false);
// external state
const { openPreferencesTab } = useOptimaLayout();
const recognition = useCapabilityBrowserSpeechRecognition();
const synthesis = useCapabilityElevenLabs();
const chatIsEmpty = useChatStore(state => {
@@ -61,22 +62,22 @@ export function CallWizard(props: { strict?: boolean, conversationId: string | n
const allGood = overriddenEmptyChat && overriddenRecognition && synthesis.mayWork;
const fatalGood = overriddenRecognition && synthesis.mayWork;
if (!novel && fatalGood)
return props.children;
const handleOverrideChatEmpty = React.useCallback(() => setChatEmptyOverride(true), []);
const handleOverrideChatEmpty = () => setChatEmptyOverride(true);
const handleOverrideRecognition = React.useCallback(() => setRecognitionOverride(true), []);
const handleOverrideRecognition = () => setRecognitionOverride(true);
const handleConfigureElevenLabs = React.useCallback(() => optimaOpenPreferences('voice'), []);
const handleConfigureElevenLabs = () => {
openPreferencesTab(PreferencesTab.Voice);
};
const handleFinishButton = React.useCallback(() => {
const handleFinishButton = () => {
if (!allGood)
return navigateBack();
touch();
}, [allGood, touch]);
if (!novel && fatalGood)
return props.children;
};
return <>
+11 -12
View File
@@ -4,12 +4,11 @@ import type { SxProps } from '@mui/joy/styles/types';
import { Avatar, Box, Card, CardContent, Chip, IconButton, Link as MuiLink, ListDivider, MenuItem, Sheet, Switch, Typography } from '@mui/joy';
import CallIcon from '@mui/icons-material/Call';
import { DConversation, DConversationId, conversationTitle } from '~/common/stores/chat/chat.conversation';
import { GitHubProjectIssueCard } from '~/common/components/GitHubProjectIssueCard';
import { OptimaPanelGroup } from '~/common/layout/optima/panel/OptimaPanelGroup';
import { animationShadowRingLimey } from '~/common/util/animUtils';
import { conversationTitle, DConversation, DConversationId } from '~/common/stores/chat/chat.conversation';
import { useChatStore } from '~/common/stores/chat/store-chats';
import { useSetOptimaAppMenu } from '~/common/layout/optima/useOptima';
import { usePluggableOptimaLayout } from '~/common/layout/optima/useOptimaLayout';
import type { AppCallIntent } from './AppCall';
import { MockPersona, useMockPersonas } from './state/useMockPersonas';
@@ -224,12 +223,7 @@ export function Contacts(props: { setCallIntent: (intent: AppCallIntent) => void
// pluggable UI
const menuItems = React.useMemo(() => <OptimaPanelGroup title='Contacts Settings'>
<MenuItem onClick={toggleGrayUI}>
Grayed UI
<Switch checked={grayUI} sx={{ ml: 'auto' }} />
</MenuItem>
const menuItems = React.useMemo(() => <>
<MenuItem onClick={toggleShowConversations}>
Conversations
@@ -237,13 +231,18 @@ export function Contacts(props: { setCallIntent: (intent: AppCallIntent) => void
</MenuItem>
<MenuItem onClick={toggleShowSupport}>
Show Support
Support
<Switch checked={showSupport} sx={{ ml: 'auto' }} />
</MenuItem>
</OptimaPanelGroup>, [grayUI, showConversations, showSupport, toggleGrayUI, toggleShowConversations, toggleShowSupport]);
<MenuItem onClick={toggleGrayUI}>
Grayed UI
<Switch checked={grayUI} sx={{ ml: 'auto' }} />
</MenuItem>
useSetOptimaAppMenu(menuItems, 'CallUI-Contacts');
</>, [grayUI, showConversations, showSupport, toggleGrayUI, toggleShowConversations, toggleShowSupport]);
usePluggableOptimaLayout(null, null, menuItems, 'CallUI');
return <>
+56 -84
View File
@@ -13,25 +13,19 @@ import { ScrollToBottom } from '~/common/scroll-to-bottom/ScrollToBottom';
import { ScrollToBottomButton } from '~/common/scroll-to-bottom/ScrollToBottomButton';
import { useChatLLMDropdown } from '../chat/components/layout-bar/useLLMDropdown';
import { EXPERIMENTAL_speakTextStream } from '~/modules/elevenlabs/elevenlabs.client';
import { SystemPurposeId, SystemPurposes } from '../../data';
import { elevenLabsSpeakText } from '~/modules/elevenlabs/elevenlabs.client';
import { AixChatGenerateContent_DMessage, aixChatGenerateContent_DMessage_FromConversation } from '~/modules/aix/client/aix.client';
import { llmStreamingChatGenerate, VChatMessageIn } from '~/modules/llms/llm.client';
import { useElevenLabsVoiceDropdown } from '~/modules/elevenlabs/useElevenLabsVoiceDropdown';
import type { OptimaBarControlMethods } from '~/common/layout/optima/bar/OptimaBarDropdown';
import { AudioPlayer } from '~/common/util/audio/AudioPlayer';
import { Link } from '~/common/components/Link';
import { OptimaPanelGroup } from '~/common/layout/optima/panel/OptimaPanelGroup';
import { OptimaToolbarIn } from '~/common/layout/optima/portals/OptimaPortalsIn';
import { SpeechResult, useSpeechRecognition } from '~/common/components/speechrecognition/useSpeechRecognition';
import { conversationTitle, remapMessagesSysToUsr } from '~/common/stores/chat/chat.conversation';
import { createDMessageFromFragments, createDMessageTextContent, DMessage, messageFragmentsReduceText } from '~/common/stores/chat/chat.message';
import { createErrorContentFragment } from '~/common/stores/chat/chat.fragments';
import { SpeechResult, useSpeechRecognition } from '~/common/components/useSpeechRecognition';
import { conversationTitle } from '~/common/stores/chat/chat.conversation';
import { createDMessageTextContent, DMessage, messageFragmentsReduceText, messageSingleTextOrThrow } from '~/common/stores/chat/chat.message';
import { launchAppChat, navigateToIndex } from '~/common/app.routes';
import { useChatStore } from '~/common/stores/chat/store-chats';
import { useGlobalShortcuts } from '~/common/components/shortcuts/useGlobalShortcuts';
import { usePlayUrl } from '~/common/util/audio/usePlayUrl';
import { useSetOptimaAppMenu } from '~/common/layout/optima/useOptima';
import { usePluggableOptimaLayout } from '~/common/layout/optima/useOptimaLayout';
import type { AppCallIntent } from './AppCall';
import { CallAvatar } from './components/CallAvatar';
@@ -56,7 +50,7 @@ function CallMenuItems(props: {
const handleChangeVoiceToggle = () => props.setOverride(!props.override);
return <OptimaPanelGroup title='Call'>
return <>
<MenuItem onClick={handlePushToTalkToggle}>
<ListItemDecorator>{props.pushToTalk ? <MicNoneIcon /> : <MicIcon />}</ListItemDecorator>
@@ -86,7 +80,7 @@ function CallMenuItems(props: {
Voice Calls Feedback
</MenuItem>
</OptimaPanelGroup>;
</>;
}
@@ -103,11 +97,10 @@ export function Telephone(props: {
const [personaTextInterim, setPersonaTextInterim] = React.useState<string | null>(null);
const [pushToTalk, setPushToTalk] = React.useState(true);
const [stage, setStage] = React.useState<'ring' | 'declined' | 'connected' | 'ended'>('ring');
const llmDropdownRef = React.useRef<OptimaBarControlMethods>(null);
const responseAbortController = React.useRef<AbortController | null>(null);
// external state
const { chatLLMId, chatLLMDropdown } = useChatLLMDropdown(llmDropdownRef);
const { chatLLMId, chatLLMDropdown } = useChatLLMDropdown();
const { chatTitle, reMessages } = useChatStore(useShallow(state => {
const conversation = props.callIntent.conversationId
? state.conversations.find(conversation => conversation.id === props.callIntent.conversationId) ?? null
@@ -132,7 +125,7 @@ export function Telephone(props: {
setCallMessages(messages => [...messages, createDMessageTextContent('user', userSpeechTranscribed)]); // [state] append user:speech
}
}, []);
const { recognitionState, startRecognition, stopRecognition, toggleRecognition } = useSpeechRecognition('webSpeechApi', onSpeechResultCallback, 1000);
const { isSpeechEnabled, isRecording, isRecordingAudio, isRecordingSpeech, startRecording, stopRecording, toggleRecording } = useSpeechRecognition(onSpeechResultCallback, 1000);
// derived state
const isRinging = stage === 'ring';
@@ -149,19 +142,13 @@ export function Telephone(props: {
}, [isRinging, isConnected]);
// ringtone
usePlayUrl(isRinging ? '/sounds/chat-ringtone.mp3' : null, 300, 2800 * 2);
AudioPlayer.usePlayUrl(isRinging ? '/sounds/chat-ringtone.mp3' : null, 300, 2800 * 2);
/// Shortcuts
useGlobalShortcuts('Telephone', React.useMemo(() => [
{ key: 'm', ctrl: true, action: toggleRecognition },
], [toggleRecognition]));
/// CONNECTED
const handleCallStop = () => {
stopRecognition(false);
stopRecording();
setStage('ended');
};
@@ -187,7 +174,7 @@ export function Telephone(props: {
setCallMessages([createDMessageTextContent('assistant', firstMessage)]); // [state] set assistant:hello message
// fire/forget
void elevenLabsSpeakText(firstMessage, personaVoiceId, true, true);
void EXPERIMENTAL_speakTextStream(firstMessage, personaVoiceId);
return () => clearInterval(interval);
}, [isConnected, personaCallStarters, personaVoiceId]);
@@ -228,57 +215,43 @@ export function Telephone(props: {
// bail if no llm selected
if (!chatLLMId) return;
// temp fix: when the chat has no messages, only assume a single system message
const chatMessages: { role: VChatMessageIn['role'], text: string }[] = (reMessages && reMessages.length > 0)
? reMessages.map(message => ({ role: message.role, text: messageSingleTextOrThrow(message) }))
: personaSystemMessage
? [{ role: 'system', text: personaSystemMessage }]
: [];
// Call Message Generation Prompt
const callSystemInstruction = createDMessageTextContent('system', 'You are having a phone call. Your response style is brief and to the point, and according to your personality, defined below.');
const reMessagesRemapSysToUsr = remapMessagesSysToUsr(reMessages);
const callGenerationInputHistory: DMessage[] = [
// Chat messages, including the system prompt which is casted to a user message
// TODO: when upgrading to dynamic personas, we need to inject the persona message instead - not rely on reMessages, as messages[0] !== 'system'
...(reMessagesRemapSysToUsr ? reMessagesRemapSysToUsr : [createDMessageTextContent('user', personaSystemMessage)]),
// Call system prompt 2, to indicate the call has started
createDMessageTextContent('user', '**You are now on the phone call related to the chat above**.\nRespect your personality and answer with short, friendly and accurate thoughtful brief lines.'),
// Call history
...callMessages,
// 'prompt' for a "telephone call"
// FIXME: can easily run ouf of tokens - if this gets traction, we'll fix it
const callPrompt: VChatMessageIn[] = [
{ role: 'system', content: 'You are having a phone call. Your response style is brief and to the point, and according to your personality, defined below.' },
...chatMessages.map(message => ({ role: message.role, content: message.text })),
{ role: 'system', content: 'You are now on the phone call related to the chat above. Respect your personality and answer with short, friendly and accurate thoughtful lines.' },
...callMessages.map(message => ({ role: message.role, content: messageSingleTextOrThrow(message) })),
];
// perform completion
responseAbortController.current = new AbortController();
let finalText = '';
let error: any | null = null;
setPersonaTextInterim('💭...');
aixChatGenerateContent_DMessage_FromConversation(
chatLLMId,
callSystemInstruction,
callGenerationInputHistory,
'call',
callMessages[0].id,
{ abortSignal: responseAbortController.current.signal },
(update: AixChatGenerateContent_DMessage, _isDone: boolean) => {
const updatedText = messageFragmentsReduceText(update.fragments).trim();
if (updatedText)
setPersonaTextInterim(finalText = updatedText);
},
).then((status) => {
// whether status.outcome === 'success' or not, we get a valid DMessage, eventually with Error Fragments inside
const fullMessage = createDMessageFromFragments('assistant', status.lastDMessage.fragments);
fullMessage.generator = status.lastDMessage.generator;
setCallMessages(messages => [...messages, fullMessage]); // [state] append assistant:call_response
// fire/forget
if (status.outcome === 'success' && finalText?.length >= 1)
void elevenLabsSpeakText(finalText, personaVoiceId, true, true);
}).catch((err: DOMException) => {
if (err?.name !== 'AbortError') {
// create an error message to explain the exception
const errorMesage = createDMessageFromFragments('assistant', [createErrorContentFragment(err.message || err.toString())]);
setCallMessages(messages => [...messages, errorMesage]); // [state] append assistant:call_response-ERROR
llmStreamingChatGenerate(chatLLMId, callPrompt, 'call', callMessages[0].id, null, null, responseAbortController.current.signal, ({ textSoFar }) => {
const text = textSoFar?.trim();
if (text) {
finalText = text;
setPersonaTextInterim(text);
}
}).catch((err: DOMException) => {
if (err?.name !== 'AbortError')
error = err;
}).finally(() => {
setPersonaTextInterim(null);
if (finalText || error)
setCallMessages(messages => [...messages, createDMessageTextContent('assistant', finalText + (error ? ` (ERROR: ${error.message || error.toString()})` : ''))]); // [state] append assistant:call_response
// fire/forget
if (finalText?.length >= 1)
void EXPERIMENTAL_speakTextStream(finalText, personaVoiceId);
});
return () => {
@@ -288,7 +261,7 @@ export function Telephone(props: {
}, [isConnected, callMessages, chatLLMId, personaVoiceId, personaSystemMessage, reMessages]);
// [E] Message interrupter
const abortTrigger = isConnected && recognitionState.hasSpeech;
const abortTrigger = isConnected && isRecordingSpeech;
React.useEffect(() => {
if (abortTrigger && responseAbortController.current) {
responseAbortController.current.abort();
@@ -299,16 +272,16 @@ export function Telephone(props: {
// [E] continuous speech recognition (reload)
const shouldStartRecording = isConnected && !pushToTalk && speechInterim === null && !recognitionState.hasAudio;
const shouldStartRecording = isConnected && !pushToTalk && speechInterim === null && !isRecordingAudio;
React.useEffect(() => {
if (shouldStartRecording)
startRecognition();
}, [shouldStartRecording, startRecognition]);
startRecording();
}, [shouldStartRecording, startRecording]);
// more derived state
const personaName = persona?.title ?? 'Unknown';
const isMicEnabled = recognitionState.isAvailable;
const isMicEnabled = isSpeechEnabled;
const isTTSEnabled = true;
const isEnabled = isMicEnabled && isTTSEnabled;
@@ -322,11 +295,10 @@ export function Telephone(props: {
, [overridePersonaVoice, pushToTalk],
);
useSetOptimaAppMenu(menuItems, 'CallUI-Call');
usePluggableOptimaLayout(null, chatLLMDropdown, menuItems, 'CallUI');
return <>
<OptimaToolbarIn>{chatLLMDropdown}</OptimaToolbarIn>
<Typography
level='h1'
@@ -378,7 +350,7 @@ export function Telephone(props: {
{callMessages.map((message) =>
<CallMessage
key={message.id}
text={messageFragmentsReduceText(message.fragments)}
text={messageSingleTextOrThrow(message)}
variant={message.role === 'assistant' ? 'solid' : 'soft'}
color={message.role === 'assistant' ? 'neutral' : 'primary'}
role={message.role}
@@ -396,10 +368,10 @@ export function Telephone(props: {
)}
{/* Listening... */}
{recognitionState.isActive && (
{isRecording && (
<CallMessage
text={<>{speechInterim?.transcript.trim() || null}{speechInterim?.interimTranscript.trim() ? <i> {speechInterim.interimTranscript}</i> : null}</>}
variant={(recognitionState.hasSpeech || !!speechInterim?.transcript) ? 'soft' : 'outlined'}
variant={(isRecordingSpeech || !!speechInterim?.transcript) ? 'soft' : 'outlined'}
color='primary'
role='user'
/>
@@ -425,11 +397,11 @@ export function Telephone(props: {
{isConnected && <CallButton Icon={CallEndIcon} text='Hang up' color='danger' variant='soft' onClick={handleCallStop} />}
{isConnected && (pushToTalk ? (
<CallButton
Icon={MicIcon} onClick={toggleRecognition}
text={recognitionState.hasSpeech ? 'Listening...' : recognitionState.isActive ? 'Listening' : 'Push To Talk'}
variant={recognitionState.hasSpeech ? 'solid' : recognitionState.isActive ? 'soft' : 'outlined'}
Icon={MicIcon} onClick={toggleRecording}
text={isRecordingSpeech ? 'Listening...' : isRecording ? 'Listening' : 'Push To Talk'}
variant={isRecordingSpeech ? 'solid' : isRecording ? 'soft' : 'outlined'}
color='primary'
sx={!recognitionState.isActive ? { backgroundColor: 'background.surface' } : undefined}
sx={!isRecording ? { backgroundColor: 'background.surface' } : undefined}
/>
) : null
// <CallButton disabled={true} Icon={MicOffIcon} onClick={() => setMicMuted(muted => !muted)}
@@ -445,9 +417,9 @@ export function Telephone(props: {
{/* DEBUG state */}
{avatarClickCount > 10 && (avatarClickCount % 2 === 0) && (
<Card variant='outlined' sx={{ maxHeight: '25dvh', fontSize: 'sm', overflow: 'auto', whiteSpace: 'pre', py: 0, width: '100%' }}>
Special commands: Stop, Retry, Try Again, Restart, Goodbye.<br />
{JSON.stringify({ ...recognitionState, speechInterim }, null, 2)}
<Card variant='outlined' sx={{ maxHeight: '25dvh', overflow: 'auto', whiteSpace: 'pre', py: 0, width: '100%' }}>
Special commands: Stop, Retry, Try Again, Restart, Goodbye.
{JSON.stringify({ isSpeechEnabled, isRecordingAudio, speechInterim }, null, 2)}
</Card>
)}
+2 -2
View File
@@ -3,13 +3,13 @@ import * as React from 'react';
import { Chip, ColorPaletteProp, VariantProp } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import type { DMessage } from '~/common/stores/chat/chat.message';
import type { VChatMessageIn } from '~/modules/llms/llm.client';
export function CallMessage(props: {
text?: string | React.JSX.Element,
variant?: VariantProp, color?: ColorPaletteProp,
role: DMessage['role'],
role: VChatMessageIn['role'],
sx?: SxProps,
}) {
const isUserMessage = props.role === 'user';
+179 -286
View File
@@ -8,52 +8,46 @@ import { DEV_MODE_SETTINGS } from '../settings-modal/UxLabsSettings';
import { DiagramConfig, DiagramsModal } from '~/modules/aifn/digrams/DiagramsModal';
import { FlattenerModal } from '~/modules/aifn/flatten/FlattenerModal';
import { TradeConfig, TradeModal } from '~/modules/trade/TradeModal';
import { downloadSingleChat, importConversationsFromFilesAtRest, openConversationsAtRestPicker } from '~/modules/trade/trade.client';
import { imaginePromptFromTextOrThrow } from '~/modules/aifn/imagine/imaginePromptFromText';
import { elevenLabsSpeakText } from '~/modules/elevenlabs/elevenlabs.client';
import { downloadConversation, openAndLoadConversations } from '~/modules/trade/trade.client';
import { getChatLLMId, useChatLLM } from '~/modules/llms/store-llms';
import { imaginePromptFromText } from '~/modules/aifn/imagine/imaginePromptFromText';
import { speakText } from '~/modules/elevenlabs/elevenlabs.client';
import { useAreBeamsOpen } from '~/modules/beam/store-beam.hooks';
import { useCapabilityTextToImage } from '~/modules/t2i/t2i.client';
import type { DConversation, DConversationId } from '~/common/stores/chat/chat.conversation';
import type { OptimaBarControlMethods } from '~/common/layout/optima/bar/OptimaBarDropdown';
import { ConfirmationModal } from '~/common/components/modals/ConfirmationModal';
import { ConversationsManager } from '~/common/chat-overlay/ConversationsManager';
import { LLM_IF_ANT_PromptCaching, LLM_IF_OAI_Vision } from '~/common/stores/llms/llms.types';
import { OptimaDrawerIn, OptimaToolbarIn } from '~/common/layout/optima/portals/OptimaPortalsIn';
import { ConfirmationModal } from '~/common/components/ConfirmationModal';
import { ConversationsManager } from '~/common/chats/ConversationsManager';
import { DConversation, DConversationId } from '~/common/stores/chat/chat.conversation';
import { DMessageAttachmentFragment, DMessageContentFragment, duplicateDMessageFragments } from '~/common/stores/chat/chat.fragments';
import { GlobalShortcutDefinition, ShortcutKeyName, useGlobalShortcuts } from '~/common/components/useGlobalShortcuts';
import { PanelResizeInset } from '~/common/components/panes/GoodPanelResizeHandler';
import { Release } from '~/common/app.release';
import { PreferencesTab, useOptimaLayout, usePluggableOptimaLayout } from '~/common/layout/optima/useOptimaLayout';
import { ScrollToBottom } from '~/common/scroll-to-bottom/ScrollToBottom';
import { ScrollToBottomButton } from '~/common/scroll-to-bottom/ScrollToBottomButton';
import { ShortcutKey, useGlobalShortcuts } from '~/common/components/shortcuts/useGlobalShortcuts';
import { WorkspaceIdProvider } from '~/common/stores/workspace/WorkspaceIdProvider';
import { addSnackbar, removeSnackbar } from '~/common/components/snackbar/useSnackbarsStore';
import { createDMessageFromFragments, createDMessagePlaceholderIncomplete, DMessageMetadata, duplicateDMessageMetadata } from '~/common/stores/chat/chat.message';
import { createErrorContentFragment, createTextContentFragment, DMessageAttachmentFragment, DMessageContentFragment, duplicateDMessageFragmentsNoVoid } from '~/common/stores/chat/chat.fragments';
import { gcChatImageAssets } from '~/common/stores/chat/chat.gc';
import { getChatLLMId } from '~/common/stores/llms/store-llms';
import { addSnackbar, removeSnackbar } from '~/common/components/useSnackbarsStore';
import { createDMessageFromFragments, createDMessageTextContent, DMessageMetadata, duplicateDMessageMetadata } from '~/common/stores/chat/chat.message';
import { getConversation, getConversationSystemPurposeId, useConversation } from '~/common/stores/chat/store-chats';
import { optimaActions, optimaOpenModels, optimaOpenPreferences, useSetOptimaAppMenu } from '~/common/layout/optima/useOptima';
import { themeBgAppChatComposer } from '~/common/app.theme';
import { useChatLLM } from '~/common/stores/llms/llms.hooks';
import { useFolderStore } from '~/common/stores/folders/store-chat-folders';
import { useIsMobile, useIsTallScreen } from '~/common/components/useMatchMedia';
import { useOverlayComponents } from '~/common/layout/overlays/useOverlayComponents';
import { useFolderStore } from '~/common/state/store-folders';
import { useIsMobile } from '~/common/components/useMatchMedia';
import { useRouterQuery } from '~/common/app.routes';
import { useUIPreferencesStore } from '~/common/state/store-ui';
import { useUXLabsStore } from '~/common/state/store-ux-labs';
import { ChatPane } from './components/layout-pane/ChatPane';
import { ChatBarAltBeam } from './components/layout-bar/ChatBarAltBeam';
import { ChatBarAltTitle } from './components/layout-bar/ChatBarAltTitle';
import { ChatBarDropdowns } from './components/layout-bar/ChatBarDropdowns';
import { ChatBeamWrapper } from './components/ChatBeamWrapper';
import { ChatDrawerMemo } from './components/layout-drawer/ChatDrawer';
import { ChatMessageList } from './components/ChatMessageList';
import { ChatPageMenuItems } from './components/layout-menu/ChatPageMenuItems';
import { Composer } from './components/composer/Composer';
import { usePanesManager } from './components/panes/usePanesManager';
import type { ChatExecuteMode } from './execute-mode/execute-mode.types';
import { _handleExecute } from './editors/_handleExecute';
import { gcChatImageAssets } from './editors/image-generate';
// what to say when a chat is new and has no title
@@ -61,32 +55,16 @@ export const CHAT_NOVEL_TITLE = 'Chat';
export interface AppChatIntent {
initialConversationId?: string;
newChat?: 'voiceInput';
initialConversationId: string | null;
}
const scrollToBottomSx = {
display: 'flex',
flexDirection: 'column',
};
const chatMessageListSx: SxProps = {
flexGrow: 1,
};
const chatBeamWrapperSx: SxProps = {
flexGrow: 1,
// minHeight: 'calc(100vh - 69px - var(--AGI-Nav-width))',
};
const composerOpenSx: SxProps = {
zIndex: 21, // just to allocate a surface, and potentially have a shadow
minWidth: { md: 480 }, // don't get compresses too much on desktop
backgroundColor: themeBgAppChatComposer,
borderTop: `1px solid`,
borderTopColor: 'rgba(var(--joy-palette-neutral-mainChannel, 99 107 116) / 0.4)',
// hack: eats the bottom of the last message (as it has a 1px divider)
mt: '-1px',
borderTopColor: 'divider',
p: { xs: 1, md: 2 },
};
const composerClosedSx: SxProps = {
@@ -97,15 +75,14 @@ const composerClosedSx: SxProps = {
export function AppChat() {
// state
const { showPromisedOverlay } = useOverlayComponents();
const [isComposerMulticast, setIsComposerMulticast] = React.useState(false);
const [isMessageSelectionMode, setIsMessageSelectionMode] = React.useState(false);
const [diagramConfig, setDiagramConfig] = React.useState<DiagramConfig | null>(null);
const [tradeConfig, setTradeConfig] = React.useState<TradeConfig | null>(null);
const [clearConversationId, setClearConversationId] = React.useState<DConversationId | null>(null);
const [deleteConversationIds, setDeleteConversationIds] = React.useState<DConversationId[] | null>(null);
const [flattenConversationId, setFlattenConversationId] = React.useState<DConversationId | null>(null);
const showNextTitleChange = React.useRef(false);
const llmDropdownRef = React.useRef<OptimaBarControlMethods>(null);
const personaDropdownRef = React.useRef<OptimaBarControlMethods>(null);
const composerTextAreaRef = React.useRef<HTMLTextAreaElement>(null);
const [_activeFolderId, setActiveFolderId] = React.useState<string | null>(null);
@@ -113,12 +90,13 @@ export function AppChat() {
const theme = useTheme();
const isMobile = useIsMobile();
const isTallScreen = useIsTallScreen();
const intent = useRouterQuery<Partial<AppChatIntent>>();
const showAltTitleBar = useUXLabsStore(state => DEV_MODE_SETTINGS && state.labsChatBarAlt === 'title');
const { openLlmOptions, openModelsSetup, openPreferencesTab } = useOptimaLayout();
const { chatLLM } = useChatLLM();
const {
@@ -167,16 +145,14 @@ export function AppChat() {
deleteConversations,
} = useConversation(focusedPaneConversationId);
// this will be used for the side panel
// const focusedConversationWorkspaceId = workspaceForConversationIdentity(focusedPaneConversationId);
//// const focusedConversationWorkspace = useWorkspaceIdForConversation(focusedPaneConversationId);
const { mayWork: capabilityHasT2I } = useCapabilityTextToImage();
const activeFolderId = useFolderStore(({ enableFolders, folders }) => {
const { activeFolderId } = useFolderStore(({ enableFolders, folders }) => {
const activeFolderId = enableFolders ? _activeFolderId : null;
const activeFolder = activeFolderId ? folders.find(folder => folder.id === activeFolderId) : null;
return activeFolder?.id ?? null;
return {
activeFolderId: activeFolder?.id ?? null,
};
});
@@ -201,15 +177,30 @@ export function AppChat() {
showNextTitleChange.current = true;
}, [navigateHistoryInFocusedPane]);
// [effect] Handle the initial conversation intent
React.useEffect(() => {
intent.initialConversationId && handleOpenConversationInFocusedPane(intent.initialConversationId);
}, [handleOpenConversationInFocusedPane, intent.initialConversationId]);
// [effect] Show snackbar with the focused chat title after a history navigation in focused pane
React.useEffect(() => {
if (showNextTitleChange.current) {
showNextTitleChange.current = false;
const title = (focusedChatNumber >= 0 ? `#${focusedChatNumber + 1} · ` : '') + (focusedChatTitle || 'New Chat');
const id = addSnackbar({ key: 'focused-title', message: title, type: 'title' });
return () => removeSnackbar(id);
}
}, [focusedChatNumber, focusedChatTitle]);
// Execution
const handleExecuteAndOutcome = React.useCallback(async (chatExecuteMode: ChatExecuteMode, conversationId: DConversationId, callerNameDebug: string) => {
const outcome = await _handleExecute(chatExecuteMode, conversationId, callerNameDebug);
if (outcome === 'err-no-chatllm')
optimaOpenModels();
openModelsSetup();
else if (outcome === 'err-t2i-unconfigured')
optimaOpenPreferences('draw');
openPreferencesTab(PreferencesTab.Draw);
else if (outcome === 'err-no-persona')
addSnackbar({ key: 'chat-no-persona', message: 'No persona selected.', type: 'issue' });
else if (outcome === 'err-no-conversation')
@@ -217,7 +208,7 @@ export function AppChat() {
else if (outcome === 'err-no-last-message')
addSnackbar({ key: 'chat-no-conversation', message: 'No conversation history.', type: 'issue' });
return outcome === true;
}, []);
}, [openModelsSetup, openPreferencesTab]);
const handleComposerAction = React.useCallback((conversationId: DConversationId, chatExecuteMode: ChatExecuteMode, fragments: (DMessageContentFragment | DMessageAttachmentFragment)[], metadata?: DMessageMetadata): boolean => {
@@ -237,7 +228,7 @@ export function AppChat() {
// create the user:message
// NOTE: this can lead to multiple chat messages with data refs that are referring to the same dblobs,
// however, we already got transferred ownership of the dblobs at this point.
const userMessage = createDMessageFromFragments('user', duplicateDMessageFragmentsNoVoid(fragments)); // [chat] create user:message to send per-chat
const userMessage = createDMessageFromFragments('user', duplicateDMessageFragments(fragments)); // [chat] create user:message
if (metadata) userMessage.metadata = duplicateDMessageMetadata(metadata);
ConversationsManager.getHandler(conversation.id).messageAppend(userMessage); // [chat] append user message in each conversation
@@ -250,76 +241,56 @@ export function AppChat() {
}, [paneUniqueConversationIds, handleExecuteAndOutcome, willMulticast]);
const handleConversationExecuteHistory = React.useCallback(async (conversationId: DConversationId) => {
await handleExecuteAndOutcome('generate-content', conversationId, 'chat-execute-history'); // replace with 'history', then 'generate-content'
await handleExecuteAndOutcome('generate-content', conversationId, 'chat-execute-history'); // replace with 'history', then 'generate-text'
}, [handleExecuteAndOutcome]);
const handleMessageRegenerateLastInFocusedPane = React.useCallback(async () => {
// Ctrl + Shift + Z
if (!focusedPaneConversationId) return;
const cHandler = ConversationsManager.getHandler(focusedPaneConversationId);
if (!cHandler.isValid()) return;
const inputHistory = cHandler.historyViewHeadOrThrow('chat-regenerate-shortcut');
if (!inputHistory.length) return;
// remove the last message if assistant's
const lastMessage = inputHistory[inputHistory.length - 1];
if (lastMessage.role === 'assistant')
cHandler.historyTruncateTo(lastMessage.id, -1);
// generate: NOTE: this will replace the system message correctly
await handleExecuteAndOutcome('generate-content', focusedPaneConversationId, 'chat-regenerate-last'); // truncate if assistant, then gen-text
const focusedConversation = getConversation(focusedPaneConversationId);
if (focusedPaneConversationId && focusedConversation?.messages?.length) {
const lastMessage = focusedConversation.messages[focusedConversation.messages.length - 1];
if (lastMessage.role === 'assistant')
ConversationsManager.getHandler(focusedPaneConversationId).historyTruncateTo(lastMessage.id, -1);
await handleExecuteAndOutcome('generate-content', focusedConversation.id, 'chat-regenerate-last'); // truncate if assistant, then gen-text
}
}, [focusedPaneConversationId, handleExecuteAndOutcome]);
const handleMessageBeamLastInFocusedPane = React.useCallback(async () => {
// Ctrl + Shift + B
if (!focusedPaneConversationId) return;
const cHandler = ConversationsManager.getHandler(focusedPaneConversationId);
if (!cHandler.isValid()) return;
const inputHistory = cHandler.historyViewHeadOrThrow('chat-beam-shortcut');
if (!inputHistory.length) return;
// TODO: replace the Persona and Auto-Cache-hint in the history?
// replace the prompt in history
const lastMessage = inputHistory[inputHistory.length - 1];
if (lastMessage.role === 'assistant')
cHandler.beamInvoke(inputHistory.slice(0, -1), [lastMessage], lastMessage.id);
else if (lastMessage.role === 'user')
cHandler.beamInvoke(inputHistory, [], null);
const focusedConversation = getConversation(focusedPaneConversationId);
if (focusedConversation?.messages?.length) {
const lastMessage = focusedConversation.messages[focusedConversation.messages.length - 1];
if (lastMessage.role === 'assistant')
ConversationsManager.getHandler(focusedConversation.id).beamInvoke(focusedConversation.messages.slice(0, -1), [lastMessage], lastMessage.id);
else if (lastMessage.role === 'user')
ConversationsManager.getHandler(focusedConversation.id).beamInvoke(focusedConversation.messages, [], null);
}
}, [focusedPaneConversationId]);
const handleTextDiagram = React.useCallback((diagramConfig: DiagramConfig | null) => setDiagramConfig(diagramConfig), []);
const handleImagineFromText = React.useCallback(async (conversationId: DConversationId, subjectText: string) => {
const cHandler = ConversationsManager.getHandler(conversationId);
if (!cHandler.isValid()) return;
const userImagineMessage = createDMessagePlaceholderIncomplete('user', `Thinking at the subject...`); // [chat] append user:imagine prompt
cHandler.messageAppend(userImagineMessage);
await imaginePromptFromTextOrThrow(subjectText, conversationId)
.then(imaginedPrompt => {
// Replace the placeholder with the message to draw, then execute the draw
cHandler.messageFragmentReplace(userImagineMessage.id, userImagineMessage.fragments[0].fId, createTextContentFragment(imaginedPrompt), true);
return handleExecuteAndOutcome('generate-image', conversationId, 'chat-imagine-from-text'); // append message for 'imagine', then generate-image
})
.catch((error: any) => {
// Replace the placeholder with the error message
cHandler.messageFragmentReplace(userImagineMessage.id, userImagineMessage.fragments[0].fId, createErrorContentFragment(`Issue requesting an Image prompt. ${error?.message || ''}`), true);
});
const handleImagineFromText = React.useCallback(async (conversationId: DConversationId, messageText: string) => {
const conversation = getConversation(conversationId);
if (!conversation)
return;
const imaginedPrompt = await imaginePromptFromText(messageText, conversationId) || 'An error sign.';
const imaginePrompMessage = createDMessageTextContent('user', imaginedPrompt);
ConversationsManager.getHandler(conversationId).messageAppend(imaginePrompMessage); // [chat] append user:imagine prompt
await handleExecuteAndOutcome('generate-image', conversationId, 'chat-imagine-from-text'); // append message for 'imagine', then generate-image
}, [handleExecuteAndOutcome]);
const handleTextSpeak = React.useCallback(async (text: string): Promise<void> => {
await elevenLabsSpeakText(text, undefined, true, true);
await speakText(text);
}, []);
// Chat actions
const handleConversationNewInFocusedPane = React.useCallback((forceNoRecycle: boolean, isIncognito: boolean) => {
const handleConversationNewInFocusedPane = React.useCallback((forceNoRecycle?: boolean) => {
// create conversation (or recycle the existing top-of-stack empty conversation)
const conversationId = (recycleNewConversationId && !forceNoRecycle && !isIncognito)
const conversationId = (recycleNewConversationId && !forceNoRecycle)
? recycleNewConversationId
: prependNewConversation(getConversationSystemPurposeId(focusedPaneConversationId) ?? undefined, isIncognito);
: prependNewConversation(getConversationSystemPurposeId(focusedPaneConversationId) ?? undefined);
// switch the focused pane to the new conversation
handleOpenConversationInFocusedPane(conversationId);
@@ -339,30 +310,23 @@ export function AppChat() {
setTradeConfig({ dir: 'export', conversationId, exportAll });
}, []);
const handleConversationsImportFromFiles = React.useCallback(
(files: File[] | null): Promise<void> =>
importConversationsFromFilesAtRest(files, true)
.then((outcome) => {
// activate the last (most recent) imported conversation
if (outcome.activateConversationId) {
showNextTitleChange.current = true;
handleOpenConversationInFocusedPane(outcome.activateConversationId);
}
})
.catch(() => {
addSnackbar({ key: 'chat-import-fail', message: 'Could not open file.', type: 'issue' });
}),
[handleOpenConversationInFocusedPane],
);
const handleConversationsImportFormFilePicker = React.useCallback(
() => openConversationsAtRestPicker().then(handleConversationsImportFromFiles),
[handleConversationsImportFromFiles],
);
const handleFileOpenConversation = React.useCallback(() => {
openAndLoadConversations(true)
.then((outcome) => {
// activate the last (most recent) imported conversation
if (outcome?.activateConversationId) {
showNextTitleChange.current = true;
handleOpenConversationInFocusedPane(outcome.activateConversationId);
}
})
.catch(() => {
addSnackbar({ key: 'chat-import-fail', message: 'Could not open the file.', type: 'issue' });
});
}, [handleOpenConversationInFocusedPane]);
const handleFileSaveConversation = React.useCallback((conversationId: DConversationId | null) => {
const conversation = getConversation(conversationId);
conversation && downloadSingleChat(conversation, 'json')
conversation && downloadConversation(conversation, 'json')
.then(() => {
addSnackbar({ key: 'chat-save-as-ok', message: 'File saved.', type: 'success' });
})
@@ -372,7 +336,7 @@ export function AppChat() {
});
}, []);
const handleConversationBranch = React.useCallback((srcConversationId: DConversationId, messageId: string | null, addSplitPane: boolean): DConversationId | null => {
const handleConversationBranch = React.useCallback((srcConversationId: DConversationId, messageId: string | null): DConversationId | null => {
// clone data
const branchedConversationId = branchConversation(srcConversationId, messageId);
@@ -382,39 +346,28 @@ export function AppChat() {
// replace/open a new pane with this
showNextTitleChange.current = true;
if (addSplitPane && isMultiAddable)
handleOpenConversationInSplitPane(branchedConversationId);
else
if (!isMultiAddable)
handleOpenConversationInFocusedPane(branchedConversationId);
else
handleOpenConversationInSplitPane(branchedConversationId);
return branchedConversationId;
}, [activeFolderId, branchConversation, handleOpenConversationInFocusedPane, handleOpenConversationInSplitPane, isMultiAddable]);
const handleConversationFlatten = React.useCallback((conversationId: DConversationId) => setFlattenConversationId(conversationId), []);
const handleConversationReset = React.useCallback(async (conversationId: DConversationId) => {
if (await showPromisedOverlay('chat-reset-confirmation', { rejectWithValue: false }, ({ onResolve, onUserReject }) =>
<ConfirmationModal
open onClose={onUserReject} onPositive={() => onResolve(true)}
confirmationText='This will clear all messages while keeping the current chat settings, model, and persona. Do you want to continue?'
positiveActionText='Restart Chat'
title='Restart Chat'
/>,
)) {
ConversationsManager.getHandler(conversationId).historyClear();
const handleConfirmedClearConversation = React.useCallback(() => {
if (clearConversationId) {
ConversationsManager.getHandler(clearConversationId).historyClear();
setClearConversationId(null);
}
}, [showPromisedOverlay]);
}, [clearConversationId]);
const handleDeleteConversations = React.useCallback(async (conversationIds: DConversationId[], bypassConfirmation: boolean) => {
const handleConversationClear = React.useCallback((conversationId: DConversationId) => setClearConversationId(conversationId), []);
// show confirmation dialog
if (!bypassConfirmation && !await showPromisedOverlay('chat-delete-confirmation', { rejectWithValue: false }, ({ onResolve, onUserReject }) =>
<ConfirmationModal
open onClose={onUserReject} onPositive={() => onResolve(true)}
confirmationText={`Are you absolutely sure you want to delete ${conversationIds.length === 1 ? 'this conversation' : 'these conversations'}? This action cannot be undone.`}
positiveActionText={conversationIds.length === 1 ? 'Delete conversation' : `Yes, delete all ${conversationIds.length} conversations`}
/>,
)) return;
const handleDeleteConversations = React.useCallback((conversationIds: DConversationId[], bypassConfirmation: boolean) => {
if (!bypassConfirmation)
return setDeleteConversationIds(conversationIds);
// perform deletion, and return the next (or a new) conversation
const nextConversationId = deleteConversations(conversationIds, /*focusedSystemPurposeId ??*/ undefined);
@@ -422,9 +375,43 @@ export function AppChat() {
// switch the focused pane to the new conversation - NOTE: this makes the assumption that deletion had impact on the focused pane
handleOpenConversationInFocusedPane(nextConversationId);
setDeleteConversationIds(null);
// run GC for dblobs in this conversation
void gcChatImageAssets(); // fire/forget
}, [showPromisedOverlay, deleteConversations, handleOpenConversationInFocusedPane]);
}, [deleteConversations, handleOpenConversationInFocusedPane]);
const handleConfirmedDeleteConversations = React.useCallback(() => {
!!deleteConversationIds?.length && handleDeleteConversations(deleteConversationIds, true);
}, [deleteConversationIds, handleDeleteConversations]);
// Shortcuts
const handleOpenChatLlmOptions = React.useCallback(() => {
const chatLLMId = getChatLLMId();
if (!chatLLMId) return;
openLlmOptions(chatLLMId);
}, [openLlmOptions]);
const shortcuts = React.useMemo((): GlobalShortcutDefinition[] => [
// focused conversation
['b', true, true, false, handleMessageBeamLastInFocusedPane],
['r', true, true, false, handleMessageRegenerateLastInFocusedPane],
['n', true, false, true, handleConversationNewInFocusedPane],
['o', true, false, false, handleFileOpenConversation],
['s', true, false, false, () => handleFileSaveConversation(focusedPaneConversationId)],
['b', true, false, true, () => isFocusedChatEmpty || (focusedPaneConversationId && handleConversationBranch(focusedPaneConversationId, null))],
['x', true, false, true, () => isFocusedChatEmpty || (focusedPaneConversationId && handleConversationClear(focusedPaneConversationId))],
['d', true, false, true, () => focusedPaneConversationId && handleDeleteConversations([focusedPaneConversationId], false)],
[ShortcutKeyName.Left, true, false, true, () => handleNavigateHistoryInFocusedPane('back')],
[ShortcutKeyName.Right, true, false, true, () => handleNavigateHistoryInFocusedPane('forward')],
// global
['o', true, true, false, handleOpenChatLlmOptions],
['+', true, true, false, useUIPreferencesStore.getState().increaseContentScaling],
['-', true, true, false, useUIPreferencesStore.getState().decreaseContentScaling],
], [focusedPaneConversationId, handleConversationBranch, handleConversationClear, handleConversationNewInFocusedPane, handleFileOpenConversation, handleFileSaveConversation, handleDeleteConversations, handleMessageBeamLastInFocusedPane, handleMessageRegenerateLastInFocusedPane, handleNavigateHistoryInFocusedPane, handleOpenChatLlmOptions, isFocusedChatEmpty]);
useGlobalShortcuts(shortcuts);
// Pluggable Optima components
@@ -434,18 +421,14 @@ export function AppChat() {
const focusedBarContent = React.useMemo(() => beamOpenStoreInFocusedPane
? <ChatBarAltBeam beamStore={beamOpenStoreInFocusedPane} isMobile={isMobile} />
: (barAltTitle === null)
? <ChatBarDropdowns conversationId={focusedPaneConversationId} llmDropdownRef={llmDropdownRef} personaDropdownRef={personaDropdownRef} />
? <ChatBarDropdowns conversationId={focusedPaneConversationId} />
: <ChatBarAltTitle conversationId={focusedPaneConversationId} conversationTitle={barAltTitle} />
, [barAltTitle, beamOpenStoreInFocusedPane, focusedPaneConversationId, isMobile],
);
// Disabled by default, as it lags the opening of the drawer and immediatly vanishes during the closing animation
const isDrawerOpen = true; // useOptimaDrawerOpen();
const drawerContent = React.useMemo(() => !isDrawerOpen ? null :
const drawerContent = React.useMemo(() =>
<ChatDrawerMemo
// isMobile={isMobile /* expensive as it undoes the memo; not passed anymore */}
isMobile={isMobile}
activeConversationId={focusedPaneConversationId}
activeFolderId={activeFolderId}
chatPanesConversationIds={paneUniqueConversationIds}
@@ -458,131 +441,32 @@ export function AppChat() {
onConversationsImportDialog={handleConversationImportDialog}
setActiveFolderId={setActiveFolderId}
/>,
[activeFolderId, disableNewButton, focusedPaneConversationId, handleConversationBranch, handleConversationExport, handleConversationImportDialog, handleConversationNewInFocusedPane, handleDeleteConversations, handleOpenConversationInFocusedPane, isDrawerOpen, paneUniqueConversationIds],
[activeFolderId, disableNewButton, focusedPaneConversationId, handleConversationBranch, handleConversationExport, handleConversationImportDialog, handleConversationNewInFocusedPane, handleDeleteConversations, handleOpenConversationInFocusedPane, isMobile, paneUniqueConversationIds],
);
const focusedMenuItems = React.useMemo(() =>
<ChatPane
<ChatPageMenuItems
isMobile={isMobile}
conversationId={focusedPaneConversationId}
disableItems={!focusedPaneConversationId || isFocusedChatEmpty}
hasConversations={hasConversations}
isMessageSelectionMode={isMessageSelectionMode}
isVerticalSplit={isMobile || isTallScreen}
onConversationBranch={handleConversationBranch}
onConversationClear={handleConversationReset}
onConversationClear={handleConversationClear}
onConversationFlatten={handleConversationFlatten}
// onConversationNew={handleConversationNewInFocusedPane}
setIsMessageSelectionMode={setIsMessageSelectionMode}
/>,
[focusedPaneConversationId, handleConversationBranch, handleConversationFlatten, handleConversationReset, hasConversations, isFocusedChatEmpty, isMessageSelectionMode, isMobile, isTallScreen],
[focusedPaneConversationId, handleConversationBranch, handleConversationClear, handleConversationFlatten, hasConversations, isFocusedChatEmpty, isMessageSelectionMode, isMobile],
);
useSetOptimaAppMenu(focusedMenuItems, 'AppChat');
// Effects
// [effect] Handle the conversation intent
React.useEffect(() => {
// Debug: open a null chat
if (Release.IsNodeDevBuild && intent.initialConversationId === 'null')
openConversationInFocusedPane(null! /* for debugging purporse */);
// Open the initial conversation if set
else if (intent.initialConversationId)
openConversationInFocusedPane(intent.initialConversationId);
// Create a new chat if requested
else if (intent.newChat !== undefined)
handleConversationNewInFocusedPane(false, false);
}, [handleConversationNewInFocusedPane, intent.initialConversationId, intent.newChat, openConversationInFocusedPane]);
// [effect] Show snackbar with the focused chat title after a history navigation in focused pane
React.useEffect(() => {
if (showNextTitleChange.current) {
showNextTitleChange.current = false;
const title = (focusedChatNumber >= 0 ? `#${focusedChatNumber + 1} · ` : '') + (focusedChatTitle || 'New Chat');
const id = addSnackbar({ key: 'focused-title', message: title, type: 'center-title' });
return () => removeSnackbar(id);
}
}, [focusedChatNumber, focusedChatTitle]);
// Shortcuts
const handleOpenChatLlmOptions = React.useCallback(() => {
const chatLLMId = getChatLLMId();
if (!chatLLMId) return;
optimaActions().openModelOptions(chatLLMId);
}, []);
const handleMoveFocus = React.useCallback((direction: number, wholeList?: boolean) => {
// find the parent list
let messageListElement: HTMLElement | null;
let withinBeam = false;
const activeElement = document.activeElement as HTMLElement;
if (activeElement) {
messageListElement = document.querySelector('[role=beam-list]') as HTMLElement;
if (!messageListElement)
messageListElement = activeElement.closest('[role=chat-messages-list]') as HTMLElement;
else
withinBeam = true;
} else
messageListElement = document.querySelector('[role=chat-messages-list]') as HTMLElement;
if (!messageListElement) return;
// find the scrollable container and if we're at the bottom
const scrollContainer = messageListElement.closest('[role=scrollable]') as HTMLElement;
if (!scrollContainer) return;
const isAtBottom = Math.abs(scrollContainer.scrollHeight - scrollContainer.scrollTop - scrollContainer.clientHeight) < 1;
// determine the current message and next index
const messageElements = Array.from(messageListElement.querySelectorAll(withinBeam ? '[role=beam-card]' : '[role=chat-message]')) as HTMLElement[];
const currentIndex = messageElements.findIndex(el => el.contains(activeElement));
// if going down and we're at/past the last message, scroll to bottom
const snapToBottom = direction > 0 && (wholeList || (currentIndex === -1 || currentIndex >= messageElements.length - 1));
const nextIndex = (wholeList && direction < 0) ? 0
: snapToBottom ? messageElements.length - 1
: (isAtBottom && direction < 0) ? currentIndex
: currentIndex === -1 ? (direction < 0 ? 0 : messageElements.length - 1)
: currentIndex + direction;
if (nextIndex < 0 || nextIndex >= messageElements.length) return;
// perform the smooth scroll and focus
const targetElement = messageElements[nextIndex];
targetElement.focus({ preventScroll: true, focusVisible: true } as FocusOptions);
targetElement.scrollIntoView({ behavior: 'smooth', block: snapToBottom ? 'end' : 'start' });
}, []);
useGlobalShortcuts('AppChat', React.useMemo(() => [
// focused conversation
{ key: 'z', ctrl: true, shift: true, disabled: isFocusedChatEmpty, action: handleMessageRegenerateLastInFocusedPane, description: 'Retry' },
{ key: 'b', ctrl: true, shift: true, disabled: isFocusedChatEmpty, action: handleMessageBeamLastInFocusedPane, description: 'Beam Edit' },
{ key: 'o', ctrl: true, action: handleConversationsImportFormFilePicker },
{ key: 's', ctrl: true, action: () => handleFileSaveConversation(focusedPaneConversationId) },
{ key: 'n', ctrl: true, shift: true, action: () => handleConversationNewInFocusedPane(false, false) },
{ key: 'x', ctrl: true, shift: true, action: () => isFocusedChatEmpty || (focusedPaneConversationId && handleConversationReset(focusedPaneConversationId)) },
{ key: 'd', ctrl: true, shift: true, action: () => focusedPaneConversationId && handleDeleteConversations([focusedPaneConversationId], false) },
{ key: '[', ctrl: true, action: () => handleNavigateHistoryInFocusedPane('back') },
{ key: ']', ctrl: true, action: () => handleNavigateHistoryInFocusedPane('forward') },
// change active message (in any possible panel)
{ key: ShortcutKey.Up, ctrl: true, action: () => handleMoveFocus(-1) },
{ key: ShortcutKey.Down, ctrl: true, action: () => handleMoveFocus(1) },
{ key: ShortcutKey.Up, ctrl: true, shift: true, action: () => handleMoveFocus(-1, true) },
{ key: ShortcutKey.Down, ctrl: true, shift: true, action: () => handleMoveFocus(1, true) },
// open the dropdowns
{ key: 'l', ctrl: true, action: () => llmDropdownRef.current?.openListbox() /*, description: 'Open Models Dropdown'*/ },
{ key: 'p', ctrl: true, action: () => personaDropdownRef.current?.openListbox() /*, description: 'Open Persona Dropdown'*/ },
// focused conversation llm
{ key: 'o', ctrl: true, shift: true, action: handleOpenChatLlmOptions },
], [focusedPaneConversationId, handleConversationNewInFocusedPane, handleConversationReset, handleConversationsImportFormFilePicker, handleDeleteConversations, handleFileSaveConversation, handleMessageBeamLastInFocusedPane, handleMessageRegenerateLastInFocusedPane, handleMoveFocus, handleNavigateHistoryInFocusedPane, handleOpenChatLlmOptions, isFocusedChatEmpty]));
usePluggableOptimaLayout(drawerContent, focusedBarContent, focusedMenuItems, 'AppChat');
return <>
<OptimaDrawerIn>{drawerContent}</OptimaDrawerIn>
<OptimaToolbarIn>{focusedBarContent}</OptimaToolbarIn>
<PanelGroup
direction={(isMobile || isTallScreen) ? 'vertical' : 'horizontal'}
direction={isMobile ? 'vertical' : 'horizontal'}
id='app-chat-panels'
>
@@ -590,13 +474,12 @@ export function AppChat() {
const _paneIsFocused = idx === focusedPaneIndex;
const _paneConversationId = pane.conversationId;
const _paneChatHandler = paneHandlers[idx] ?? null;
const _paneIsIncognito = _paneChatHandler?.isIncognito() ?? false;
const _paneBeamStoreApi = paneBeamStores[idx] ?? null;
const _paneBeamIsOpen = !!beamsOpens?.[idx] && !!_paneBeamStoreApi;
const _paneBeamStore = paneBeamStores[idx] ?? null;
const _paneBeamIsOpen = !!beamsOpens?.[idx] && !!_paneBeamStore;
const _panesCount = chatPanes.length;
const _keyAndId = `chat-pane-${pane.paneId}`;
const _sepId = `sep-pane-${idx}`;
return <WorkspaceIdProvider conversationId={_paneIsFocused ? _paneConversationId : null} key={_keyAndId}>
return <React.Fragment key={_keyAndId}>
<Panel
id={_keyAndId}
@@ -634,17 +517,13 @@ export function AppChat() {
// it was optional before: https://github.com/bvaughn/react-resizable-panels/issues/241
pointerEvents: 'auto',
}),
...((_paneIsIncognito && {
backgroundColor: theme.palette.background.level3,
})),
}}
>
<ScrollToBottom
bootToBottom
stickToBottomInitial
disableAutoStick={isMobile && _paneBeamIsOpen}
sx={scrollToBottomSx}
sx={{ display: 'flex', flexDirection: 'column' }}
>
{!_paneBeamIsOpen && (
@@ -652,28 +531,30 @@ export function AppChat() {
conversationId={_paneConversationId}
conversationHandler={_paneChatHandler}
capabilityHasT2I={capabilityHasT2I}
chatLLMAntPromptCaching={chatLLM?.interfaces?.includes(LLM_IF_ANT_PromptCaching) ?? false}
chatLLMContextTokens={chatLLM?.contextTokens ?? null}
chatLLMSupportsImages={chatLLM?.interfaces?.includes(LLM_IF_OAI_Vision) ?? false}
fitScreen={isMobile || isMultiPane}
isMobile={isMobile}
isMessageSelectionMode={isMessageSelectionMode}
setIsMessageSelectionMode={setIsMessageSelectionMode}
onConversationBranch={handleConversationBranch}
onConversationExecuteHistory={handleConversationExecuteHistory}
onConversationNew={handleConversationNewInFocusedPane}
onTextDiagram={handleTextDiagram}
onTextImagine={handleImagineFromText}
onTextSpeak={handleTextSpeak}
sx={chatMessageListSx}
sx={{
flexGrow: 1,
}}
/>
)}
{_paneBeamIsOpen && (
<ChatBeamWrapper
beamStore={_paneBeamStoreApi}
beamStore={_paneBeamStore}
isMobile={isMobile}
inlineSx={chatBeamWrapperSx}
inlineSx={{
flexGrow: 1,
// minHeight: 'calc(100vh - 69px - var(--AGI-Nav-width))',
}}
/>
)}
@@ -691,7 +572,7 @@ export function AppChat() {
</PanelResizeHandle>
)}
</WorkspaceIdProvider>;
</React.Fragment>;
})}
</PanelGroup>
@@ -705,19 +586,13 @@ export function AppChat() {
isMulticast={!isMultiConversationId ? null : isComposerMulticast}
isDeveloperMode={isFocusedChatDeveloper}
onAction={handleComposerAction}
onConversationsImportFromFiles={handleConversationsImportFromFiles}
onTextImagine={handleImagineFromText}
setIsMulticast={setIsComposerMulticast}
sx={beamOpenStoreInFocusedPane ? composerClosedSx : composerOpenSx}
/>
{/* Diagrams */}
{!!diagramConfig && (
<DiagramsModal
config={diagramConfig}
onClose={() => setDiagramConfig(null)}
/>
)}
{!!diagramConfig && <DiagramsModal config={diagramConfig} onClose={() => setDiagramConfig(null)} />}
{/* Flatten */}
{!!flattenConversationId && (
@@ -737,5 +612,23 @@ export function AppChat() {
/>
)}
{/* [confirmation] Reset Conversation */}
{!!clearConversationId && (
<ConfirmationModal
open onClose={() => setClearConversationId(null)} onPositive={handleConfirmedClearConversation}
confirmationText='Are you sure you want to discard all messages?'
positiveActionText='Clear conversation'
/>
)}
{/* [confirmation] Delete All */}
{!!deleteConversationIds?.length && (
<ConfirmationModal
open onClose={() => setDeleteConversationIds(null)} onPositive={handleConfirmedDeleteConversations}
confirmationText={`Are you absolutely sure you want to delete ${deleteConversationIds.length === 1 ? 'this conversation' : 'these conversations'}? This action cannot be undone.`}
positiveActionText={deleteConversationIds.length === 1 ? 'Delete conversation' : `Yes, delete all ${deleteConversationIds.length} conversations`}
/>
)}
</>;
}
+16
View File
@@ -0,0 +1,16 @@
import { ChatBeamIcon } from '~/common/components/icons/ChatBeamIcon';
import type { ICommandsProvider } from './ICommandsProvider';
export const CommandsBeam: ICommandsProvider = {
id: 'cmd-mode-beam',
rank: 9,
getCommands: () => [{
primary: '/beam',
arguments: ['prompt'],
description: 'Combine the smarts of models',
Icon: ChatBeamIcon,
}],
};
+16
View File
@@ -0,0 +1,16 @@
import LanguageIcon from '@mui/icons-material/Language';
import type { ICommandsProvider } from './ICommandsProvider';
export const CommandsBrowse: ICommandsProvider = {
id: 'cmd-ass-browse',
rank: 20,
getCommands: () => [{
primary: '/browse',
arguments: ['URL'],
description: 'Assistant will download the web page',
Icon: LanguageIcon,
}],
};
+6 -2
View File
@@ -1,12 +1,14 @@
import type { ChatCommand, ICommandsProvider } from './ICommandsProvider';
import { ChatCommand, ICommandsProvider } from './ICommandsProvider';
import { CommandsAlter } from './CommandsAlter';
import { CommandsBeam } from './CommandsBeam';
import { CommandsBrowse } from './CommandsBrowse';
import { CommandsDraw } from './CommandsDraw';
import { CommandsHelp } from './CommandsHelp';
import { CommandsReact } from './CommandsReact';
export type CommandsProviderId = 'cmd-ass-t2i' | 'cmd-chat-alter' | 'cmd-help' | 'cmd-mode-react';
export type CommandsProviderId = 'cmd-ass-browse' | 'cmd-ass-t2i' | 'cmd-chat-alter' | 'cmd-help' | 'cmd-mode-beam' | 'cmd-mode-react';
type TextCommandPiece =
| { type: 'nocmd'; value: string; }
@@ -14,9 +16,11 @@ type TextCommandPiece =
const ChatCommandsProviders: Record<CommandsProviderId, ICommandsProvider> = {
'cmd-ass-browse': CommandsBrowse,
'cmd-ass-t2i': CommandsDraw,
'cmd-chat-alter': CommandsAlter,
'cmd-help': CommandsHelp,
'cmd-mode-beam': CommandsBeam,
'cmd-mode-react': CommandsReact,
};
+75 -143
View File
@@ -4,25 +4,21 @@ import { useShallow } from 'zustand/react/shallow';
import type { SxProps } from '@mui/joy/styles/types';
import { Box, List } from '@mui/joy';
import type { SystemPurposeExample } from '../../../data';
import type { DiagramConfig } from '~/modules/aifn/digrams/DiagramsModal';
import type { ConversationHandler } from '~/common/chat-overlay/ConversationHandler';
import { DConversationId, excludeSystemMessages } from '~/common/stores/chat/chat.conversation';
import { ShortcutKey, useGlobalShortcuts } from '~/common/components/shortcuts/useGlobalShortcuts';
import { convertFilesToDAttachmentFragments } from '~/common/attachment-drafts/attachment.pipeline';
import { createDMessageFromFragments, createDMessageTextContent, DMessage, DMessageId, DMessageUserFlag, DMetaReferenceItem, MESSAGE_FLAG_AIX_SKIP } from '~/common/stores/chat/chat.message';
import { createTextContentFragment, DMessageFragment, DMessageFragmentId } from '~/common/stores/chat/chat.fragments';
import { openFileForAttaching } from '~/common/components/ButtonAttachFiles';
import { optimaOpenPreferences } from '~/common/layout/optima/useOptima';
import type { ConversationHandler } from '~/common/chats/ConversationHandler';
import type { DConversationId } from '~/common/stores/chat/chat.conversation';
import type { DMessageFragment, DMessageFragmentId } from '~/common/stores/chat/chat.fragments';
import { InlineError } from '~/common/components/InlineError';
import { PreferencesTab, useOptimaLayout } from '~/common/layout/optima/useOptimaLayout';
import { ShortcutKeyName, useGlobalShortcuts } from '~/common/components/useGlobalShortcuts';
import { createDMessageTextContent, DMessageId, DMessageUserFlag, messageToggleUserFlag } from '~/common/stores/chat/chat.message';
import { getConversation, useChatStore } from '~/common/stores/chat/store-chats';
import { useBrowserTranslationWarning } from '~/common/components/useIsBrowserTranslating';
import { useCapabilityElevenLabs } from '~/common/components/useCapabilities';
import { useChatOverlayStore } from '~/common/chat-overlay/store-perchat_vanilla';
import { useChatStore } from '~/common/stores/chat/store-chats';
import { useEphemerals } from '~/common/chats/EphemeralsStore';
import { useScrollToBottom } from '~/common/scroll-to-bottom/useScrollToBottom';
import { CMLZeroConversation } from './messages-list/CMLZeroConversation';
import { ChatMessage, ChatMessageMemo } from './message/ChatMessage';
import { CleanerMessage, MessagesSelectionHeader } from './message/CleanerMessage';
import { Ephemerals } from './Ephemerals';
@@ -30,8 +26,6 @@ import { PersonaSelector } from './persona-selector/PersonaSelector';
import { useChatAutoSuggestHTMLUI, useChatShowSystemMessages } from '../store-app-chat';
const stableNoMessages: DMessage[] = [];
/**
* A list of ChatMessages
*/
@@ -39,15 +33,12 @@ export function ChatMessageList(props: {
conversationId: DConversationId | null,
conversationHandler: ConversationHandler | null,
capabilityHasT2I: boolean,
chatLLMAntPromptCaching: boolean,
chatLLMContextTokens: number | null,
chatLLMSupportsImages: boolean,
fitScreen: boolean,
isMobile: boolean,
isMessageSelectionMode: boolean,
onConversationBranch: (conversationId: DConversationId, messageId: string, addSplitPane: boolean) => void,
onConversationBranch: (conversationId: DConversationId, messageId: string) => void,
onConversationExecuteHistory: (conversationId: DConversationId) => Promise<void>,
onConversationNew: (forceNoRecycle: boolean, isIncognito: boolean) => void,
onTextDiagram: (diagramConfig: DiagramConfig | null) => void,
onTextImagine: (conversationId: DConversationId, selectedText: string) => Promise<void>,
onTextSpeak: (selectedText: string) => Promise<void>,
@@ -62,65 +53,29 @@ export function ChatMessageList(props: {
// external state
const { notifyBooting } = useScrollToBottom();
const { openPreferencesTab } = useOptimaLayout();
const danger_experimentalHtmlWebUi = useChatAutoSuggestHTMLUI();
const [showSystemMessages] = useChatShowSystemMessages();
const optionalTranslationWarning = useBrowserTranslationWarning();
const { conversationMessages, historyTokenCount } = useChatStore(useShallow(({ conversations }) => {
const conversation = conversations.find(conversation => conversation.id === props.conversationId);
const { conversationMessages, historyTokenCount } = useChatStore(useShallow(state => {
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
return {
conversationMessages: conversation ? conversation.messages : stableNoMessages,
conversationMessages: conversation ? conversation.messages : [],
historyTokenCount: conversation ? conversation.tokenCount : 0,
};
}));
const { _composerInReferenceToCount, ephemerals } = useChatOverlayStore(props.conversationHandler?.conversationOverlayStore ?? null, useShallow(state => ({
_composerInReferenceToCount: state.inReferenceTo?.length ?? 0,
ephemerals: state.ephemerals?.length ? state.ephemerals : null,
})));
const ephemerals = useEphemerals(props.conversationHandler);
const { mayWork: isSpeakable } = useCapabilityElevenLabs();
// derived state
const { conversationHandler, conversationId, capabilityHasT2I, onConversationBranch, onConversationExecuteHistory, onTextDiagram, onTextImagine, onTextSpeak } = props;
const composerCanAddInReferenceTo = _composerInReferenceToCount < 5;
const composerHasInReferenceto = _composerInReferenceToCount > 0;
// text actions
const handleRunExample = React.useCallback(async (example: SystemPurposeExample) => {
if (!conversationId || !conversationHandler) return;
// Simple Example Prompt (User text message)
if (typeof example === 'string') {
conversationHandler.messageAppend(createDMessageTextContent('user', example)); // [chat] append user:persona question
await onConversationExecuteHistory(conversationId);
return;
}
// User-Action Example Prompts (User text message + File attachments)
switch (example.action) {
case 'require-data-attachment':
await openFileForAttaching(true, async (filesWithHandle) => {
// Retrieve fully-fledged Attachment Fragments (converted/extracted, with sources, mimes, etc.) from the selected files
const attachmentFragments = await convertFilesToDAttachmentFragments('file-open', filesWithHandle, {
hintAddImages: props.chatLLMSupportsImages,
});
// Create a User message with the prompt and the attachment fragments
if (attachmentFragments.length) {
conversationHandler.messageAppend(createDMessageFromFragments('user', [ // [chat] append user:persona question + attachment(s)
createTextContentFragment(example.prompt),
...attachmentFragments,
]));
await onConversationExecuteHistory(conversationId);
}
});
break;
}
}, [conversationHandler, conversationId, onConversationExecuteHistory, props.chatLLMSupportsImages]);
const handleMessageContinue = React.useCallback(async (_messageId: DMessageId /* Ignored for now */) => {
const handleRunExample = React.useCallback(async (examplePrompt: string) => {
if (conversationId && conversationHandler) {
conversationHandler.messageAppend(createDMessageTextContent('user', 'Continue')); // [chat] append user:Continue
conversationHandler.messageAppend(createDMessageTextContent('user', examplePrompt)); // [chat] append user:persona question
await onConversationExecuteHistory(conversationId);
}
}, [conversationHandler, conversationId, onConversationExecuteHistory]);
@@ -136,34 +91,31 @@ export function ChatMessageList(props: {
}, [conversationHandler, conversationId, onConversationExecuteHistory]);
const handleMessageBeam = React.useCallback(async (messageId: DMessageId) => {
// Message option menu Beam
if (!conversationId || !props.conversationHandler || !props.conversationHandler.isValid()) return;
const inputHistory = props.conversationHandler.historyViewHeadOrThrow('chat-beam-message');
if (!inputHistory.length) return;
// TODO: replace the Persona and Auto-Cache-hint in the history?
// truncate the history to the given message (may or may not have more after)
const truncatedHistory = inputHistory.slice(0, inputHistory.findIndex(m => m.id === messageId) + 1);
const lastTruncatedMessage = truncatedHistory[truncatedHistory.length - 1];
if (!lastTruncatedMessage) return;
// assistant: do an in-place beam
if (lastTruncatedMessage.role === 'assistant') {
if (truncatedHistory.length >= 2)
props.conversationHandler.beamInvoke(truncatedHistory.slice(0, -1), [lastTruncatedMessage], lastTruncatedMessage.id);
} else if (lastTruncatedMessage.role === 'user') {
// user: truncate and append (but if the next message is an assistant message, import it)
const possibleNextMessage = inputHistory[truncatedHistory.length];
if (possibleNextMessage?.role === 'assistant')
props.conversationHandler.beamInvoke(truncatedHistory, [possibleNextMessage], null);
else
props.conversationHandler.beamInvoke(truncatedHistory, [], null);
// Right-click menu Beam
if (!conversationId || !props.conversationHandler) return;
const messages = getConversation(conversationId)?.messages;
if (messages?.length) {
const truncatedHistory = messages.slice(0, messages.findIndex(m => m.id === messageId) + 1);
const lastMessage = truncatedHistory[truncatedHistory.length - 1];
if (lastMessage) {
// assistant: do an in-place beam
if (lastMessage.role === 'assistant') {
if (truncatedHistory.length >= 2)
props.conversationHandler.beamInvoke(truncatedHistory.slice(0, -1), [lastMessage], lastMessage.id);
} else {
// user: truncate and append (but if the next message is an assistant message, import it)
const nextMessage = messages[truncatedHistory.length];
if (nextMessage?.role === 'assistant')
props.conversationHandler.beamInvoke(truncatedHistory, [nextMessage], null);
else
props.conversationHandler.beamInvoke(truncatedHistory, [], null);
}
}
}
}, [conversationId, props.conversationHandler]);
const handleMessageBranch = React.useCallback((messageId: DMessageId) => {
conversationId && onConversationBranch(conversationId, messageId, true);
conversationId && onConversationBranch(conversationId, messageId);
}, [conversationId, onConversationBranch]);
const handleMessageTruncate = React.useCallback((messageId: DMessageId) => {
@@ -186,16 +138,14 @@ export function ChatMessageList(props: {
props.conversationHandler?.messageFragmentReplace(messageId, fragmentId, newFragment, false);
}, [props.conversationHandler]);
const handleMessageToggleUserFlag = React.useCallback((messageId: DMessageId, userFlag: DMessageUserFlag, _maxPerConversation?: number) => {
props.conversationHandler?.messageToggleUserFlag(messageId, userFlag, true /* touch */);
// Note: we don't support 'maxPerConversation' yet, which is supposed to turn off the flag from the beginning if it's too numerous
// if (_maxPerConversation) {
// ...
// }
const handleMessageToggleUserFlag = React.useCallback((messageId: DMessageId, userFlag: DMessageUserFlag) => {
props.conversationHandler?.messageEdit(messageId, (message) => ({
userFlags: messageToggleUserFlag(message, userFlag),
}), false, false);
}, [props.conversationHandler]);
const handleAddInReferenceTo = React.useCallback((item: DMetaReferenceItem) => {
props.conversationHandler?.overlayActions.addInReferenceTo(item);
const handleReplyTo = React.useCallback((_messageId: DMessageId, text: string) => {
props.conversationHandler?.getOverlayStore().getState().setReplyToText(text);
}, [props.conversationHandler]);
const handleTextDiagram = React.useCallback(async (messageId: DMessageId, text: string) => {
@@ -204,21 +154,21 @@ export function ChatMessageList(props: {
const handleTextImagine = React.useCallback(async (text: string) => {
if (!capabilityHasT2I)
return optimaOpenPreferences('draw');
return openPreferencesTab(PreferencesTab.Draw);
if (conversationId) {
setIsImagining(true);
await onTextImagine(conversationId, text);
setIsImagining(false);
}
}, [capabilityHasT2I, conversationId, onTextImagine]);
}, [capabilityHasT2I, conversationId, onTextImagine, openPreferencesTab]);
const handleTextSpeak = React.useCallback(async (text: string) => {
if (!isSpeakable)
return optimaOpenPreferences('voice');
return openPreferencesTab(PreferencesTab.Voice);
setIsSpeaking(true);
await onTextSpeak(text);
setIsSpeaking(false);
}, [isSpeakable, onTextSpeak]);
}, [isSpeakable, onTextSpeak, openPreferencesTab]);
// operate on the local selection set
@@ -242,17 +192,9 @@ export function ChatMessageList(props: {
setSelectedMessages(new Set());
}, [props.conversationHandler, selectedMessages]);
const handleSelectionHide = React.useCallback(() => {
for (let selectedMessage of Array.from(selectedMessages))
props.conversationHandler?.messageSetUserFlag(selectedMessage, MESSAGE_FLAG_AIX_SKIP, true, true);
setSelectedMessages(new Set());
}, [props.conversationHandler, selectedMessages]);
const { isMessageSelectionMode, setIsMessageSelectionMode } = props;
useGlobalShortcuts('ChatMessageList_Selection', React.useMemo(() => !isMessageSelectionMode ? [] : [
{ key: ShortcutKey.Esc, action: () => setIsMessageSelectionMode(false), description: 'Close Cleanup', level: 10 - 1 },
], [isMessageSelectionMode, setIsMessageSelectionMode]));
useGlobalShortcuts([[props.isMessageSelectionMode && ShortcutKeyName.Esc, false, false, false, () => {
props.setIsMessageSelectionMode(false);
}]]);
// text-diff functionality: only diff the last complete message, and they're similar in size
@@ -277,39 +219,33 @@ export function ChatMessageList(props: {
}, [conversationId, notifyBooting]);
// style memo
const listSx: SxProps = React.useMemo(() => ({
p: 0,
...props.sx,
// fix for the double-border on the last message (one by the composer, one to the bottom of the message)
// marginBottom: '-1px',
// layout
display: 'flex',
flexDirection: 'column',
}), [props.sx]);
// no conversation: sine qua non
if (!conversationId)
return <CMLZeroConversation onConversationNew={props.onConversationNew} />;
// no content: show the persona selector
const filteredMessages = excludeSystemMessages(conversationMessages, showSystemMessages);
const filteredMessages = conversationMessages
.filter(m => m.role !== 'system' || showSystemMessages); // hide the System message if the user choses to
if (!filteredMessages.length)
return (
<Box sx={{ ...props.sx }}>
<PersonaSelector conversationId={conversationId} isMobile={props.isMobile} runExample={handleRunExample} />
{conversationId
? <PersonaSelector conversationId={conversationId} runExample={handleRunExample} />
: <InlineError severity='info' error='Select a conversation' sx={{ m: 2 }} />}
</Box>
);
return (
<List role='chat-messages-list' sx={listSx}>
<List role='chat-messages-list' sx={{
p: 0,
...(props.sx || {}),
// fix for the double-border on the last message (one by the composer, one to the bottom of the message)
// marginBottom: '-1px',
// layout
display: 'flex',
flexDirection: 'column',
}}>
{optionalTranslationWarning}
@@ -320,11 +256,10 @@ export function ChatMessageList(props: {
onClose={() => props.setIsMessageSelectionMode(false)}
onSelectAll={handleSelectAll}
onDeleteMessages={handleSelectionDelete}
onHideMessages={handleSelectionHide}
/>
)}
{filteredMessages.map((message, idx) => {
{filteredMessages.map((message, idx, { length: count }) => {
// Optimization: only memo complete components, or we'd be memoizing garbage
const ChatMessageMemoOrNot = !message.pendingIncomplete ? ChatMessageMemo : ChatMessage;
@@ -345,24 +280,21 @@ export function ChatMessageList(props: {
message={message}
// diffPreviousText={message === diffTargetMessage ? diffPrevText : undefined}
fitScreen={props.fitScreen}
hasInReferenceTo={composerHasInReferenceto}
isMobile={props.isMobile}
isBottom={idx === filteredMessages.length - 1}
isBottom={idx === count - 1}
isImagining={isImagining}
isSpeaking={isSpeaking}
showAntPromptCaching={props.chatLLMAntPromptCaching}
showUnsafeHtmlCode={danger_experimentalHtmlWebUi}
onAddInReferenceTo={!composerCanAddInReferenceTo ? undefined : handleAddInReferenceTo}
showUnsafeHtml={danger_experimentalHtmlWebUi}
onMessageAssistantFrom={handleMessageAssistantFrom}
onMessageBeam={handleMessageBeam}
onMessageBranch={handleMessageBranch}
onMessageContinue={handleMessageContinue}
onMessageDelete={handleMessageDelete}
onMessageFragmentAppend={handleMessageAppendFragment}
onMessageFragmentDelete={handleMessageDeleteFragment}
onMessageFragmentReplace={handleMessageReplaceFragment}
onMessageToggleUserFlag={handleMessageToggleUserFlag}
onMessageTruncate={handleMessageTruncate}
onReplyTo={handleReplyTo}
onTextDiagram={handleTextDiagram}
onTextImagine={capabilityHasT2I ? handleTextImagine : undefined}
onTextSpeak={isSpeakable ? handleTextSpeak : undefined}
@@ -372,14 +304,14 @@ export function ChatMessageList(props: {
},
)}
{/* Render ephemerals (sidebar ReAct output widgets) at the bottom */}
{!!ephemerals?.length && !!conversationHandler && (
{!!ephemerals.length && (
<Ephemerals
ephemerals={ephemerals}
conversationHandler={conversationHandler}
conversationId={props.conversationId}
sx={{
mt: 'auto',
overflowY: 'auto',
minHeight: 64,
}}
/>
)}
+78 -159
View File
@@ -1,22 +1,14 @@
import * as React from 'react';
import type { SxProps } from '@mui/joy/styles/types';
import { Box, Grid, IconButton, Sheet, styled, Typography } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import CloseRoundedIcon from '@mui/icons-material/CloseRounded';
import MaximizeIcon from '@mui/icons-material/Maximize';
import MinimizeIcon from '@mui/icons-material/Minimize';
import VerticalSplitIcon from '@mui/icons-material/VerticalSplit';
import VerticalSplitOutlinedIcon from '@mui/icons-material/VerticalSplitOutlined';
import { ScaledTextBlockRenderer } from '~/modules/blocks/ScaledTextBlockRenderer';
import type { DConversationId } from '~/common/stores/chat/chat.conversation';
import type { DEphemeral } from '~/common/chats/EphemeralsStore';
import { ConversationsManager } from '~/common/chats/ConversationsManager';
import { lineHeightChatTextMd } from '~/common/app.theme';
import type { DEphemeral } from '~/common/chat-overlay/store-perchat-ephemerals_slice';
import { ConversationHandler } from '~/common/chat-overlay/ConversationHandler';
import { adjustContentScaling, ContentScaling, lineHeightChatTextMd } from '~/common/app.theme';
import { useUIPreferencesStore } from '~/common/state/store-ui';
// State Pane
const StateLine = styled(Typography)(({ theme }) => ({
textOverflow: 'ellipsis',
@@ -54,7 +46,8 @@ function ObjectRenderer({ name }: { name: string }) {
return <StateLine><b>{name}</b>: <i>object not displayed</i></StateLine>;
}
function StateRenderer(props: { state: object, contentScaling: ContentScaling }) {
function StateRenderer(props: { state: object }) {
if (typeof props.state !== 'object')
return <pre>Developer Warning: state is not an object: {JSON.stringify(props.state, null, 2)}</pre>;
@@ -62,17 +55,10 @@ function StateRenderer(props: { state: object, contentScaling: ContentScaling })
return (
<Box>
<ScaledTextBlockRenderer
text='**Internal State**'
contentScaling={props.contentScaling}
textRenderVariant='markdown'
/>
<Box sx={{
mt: 1,
p: 1,
borderRadius: 'md',
background: 'linear-gradient(180deg, var(--joy-palette-success-softHoverBg), transparent)',
}}>
<Typography fontSize='smaller' sx={{ mb: 1 }}>
## Internal State
</Typography>
<Sheet sx={{ p: 1 }}>
{!entries && <Typography level='body-sm'>No state variables</Typography>}
{entries.map(([key, value]) =>
isPrimitive(value)
@@ -83,164 +69,97 @@ function StateRenderer(props: { state: object, contentScaling: ContentScaling })
? <ObjectRenderer key={'state-' + key} name={key} />
: <Typography key={'state-' + key} level='body-sm'>{key}: {value}</Typography>,
)}
</Box>
</Sheet>
</Box>
);
}
const leftPaneSx = {
// <pre> looks
overflowWrap: 'anywhere',
whiteSpace: 'break-spaces',
// 'undo' some of the github-markdown CSS customizations
'.markdown-body': { mx: '0!important' },
'.markdown-body p': { mb: 0 },
};
function EphemeralItem({ conversationId, ephemeral }: { conversationId: string, ephemeral: DEphemeral }) {
const rightPaneSx = {
borderLeft: { md: `1px dashed` },
borderTop: { xs: `1px dashed`, md: 'none' },
};
function EphemeralItem(props: {
ephemeral: DEphemeral,
conversationHandler: ConversationHandler,
contentScaling: ContentScaling,
}) {
const { ephemeral, conversationHandler } = props;
// Event handlers
const handleDelete = React.useCallback(() => {
conversationHandler.overlayActions.ephemeralsDelete(ephemeral.id);
}, [conversationHandler, ephemeral.id]);
ConversationsManager.getHandler(conversationId).ephemeralsStore.delete(ephemeral.id);
}, [conversationId, ephemeral.id]);
const handleToggleMinimized = React.useCallback(() => {
conversationHandler.overlayActions.ephemeralsToggleMinimized(ephemeral.id);
}, [conversationHandler, ephemeral.id]);
const handleToggleShowState = React.useCallback(() => {
conversationHandler.overlayActions.ephemeralsToggleShowStatePane(ephemeral.id);
}, [conversationHandler, ephemeral.id]);
const showStatePane = ephemeral.showStatePane && !!ephemeral.state;
return (
<Box sx={{
borderTop: '1px solid',
borderTopColor: 'divider',
return <Box
sx={{
p: { xs: 1, md: 2 },
position: 'relative',
// border: (i < ephemerals.length - 1) ? `2px solid ${theme.palette.divider}` : undefined,
display: 'flex',
flexDirection: 'column',
'&:hover > button': { opacity: 1 },
}}>
{/* Top Line - Title and Buttons */}
<Box sx={{
py: 1,
px: { xs: 1, md: 2 },
backgroundColor: 'success.softHoverBg',
display: 'flex',
gap: 1,
alignItems: 'center'
}}>
{/* Title */}
{ephemeral.title && <Typography level='title-sm' sx={{ mb: 1.5 }}>
{ephemeral.title} Development Tools
</Typography>}
<Typography level='title-sm' sx={{ flex: 1, color: 'success.solidBg' }}>
{ephemeral.title} Internal Monologue
{/* Vertical | split */}
<Grid container spacing={2}>
{/* Left pane (console) */}
<Grid xs={12} md={ephemeral.state ? 6 : 12}>
<Typography fontSize='smaller' sx={{ overflowWrap: 'anywhere', whiteSpace: 'break-spaces', lineHeight: lineHeightChatTextMd }}>
{ephemeral.text}
</Typography>
</Grid>
{/* Show State */}
{!ephemeral.minimized && (
<IconButton
size='sm'
variant={ephemeral.showStatePane ? 'solid' : 'outlined'}
onClick={handleToggleShowState}
>
{ephemeral.showStatePane ? <VerticalSplitIcon /> : <VerticalSplitOutlinedIcon />}
</IconButton>
)}
{/* Right pane (state) */}
{!!ephemeral.state && <Grid
xs={12} md={6}
sx={{
borderLeft: { md: `1px dashed` },
borderTop: { xs: `1px dashed`, md: 'none' },
}}>
<StateRenderer state={ephemeral.state} />
</Grid>}
</Grid>
{/* Minimize/Expand Button */}
<IconButton
size='sm'
variant={'outlined'}
onClick={handleToggleMinimized}
>
{ephemeral.minimized ? <MaximizeIcon /> : <MinimizeIcon />}
</IconButton>
{/* Close */}
<IconButton
size='sm'
variant={ephemeral.done ? 'solid' : 'outlined'}
onClick={handleDelete}
>
<CloseRoundedIcon />
</IconButton>
</Box>
{/* Content */}
{!ephemeral.minimized && <Box sx={{
py: 1,
px: { xs: 1, md: 2 },
{/* Close button (right of title) */}
<IconButton
size='sm'
onClick={handleDelete}
sx={{
position: 'absolute', top: 8, right: 8,
opacity: { xs: 1, sm: 0.5 }, transition: 'opacity 0.3s',
}}>
<CloseRoundedIcon />
</IconButton>
{/* Content Grid */}
<Grid container spacing={2} sx={{ mt: 0.5 }}>
{/* Left pane (log) */}
<Grid xs={12} md={showStatePane ? 6 : 12}>
{/* New renderer, with */}
<Box sx={leftPaneSx}>
<ScaledTextBlockRenderer
text={ephemeral.text}
contentScaling={props.contentScaling}
textRenderVariant='markdown'
/>
</Box>
</Grid>
{/* Right pane (state) */}
{showStatePane && (
<Grid xs={12} md={6} sx={rightPaneSx}>
<StateRenderer
state={ephemeral.state}
contentScaling={props.contentScaling}
/>
</Grid>
)}
</Grid>
</Box>}
</Box>
);
</Box>;
}
// const dashedBorderSVG = encodeURIComponent(`
// <svg xmlns='http://www.w3.org/2000/svg' width='100%' height='100%'>
// <rect x='0' y='0' width='100%' height='100%' fill='none' stroke='currentColor' stroke-width='2' stroke-dasharray='16, 2' />
// </svg>
// `);
export function Ephemerals(props: {
ephemerals: DEphemeral[],
conversationHandler: ConversationHandler,
sx?: SxProps
}) {
// external state
const adjContentScaling = useUIPreferencesStore(state => adjustContentScaling(state.contentScaling, -1));
export function Ephemerals(props: { ephemerals: DEphemeral[], conversationId: DConversationId | null, sx?: SxProps }) {
// global state
// const ephemerals = useChatStore(state => {
// const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
// return conversation ? conversation.ephemerals : [];
// }, shallow);
const ephemerals = props.ephemerals;
// if (!ephemerals?.length) return null;
return (
<Sheet variant='soft' color='success' invertedColors sx={props.sx}>
<Sheet
variant='soft' color='success' invertedColors
sx={{
borderTop: '1px solid',
borderTopColor: 'divider',
// backgroundImage: `url("data:image/svg+xml,${dashedBorderSVG.replace('currentColor', '%23A1E8A1')}")`,
// backgroundSize: '100% 100%',
// backgroundRepeat: 'no-repeat',
...(props.sx || {}),
}}>
{props.ephemerals.map((ephemeral, i) => (
<EphemeralItem
key={ephemeral.id}
ephemeral={ephemeral}
conversationHandler={props.conversationHandler}
contentScaling={adjContentScaling}
/>
))}
{ephemerals.map((ephemeral, i) =>
props.conversationId && <EphemeralItem key={`ephemeral-${i}`} conversationId={props.conversationId} ephemeral={ephemeral} />)}
</Sheet>
);
-228
View File
@@ -1,228 +0,0 @@
import * as React from 'react';
import { useShallow } from 'zustand/react/shallow';
import type { SxProps } from '@mui/joy/styles/types';
import { Box, IconButton, styled, Typography } from '@mui/joy';
import CloseRoundedIcon from '@mui/icons-material/CloseRounded';
import ExpandLessIcon from '@mui/icons-material/ExpandLess';
import MinimizeIcon from '@mui/icons-material/Minimize';
// import { isMacUser } from '~/common/util/pwaUtils';
import type { ShortcutObject } from '~/common/components/shortcuts/useGlobalShortcuts';
import { ConfirmationModal } from '~/common/components/modals/ConfirmationModal';
import { GoodTooltip } from '~/common/components/GoodTooltip';
import { useGlobalShortcutsStore } from '~/common/components/shortcuts/store-global-shortcuts';
import { useOverlayComponents } from '~/common/layout/overlays/useOverlayComponents';
import { useUXLabsStore } from '~/common/state/store-ux-labs';
// configuration
const COMPOSER_ENABLE_MINIMIZE = false;
const hideButtonTooltip = (
<Box sx={{ px: 1, py: 0.75, lineHeight: '1.5rem' }}>
Hide Shortcuts<br />
Enable again in Settings &gt; Labs
</Box>
);
const hideButtonSx: SxProps = {
'--IconButton-size': '28px',
'--Icon-fontSize': '16px',
'--Icon-color': 'var(--joy-palette-text-tertiary)',
mr: -0.5,
};
// const animateAppear = keyframes`
// from {
// opacity: 0;
// transform: translateY(10px);
// }
// to {
// opacity: 1;
// transform: translateY(0);
// }
// `;
const StatusBarContainer = styled(Box)({
borderBottom: '1px solid',
// borderBottomColor: 'var(--joy-palette-divider)',
borderBottomColor: 'rgba(var(--joy-palette-neutral-mainChannel) / 0.1)',
// borderTopColor: 'rgba(var(--joy-palette-neutral-mainChannel, 99 107 116) / 0.4)',
// backgroundColor: 'var(--joy-palette-background-surface)',
// paddingBlock: '0.25rem',
paddingInline: '0.5rem',
// layout
display: 'flex',
flexFlow: 'row nowrap',
columnGap: '1.5rem', // space between shortcuts
lineHeight: '1em',
// animation: `${animateAppear} 0.3s ease-out`,
// transition: 'all 0.2s ease',
// '&:hover': {
// backgroundColor: 'var(--joy-palette-background-level1)',
// },
});
const ShortcutContainer = styled(Box)({
display: 'flex',
alignItems: 'center',
whiteSpace: 'nowrap',
gap: '2px', // space between modifiers
marginBlock: '0.25rem',
// transition: 'transform 0.2s ease',
// '&:hover': {
// transform: 'scale(1.05)',
// },
'&:hover > div': {
backgroundColor: 'var(--joy-palette-background-level1)',
},
cursor: 'pointer',
[`&[aria-disabled="true"]`]: {
opacity: 0.5,
pointerEvents: 'none',
}
});
const ShortcutKey = styled(Box)({
fontSize: 'var(--joy-fontSize-xs)',
fontWeight: 'var(--joy-fontWeight-md)',
border: '1px solid',
borderColor: 'var(--joy-palette-neutral-outlinedBorder)',
borderRadius: 'var(--joy-radius-xs)',
// backgroundColor: 'var(--joy-palette-neutral-outlinedBorder)',
backgroundColor: 'var(--joy-palette-background-popup)',
// boxShadow: 'inset 2px 0px 4px -2px var(--joy-palette-background-backdrop)',
boxShadow: 'var(--joy-shadow-xs)',
// minWidth: '1rem',
paddingBlock: '1px',
paddingInline: '4px',
// pointerEvents: 'none',
cursor: 'pointer',
transition: 'background-color 1s ease',
});
// Display mac-style shortcuts on windows as well
const displayMacModifiers = true;
function _platformAwareModifier(symbol: 'Ctrl' | 'Alt' | 'Shift') {
switch (symbol) {
case 'Ctrl':
return displayMacModifiers ? '⌃' : 'Ctrl';
case 'Shift':
return displayMacModifiers ? '⇧' : '⇧';
case 'Alt':
return displayMacModifiers ? '⌥' /* Option */ : 'Alt';
}
}
function ShortcutItem(props: { shortcut: ShortcutObject }) {
const handleClicked = React.useCallback(() => {
if (props.shortcut.action !== '_specialPrintShortcuts')
props.shortcut.action();
}, [props.shortcut]);
return (
<ShortcutContainer onClick={!props.shortcut.disabled ? handleClicked : undefined} aria-disabled={props.shortcut.disabled}>
{!!props.shortcut.ctrl && <ShortcutKey>{_platformAwareModifier('Ctrl')}</ShortcutKey>}
{!!props.shortcut.shift && <ShortcutKey>{_platformAwareModifier('Shift')}</ShortcutKey>}
{/*{!!props.shortcut.altForNonMac && <ShortcutKey onClick={handleClicked}>{_platformAwareModifier('Alt')}</ShortcutKey>}*/}
<ShortcutKey>{props.shortcut.key === 'Escape' ? 'Esc' : props.shortcut.key === 'Enter' ? '↵' : props.shortcut.key.toUpperCase()}</ShortcutKey>
&nbsp;<Typography level='body-xs'>{props.shortcut.description}</Typography>
{props.shortcut.endDecoratorIcon && <props.shortcut.endDecoratorIcon sx={{ fontSize: 'md' }} />}
</ShortcutContainer>
);
}
export function StatusBar(props: { toggleMinimized?: () => void, isMinimized?: boolean }) {
// state (modifiers pressed/not)
const { showPromisedOverlay } = useOverlayComponents();
// const [ctrlPressed, setCtrlPressed] = React.useState(false);
// const [shiftPressed, setShiftPressed] = React.useState(false);
// external state
const labsShowShortcutBar = useUXLabsStore(state => state.labsShowShortcutBar);
const shortcuts = useGlobalShortcutsStore(useShallow(state => {
let visibleShortcuts = !labsShowShortcutBar ? [] : state.getAllShortcuts().filter(shortcut => !!shortcut.description);
const maxLevel = Math.max(...visibleShortcuts.map(s => s.level ?? 0));
if (maxLevel > 0)
visibleShortcuts = visibleShortcuts.filter(s => s.level === maxLevel);
visibleShortcuts.sort((a, b) => {
// if they don't have a 'shift', they are sorted first
if (a.shift !== b.shift)
return a.shift ? 1 : -1;
// (Hack) If the description is 'Beam', it goes last
if (a.description === 'Beam Edit')
return 1;
// alphabetical for the rest
return a.key.localeCompare(b.key);
});
return visibleShortcuts;
}));
// handlers
const handleHideShortcuts = React.useCallback((event: React.MouseEvent) => {
if (event.shiftKey) {
console.log('shortcutGroups', useGlobalShortcutsStore.getState().shortcutGroups);
return;
}
showPromisedOverlay('shortcuts-confirm-close', {}, ({ onResolve, onUserReject }) =>
<ConfirmationModal
open onClose={onUserReject} onPositive={() => onResolve(true)}
confirmationText='Remove productivity tips and shortcuts? You can add it back in Settings > Labs.'
positiveActionText='Remove'
/>,
).then(() => useUXLabsStore.getState().setLabsShowShortcutBar(false)).catch(() => null /* ignore closure */);
}, [showPromisedOverlay]);
// React to modifiers
// React.useEffect(() => {
// const handleKeyDown = (e: KeyboardEvent) => {
// if (e.key === 'Control') setCtrlPressed(true);
// if (e.key === 'Shift') setShiftPressed(true);
// };
// const handleKeyUp = (e: KeyboardEvent) => {
// if (e.key === 'Control') setCtrlPressed(false);
// if (e.key === 'Shift') setShiftPressed(false);
// };
// window.addEventListener('keydown', handleKeyDown);
// window.addEventListener('keyup', handleKeyUp);
// return () => {
// window.removeEventListener('keydown', handleKeyDown);
// window.removeEventListener('keyup', handleKeyUp);
// };
// }, []);
if (!labsShowShortcutBar)
return null;
return (
<StatusBarContainer aria-label='Status bar'>
{(!props.toggleMinimized || !COMPOSER_ENABLE_MINIMIZE) && !props.isMinimized ? (
// Close Button
<GoodTooltip variantOutlined arrow placement='top' title={hideButtonTooltip}>
<IconButton size='sm' sx={hideButtonSx} onClick={handleHideShortcuts}>
<CloseRoundedIcon />
</IconButton>
</GoodTooltip>
) : (
// Minimize / Maximize Button - note the Maximize icon would be more correct, but also less discoverable
<IconButton size='sm' sx={hideButtonSx} onClick={props.toggleMinimized}>
{props.isMinimized ? <ExpandLessIcon /> : <MinimizeIcon />}
</IconButton>
)}
{/* Show all shortcuts */}
{shortcuts.map((shortcut, idx) => (
<ShortcutItem key={shortcut.key + idx} shortcut={shortcut} />
))}
</StatusBarContainer>
);
}
@@ -1,71 +1,25 @@
import * as React from 'react';
import type { SxProps } from '@mui/joy/styles/types';
import { Box, Button, ButtonGroup, IconButton, Modal, ModalClose, Option, Select, Sheet, Tooltip, Typography } from '@mui/joy';
import AddRoundedIcon from '@mui/icons-material/AddRounded';
import CameraEnhanceIcon from '@mui/icons-material/CameraEnhance';
import CameraFrontIcon from '@mui/icons-material/CameraFront';
import CameraRearIcon from '@mui/icons-material/CameraRear';
import { Box, Button, IconButton, Modal, ModalClose, Option, Select, Sheet, Typography } from '@mui/joy';
import CameraAltIcon from '@mui/icons-material/CameraAlt';
import DownloadIcon from '@mui/icons-material/Download';
import FlipCameraAndroidOutlinedIcon from '@mui/icons-material/FlipCameraAndroidOutlined';
import InfoOutlinedIcon from '@mui/icons-material/InfoOutlined';
import InfoIcon from '@mui/icons-material/Info';
import KeyboardArrowDownIcon from '@mui/icons-material/KeyboardArrowDown';
import { InlineError } from '~/common/components/InlineError';
import { Is } from '~/common/util/pwaUtils';
import { animationBackgroundCameraFlash } from '~/common/util/animUtils';
import { downloadVideoFrame, renderVideoFrameAsFile } from '~/common/util/videoUtils';
import { downloadVideoFrameAsPNG, renderVideoFrameAsPNGFile } from '~/common/util/videoUtils';
import { useCameraCapture } from '~/common/components/useCameraCapture';
// configuration
const DEBUG_NO_CAMERA_OPTION = false;
const FLASH_DURATION_MS = 600;
const ADD_COOLDOWN_MS = 300;
const captureButtonContainerSx: SxProps = {
display: 'flex',
gap: 1,
justifyContent: 'space-between',
alignItems: 'center',
};
const captureButtonGroupSx: SxProps = {
'--ButtonGroup-separatorColor': 'none !important',
// '--ButtonGroup-separatorSize': '2px',
borderRadius: '3rem',
// boxShadow: 'md',
boxShadow: '0 8px 12px -6px rgb(var(--joy-palette-neutral-darkChannel) / 50%)',
};
const captureButtonSx: SxProps = {
backgroundColor: 'neutral.solidHoverBg',
pl: 3.25,
pr: 4.5,
py: 1.5,
minWidth: { md: 200 },
'&:hover': {
backgroundColor: 'neutral.plainHoverColor',
},
};
const addButtonSx: SxProps = {
pl: 2.5,
pr: 2,
};
export function CameraCaptureModal(props: {
onCloseModal: () => void;
onAttachImage: (file: File) => void;
onCloseModal: () => void,
onAttachImage: (file: File) => void
// onOCR: (ocrText: string) => void }
}) {
// state
const [showInfo, setShowInfo] = React.useState(false);
const [isFlashing, setIsFlashing] = React.useState(false); // For flash effect
const [isAddButtonDisabled, setIsAddButtonDisabled] = React.useState(false); // Cooldown state
// const [ocrProgress/*, setOCRProgress*/] = React.useState<number | null>(null);
// external state
const {
@@ -85,28 +39,29 @@ export function CameraCaptureModal(props: {
onCloseModal();
}, [onCloseModal, resetVideo]);
/*const handleVideoOCRClicked = async () => {
if (!videoRef.current) return;
const renderedFrame = renderVideoFrameToCanvas(videoRef.current);
const handleFlashEffect = React.useCallback((cooldownMs: number) => {
// Flash effect
setIsFlashing(true);
setTimeout(() => {
setIsFlashing(false);
}, FLASH_DURATION_MS); // Flash duration in milliseconds
// Cooldown
if (cooldownMs) {
setIsAddButtonDisabled(true);
setTimeout(() => {
setIsAddButtonDisabled(false);
}, cooldownMs);
}
}, []);
setOCRProgress(0);
const { recognize } = await import('tesseract.js');
const result = await recognize(renderedFrame, undefined, {
logger: m => {
// noinspection SuspiciousTypeOfGuard
if (typeof m.progress === 'number')
setOCRProgress(m.progress);
},
errorHandler: e => console.error(e),
});
setOCRProgress(null);
stopAndClose();
props.onOCR(result.data.text);
};*/
const handleVideoSnapClicked = React.useCallback(async () => {
if (!videoRef.current) return;
try {
// handleFlashEffect(0); // Trigger flash
const file = await renderVideoFrameAsFile(videoRef.current, 'camera', 'image/jpeg', 0.95);
const file = await renderVideoFrameAsPNGFile(videoRef.current, 'camera');
onAttachImage(file);
stopAndClose();
} catch (error) {
@@ -114,250 +69,96 @@ export function CameraCaptureModal(props: {
}
}, [onAttachImage, stopAndClose, videoRef]);
const handleVideoAddClicked = React.useCallback(async () => {
const handleVideoDownloadClicked = React.useCallback(() => {
if (!videoRef.current) return;
try {
handleFlashEffect(ADD_COOLDOWN_MS); // Trigger flash and cooldown
const file = await renderVideoFrameAsFile(videoRef.current, 'camera', 'image/jpeg', 0.95);
onAttachImage(file);
} catch (error) {
console.error('Error capturing video frame:', error);
}
}, [handleFlashEffect, onAttachImage, videoRef]);
const handleVideoDownloadClicked = React.useCallback(async () => {
if (!videoRef.current) return;
await downloadVideoFrame(videoRef.current, 'camera', 'image/jpeg', 0.98);
downloadVideoFrameAsPNG(videoRef.current, 'camera');
}, [videoRef]);
// Reduced set of cameras
const displayCameras = React.useMemo(() => {
// iOS/English: "Front Camera", "Back Camera"
if (Is.OS.iOS) {
let reducedCameras = cameras.filter((device) => ['Front Camera', 'Back Camera'].includes(device.label));
if (reducedCameras.length > 0)
return reducedCameras;
}
return cameras;
}, [cameras]);
const { canSwitchCameras, isFrontCamera, isBackCamera } = React.useMemo(() => {
// determine if the current device is a front or back camera
let isFrontCamera = false;
let isBackCamera = false;
if (cameraIdx !== -1) {
const currentDevice = displayCameras[cameraIdx];
if (currentDevice) {
isFrontCamera = currentDevice.label.includes('Front Camera') || currentDevice.label.toLowerCase().includes('front');
isBackCamera = currentDevice.label.includes('Back Camera') || currentDevice.label.toLowerCase().includes('back');
}
}
// quick out if we only have 1 or 0 cameras
if (displayCameras.length <= 1)
return { canSwitchCameras: false, isFrontCamera, isBackCamera };
// use a reduction to find both the front and back cameras
const foundCameras = displayCameras.reduce((acc, device) => {
if (acc.front && acc.back) return acc;
if (device.label.includes('Front Camera')) acc.front = true;
else if (device.label.toLowerCase().includes('front')) acc.front = true;
if (device.label.includes('Back Camera')) acc.back = true;
else if (device.label.toLowerCase().includes('back')) acc.back = true;
return acc;
}, { front: false, back: false });
return { canSwitchCameras: (foundCameras.front && foundCameras.back) || displayCameras.length === 2, isFrontCamera, isBackCamera };
}, [cameraIdx, displayCameras]);
const handleCameraSwitch = React.useCallback(() => {
// safety checks: has multiple cameras, and current camera is valid
if (displayCameras.length <= 1 || cameraIdx === -1) return;
const currentCamera = displayCameras[cameraIdx] || undefined;
if (!currentCamera) return;
// finds the camera to switch to
let nextIdx: number | undefined = undefined;
// iOS
if (currentCamera.label.includes('Front Camera'))
nextIdx = displayCameras.findIndex((device) => device.label.includes('Back Camera'));
else if (currentCamera.label.includes('Back Camera'))
nextIdx = displayCameras.findIndex((device) => device.label.includes('Front Camera'));
// Android
if (nextIdx === undefined && currentCamera.label.includes('facing front'))
nextIdx = displayCameras.map((device) => device.label).findLastIndex((label) => label.includes('facing back'));
else if (nextIdx === undefined && currentCamera.label.includes('facing back'))
nextIdx = displayCameras.map((device) => device.label).findLastIndex((label) => label.includes('facing front'));
// Generic: if we have 2 cameras, flip to the other one
if (nextIdx === undefined && displayCameras.length === 2)
nextIdx = cameraIdx === 0 ? 1 : 0;
// if we found a valid camera, switch to it
if (nextIdx !== undefined && nextIdx !== -1)
setCameraIdx(nextIdx);
}, [cameraIdx, displayCameras, setCameraIdx]);
return (
<Modal
open
onClose={stopAndClose}
sx={{
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
}}
slotProps={{
backdrop: {
sx: {
backdropFilter: 'none', // using none because this is heavy
// backdropFilter: 'blur(4px)',
// backgroundColor: 'rgba(11 13 14 / 0.75)',
backgroundColor: 'rgba(var(--joy-palette-neutral-darkChannel) / 0.5)',
},
},
}}
>
<Modal open onClose={stopAndClose} sx={{ display: 'flex', alignItems: 'center', justifyContent: 'center' }}>
<Box sx={{
display: 'flex', flexDirection: 'column', m: 1,
borderRadius: 'md', overflow: 'hidden',
boxShadow: 'lg',
boxShadow: 'sm',
}}>
{/* Top bar */}
<Sheet variant='solid' invertedColors={true} sx={{
p: 1,
backgroundColor: 'neutral.800',
display: 'flex',
justifyContent: 'space-between',
}}>
<Box sx={{ display: 'flex', alignItems: 'center', gap: 1 }}>
<Select
size='sm'
variant={displayCameras.length > 1 ? 'soft' : 'plain'}
color='neutral'
value={cameraIdx} onChange={(_event: any, value: number | null) => setCameraIdx(value === null ? -1 : value)}
indicator={<KeyboardArrowDownIcon />}
sx={{ background: 'transparent' }}
slotProps={{ listbox: { size: 'md' } }}
>
{(!displayCameras.length || DEBUG_NO_CAMERA_OPTION) && (
<Option key='video-dev-none' value={-1}>
No Camera
</Option>
)}
{displayCameras.map((device: MediaDeviceInfo, camIndex) => (
<Option key={'video-dev-' + camIndex} value={camIndex}>
{/*{device.label?.includes('Face') ? <CameraFrontIcon />*/}
{/* : device.label?.includes('tual') ? <CameraRearIcon />*/}
{/* : null}*/}
{device.label
?.replace('camera2 ', 'Camera ')
.replace('facing front', 'Front')
.replace('facing back', 'Back')}
</Option>
))}
</Select>
<Sheet variant='solid' invertedColors sx={{ display: 'flex', justifyContent: 'space-between', p: 1 }}>
<Select
variant='solid' color='neutral'
value={cameraIdx} onChange={(_event: any, value: number | null) => setCameraIdx(value === null ? -1 : value)}
indicator={<KeyboardArrowDownIcon />}
>
<Option value={-1}>
No Camera
</Option>
{cameras.map((device: MediaDeviceInfo, camIndex) => (
<Option key={'video-dev-' + camIndex} value={camIndex}>
{device.label}
</Option>
))}
</Select>
{canSwitchCameras && (
<IconButton size='sm' onClick={handleCameraSwitch}>
{isFrontCamera ? <CameraRearIcon /> : isBackCamera ? <CameraFrontIcon /> : <FlipCameraAndroidOutlinedIcon />}
</IconButton>
)}
</Box>
<ModalClose size='lg' onClick={stopAndClose} sx={{ position: 'static' }} />
<ModalClose onClick={stopAndClose} sx={{ position: 'static' }} />
</Sheet>
{/* (main) Video */}
<Box sx={{ position: 'relative', backgroundColor: 'background.level3' }}>
<Box sx={{ position: 'relative' }}>
<video
ref={videoRef} autoPlay playsInline
style={{
display: 'block',
width: !Is.Browser.Safari ? '100%' : undefined,
marginLeft: 'auto', marginRight: 'auto',
maxHeight: 'calc(100vh - 200px)',
display: 'block', width: '100%', maxHeight: 'calc(100vh - 200px)',
background: '#8888', //opacity: ocrProgress !== null ? 0.5 : 1,
}}
/>
{/* Flash overlay */}
{isFlashing && (
<Box
sx={{
position: 'absolute', inset: 0, zIndex: 2,
animation: `${animationBackgroundCameraFlash} ${FLASH_DURATION_MS / 1000}s`,
}}
/>
)}
{showInfo && !!info && (
<Typography
sx={{
position: 'absolute', inset: 0, zIndex: 1, /* camera info on top of video */
background: 'rgba(0,0,0,0.5)', color: 'white',
whiteSpace: 'pre', overflowY: 'scroll',
}}>
{info}
</Typography>
)}
{showInfo && !!info && <Typography
sx={{
position: 'absolute', inset: 0, zIndex: 1, /* camera info on top of video */
background: 'rgba(0,0,0,0.5)', color: 'white',
whiteSpace: 'pre', overflowY: 'scroll',
}}>
{info}
</Typography>}
{/*{ocrProgress !== null && <CircularProgress sx={{ position: 'absolute', top: 'calc(50% - 34px / 2)', left: 'calc(50% - 34px / 2)', zIndex: 2 }} />}*/}
</Box>
{/* Bottom controls (zoom, download) & progress */}
<Sheet
variant='soft'
sx={{
p: 1,
display: 'flex',
flexDirection: 'column',
gap: 1,
}}
>
{/* Bottom controls (zoom, ocr, download) & progress */}
<Sheet variant='soft' sx={{ display: 'flex', flexDirection: 'column', gap: 1, p: 1 }}>
{!!error && <InlineError error={error} />}
{zoomControl}
{/*{ocrProgress !== null && <LinearProgress color='primary' determinate value={100 * ocrProgress} sx={{ px: 2 }} />}*/}
<Box paddingBottom={zoomControl ? 1 : undefined} sx={captureButtonContainerSx}>
<Box sx={{ display: 'flex', gap: 1, justifyContent: 'space-between' }}>
{/* Info */}
<IconButton disabled={!info} onClick={() => setShowInfo((prev) => !prev)}>
<InfoOutlinedIcon />
<IconButton size='lg' disabled={!info} variant='soft' onClick={() => setShowInfo(info => !info)}>
<InfoIcon />
</IconButton>
{/*<Button disabled={ocrProgress !== null} fullWidth variant='solid' size='lg' onClick={handleVideoOCRClicked} sx={{ flex: 1, maxWidth: 260 }}>*/}
{/* Extract Text*/}
{/*</Button>*/}
{/* Capture */}
<ButtonGroup variant='solid' sx={captureButtonGroupSx}>
<Tooltip disableInteractive arrow placement='top' title='Add to message'>
<IconButton size='sm' disabled={isAddButtonDisabled} onClick={handleVideoAddClicked} sx={addButtonSx}>
<AddRoundedIcon />
</IconButton>
</Tooltip>
<Button size='lg' onClick={handleVideoSnapClicked} endDecorator={<CameraEnhanceIcon />} sx={captureButtonSx}>
Capture
</Button>
</ButtonGroup>
<Button
fullWidth
variant='solid' color='neutral'
onClick={handleVideoSnapClicked}
endDecorator={<CameraAltIcon />}
sx={{ flex: 1, maxWidth: 200, py: 2, borderRadius: '3rem' }}
>
Capture
</Button>
{/* Download */}
<IconButton onClick={handleVideoDownloadClicked}>
<IconButton size='lg' variant='soft' onClick={handleVideoDownloadClicked}>
<DownloadIcon />
</IconButton>
</Box>
</Sheet>
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,193 @@
import * as React from 'react';
import { Badge, Box, ColorPaletteProp, Tooltip } from '@mui/joy';
/**
 * Right-aligns a locale-formatted number within a fixed-width text column.
 */
function alignRight(value: number, columnSize: number = 8) {
  return value.toLocaleString().padStart(columnSize);
}
/**
 * Formats a dollar amount: sub-dollar values are shown in cents ('¢', with
 * finer precision below 1¢), larger values in dollars.
 */
function formatCost(cost: number) {
  if (cost < 1) {
    const centDecimals = cost < 0.010 ? 2 : 1;
    return (cost * 100).toFixed(centDecimals) + ' ¢';
  }
  return '$ ' + cost.toFixed(2);
}
/**
 * Computes a human-readable token-usage breakdown (and, when pricing is
 * available, an approximate min/max cost) for a chat turn.
 *
 * @param tokenLimit model context window size; 0 when unknown (shows used tokens only)
 * @param directTokens token count of the current message
 * @param historyTokens token count of the prior chat history
 * @param responseMaxTokens tokens reserved for the model response
 * @param tokenPriceIn input price, in $ per million tokens
 * @param tokenPriceOut output price, in $ per million tokens
 * @returns indicator color, preformatted (whitespace-aligned) message,
 *          remaining tokens, and min/max turn cost when computable
 */
export function tokensPrettyMath(tokenLimit: number, directTokens: number, historyTokens?: number, responseMaxTokens?: number, tokenPriceIn?: number, tokenPriceOut?: number): {
  color: ColorPaletteProp,
  message: string,
  remainingTokens: number,
  costMax?: number,
  costMin?: number,
} {

  const usedInputTokens = directTokens + (historyTokens || 0);
  const usedMaxTokens = usedInputTokens + (responseMaxTokens || 0);
  const remainingTokens = tokenLimit - usedMaxTokens;
  const gteLimit = (remainingTokens <= 0 && tokenLimit > 0);

  // message: warning prefix when the turn would exceed the model limit
  let message: string = gteLimit ? '⚠️ ' : '';

  // costs (left undefined when pricing is not available)
  let costMax: number | undefined = undefined;
  let costMin: number | undefined = undefined;

  // no limit: show used tokens only
  if (!tokenLimit) {
    message += `Requested: ${usedMaxTokens.toLocaleString()} tokens`;
  }
  // has full information (d + i < l)
  else if (historyTokens || responseMaxTokens) {
    message +=
      `${Math.abs(remainingTokens).toLocaleString()} ${remainingTokens >= 0 ? 'available' : 'excess'} message tokens\n\n` +
      ` = Model max tokens: ${alignRight(tokenLimit)}\n` +
      ` - This message: ${alignRight(directTokens)}\n` +
      ` - History: ${alignRight(historyTokens || 0)}\n` +
      ` - Max response: ${alignRight(responseMaxTokens || 0)}`;

    // add the price, if available
    if (tokenPriceIn || tokenPriceOut) {
      costMin = tokenPriceIn ? usedInputTokens * tokenPriceIn / 1E6 : undefined;
      const costOutMax = (tokenPriceOut && responseMaxTokens) ? responseMaxTokens * tokenPriceOut / 1E6 : undefined;
      if (costMin || costOutMax) {
        message += `\n\n\n▶ Chat Turn Cost (max, approximate)\n`;
        if (costMin) message += '\n' +
          ` Input tokens: ${alignRight(usedInputTokens)}\n` +
          ` Input Price $/M: ${tokenPriceIn!.toFixed(2).padStart(8)}\n` +
          ` Input cost: ${('$' + costMin.toFixed(4)).padStart(8)}\n`;
        if (costOutMax) message += '\n' +
          ` Max output tokens: ${alignRight(responseMaxTokens!)}\n` +
          ` Output Price $/M: ${tokenPriceOut!.toFixed(2).padStart(8)}\n` +
          ` Max output cost: ${('$' + costOutMax.toFixed(4)).padStart(8)}\n`;
        if (costMin) message += '\n' +
          ` > Min turn cost: ${formatCost(costMin).padStart(8)}`;
        costMax = (costMin && costOutMax) ? costMin + costOutMax : undefined;
        if (costMax) message += '\n' +
          ` < Max turn cost: ${formatCost(costMax).padStart(8)}`;
      }
    }
  }
  // Cleaner mode: d + ? < R (total is the remaining in this case)
  else {
    message +=
      `${(tokenLimit + usedMaxTokens).toLocaleString()} available tokens after deleting this\n\n` +
      ` = Currently free: ${alignRight(tokenLimit)}\n` +
      ` + This message: ${alignRight(usedMaxTokens)}`;
  }

  // danger when over the limit, warning when under 25% of the window remains
  const color: ColorPaletteProp =
    (tokenLimit && remainingTokens < 0)
      ? 'danger'
      : remainingTokens < tokenLimit / 4
        ? 'warning'
        : 'primary';

  return { color, message, remainingTokens, costMax, costMin };
}
export const TokenTooltip = (props: { message: string | null, color: ColorPaletteProp, placement?: 'top' | 'top-end', children: React.ReactElement }) =>
<Tooltip
placement={props.placement}
variant={props.color !== 'primary' ? 'solid' : 'soft'} color={props.color}
title={props.message ? <Box sx={{ p: 2, whiteSpace: 'pre' }}>{props.message}</Box> : null}
sx={{
fontFamily: 'code',
// fontSize: '0.8125rem',
border: '1px solid',
borderColor: `${props.color}.outlinedColor`,
boxShadow: 'md',
}}
>
{props.children}
</Tooltip>;
/**
 * Simple little component to show the token count (and a tooltip on hover).
 * When `showCost` is enabled and pricing data is present, it shows the
 * approximate turn cost instead (max by default, min while hovering if
 * `enableHover` is set).
 */
export const TokenBadgeMemo = React.memo(TokenBadge);

function TokenBadge(props: {
  direct: number, // tokens of the current message
  history?: number, // tokens of the prior conversation
  responseMax?: number, // tokens reserved for the model response
  limit: number, // model context size; 0 when unknown
  tokenPriceIn?: number, // $ per million input tokens
  tokenPriceOut?: number, // $ per million output tokens
  enableHover?: boolean, // hover toggles min <-> max cost display
  showCost?: boolean
  showExcess?: boolean, // show the over-limit amount instead of direct tokens
  absoluteBottomRight?: boolean, // pin badge to the parent's bottom-right corner
  inline?: boolean, // render in-flow, without badge offset positioning
}) {

  // state
  const [isHovering, setIsHovering] = React.useState(false);

  const { message, color, remainingTokens, costMax, costMin } =
    tokensPrettyMath(props.limit, props.direct, props.history, props.responseMax, props.tokenPriceIn, props.tokenPriceOut);

  // handlers
  const handleHoverEnter = React.useCallback(() => setIsHovering(true), []);
  const handleHoverLeave = React.useCallback(() => setIsHovering(false), []);

  // badge text: either a cost estimate ('< max' / '> min') or a token count
  let badgeValue: string;
  const showAltCosts = !!props.showCost && !!costMax && costMin !== undefined;
  if (showAltCosts) {
    badgeValue = (!props.enableHover || isHovering)
      ? '< ' + formatCost(costMax)
      : '> ' + formatCost(costMin);
  } else {
    // show the direct tokens, unless we exceed the limit and 'showExcess' is enabled
    const value = (props.showExcess && (props.limit && remainingTokens <= 0))
      ? Math.abs(remainingTokens)
      : props.direct;
    badgeValue = value.toLocaleString();
  }

  // nothing to show: no direct tokens, not over the limit, no cost display
  const shallHide = !props.direct && remainingTokens >= 0 && !showAltCosts;
  if (shallHide) return null;

  return (
    <TokenTooltip color={color} message={message} placement='top-end'>
      <Badge
        variant='soft' color={color} max={1000000}
        // invisible={shallHide}
        onMouseEnter={props.enableHover ? handleHoverEnter : undefined}
        onMouseLeave={props.enableHover ? handleHoverLeave : undefined}
        badgeContent={badgeValue}
        slotProps={{
          root: {
            sx: {
              // optionally overlay the badge on the parent's bottom-right
              ...((props.absoluteBottomRight) && { position: 'absolute', bottom: 8, right: 8 }),
              cursor: 'help',
            },
          },
          badge: {
            sx: {
              // the badge (not the tooltip)
              // boxShadow: 'sm',
              fontFamily: 'code',
              fontSize: 'xs',
              // in-flow rendering: cancel the default badge offset transform
              ...((props.absoluteBottomRight || props.inline) && { position: 'static', transform: 'none' }),
            },
          },
        }}
      />
    </TokenTooltip>
  );
}
@@ -2,9 +2,7 @@ import * as React from 'react';
import { Box, useTheme } from '@mui/joy';
import type { DPricingChatGenerate } from '~/common/stores/llms/llms.pricing';
import { tokenCountsMathAndMessage, TokenTooltip } from './TokenTooltip';
import { tokensPrettyMath, TokenTooltip } from './TokenBadge';
/**
@@ -15,14 +13,14 @@ import { tokenCountsMathAndMessage, TokenTooltip } from './TokenTooltip';
export const TokenProgressbarMemo = React.memo(TokenProgressbar);
function TokenProgressbar(props: {
chatPricing?: DPricingChatGenerate,
direct: number,
history: number,
responseMax: number,
limit: number,
}) {
tokenPriceIn?: number,
tokenPriceOut?: number,
}) {
// external state
const theme = useTheme();
@@ -50,7 +48,7 @@ function TokenProgressbar(props: {
const overflowColor = theme.palette.danger.softColor;
// tooltip message/color
const { message, color } = tokenCountsMathAndMessage(props.limit, props.direct, props.history, props.responseMax, props.chatPricing);
const { message, color } = tokensPrettyMath(props.limit, props.direct, props.history, props.responseMax, props.tokenPriceIn, props.tokenPriceOut);
// sizes
const containerHeight = 8;
@@ -1,179 +0,0 @@
import * as React from 'react';
import { Controller, useFieldArray, useForm } from 'react-hook-form';
import { Box, Button, FormControl, FormHelperText, IconButton, Input, Stack, Typography } from '@mui/joy';
import AddIcon from '@mui/icons-material/Add';
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
import LanguageRoundedIcon from '@mui/icons-material/LanguageRounded';
import YouTubeIcon from '@mui/icons-material/YouTube';
import { extractYoutubeVideoIDFromURL } from '~/modules/youtube/youtube.utils';
import { GoodModal } from '~/common/components/modals/GoodModal';
import { addSnackbar } from '~/common/components/snackbar/useSnackbarsStore';
import { asValidURL } from '~/common/util/urlUtils';
// configuration
const MAX_URLS = 5; // maximum number of URL fields in the modal form


// a single web link entry produced by the modal form
type WebInputData = {
  url: string,
  // attachImages?: boolean,
}

// react-hook-form values shape: a dynamic array of link fields
type WebInputModalInputs = {
  links: WebInputData[];
}
/**
 * Modal dialog to enter up to MAX_URLS web addresses (including YouTube
 * links) whose content will be imported; validated and cleaned URLs are
 * passed to `onWebLinks` on submit.
 */
function WebInputModal(props: {
  onClose: () => void,
  onWebLinks: (urls: WebInputData[]) => void,
}) {

  // state - react-hook-form drives the dynamic list of URL inputs
  const { control: formControl, handleSubmit: formHandleSubmit, formState: { isValid: formIsValid, isDirty: formIsDirty } } = useForm<WebInputModalInputs>({
    values: { links: [{ url: '' }] },
    // mode: 'onChange', // validate on change
  });
  const { fields: formFields, append: formFieldsAppend, remove: formFieldsRemove } = useFieldArray({ control: formControl, name: 'links' });

  // derived
  const urlFieldCount = formFields.length;

  // handlers

  const { onClose, onWebLinks } = props;

  const handleClose = React.useCallback(() => onClose(), [onClose]);

  // trims, validates and https-prefixes the entered links, then hands them off
  const handleSubmit = React.useCallback(({ links }: WebInputModalInputs) => {
    // clean and prefix URLs
    const cleanUrls = links.reduce((acc, { url, ...linkRest }) => {
      const trimmed = (url || '').trim();
      if (trimmed) {
        // this form uses a 'relaxed' URL validation, meaning one can write 'big-agi.com' and we'll assume https://
        const relaxedUrl = asValidURL(trimmed, true);
        if (relaxedUrl)
          acc.push({ url: relaxedUrl, ...linkRest });
      }
      return acc;
    }, [] as WebInputData[]);
    // nothing valid entered: notify and keep the modal open
    if (!cleanUrls.length) {
      addSnackbar({ key: 'invalid-urls', message: 'Please enter at least one valid web address', type: 'issue', overrides: { autoHideDuration: 2000 } });
      return;
    }
    onWebLinks(cleanUrls);
    handleClose();
  }, [handleClose, onWebLinks]);

  return (
    <GoodModal
      open
      onClose={handleClose}
      title='Add Web Content'
      titleStartDecorator={<LanguageRoundedIcon />}
      closeText={'Cancel'}
      // unfilterBackdrop
      // themedColor='neutral'
      hideBottomClose
    >

      <Box fontSize='md'>
        Enter web page addresses to import their content.
      </Box>
      <Typography level='body-sm'>
        Works on most websites and for YouTube videos (e.g., youtube.com/...) the transcript will be imported.
        {/*You can add up to {MAX_URLS} URLs.*/}
      </Typography>

      <form onSubmit={formHandleSubmit(handleSubmit)}>

        {/* Dynamic list of URL inputs */}
        <Stack spacing={1}>
          {formFields.map((field, index) => (
            <Controller
              key={field.id}
              control={formControl}
              name={`links.${index}.url`}
              rules={{ required: 'Please enter a valid URL' }}
              render={({ field: { value, onChange }, fieldState: { error } }) => (
                <FormControl error={!!error}>
                  <Box sx={{ display: 'flex', gap: 1 }}>
                    <Input
                      autoFocus={index === 0}
                      required={index === 0}
                      placeholder='https://...'
                      endDecorator={extractYoutubeVideoIDFromURL(value) ? <YouTubeIcon sx={{ color: 'red' }} /> : undefined}
                      value={value}
                      onChange={onChange}
                      sx={{ flex: 1 }}
                    />
                    {/* remove button, only when more than one field exists */}
                    {urlFieldCount > 1 && (
                      <IconButton
                        size='sm'
                        variant='plain'
                        color='neutral'
                        onClick={() => formFieldsRemove(index)}
                      >
                        <DeleteOutlineIcon />
                      </IconButton>
                    )}
                  </Box>
                  {error && <FormHelperText>{error.message}</FormHelperText>}
                </FormControl>
              )}
            />
          ))}
        </Stack>

        {/* Add a new link */}
        <Box sx={{ display: 'flex', justifyContent: 'space-between', gap: 1, mt: 2.5 }}>
          {formIsDirty && <Button
            color='neutral'
            variant='soft'
            disabled={urlFieldCount >= MAX_URLS}
            onClick={() => formFieldsAppend({ url: '' })}
            startDecorator={<AddIcon />}
          >
            Another
            {/*{urlFieldCount >= MAX_URLS ? 'Enough URLs' : urlFieldCount === 1 ? 'Add URL' : urlFieldCount === 2 ? 'Add another' : urlFieldCount === 3 ? 'And another one' : urlFieldCount === 4 ? 'Why stopping' : 'Just one more'}*/}
          </Button>}
          <Button
            variant='solid'
            type='submit'
            disabled={!formIsValid || !formIsDirty}
            sx={{ minWidth: 160, ml: 'auto' }}
          >
            Add {urlFieldCount > 1 ? `(${urlFieldCount})` : ''}
          </Button>
        </Box>

      </form>

    </GoodModal>
  );
}
/**
 * Hook that manages the Web Input modal: returns an opener function and the
 * conditionally-rendered modal component.
 */
export function useWebInputModal(onAttachWebLinks: (urls: WebInputData[]) => void) {

  // modal visibility
  const [isOpen, setIsOpen] = React.useState(false);

  const openWebInputDialog = React.useCallback(() => setIsOpen(true), []);

  // render the modal only while open (falsy otherwise, so callers can inline it)
  const webInputDialogComponent = React.useMemo(() => {
    if (!isOpen) return false;
    return (
      <WebInputModal
        onClose={() => setIsOpen(false)}
        onWebLinks={onAttachWebLinks}
      />
    );
  }, [onAttachWebLinks, isOpen]);

  return { openWebInputDialog, webInputDialogComponent };
}
@@ -2,46 +2,40 @@ import * as React from 'react';
import { Box, ListItem, ListItemButton, ListItemDecorator, Sheet, Typography } from '@mui/joy';
import { CloseablePopup } from '~/common/components/CloseablePopup';
import { CloseableMenu } from '~/common/components/CloseableMenu';
import type { ActileItem } from './ActileProvider';
import type { ActileItem, ActileProvider } from './ActileProvider';
export function ActilePopup(props: {
anchorEl: HTMLElement | null,
onClose: () => void,
itemsByProvider: { provider: ActileProvider, items: ActileItem[] }[],
activeItemIndex: number,
title?: string,
items: ActileItem[],
activeItemIndex: number | undefined,
activePrefixLength: number,
onItemClick: (item: ActileItem) => void,
children?: React.ReactNode
}) {
// We need to keep track of the overall item index to correctly match with activeItemIndex
const itemIndices = React.useMemo(() => {
const indices: { providerKey: string, itemKey: string, isActive: boolean }[] = [];
let indexCounter = 0;
props.itemsByProvider.forEach(({ provider, items }) => {
items.forEach((item) => {
indices.push({
providerKey: provider.key,
itemKey: item.key,
isActive: indexCounter === props.activeItemIndex,
});
indexCounter += 1;
});
});
return indices;
}, [props.itemsByProvider, props.activeItemIndex]);
const hasAnyIcon = props.items.some(item => !!item.Icon);
return (
<CloseablePopup
menu anchorEl={props.anchorEl} onClose={props.onClose}
maxHeightGapPx={320}
minWidth={320}
noBottomPadding
noTopPadding
<CloseableMenu
noTopPadding noBottomPadding
open anchorEl={props.anchorEl} onClose={props.onClose}
sx={{ minWidth: 320 }}
>
{!props.itemsByProvider.length && (
{!!props.title && (
<Sheet variant='soft' sx={{ p: 1, borderBottom: '1px solid', borderBottomColor: 'neutral.softActiveBg' }}>
<Typography level='title-sm'>
{props.title}
</Typography>
</Sheet>
)}
{!props.items.length && (
<ListItem variant='soft' color='warning'>
<Typography level='body-md'>
No matching command
@@ -49,65 +43,46 @@ export function ActilePopup(props: {
</ListItem>
)}
{props.itemsByProvider.map(({ provider, items }) => (
<React.Fragment key={provider.key}>
{props.items.map((item, idx) => {
const isActive = idx === props.activeItemIndex;
const labelBold = item.label.slice(0, props.activePrefixLength);
const labelNormal = item.label.slice(props.activePrefixLength);
return (
<ListItem
key={item.key}
variant={isActive ? 'soft' : undefined}
color={isActive ? 'primary' : undefined}
onClick={() => props.onItemClick(item)}
>
<ListItemButton color='primary'>
{hasAnyIcon && (
<ListItemDecorator>
{item.Icon ? <item.Icon /> : null}
</ListItemDecorator>
)}
<Box>
{/* Provider Label */}
<Sheet variant='soft' sx={{ p: 1, borderBottom: '1px solid', borderBottomColor: 'neutral.softActiveBg' }}>
<Typography level='title-sm'>
{provider.label}
</Typography>
</Sheet>
{/* Items */}
{items.map((item) => {
const index = itemIndices.findIndex(idx => idx.providerKey === provider.key && idx.itemKey === item.key);
const isActive = itemIndices[index]?.isActive;
const labelBold = item.label.slice(0, props.activePrefixLength);
const labelNormal = item.label.slice(props.activePrefixLength);
return (
<ListItem
key={`${provider.key}-${item.key}`}
variant={isActive ? 'soft' : undefined}
color={isActive ? 'primary' : undefined}
onClick={() => props.onItemClick(item)}
>
<ListItemButton color='primary'>
{item.Icon && (
<ListItemDecorator>
<item.Icon />
</ListItemDecorator>
)}
{/* Item*/}
<Box>
{/* Item main text */}
<Box sx={{ display: 'flex', alignItems: 'center', gap: 1 }}>
<Typography level='title-sm' color={isActive ? 'primary' : undefined}>
<span style={{ textDecoration: 'underline' }}><b>{labelBold}</b></span>{labelNormal}
</Typography>
{item.argument && <Typography level='body-sm'>
{item.argument}
</Typography>}
</Box>
{/* Item description */}
{!!item.description && <Typography level='body-xs'>
{item.description}
<Box sx={{ display: 'flex', alignItems: 'center', gap: 1 }}>
<Typography level='title-sm' color={isActive ? 'primary' : undefined}>
<span style={{ textDecoration: 'underline' }}><b>{labelBold}</b></span>{labelNormal}
</Typography>
{item.argument && <Typography level='body-sm'>
{item.argument}
</Typography>}
</Box>
</ListItemButton>
</ListItem>
);
})}
</React.Fragment>
))}
{!!item.description && <Typography level='body-xs'>
{item.description}
</Typography>}
</Box>
</ListItemButton>
</ListItem>
);
},
)}
</CloseablePopup>
{props.children}
</CloseableMenu>
);
}
}
@@ -1,27 +1,15 @@
import type { FunctionComponent } from 'react';
export interface ActileProvider<TItem extends ActileItem = ActileItem> {
// Unique key for the provider
readonly key: 'pcmd' | 'pstrmsg' | 'pattlbl';
// Label for display
get label(): string;
// Interface for the provider
fastCheckTriggerText: (trailingText: string) => boolean;
fetchItems: () => ActileProviderItems<TItem>;
onItemSelect: (item: ActileItem) => void;
}
export type ActileProviderItems<TItem extends ActileItem = ActileItem> = Promise<{ searchPrefix: string, items: TItem[] }>;
export interface ActileItem {
key: string;
providerKey: ActileProvider['key'];
label: string;
argument?: string;
description?: string;
Icon?: FunctionComponent;
}
export interface ActileProvider<TItem extends ActileItem = ActileItem> {
fastCheckTriggerText: (trailingText: string) => boolean;
fetchItems: () => Promise<{ title: string, searchPrefix: string, items: TItem[] }>;
onItemSelect: (item: ActileItem) => void;
}
@@ -1,37 +0,0 @@
import type { ActileItem, ActileProvider, ActileProviderItems } from './ActileProvider';
import type { AttachmentDraftsStoreApi } from '~/common/attachment-drafts/store-attachment-drafts_slice';
export interface AttachmentLabelItem extends ActileItem {
  // pure label item: no fields beyond the base ActileItem
}

/**
 * Actile provider that offers the current attachment drafts as selectable
 * labels, triggered by typing '@' (at the start, or after a space).
 */
export const providerAttachmentLabels = (
  attachmentsStoreApi: AttachmentDraftsStoreApi | null,
  onLabelSelect: (item: ActileItem, searchPrefix: string) => void,
): ActileProvider<AttachmentLabelItem> => ({

  key: 'pattlbl',

  get label() {
    return 'Attachment Labels';
  },

  // Uses '@' as the trigger
  fastCheckTriggerText: (trailingText: string) => trailingText === '@' || trailingText.endsWith(' @'),

  fetchItems: async (): ActileProviderItems<AttachmentLabelItem> => {
    // snapshot the current drafts from the attachments store (if any)
    const items = attachmentsStoreApi?.getState()?.attachmentDrafts.map(draft => {
      const labelItem = {
        key: draft.id,
        providerKey: 'pattlbl',
        label: draft.label,
        argument: undefined,
        description: 'name',
        Icon: undefined,
      } as AttachmentLabelItem;
      return labelItem;
    }) ?? [];
    return { searchPrefix: '', items };
  },

  onItemSelect: item => onLabelSelect(item as AttachmentLabelItem, '@'),

});
@@ -1,35 +1,26 @@
import { ActileItem, ActileProvider } from './ActileProvider';
import { findAllChatCommands } from '../../../commands/commands.registry';
import type { ActileItem, ActileProvider, ActileProviderItems } from './ActileProvider';
export function providerCommands(onCommandSelect: (item: ActileItem) => void): ActileProvider {
return {
export const providerCommands = (
onCommandSelect: (item: ActileItem, searchPrefix: string) => void,
): ActileProvider => ({
key: 'pcmd',
get label() {
return 'Chat Commands';
},
fastCheckTriggerText: (trailingText: string) => {
// only the literal '/' is a trigger
return trailingText === '/';
},
fastCheckTriggerText: (trailingText: string) => trailingText === '/',
fetchItems: async (): ActileProviderItems => ({
searchPrefix: '/',
items: findAllChatCommands().map((cmd) => ({
key: cmd.primary,
providerKey: 'pcmd',
label: cmd.primary,
argument: cmd.arguments?.join(' ') ?? undefined,
description: cmd.description,
Icon: cmd.Icon,
} satisfies ActileItem)),
}),
// no real need to be async
fetchItems: async () => ({
title: 'Chat Commands',
searchPrefix: '/',
items: findAllChatCommands().map((cmd) => ({
key: cmd.primary,
label: cmd.primary,
argument: cmd.arguments?.join(' ') ?? undefined,
description: cmd.description,
Icon: cmd.Icon,
} satisfies ActileItem)),
}),
onItemSelect: (item) => onCommandSelect(item as ActileItem, '/'),
});
onItemSelect: onCommandSelect,
};
}
@@ -1,8 +1,8 @@
import { conversationTitle, DConversationId } from '~/common/stores/chat/chat.conversation';
import { MESSAGE_FLAG_STARRED, messageFragmentsReduceText, messageHasUserFlag } from '~/common/stores/chat/chat.message';
import { messageFragmentsReduceText, messageHasUserFlag } from '~/common/stores/chat/chat.message';
import { useChatStore } from '~/common/stores/chat/store-chats';
import type { ActileItem, ActileProvider, ActileProviderItems } from './ActileProvider';
import { ActileItem, ActileProvider } from './ActileProvider';
export interface StarredMessageItem extends ActileItem {
@@ -10,44 +10,39 @@ export interface StarredMessageItem extends ActileItem {
messageId: string,
}
export const providerStarredMessages = (onMessageSelect: (item: StarredMessageItem) => void): ActileProvider<StarredMessageItem> => ({
export function providerStarredMessage(onMessageSeelect: (item: StarredMessageItem) => void): ActileProvider<StarredMessageItem> {
return {
key: 'pstrmsg',
// only the literal '@' at start of chat, or ' @' at end of chat
fastCheckTriggerText: (trailingText: string) => trailingText === '@' || trailingText.endsWith(' @'),
get label() {
return 'Starred Messages';
},
// finds all the starred messages in all the conversations - this could be heavy
fetchItems: async () => {
const { conversations } = useChatStore.getState();
// only the literal '@' at start of chat, or ' @' at end of chat
fastCheckTriggerText: (trailingText: string) => trailingText === '@' || trailingText.endsWith(' @'),
// finds all the starred messages in all the conversations - this could be heavy
fetchItems: async (): ActileProviderItems<StarredMessageItem> => {
const { conversations } = useChatStore.getState();
const starredMessages: StarredMessageItem[] = [];
conversations.forEach((conversation) => {
conversation.messages.forEach((message) => {
messageHasUserFlag(message, MESSAGE_FLAG_STARRED) && starredMessages.push({
key: message.id,
providerKey: 'pstrmsg',
// data
conversationId: conversation.id,
messageId: message.id,
// looks
label: conversationTitle(conversation) + ' - ' + messageFragmentsReduceText(message.fragments).slice(0, 32) + '...',
// description: message.text.slice(32, 100),
Icon: undefined,
} satisfies StarredMessageItem);
const starredMessages: StarredMessageItem[] = [];
conversations.forEach((conversation) => {
conversation.messages.forEach((message) => {
messageHasUserFlag(message, 'starred') && starredMessages.push({
// data
conversationId: conversation.id,
messageId: message.id,
// looks
key: message.id,
label: conversationTitle(conversation) + ' - ' + messageFragmentsReduceText(message.fragments).slice(0, 32) + '...',
// description: message.text.slice(32, 100),
Icon: undefined,
} satisfies StarredMessageItem);
});
});
});
return {
searchPrefix: '',
items: starredMessages,
};
},
return {
title: 'Starred Messages',
searchPrefix: '',
items: starredMessages,
};
},
onItemSelect: item => onMessageSelect(item as StarredMessageItem),
});
onItemSelect: item => onMessageSeelect(item as StarredMessageItem),
};
}
@@ -1,6 +1,5 @@
import * as React from 'react';
import type { ActileItem, ActileProvider } from './ActileProvider';
import { ActileItem, ActileProvider } from './ActileProvider';
import { ActilePopup } from './ActilePopup';
@@ -8,74 +7,71 @@ export const useActileManager = (providers: ActileProvider[], anchorRef: React.R
// state
const [popupOpen, setPopupOpen] = React.useState(false);
const [itemsByProvider, setItemsByProvider] = React.useState<{ provider: ActileProvider, items: ActileItem[] }[]>([]);
const [provider, setProvider] = React.useState<ActileProvider | null>(null);
const [title, setTitle] = React.useState<string>('');
const [items, setItems] = React.useState<ActileItem[]>([]);
const [activeSearchString, setActiveSearchString] = React.useState<string>('');
const [activeItemIndex, setActiveItemIndex] = React.useState<number>(0);
// derived state
const activeItemsByProvider = React.useMemo(() => {
const search = activeSearchString.trim().toLowerCase();
return itemsByProvider.map(({ provider, items }) => ({
provider,
items: items.filter(item => item.label?.toLowerCase().startsWith(search)),
})).filter(({ items }) => items.length > 0);
}, [itemsByProvider, activeSearchString]);
const flatActiveItems = React.useMemo(() => {
return activeItemsByProvider.flatMap(({ items }) => items);
}, [activeItemsByProvider]);
const totalItems = flatActiveItems.length;
const activeItem = totalItems > 0 && activeItemIndex >= 0 && activeItemIndex < totalItems ? flatActiveItems[activeItemIndex] : null;
// derived state
const activeItems = React.useMemo(() => {
const search = activeSearchString.trim().toLowerCase();
return items.filter(item => item.label?.toLowerCase().startsWith(search));
}, [items, activeSearchString]);
const activeItem = activeItemIndex >= 0 && activeItemIndex < activeItems.length ? activeItems[activeItemIndex] : null;
const handleClose = React.useCallback(() => {
setPopupOpen(false);
setItemsByProvider([]);
setProvider(null);
setTitle('');
setItems([]);
setActiveSearchString('');
setActiveItemIndex(0);
}, []);
const handlePopupItemClicked = React.useCallback((item: ActileItem) => {
const provider = providers.find(p => p.key === item.providerKey);
provider?.onItemSelect(item);
handleClose();
}, [providers, handleClose]);
}, [handleClose, provider]);
const handleEnterKey = React.useCallback(() => {
if (activeItem)
handlePopupItemClicked(activeItem);
activeItem && handlePopupItemClicked(activeItem);
}, [activeItem, handlePopupItemClicked]);
const actileInterceptTextChange = React.useCallback((trailingText: string) => {
// Collect all providers whose trigger matches
const matchingProviders = providers.filter(provider => provider.fastCheckTriggerText(trailingText));
if (matchingProviders.length > 0) {
// Fetch items from all matching providers
Promise.all(matchingProviders.map(provider =>
provider.fetchItems().then(({ searchPrefix, items }) => ({
provider,
searchPrefix,
items: items.map(item => ({ ...item, providerKey: provider.key })),
})),
)).then((results) => {
// Filter out empty results
results = results.filter(result => result.items.length > 0);
if (results.length) {
setPopupOpen(true);
setItemsByProvider(results.map(result => ({ provider: result.provider, items: result.items })));
setActiveSearchString(results[0].searchPrefix); // Assuming all search prefixes are the same
setActiveItemIndex(0);
}
}).catch(error => {
handleClose();
console.error('Failed to fetch popup items:', error);
});
return true;
const actileInterceptTextChange = React.useCallback((trailingText: string) => {
for (const provider of providers) {
if (provider.fastCheckTriggerText(trailingText)) {
provider
.fetchItems()
.then(({ title, searchPrefix, items }) => {
// if there are no items, ignore
if (items.length) {
setPopupOpen(true);
setProvider(provider);
setTitle(title);
setItems(items);
setActiveSearchString(searchPrefix);
}
})
.catch(error => {
handleClose();
console.error('Failed to fetch popup items:', error);
});
return true;
}
}
return false;
}, [handleClose, providers]);
const actileInterceptKeydown = React.useCallback((_event: React.KeyboardEvent<HTMLTextAreaElement>): boolean => {
// Popup open: Intercept
const { key, currentTarget, ctrlKey, metaKey } = _event;
if (popupOpen) {
@@ -84,11 +80,11 @@ export const useActileManager = (providers: ActileProvider[], anchorRef: React.R
handleClose();
} else if (key === 'ArrowUp') {
_event.preventDefault();
setActiveItemIndex((prevIndex) => (prevIndex > 0 ? prevIndex - 1 : totalItems - 1));
setActiveItemIndex((prevIndex) => (prevIndex > 0 ? prevIndex - 1 : activeItems.length - 1));
} else if (key === 'ArrowDown') {
_event.preventDefault();
setActiveItemIndex((prevIndex) => (prevIndex < totalItems - 1 ? prevIndex + 1 : 0));
} else if (key === 'Enter' || key === 'ArrowRight' || key === 'Tab' || (key === ' ' && totalItems === 1)) {
setActiveItemIndex((prevIndex) => (prevIndex < activeItems.length - 1 ? prevIndex + 1 : 0));
} else if (key === 'Enter' || key === 'ArrowRight' || key === 'Tab' || (key === ' ' && activeItems.length === 1)) {
_event.preventDefault();
handleEnterKey();
} else if (key === 'Backspace') {
@@ -104,24 +100,26 @@ export const useActileManager = (providers: ActileProvider[], anchorRef: React.R
const trailingText = (currentTarget.value || '') + key;
return actileInterceptTextChange(trailingText);
}, [actileInterceptTextChange, handleClose, handleEnterKey, popupOpen, totalItems]);
}, [actileInterceptTextChange, activeItems.length, handleClose, handleEnterKey, popupOpen]);
const actileComponent = React.useMemo(() => {
return !popupOpen ? null : (
<ActilePopup
anchorEl={anchorRef.current}
onClose={handleClose}
itemsByProvider={activeItemsByProvider}
title={title}
items={activeItems}
activeItemIndex={activeItemIndex}
activePrefixLength={activeSearchString.length}
onItemClick={handlePopupItemClicked}
/>
);
}, [activeItemIndex, activeItemsByProvider, activeSearchString.length, anchorRef, handleClose, handlePopupItemClicked, popupOpen]);
}, [activeItemIndex, activeItems, activeSearchString.length, anchorRef, handleClose, handlePopupItemClicked, popupOpen, title]);
return {
actileComponent,
actileInterceptKeydown,
actileInterceptTextChange,
};
};
};
@@ -2,49 +2,35 @@ import * as React from 'react';
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
import AddAPhotoIcon from '@mui/icons-material/AddAPhoto';
import CameraAltOutlinedIcon from '@mui/icons-material/CameraAltOutlined';
import { buttonAttachSx } from '~/common/components/ButtonAttachFiles';
import { CameraCaptureModal } from '../CameraCaptureModal';
const attachCameraLegend = (isMobile: boolean) =>
<Box sx={{ px: 1, py: 0.75, lineHeight: '1.5rem' }}>
<b>Attach photo</b><br />
{isMobile ? 'Auto-OCR to read text' : 'See the world, on the go'}
</Box>;
export const ButtonAttachCameraMemo = React.memo(ButtonAttachCamera);
function ButtonAttachCamera(props: {
isMobile?: boolean,
disabled?: boolean,
fullWidth?: boolean,
noToolTip?: boolean,
onOpenCamera: () => void,
}) {
function ButtonAttachCamera(props: { isMobile?: boolean, onOpenCamera: () => void }) {
return props.isMobile ? (
<IconButton disabled={props.disabled} onClick={props.onOpenCamera}>
<IconButton onClick={props.onOpenCamera}>
<AddAPhotoIcon />
</IconButton>
) : (
<Tooltip arrow disableInteractive placement='top-start' title={props.noToolTip ? null : (
<Box sx={buttonAttachSx.tooltip}>
<b>Attach photo</b><br />
{!!props.isMobile ? 'Auto-OCR to read text' : 'See the world, on the go'}
</Box>
)}>
<Button
variant='plain'
color='neutral'
disabled={props.disabled}
fullWidth={props.fullWidth}
startDecorator={<CameraAltOutlinedIcon />}
onClick={props.onOpenCamera}
sx={buttonAttachSx.desktop}
>
<Tooltip disableInteractive variant='solid' placement='top-start' title={attachCameraLegend(!!props.isMobile)}>
<Button fullWidth variant='plain' color='neutral' onClick={props.onOpenCamera} startDecorator={<AddAPhotoIcon />}
sx={{ justifyContent: 'flex-start' }}>
Camera
</Button>
</Tooltip>
);
}
export function useCameraCaptureModalDialog(onAttachImageStable: (file: File) => void) {
export function useCameraCaptureModal(onAttachImage: (file: File) => void) {
// state
const [open, setOpen] = React.useState(false);
@@ -54,9 +40,9 @@ export function useCameraCaptureModalDialog(onAttachImageStable: (file: File) =>
const cameraCaptureComponent = React.useMemo(() => open && (
<CameraCaptureModal
onCloseModal={() => setOpen(false)}
onAttachImage={onAttachImageStable}
onAttachImage={onAttachImage}
/>
), [open, onAttachImageStable]);
), [open, onAttachImage]);
return {
openCamera,
@@ -4,41 +4,29 @@ import { Box, Button, IconButton, Tooltip } from '@mui/joy';
import ContentPasteGoIcon from '@mui/icons-material/ContentPasteGo';
import { KeyStroke } from '~/common/components/KeyStroke';
import { buttonAttachSx } from '~/common/components/ButtonAttachFiles';
const pasteClipboardLegend =
<Box sx={{ px: 1, py: 0.75, lineHeight: '1.5rem' }}>
<b>Attach clipboard 📚</b><br />
Auto-converts to the best types<br />
<KeyStroke combo='Ctrl + Shift + V' sx={{ mt: 1, mb: 0.5 }} />
</Box>;
export const ButtonAttachClipboardMemo = React.memo(ButtonAttachClipboard);
function ButtonAttachClipboard(props: {
isMobile?: boolean,
disabled?: boolean,
fullWidth?: boolean,
noToolTip?: boolean,
onAttachClipboard: () => void,
}) {
function ButtonAttachClipboard(props: { isMobile?: boolean, onClick: () => void }) {
return props.isMobile ? (
<IconButton disabled={props.disabled} onClick={props.onAttachClipboard}>
<IconButton onClick={props.onClick}>
<ContentPasteGoIcon />
</IconButton>
) : (
<Tooltip arrow disableInteractive placement='top-start' title={props.noToolTip ? null : (
<Box sx={buttonAttachSx.tooltip}>
<b>Attach clipboard 📚</b><br />
Auto-converts to the best types<br />
<KeyStroke combo='Ctrl + Shift + V' sx={{ mt: 1, mb: 0.5 }} />
</Box>
)}>
<Button
variant='plain'
color='neutral'
disabled={props.disabled}
fullWidth={props.fullWidth}
startDecorator={<ContentPasteGoIcon />}
onClick={props.onAttachClipboard}
sx={buttonAttachSx.desktop}
>
<Tooltip disableInteractive variant='solid' placement='top-start' title={pasteClipboardLegend}>
<Button fullWidth variant='plain' color='neutral' startDecorator={<ContentPasteGoIcon />} onClick={props.onClick}
sx={{ justifyContent: 'flex-start' }}>
Paste
</Button>
</Tooltip>
);
}
}
@@ -0,0 +1,29 @@
import * as React from 'react';
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
import AttachFileOutlinedIcon from '@mui/icons-material/AttachFileOutlined';
const attachFileLegend =
<Box sx={{ px: 1, py: 0.75, lineHeight: '1.5rem' }}>
<b>Attach files</b><br />
Drag & drop in chat for faster loads
</Box>;
export const ButtonAttachFileMemo = React.memo(ButtonAttachFile);
function ButtonAttachFile(props: { isMobile?: boolean, onAttachFilePicker: () => void }) {
return props.isMobile ? (
<IconButton onClick={props.onAttachFilePicker}>
<AttachFileOutlinedIcon />
</IconButton>
) : (
<Tooltip disableInteractive variant='solid' placement='top-start' title={attachFileLegend}>
<Button fullWidth variant='plain' color='neutral' onClick={props.onAttachFilePicker} startDecorator={<AttachFileOutlinedIcon />}
sx={{ justifyContent: 'flex-start' }}>
File
</Button>
</Tooltip>
);
}
@@ -1,44 +0,0 @@
import * as React from 'react';
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
import AddRoundedIcon from '@mui/icons-material/AddRounded';
import { buttonAttachSx } from '~/common/components/ButtonAttachFiles';
export const ButtonAttachNewMemo = React.memo(ButtonAttachNew);
function ButtonAttachNew(props: {
isMobile?: boolean,
disabled?: boolean,
fullWidth?: boolean,
noToolTip?: boolean,
onAttachNew: () => void,
}) {
return props.isMobile ? (
<IconButton disabled={props.disabled} onClick={props.onAttachNew}>
<AddRoundedIcon />
</IconButton>
) : (
<Tooltip arrow disableInteractive placement='top-start' title={props.noToolTip ? null : (
<Box sx={buttonAttachSx.tooltip}>
<b>Create new document</b><br />
Edit your own empty document
{/*<br />*/}
{/*<KeyStroke combo='Ctrl + Alt + N' sx={{ mt: 1, mb: 0.5 }} />*/}
</Box>
)}>
<Button
variant='plain'
color='neutral'
disabled={props.disabled}
fullWidth={props.fullWidth}
startDecorator={<AddRoundedIcon />}
onClick={props.onAttachNew}
sx={buttonAttachSx.desktop}
>
New
</Button>
</Tooltip>
);
}
@@ -3,20 +3,12 @@ import * as React from 'react';
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
import ScreenshotMonitorIcon from '@mui/icons-material/ScreenshotMonitor';
import { Is } from '~/common/util/pwaUtils';
import { buttonAttachSx } from '~/common/components/ButtonAttachFiles';
import { takeScreenCapture } from '~/common/util/screenCaptureUtils';
export const ButtonAttachScreenCaptureMemo = React.memo(ButtonAttachScreenCapture);
function ButtonAttachScreenCapture(props: {
isMobile?: boolean,
disabled?: boolean,
fullWidth?: boolean,
noToolTip?: boolean,
onAttachScreenCapture: (file: File) => void
}) {
function ButtonAttachScreenCapture(props: { isMobile?: boolean, onAttachScreenCapture: (file: File) => void }) {
// state
const [capturing, setCapturing] = React.useState(false);
@@ -34,36 +26,34 @@ function ButtonAttachScreenCapture(props: {
file && onAttachScreenCapture(file);
} catch (error: any) {
const message = error instanceof Error ? error.message : String(error);
setError(`Issue: ${message}`);
setError(`Screen capture issue: ${message}`);
}
setCapturing(false);
}, [onAttachScreenCapture]);
return props.isMobile ? (
<IconButton disabled={props.disabled} onClick={handleTakeScreenCapture}>
<IconButton onClick={handleTakeScreenCapture}>
<ScreenshotMonitorIcon />
</IconButton>
) : (
<Tooltip arrow disableInteractive placement='top-start' title={props.noToolTip ? null : (
<Box sx={buttonAttachSx.tooltip}>
<b>Attach screen capture</b><br />
{error || 'Attach the image of a window, a browser tab, or a screen'}
{!error && Is.OS.MacOS && Is.Browser.Safari && (
<Box sx={{ mt: 1 }}><b>Safari</b>: canceling the window selection may cause a 60-second delay.</Box>
)}
</Box>
)}>
<Tooltip
arrow disableInteractive variant='solid' placement='top-start'
title={
<Box sx={{ px: 1, py: 0.75, lineHeight: '1.5rem' }}>
<b>Attach screen capture</b><br />
{error || 'Attach the image of a window, a browser tab, or a screen'}
</Box>
}
>
<Button
fullWidth
variant={capturing ? 'solid' : 'plain'}
color={!!error ? 'danger' : 'neutral'}
disabled={props.disabled}
fullWidth={props.fullWidth}
loading={capturing}
loadingPosition={capturing ? 'start' : 'center'}
startDecorator={<ScreenshotMonitorIcon />}
onClick={handleTakeScreenCapture}
sx={buttonAttachSx.desktop}
loading={capturing}
startDecorator={<ScreenshotMonitorIcon />}
sx={{ justifyContent: 'flex-start' }}
>
Screen
</Button>
@@ -1,47 +0,0 @@
import * as React from 'react';
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
import LanguageRoundedIcon from '@mui/icons-material/LanguageRounded';
import { buttonAttachSx } from '~/common/components/ButtonAttachFiles';
export const ButtonAttachWebMemo = React.memo(ButtonAttachWeb);
function ButtonAttachWeb(props: {
isMobile?: boolean,
disabled?: boolean,
fullWidth?: boolean,
noToolTip?: boolean,
onOpenWebInput: () => void,
}) {
const button = props.isMobile ? (
<IconButton disabled={props.disabled} onClick={props.onOpenWebInput}>
<LanguageRoundedIcon />
</IconButton>
) : (
<Button
variant='plain'
color='neutral'
disabled={props.disabled}
fullWidth={props.fullWidth}
startDecorator={<LanguageRoundedIcon />}
onClick={props.onOpenWebInput}
sx={buttonAttachSx.desktop}
>
Web
</Button>
);
return (props.noToolTip || props.isMobile) ? button : (
<Tooltip arrow disableInteractive placement='top-start' title={(
<Box sx={buttonAttachSx.tooltip}>
<b>Add Web Content 🌐</b><br />
Import from websites and YouTube
</Box>
)}>
{button}
</Tooltip>
);
}
@@ -1,6 +1,6 @@
import * as React from 'react';
import type { ColorPaletteProp, SxProps } from '@mui/joy/styles/types';
import type { SxProps } from '@mui/joy/styles/types';
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
import { ChatBeamIcon } from '~/common/components/icons/ChatBeamIcon';
@@ -35,20 +35,14 @@ const desktopSx: SxProps = {
export const ButtonBeamMemo = React.memo(ButtonBeam);
function ButtonBeam(props: {
isMobile?: boolean,
color?: ColorPaletteProp,
disabled?: boolean,
hasContent?: boolean,
onClick: () => void,
}) {
function ButtonBeam(props: { isMobile?: boolean, disabled?: boolean, hasContent?: boolean, onClick: () => void }) {
return props.isMobile ? (
<IconButton variant='soft' color={props.color ?? 'primary'} disabled={props.disabled} onClick={props.onClick} sx={mobileSx}>
<IconButton variant='soft' color='primary' disabled={props.disabled} onClick={props.onClick} sx={mobileSx}>
<ChatBeamIcon />
</IconButton>
) : (
<Tooltip disableInteractive variant='solid' arrow placement='right' title={props.hasContent ? desktopLegend : desktopLegendNoContent}>
<Button variant='soft' color={props.color ?? 'primary'} disabled={props.disabled} onClick={props.onClick} endDecorator={<ChatBeamIcon />} sx={desktopSx}>
<Button variant='soft' color='primary' disabled={props.disabled} onClick={props.onClick} endDecorator={<ChatBeamIcon />} sx={desktopSx}>
Beam
</Button>
</Tooltip>
@@ -1,52 +1,26 @@
import * as React from 'react';
import { Alert, Box, IconButton } from '@mui/joy';
import { Box, IconButton } from '@mui/joy';
import { ColorPaletteProp, VariantProp } from '@mui/joy/styles/types';
import MicIcon from '@mui/icons-material/Mic';
import { ExternalDocsLink } from '~/common/components/ExternalDocsLink';
import { GoodTooltip } from '~/common/components/GoodTooltip';
import { KeyStroke } from '~/common/components/KeyStroke';
const micLegend = (errorMessage: string | null) =>
const micLegend =
<Box sx={{ px: 1, py: 0.75, lineHeight: '1.5rem' }}>
Voice input<br />
<KeyStroke combo='Ctrl + M' sx={{ mt: 1, mb: 0.5 }} />
{errorMessage && (
<Alert variant='soft' color='danger' sx={{ mt: 2, mb: 0.5, flexDirection: 'column', alignItems: 'flex-start' }}>
{errorMessage}
<ExternalDocsLink color='danger' level='body-sm' docPage='help-feature-microphone'>
How to fix...
</ExternalDocsLink>
</Alert>
)}
</Box>;
export const ButtonMicMemo = React.memo(ButtonMic);
function ButtonMic(props: {
variant: VariantProp,
color: ColorPaletteProp,
errorMessage: string | null,
noBackground?: boolean,
onClick: () => void,
}) {
// Mobile: don't blur the textarea when clicking the mic button
const handleDontBlurTextArea = React.useCallback((event: React.MouseEvent) => {
const isTextAreaFocused = document.activeElement?.tagName === 'TEXTAREA';
// If a textarea is focused, prevent the default blur behavior
if (isTextAreaFocused)
event.preventDefault();
}, []);
return (
<GoodTooltip placement='top' arrow enableInteractive title={micLegend(props.errorMessage)}>
<IconButton variant={props.variant} color={props.color} onMouseDown={handleDontBlurTextArea} onClick={props.onClick} sx={props.noBackground ? { background: 'none' } : {}}>
<MicIcon />
</IconButton>
</GoodTooltip>
);
function ButtonMic(props: { variant: VariantProp, color: ColorPaletteProp, noBackground?: boolean, onClick: () => void }) {
return <GoodTooltip placement='top' title={micLegend}>
<IconButton variant={props.variant} color={props.color} onClick={props.onClick} sx={props.noBackground ? { background: 'none' } : {}}>
<MicIcon />
</IconButton>
</GoodTooltip>;
}
@@ -2,8 +2,8 @@ import * as React from 'react';
import { Box, IconButton, Tooltip } from '@mui/joy';
import { ColorPaletteProp, SxProps, VariantProp } from '@mui/joy/styles/types';
import RepeatIcon from '@mui/icons-material/Repeat';
import RepeatOnIcon from '@mui/icons-material/RepeatOn';
import AutoModeIcon from '@mui/icons-material/AutoMode';
const micContinuationLegend =
<Box sx={{ px: 1, py: 0.75, lineHeight: '1.5rem' }}>
@@ -13,10 +13,10 @@ const micContinuationLegend =
export const ButtonMicContinuationMemo = React.memo(ButtonMicContinuation);
function ButtonMicContinuation(props: { isActive: boolean, variant: VariantProp, color: ColorPaletteProp, onClick: () => void, sx?: SxProps }) {
function ButtonMicContinuation(props: { variant: VariantProp, color: ColorPaletteProp, onClick: () => void, sx?: SxProps }) {
return <Tooltip placement='bottom' title={micContinuationLegend}>
<IconButton variant={props.variant} color={props.color} onClick={props.onClick} sx={props.sx}>
{props.isActive ? <RepeatOnIcon /> : <RepeatIcon />}
<AutoModeIcon />
</IconButton>
</Tooltip>;
}
@@ -1,292 +0,0 @@
import * as React from 'react';
import TimeAgo from 'react-timeago';
import { Box, Button, CircularProgress, ColorPaletteProp, Sheet, Typography, VariantProp } from '@mui/joy';
import AbcIcon from '@mui/icons-material/Abc';
import CodeIcon from '@mui/icons-material/Code';
import DescriptionOutlinedIcon from '@mui/icons-material/DescriptionOutlined';
import HtmlIcon from '@mui/icons-material/Html';
import ImageOutlinedIcon from '@mui/icons-material/ImageOutlined';
import PermMediaOutlinedIcon from '@mui/icons-material/PermMediaOutlined';
import PhotoSizeSelectLargeOutlinedIcon from '@mui/icons-material/PhotoSizeSelectLargeOutlined';
import PhotoSizeSelectSmallOutlinedIcon from '@mui/icons-material/PhotoSizeSelectSmallOutlined';
import PictureAsPdfIcon from '@mui/icons-material/PictureAsPdf';
import PivotTableChartIcon from '@mui/icons-material/PivotTableChart';
import TelegramIcon from '@mui/icons-material/Telegram';
import TextFieldsIcon from '@mui/icons-material/TextFields';
import TextureIcon from '@mui/icons-material/Texture';
import WarningRoundedIcon from '@mui/icons-material/WarningRounded';
import YouTubeIcon from '@mui/icons-material/YouTube';
import { RenderImageRefDBlob } from '~/modules/blocks/image/RenderImageRefDBlob';
import { RenderImageURL } from '~/modules/blocks/image/RenderImageURL';
import type { AttachmentDraft, AttachmentDraftConverterType, AttachmentDraftId } from '~/common/attachment-drafts/attachment.types';
import { DMessageDataRef, DMessageImageRefPart, isImageRefPart } from '~/common/stores/chat/chat.fragments';
import { LiveFileIcon } from '~/common/livefile/liveFile.icons';
import { TooltipOutlined } from '~/common/components/TooltipOutlined';
import { ellipsizeFront, ellipsizeMiddle } from '~/common/util/textUtils';
import type { LLMAttachmentDraft } from './useLLMAttachmentDrafts';
const ATTACHMENT_MIN_STYLE = {
height: '100%',
minHeight: '40px',
// commented, this is messing with the style
// minWidth: '64px',
};
const attachmentConverterSx = { width: 18, height: 18 } as const;
const attachmentIconSx = { width: 30, maxHeight: 30, overflow: 'hidden' } as const;
const webPreviewImageSx = {
...attachmentIconSx,
// transform: 'perspective(100px) rotateX(60deg)',
// transform: 'perspective(100px) rotateY(-20deg) rotateX(30deg)',
// transformStyle: 'preserve-3d',
} as const;
const ellipsizeLabel = (label?: string) => {
if (!label)
return '';
return ellipsizeMiddle((label || '')
.replace(/https?:\/\/(?:www\.)?/, ''), 30)
.replace(/\/$/, '')
.replace('…', '…\n…');
};
/**
* Displayed while a source is loading
*/
function InputLoadingPlaceholder(props: { label: string }) {
return <Sheet
color='success' variant='soft'
sx={{
border: '1px solid',
borderColor: 'success.solidBg',
borderRadius: 'sm',
display: 'flex', alignItems: 'center', justifyContent: 'center', gap: 1,
...ATTACHMENT_MIN_STYLE,
boxSizing: 'border-box',
px: 1, py: 0.5, // reduce
}}
>
<CircularProgress color='success' size='sm' />
<Typography level='title-sm' sx={{ whiteSpace: 'nowrap' }}>
{ellipsizeLabel(props.label)}
</Typography>
</Sheet>;
}
/**
* Displayed when there is an error loading the input (e.g. file does not exist)
*/
function InputErrorIndicator() {
return <WarningRoundedIcon sx={{ color: 'danger.solidBg' }} />;
}
const converterTypeToIconMap: { [key in AttachmentDraftConverterType]: React.ComponentType<any> | null } = {
'text': TextFieldsIcon,
'rich-text': CodeIcon,
'rich-text-cleaner': CodeIcon,
'rich-text-table': PivotTableChartIcon,
'image-original': ImageOutlinedIcon,
'image-resized-high': PhotoSizeSelectLargeOutlinedIcon,
'image-resized-low': PhotoSizeSelectSmallOutlinedIcon,
'image-to-default': ImageOutlinedIcon,
'image-ocr': AbcIcon,
'pdf-text': PictureAsPdfIcon,
'pdf-images': PermMediaOutlinedIcon,
'pdf-text-and-images': PermMediaOutlinedIcon,
'docx-to-html': DescriptionOutlinedIcon,
'url-page-text': TextFieldsIcon, // was LanguageIcon
'url-page-markdown': CodeIcon, // was LanguageIcon
'url-page-html': HtmlIcon, // was LanguageIcon
'url-page-null': TextureIcon,
'url-page-image': ImageOutlinedIcon,
'youtube-transcript': YouTubeIcon,
'youtube-transcript-simple': YouTubeIcon,
'ego-fragments-inlined': TelegramIcon,
'unhandled': TextureIcon,
};
function attachmentIcons(attachmentDraft: AttachmentDraft, noTooltips: boolean, onViewImageRefPart: (imageRefPart: DMessageImageRefPart) => void) {
const activeConterters = attachmentDraft.converters.filter(c => c.isActive);
if (activeConterters.length === 0)
return null;
// Alternate icon for the Web Page Screenshot
const urlImage = attachmentDraft.input?.urlImage;
const urlImageData = urlImage?.imgDataUrl;
// Alternate icon for Single-Image DBlob output fragments (just single for now, multiple may not look good)
let outputSingleImageRefDBlobs: Extract<DMessageDataRef, { reftype: 'dblob' }>[] = [];
if (!urlImageData && attachmentDraft.outputFragments.length === 1) {
const fragment = attachmentDraft.outputFragments[0];
if (isImageRefPart(fragment.part) && fragment.part.dataRef && fragment.part.dataRef.reftype === 'dblob')
outputSingleImageRefDBlobs = [fragment.part.dataRef];
}
const handleViewFirstImage = (e: React.MouseEvent) => {
e.preventDefault();
e.stopPropagation();
if (attachmentDraft.outputFragments[0] && isImageRefPart(attachmentDraft.outputFragments[0].part))
onViewImageRefPart(attachmentDraft.outputFragments[0].part);
};
// Whether to render the converters
const renderConverterIcons = !outputSingleImageRefDBlobs.length;
// 1+ icons
return <Box sx={{ display: 'flex', alignItems: 'center', gap: 1 }}>
{/* If we have a Web preview, show it first */}
{!!urlImageData && /*!imageDataRefs.length &&*/ (
<TooltipOutlined key='preview' title={noTooltips ? null : <>
{urlImage?.generator === 'youtube-thumbnail' ? 'Thumbnail' : 'Page screenshot'} {!!urlImage?.timestamp && <>as of <TimeAgo date={urlImage.timestamp} /></>}.
<br />
Select <b>Add {urlImage?.generator === 'youtube-thumbnail' ? 'Thumbnail' : 'Screenshot'}</b> to attach it too.
</>} placement='top-start'>
<div>
<RenderImageURL
imageURL={urlImageData}
variant='attachment-button'
scaledImageSx={webPreviewImageSx}
/>
</div>
</TooltipOutlined>
)}
{/* Render DBlob referred images in place of converter icons */}
{outputSingleImageRefDBlobs.map((dataRef, i) => dataRef && (
<TooltipOutlined key={`image-${dataRef.dblobAssetId}`} title={noTooltips ? null : <>View converted image{/* <br/>{dataRef?.bytesSize?.toLocaleString()} bytes */}</>} placement='top-start'>
<div>
<RenderImageRefDBlob
dataRefDBlobAssetId={dataRef.dblobAssetId}
dataRefMimeType={dataRef.mimeType}
variant='attachment-button'
scaledImageSx={attachmentIconSx}
onClick={handleViewFirstImage}
/>
</div>
</TooltipOutlined>
))}
{/*{activeConterters.some(c => c.id.startsWith('url-page-')) ? <LanguageIcon sx={{ opacity: 0.2, ml: -2.5 }} /> : null}*/}
{renderConverterIcons && activeConterters.map((_converter, idx) => {
const Icon = converterTypeToIconMap[_converter.id] ?? null;
return !Icon ? null : (
<TooltipOutlined key={`${_converter.id}-${idx}`} title={noTooltips ? null : `Attached as ${_converter.name}`} placement='top-start'>
<Icon sx={attachmentConverterSx} />
</TooltipOutlined>
);
})}
</Box>;
}
function attachmentLabelText(attachmentDraft: AttachmentDraft): string {
const converter = attachmentDraft.converters.find(c => c.isActive) ?? null;
if (converter && attachmentDraft.label === 'Rich Text') {
if (converter.id === 'rich-text-table')
return 'Rich Table';
if (converter.id === 'rich-text-cleaner')
return 'Clean HTML';
if (converter.id === 'rich-text')
return 'Rich HTML';
}
return ellipsizeFront(attachmentDraft.label, 22);
}
export const LLMAttachmentButtonMemo = React.memo(LLMAttachmentButton);
function LLMAttachmentButton(props: {
llmAttachment: LLMAttachmentDraft,
menuShown: boolean,
onToggleMenu: (attachmentDraftId: AttachmentDraftId, anchor: HTMLAnchorElement) => void,
onViewImageRefPart: (imageRefPart: DMessageImageRefPart) => void,
}) {
// derived state
const { attachmentDraft: draft, llmSupportsAllFragments } = props.llmAttachment;
const isInputLoading = draft.inputLoading;
const isInputError = !!draft.inputError;
const isUnconvertible = !draft.converters.length;
const isOutputLoading = draft.outputsConverting;
const isOutputMissing = !draft.outputFragments.length;
const hasLiveFiles = draft.outputFragments.some(_f => _f.liveFileId);
const showWarning = isUnconvertible || (isOutputMissing || !llmSupportsAllFragments);
// handlers
const { onToggleMenu } = props;
const handleToggleMenu = React.useCallback((event: React.MouseEvent<HTMLAnchorElement>) => {
event.preventDefault(); // added for the Right mouse click (to prevent the menu)
onToggleMenu(draft.id, event.currentTarget);
}, [draft.id, onToggleMenu]);
// choose variants and color
const color: ColorPaletteProp =
(isInputLoading || isOutputLoading) ? 'success'
: isInputError ? 'danger'
: showWarning ? 'warning'
: /*props.menuShown ? 'primary' :*/ 'neutral';
const variant: VariantProp =
(isInputLoading || isOutputLoading || isInputError || showWarning) ? 'soft'
: 'outlined';
// loading indicator before we are ready for a button
if (isInputLoading)
return <InputLoadingPlaceholder label={draft.label} />;
return (
<Button
size='sm'
color={color}
variant={variant}
onClick={handleToggleMenu}
onContextMenu={handleToggleMenu}
sx={{
backgroundColor: props.menuShown ? `${color}.softActiveBg` : variant === 'outlined' ? 'background.popup' : undefined,
border: variant === 'soft' ? '1px solid' : undefined,
borderColor: variant === 'soft' ? `${color}.solidBg` : undefined,
borderRadius: 'sm',
...ATTACHMENT_MIN_STYLE,
px: 1, py: 0.5, // reduce
gap: 1,
}}
>
{isInputError && <InputErrorIndicator />}
{/* Icons: Web Page Screenshot, Converter[s] */}
{attachmentIcons(draft, props.menuShown, props.onViewImageRefPart)}
{/* Label */}
<Typography level='title-sm' sx={{ whiteSpace: 'nowrap' }}>
{isOutputLoading ? 'Converting... ' : attachmentLabelText(draft)}
</Typography>
{/* Is Converting icon */}
{isOutputLoading && <CircularProgress color='success' size='sm' />}
{/* LiveFile is supported icon */}
{hasLiveFiles && (
<TooltipOutlined title='LiveFile is supported' placement='top-end'>
<LiveFileIcon />
</TooltipOutlined>
)}
</Button>
);
}
@@ -0,0 +1,211 @@
import * as React from 'react';
import { Box, Button, CircularProgress, ColorPaletteProp, Sheet, Typography } from '@mui/joy';
import AbcIcon from '@mui/icons-material/Abc';
import CodeIcon from '@mui/icons-material/Code';
import ImageOutlinedIcon from '@mui/icons-material/ImageOutlined';
import PermMediaOutlinedIcon from '@mui/icons-material/PermMediaOutlined';
import PhotoSizeSelectLargeOutlinedIcon from '@mui/icons-material/PhotoSizeSelectLargeOutlined';
import PhotoSizeSelectSmallOutlinedIcon from '@mui/icons-material/PhotoSizeSelectSmallOutlined';
import PictureAsPdfIcon from '@mui/icons-material/PictureAsPdf';
import PivotTableChartIcon from '@mui/icons-material/PivotTableChart';
import TelegramIcon from '@mui/icons-material/Telegram';
import TextFieldsIcon from '@mui/icons-material/TextFields';
import TextureIcon from '@mui/icons-material/Texture';
import WarningRoundedIcon from '@mui/icons-material/WarningRounded';
import { GoodTooltip } from '~/common/components/GoodTooltip';
import { ellipsizeFront, ellipsizeMiddle } from '~/common/util/textUtils';
import type { AttachmentDraft, AttachmentDraftConverterType, AttachmentDraftId } from '~/common/attachment-drafts/attachment.types';
import type { LLMAttachmentDraft } from './useLLMAttachmentDrafts';
// default attachment width
const ATTACHMENT_MIN_STYLE = {
height: '100%',
minHeight: '40px',
minWidth: '64px',
};
const ellipsizeLabel = (label?: string) => {
if (!label)
return '';
return ellipsizeMiddle((label || '')
.replace(/https?:\/\/(?:www\.)?/, ''), 30)
.replace(/\/$/, '')
.replace('…', '…\n…');
};
/**
* Displayed while a source is loading
*/
const LoadingIndicator = React.forwardRef((props: { label: string }, _ref) =>
<Sheet
color='success' variant='soft'
sx={{
border: '1px solid',
borderColor: 'success.solidBg',
borderRadius: 'sm',
display: 'flex', alignItems: 'center', justifyContent: 'center', gap: 1,
...ATTACHMENT_MIN_STYLE,
boxSizing: 'border-box',
px: 1,
py: 0.5,
}}
>
<CircularProgress color='success' size='sm' />
<Typography level='title-sm' sx={{ whiteSpace: 'nowrap' }}>
{ellipsizeLabel(props.label)}
</Typography>
</Sheet>,
);
LoadingIndicator.displayName = 'LoadingIndicator';
const InputErrorIndicator = () =>
<WarningRoundedIcon sx={{ color: 'danger.solidBg' }} />;
const converterTypeToIconMap: { [key in AttachmentDraftConverterType]: React.ComponentType<any> } = {
'text': TextFieldsIcon,
'rich-text': CodeIcon,
'rich-text-table': PivotTableChartIcon,
'pdf-text': PictureAsPdfIcon,
'pdf-images': PermMediaOutlinedIcon,
'image-original': ImageOutlinedIcon,
'image-resized-high': PhotoSizeSelectLargeOutlinedIcon,
'image-resized-low': PhotoSizeSelectSmallOutlinedIcon,
'image-to-default': ImageOutlinedIcon,
'image-ocr': AbcIcon,
'ego-fragments-inlined': TelegramIcon,
'unhandled': TextureIcon,
};
function attachmentConverterIcon(attachmentDraft: AttachmentDraft) {
const converter = attachmentDraft.converterIdx !== null ? attachmentDraft.converters[attachmentDraft.converterIdx] ?? null : null;
if (converter && converter.id) {
const Icon = converterTypeToIconMap[converter.id] ?? null;
if (Icon)
return <Icon sx={{ width: 24, height: 24 }} />;
}
return null;
}
function attachmentLabelText(attachmentDraft: AttachmentDraft): string {
const converter = attachmentDraft.converterIdx !== null ? attachmentDraft.converters[attachmentDraft.converterIdx] ?? null : null;
if (converter && attachmentDraft.label === 'Rich Text') {
if (converter.id === 'rich-text-table')
return 'Rich Table';
if (converter.id === 'rich-text')
return 'Rich HTML';
}
return ellipsizeFront(attachmentDraft.label, 24);
}
/**
 * Compact button representing one attachment draft in the composer bar.
 * Reflects the draft's lifecycle via color/variant (loading: success,
 * input error: danger, warning: warning, ready: neutral outlined), shows
 * the active converter's icon and label, and toggles the per-attachment
 * menu on left or right click.
 */
export function LLMAttachmentItem(props: {
  llmAttachment: LLMAttachmentDraft,
  menuShown: boolean,
  onToggleMenu: (attachmentDraftId: AttachmentDraftId, anchor: HTMLAnchorElement) => void,
}) {

  // derived state
  const { attachmentDraft: draft, llmSupportsAllFragments } = props.llmAttachment;

  const isInputLoading = draft.inputLoading;
  const isInputError = !!draft.inputError;
  const isUnconvertible = !draft.converters.length;      // no converter available for this input type
  const isOutputLoading = draft.outputsConverting;
  const isOutputMissing = !draft.outputFragments.length; // conversion produced no fragments (yet)
  const showWarning = isUnconvertible || (isOutputMissing || !llmSupportsAllFragments);

  // handlers
  const { onToggleMenu } = props;

  const handleToggleMenu = React.useCallback((event: React.MouseEvent<HTMLAnchorElement>) => {
    event.preventDefault(); // added for the Right mouse click (to prevent the menu)
    onToggleMenu(draft.id, event.currentTarget);
  }, [draft.id, onToggleMenu]);

  // compose tooltip: 'media: label' for non-text sources, just 'label' otherwise
  let tooltip: string | null = '';
  if (draft.source.media !== 'text')
    tooltip += draft.source.media + ': ';
  tooltip += draft.label;
  // if (hasInput)
  //   tooltip += `\n(${aInput.mimeType}: ${aInput.dataSize.toLocaleString()} bytes)`;
  // if (aOutputs && aOutputs.length >= 1)
  //   tooltip += `\n\n${JSON.stringify(aOutputs)}`;

  // choose variants and color; the tooltip is suppressed (null) while the menu is shown
  let color: ColorPaletteProp;
  let variant: 'soft' | 'outlined' | 'contained' = 'soft';
  if (isInputLoading || isOutputLoading) {
    color = 'success';
  } else if (isInputError) {
    color = 'danger';
    tooltip = props.menuShown ? null
      : `Issue loading the attachment: ${draft.inputError}\n\n${tooltip}`;
  } else if (showWarning) {
    color = 'warning';
    tooltip = props.menuShown ? null
      : isUnconvertible
        ? `Attachments of type '${draft.input?.mimeType}' are not supported yet. You can open a feature request on GitHub.\n\n${tooltip}`
        : `Not compatible with the selected LLM or file not supported. Please try another format.\n\n${tooltip}`;
  } else {
    // all good
    tooltip = null;
    color = /*props.menuShown ? 'primary' :*/ 'neutral';
    variant = 'outlined';
  }

  return <Box>
    <GoodTooltip
      title={tooltip}
      isError={isInputError}
      isWarning={showWarning}
      sx={{ p: 1, whiteSpace: 'break-spaces' }}
    >
      {isInputLoading
        ? <LoadingIndicator label={draft.label} />
        : (
          <Button
            size='sm'
            variant={variant} color={color}
            onClick={handleToggleMenu}
            onContextMenu={handleToggleMenu}
            sx={{
              // while the menu is open, keep the button in its 'active' background
              backgroundColor: props.menuShown ? `${color}.softActiveBg` : variant === 'outlined' ? 'background.popup' : undefined,
              border: variant === 'soft' ? '1px solid' : undefined,
              borderColor: variant === 'soft' ? `${color}.solidBg` : undefined,
              borderRadius: 'sm',
              ...ATTACHMENT_MIN_STYLE,
              px: 1, py: 0.5,
              display: 'flex', flexDirection: 'row', gap: 1,
            }}
          >
            {isInputError
              ? <InputErrorIndicator />
              : <>
                {attachmentConverterIcon(draft)}
                {isOutputLoading
                  ? <>Converting <CircularProgress color='success' size='sm' /></>
                  : <Typography level='title-sm' sx={{ whiteSpace: 'nowrap' }}>
                    {attachmentLabelText(draft)}
                  </Typography>}
              </>}
          </Button>
        )}
    </GoodTooltip>
  </Box>;
}
@@ -1,45 +1,26 @@
import * as React from 'react';
import type { SxProps } from '@mui/joy/styles/types';
import { Box, Checkbox, Chip, CircularProgress, LinearProgress, Link, ListDivider, ListItem, ListItemDecorator, MenuItem, Radio, Typography } from '@mui/joy';
import AttachmentIcon from '@mui/icons-material/Attachment';
import { Box, CircularProgress, Link, ListDivider, ListItem, ListItemDecorator, MenuItem, Radio, Typography } from '@mui/joy';
import ClearIcon from '@mui/icons-material/Clear';
import ContentCopyIcon from '@mui/icons-material/ContentCopy';
import DeleteForeverIcon from '@mui/icons-material/DeleteForever';
import ExpandLessIcon from '@mui/icons-material/ExpandLess';
import ExpandMoreIcon from '@mui/icons-material/ExpandMore';
import KeyboardArrowLeftIcon from '@mui/icons-material/KeyboardArrowLeft';
import KeyboardArrowRightIcon from '@mui/icons-material/KeyboardArrowRight';
import LaunchIcon from '@mui/icons-material/Launch';
import ReadMoreIcon from '@mui/icons-material/ReadMore';
import VerticalAlignBottomIcon from '@mui/icons-material/VerticalAlignBottom';
import VisibilityIcon from '@mui/icons-material/Visibility';
import { CloseablePopup } from '~/common/components/CloseablePopup';
import { DMessageAttachmentFragment, DMessageDocPart, DMessageImageRefPart, isDocPart, isImageRefPart } from '~/common/stores/chat/chat.fragments';
import { LiveFileIcon } from '~/common/livefile/liveFile.icons';
import { copyToClipboard } from '~/common/util/clipboardUtils';
import { showImageDataURLInNewTab } from '~/common/util/imageUtils';
import { useUIPreferencesStore } from '~/common/state/store-ui';
import { showImageDataRefInNewTab } from '~/modules/blocks/image/RenderImageRefDBlob';
import { DMessageAttachmentFragment, isImageRefPart } from '~/common/stores/chat/chat.fragments';
import { CloseableMenu } from '~/common/components/CloseableMenu';
import type { AttachmentDraftId } from '~/common/attachment-drafts/attachment.types';
import type { AttachmentDraftsStoreApi } from '~/common/attachment-drafts/store-attachment-drafts_slice';
import type { AttachmentDraftsStoreApi } from '~/common/attachment-drafts/store-attachment-drafts-slice';
import type { LLMAttachmentDraft } from './useLLMAttachmentDrafts';
import type { LLMAttachmentDraftsAction } from './LLMAttachmentsList';
// configuration
const DEFAULT_DETAILS_OPEN = true;        // the 'Details' section of the menu starts expanded
const SHOW_INLINING_OPERATIONS = false;   // gates the 'Inline text' / 'Copy text' menu items
// style for the small inline indicator icons in the details section
const indicatorSx = {
  fontSize: '1rem',
} as const;
// left padding that aligns follow-up lines under the indicator icon above
const indicatorGapSx: SxProps = {
  paddingLeft: '1.375rem',
};
// enable for debugging
export const DEBUG_LLMATTACHMENTS = true;
export function LLMAttachmentMenu(props: {
@@ -48,56 +29,30 @@ export function LLMAttachmentMenu(props: {
menuAnchor: HTMLAnchorElement,
isPositionFirst: boolean,
isPositionLast: boolean,
onDraftAction: (attachmentDraftId: AttachmentDraftId, actionId: LLMAttachmentDraftsAction) => void,
onClose: () => void,
onDraftAction?: (attachmentDraftId: AttachmentDraftId, actionId: LLMAttachmentDraftsAction) => void,
onViewDocPart: (docPart: DMessageDocPart) => void,
onViewImageRefPart: (imageRefPart: DMessageImageRefPart) => void
}) {
// state
const [showDetails, setShowDetails] = React.useState(DEFAULT_DETAILS_OPEN);
// external state
const uiComplexityMode = useUIPreferencesStore(state => state.complexityMode);
// derived state
const isUnmoveable = props.isPositionFirst && props.isPositionLast;
const {
attachmentDraft: draft,
llmSupportsAllFragments,
llmSupportsTextFragments,
llmTokenCountApprox,
} = props.llmAttachmentDraft;
const {
id: draftId,
source: draftSource,
input: draftInput,
outputsConverting: isConverting,
} = draft;
const isInputError = !!draft.inputError;
const draftId = draft.id;
const draftInput = draft.input;
const isConverting = draft.outputsConverting;
const isUnconvertible = !draft.converters.length;
const isOutputMissing = !draft.outputFragments.length;
const isOutputMultiple = draft.outputFragments.length > 1;
const hasLiveFiles = draft.outputFragments.some(_f => _f.liveFileId);
const showWarning = isUnconvertible || isOutputMissing || !llmSupportsAllFragments;
// hooks
const handleToggleShowDetails = React.useCallback(() => {
setShowDetails(on => !on);
}, []);
const isUnmoveable = props.isPositionFirst && props.isPositionLast;
// operations
const { attachmentDraftsStoreApi, onClose, onDraftAction, onViewDocPart, onViewImageRefPart } = props;
const { attachmentDraftsStoreApi, onDraftAction, onClose } = props;
const handleMoveUp = React.useCallback(() => {
attachmentDraftsStoreApi.getState().moveAttachmentDraft(draftId, -1);
@@ -113,55 +68,23 @@ export function LLMAttachmentMenu(props: {
}, [draftId, attachmentDraftsStoreApi, onClose]);
const handleSetConverterIdx = React.useCallback(async (converterIdx: number | null) => {
return attachmentDraftsStoreApi.getState().toggleAttachmentDraftConverterAndConvert(draftId, converterIdx);
return attachmentDraftsStoreApi.getState().setAttachmentDraftConverterIdxAndConvert(draftId, converterIdx);
}, [draftId, attachmentDraftsStoreApi]);
const handleDeleteOutputFragment = React.useCallback((event: React.MouseEvent, fragmentIndex: number) => {
event.preventDefault();
event.stopPropagation();
attachmentDraftsStoreApi.getState().removeAttachmentDraftOutputFragment(draftId, fragmentIndex);
}, [attachmentDraftsStoreApi, draftId]);
// const handleSummarizeText = React.useCallback(() => {
// onAttachmentDraftSummarizeText(draftId);
// }, [draftId, onAttachmentDraftSummarizeText]);
const handleCopyToClipboard = React.useCallback((event: React.MouseEvent, text: string) => {
event.preventDefault();
event.stopPropagation();
copyToClipboard(text, 'Attachment Text');
}, []);
const handleCopyLabelToClipboard = React.useCallback((event: React.MouseEvent, text: string) => {
event.preventDefault();
event.stopPropagation();
copyToClipboard(text, 'Attachment Name');
}, []);
const handleViewImageRefPart = React.useCallback((event: React.MouseEvent, imageRefPart: DMessageImageRefPart) => {
event.preventDefault();
event.stopPropagation();
onViewImageRefPart(imageRefPart);
}, [onViewImageRefPart]);
const handleViewDocPart = React.useCallback((event: React.MouseEvent, docPart: DMessageDocPart) => {
event.preventDefault();
event.stopPropagation();
onViewDocPart(docPart);
}, [onViewDocPart]);
const canHaveDetails = !!draftInput && !isConverting;
const showInputs = uiComplexityMode !== 'minimal';
return (
<CloseablePopup
menu anchorEl={props.menuAnchor} onClose={props.onClose}
dense
maxWidth={460}
minWidth={260}
noTopPadding
placement='top'
<CloseableMenu
dense placement='top'
open anchorEl={props.menuAnchor} onClose={props.onClose}
sx={{ minWidth: 260 }}
>
{/* Move Arrows */}
{!isUnmoveable && <Box sx={{ display: 'flex', alignItems: 'center', borderBottom: '1px solid', borderColor: 'divider' }}>
{!isUnmoveable && <Box sx={{ display: 'flex', alignItems: 'center' }}>
<MenuItem
disabled={props.isPositionFirst}
onClick={handleMoveUp}
@@ -177,219 +100,117 @@ export function LLMAttachmentMenu(props: {
<KeyboardArrowRightIcon />
</MenuItem>
</Box>}
{/*{(showDetails && canHaveDetails) && <ListItem variant='soft' sx={{ fontSize: 'sm', borderBottom: '1px solid', borderColor: 'divider' }}>*/}
{/* {draft.ref}*/}
{/*</ListItem>}*/}
{!isUnmoveable && <ListDivider sx={{ mt: 0 }} />}
{/* Render Converters as menu items */}
{!isUnconvertible && (
<ListItem sx={{ fontSize: 'sm', my: 0.75 }}>
Attach {draftSource.media === 'url' ? 'web page'
: draftSource.media === 'file' ? 'file'
: draftSource.media === 'text'
? (draftSource.method === 'drop' ? 'drop' : draftSource.method === 'clipboard-read' ? 'clipboard' : draftSource.method === 'paste' ? 'paste' : '')
: ''} as:
{uiComplexityMode === 'extra' && (
<Chip component='span' size='sm' color='neutral' variant='outlined' startDecorator={<ContentCopyIcon />} onClick={(event) => handleCopyLabelToClipboard(event, draft.label)} sx={{ ml: 'auto' }}>
copy name
</Chip>
)}
<ListItem>
<Typography level='body-sm'>
Attach as:
</Typography>
</ListItem>
)}
{!isUnconvertible && draft.converters.map((c, idx) =>
<MenuItem
disabled={c.disabled || isConverting}
key={'c-' + c.id}
onClick={async () => (c.isCheckbox || !c.isActive) && await handleSetConverterIdx(idx)}
onClick={async () => idx !== draft.converterIdx && await handleSetConverterIdx(idx)}
>
<ListItemDecorator>
{(isConverting && c.isActive)
{(isConverting && idx === draft.converterIdx)
? <CircularProgress size='sm' sx={{ '--CircularProgress-size': '1.25rem' }} />
: !c.isCheckbox
? <Radio key={'rd-' + idx} checked={c.isActive} disabled={isConverting} />
: <Checkbox key={'cb-' + idx} checked={c.isActive === true} disabled={isConverting} />
}
: <Radio checked={idx === draft.converterIdx} disabled={isConverting} />}
</ListItemDecorator>
{c.unsupported
? <Box>Unsupported 🤔 <Typography level='body-xs'>{c.name}</Typography></Box>
: c.name}
</MenuItem>,
)}
{/*{!isUnconvertible && <ListDivider sx={{ mb: 0 }} />}*/}
{!isUnconvertible && <ListDivider />}
{/* Progress indicator (mainly for OCRs of Images, PDFs, and PDF to Images) */}
{!!draft.outputsConversionProgress && draft.outputsConversionProgress < 1 && (
<LinearProgress determinate value={100 * draft.outputsConversionProgress} sx={{ mx: 1 }} />
)}
{SHOW_INLINING_OPERATIONS && !!onDraftAction && <ListDivider />}
{SHOW_INLINING_OPERATIONS && !!onDraftAction && (
<MenuItem onClick={() => onDraftAction?.(draftId, 'inline-text')} disabled={!llmSupportsTextFragments || isConverting}>
<ListItemDecorator><VerticalAlignBottomIcon /></ListItemDecorator>
Inline text
</MenuItem>
)}
{SHOW_INLINING_OPERATIONS && !!onDraftAction && (
<MenuItem onClick={() => onDraftAction?.(draftId, 'copy-text')} disabled={!llmSupportsTextFragments || isConverting}>
<ListItemDecorator><ContentCopyIcon /></ListItemDecorator>
Copy text
</MenuItem>
)}
{/* Warning box */}
{(isInputError || showWarning) && (
<Box>
<MenuItem
variant='soft'
color={isInputError ? 'danger' : 'warning'}
sx={{
mt: !isInputError ? 0.75 : 0,
mb: !isInputError ? 0 : 0.75,
border: '1px solid',
borderLeft: 'none',
borderRight: 'none',
borderColor: 'divider',
fontSize: 'sm',
py: 1,
}}
>
<ListItemDecorator>
{/*<WarningRoundedIcon />*/}
</ListItemDecorator>
<Box>
<Typography color={isInputError ? 'danger' : 'warning'} level='title-sm'>
{isInputError ? 'Loading Issue' : 'Warning'}
</Typography>
{isInputError ? <div>{draft.inputError}</div>
: isUnconvertible ? <div>Attachments of type {draft.input?.mimeType} are not supported yet. You can request this on GitHub.</div>
: isOutputMissing ? <div>File not supported. Please try another format.</div>
: !llmSupportsAllFragments ? <div>May not be compatible with the current model. Please try another format.</div>
: <>Unknown warning</>}
</Box>
</MenuItem>
</Box>
)}
{/* Details Expandable Menu */}
{!isInputError && <MenuItem
variant='soft'
color={isOutputMissing ? 'warning' : 'success'}
disabled={!canHaveDetails}
onClick={handleToggleShowDetails}
sx={{
mt: (isInputError || showWarning) ? 0 : 0.75,
mb: 0.75,
border: '1px solid',
borderLeft: 'none',
borderRight: 'none',
borderColor: 'divider',
}}
>
<ListItemDecorator>
{(showDetails && canHaveDetails) ? <ExpandLessIcon /> : <ExpandMoreIcon />}
</ListItemDecorator>
{!(showDetails && canHaveDetails) ? (
<Typography sx={{ fontSize: 'sm' }}>
Details
</Typography>
) : (
<Box sx={{ my: 0.5 }}>
{/* <- inputs */}
{showInputs && !!draftInput && (
<Typography level='body-sm' textColor='text.primary' startDecorator={<AttachmentIcon sx={indicatorSx} />}>
{draftInput.mimeType}{typeof draftInput.dataSize === 'number' ? ` · ${draftInput.dataSize.toLocaleString()} bytes` : ''}
{DEBUG_LLMATTACHMENTS && !!draftInput && !isConverting && (
<ListItem>
<ListItemDecorator />
<Box>
{!!draftInput && (
<Typography level='body-sm'>
🡐 {draftInput.mimeType} · {draftInput.dataSize.toLocaleString()}
</Typography>
)}
{showInputs && !!draftInput?.altMimeType && (
<Typography level='body-sm' sx={indicatorGapSx}>
{draftInput.altMimeType} · {draftInput.altData?.length.toLocaleString()}
{!!draftInput?.altMimeType && (
<Typography level='body-sm'>
<span style={{ color: 'transparent' }}>🡐</span> {draftInput.altMimeType} · {draftInput.altData?.length.toLocaleString()}
</Typography>
)}
{showInputs && !!draftInput?.urlImage && (
<Typography level='body-sm' sx={indicatorGapSx}>
{draftInput.urlImage.mimeType} · {draftInput.urlImage.width} x {draftInput.urlImage.height} · {draftInput.urlImage.imgDataUrl?.length.toLocaleString()}
{' · '}
<Link onClick={(event) => {
event.preventDefault();
event.stopPropagation();
showImageDataURLInNewTab(draftInput?.urlImage?.imgDataUrl || '');
}}>
open <LaunchIcon sx={{ mx: 0.5, fontSize: 16 }} />
</Link>
</Typography>
)}
{/*<Typography level='body-sm'>*/}
{/* Converters: {draft.converters.map(((converter, idx) => ` ${converter.id}${converter.isActive ? '*' : ''}`)).join(', ')}*/}
{/* Converters: {aConverters.map(((converter, idx) => ` ${converter.id}${(idx === draft.converterIdx) ? '*' : ''}`)).join(', ')}*/}
{/*</Typography>*/}
{/* -> Outputs */}
<Box sx={{ mt: 1 }}>
<Box>
{isOutputMissing ? (
<Typography level='body-sm' startDecorator={<ReadMoreIcon sx={indicatorSx} />}>...</Typography>
<Typography level='body-sm'>🡒 ...</Typography>
) : (
draft.outputFragments.map(({ part }, index) => {
if (isDocPart(part)) {
return (
<Typography key={index} level='body-sm' sx={{ color: 'text.primary' }} startDecorator={<ReadMoreIcon sx={indicatorSx} />}>
<span>{part.data.mimeType /* part.type: big-agi type, not source mime */} · {part.data.text.length.toLocaleString()} bytes ·&nbsp;</span>
<Chip component='span' size='sm' color='primary' variant='outlined' startDecorator={<VisibilityIcon />} onClick={(event) => handleViewDocPart(event, part)}>
view
</Chip>
<Chip component='span' size='sm' color='success' variant='outlined' startDecorator={<ContentCopyIcon />} onClick={(event) => handleCopyToClipboard(event, part.data.text)}>
copy
</Chip>
</Typography>
);
} else if (isImageRefPart(part)) {
const resolution = part.width && part.height ? `${part.width} x ${part.height}` : 'no resolution';
if (isImageRefPart(part)) {
const resolution = part.width && part.height ? `${part.width} x ${part.height}` : 'unknown resolution';
const mime = part.dataRef.reftype === 'dblob' ? part.dataRef.mimeType : 'unknown image';
return (
<Typography key={index} level='body-sm' sx={{ color: 'text.primary' }} startDecorator={<ReadMoreIcon sx={indicatorSx} />}>
<span>{mime /*.replace('image/', 'img: ')*/} · {resolution} · {part.dataRef.reftype === 'dblob' ? (part.dataRef.bytesSize?.toLocaleString() || 'no size') : '(remote)'} ·&nbsp;</span>
<Chip component='span' size={isOutputMultiple ? 'sm' : 'md'} color='primary' variant='outlined' startDecorator={<VisibilityIcon />} onClick={(event) => handleViewImageRefPart(event, part)}>
view
</Chip>
{isOutputMultiple && <Chip component='span' size={isOutputMultiple ? 'sm' : 'md'} color='danger' variant='outlined' startDecorator={<DeleteForeverIcon />} onClick={(event) => handleDeleteOutputFragment(event, index)}>
del
</Chip>}
<Typography key={index} level='body-sm'>
🡒 {mime/*unic.replace('image/', 'img: ')*/} · {resolution} · {part.dataRef.reftype === 'dblob' ? part.dataRef.bytesSize?.toLocaleString() : '(remote)'}
{' · '}
<Link onClick={() => showImageDataRefInNewTab(part.dataRef)}>
open <LaunchIcon sx={{ mx: 0.5, fontSize: 16 }} />
</Link>
</Typography>
);
} else if (part.pt === 'doc') {
return (
<Typography key={index} level='body-sm'>
🡒 text: {part.data.text.length.toLocaleString()} bytes
</Typography>
);
} else {
return (
<Typography key={index} level='body-sm' sx={{ color: 'text.primary' }} startDecorator={<ReadMoreIcon sx={indicatorSx} />}>
{(part as DMessageAttachmentFragment['part']).pt}: (other)
<Typography key={index} level='body-sm'>
🡒 {(part as DMessageAttachmentFragment['part']).pt}: (other)
</Typography>
);
}
})
)}
{!!llmTokenCountApprox && (
<Typography level='body-xs' mt={0.5} sx={indicatorGapSx}>
~{llmTokenCountApprox.toLocaleString()} tokens
<Typography level='body-sm' sx={{ ml: 1.75 }}>
~ {llmTokenCountApprox.toLocaleString()} tokens
</Typography>
)}
</Box>
{/* LiveFile notice */}
{hasLiveFiles && !!draftInput && (
<Typography level='body-xs' color='success' mt={1} startDecorator={<LiveFileIcon sx={{ width: 16, height: 16 }} />}>
LiveFile is supported
</Typography>
)}
</Box>
)}
</MenuItem>}
</ListItem>
)}
{DEBUG_LLMATTACHMENTS && !!draftInput && !isConverting && <ListDivider />}
{/* Remove */}
{/* Destructive Operations */}
{/*<MenuItem onClick={handleCopyToClipboard} disabled={!isOutputTextInlineable}>*/}
{/* <ListItemDecorator><ContentCopyIcon /></ListItemDecorator>*/}
{/* Copy*/}
{/*</MenuItem>*/}
{/*<MenuItem onClick={handleSummarizeText} disabled={!isOutputTextInlineable}>*/}
{/* <ListItemDecorator><CompressIcon color='success' /></ListItemDecorator>*/}
{/* Shrink*/}
{/*</MenuItem>*/}
<MenuItem onClick={() => onDraftAction(draftId, 'inline-text')} disabled={!llmSupportsTextFragments || isConverting}>
<ListItemDecorator><VerticalAlignBottomIcon /></ListItemDecorator>
Inline text
</MenuItem>
<MenuItem onClick={() => onDraftAction(draftId, 'copy-text')} disabled={!llmSupportsTextFragments || isConverting}>
<ListItemDecorator><ContentCopyIcon /></ListItemDecorator>
Copy text
</MenuItem>
<ListDivider />
<MenuItem onClick={handleRemove}>
<ListItemDecorator><ClearIcon /></ListItemDecorator>
Remove
</MenuItem>
</CloseablePopup>
</CloseableMenu>
);
}
}
@@ -1,88 +1,43 @@
import * as React from 'react';
import { Box, CircularProgress, IconButton, ListDivider, ListItemDecorator, MenuItem } from '@mui/joy';
import AutoFixHighIcon from '@mui/icons-material/AutoFixHigh';
import { Box, IconButton, ListDivider, ListItemDecorator, MenuItem } from '@mui/joy';
import ClearIcon from '@mui/icons-material/Clear';
import ContentCopyIcon from '@mui/icons-material/ContentCopy';
import ExpandLessIcon from '@mui/icons-material/ExpandLess';
import VerticalAlignBottomIcon from '@mui/icons-material/VerticalAlignBottom';
import type { AgiAttachmentPromptsData } from '~/modules/aifn/agiattachmentprompts/useAgiAttachmentPrompts';
import { CloseablePopup } from '~/common/components/CloseablePopup';
import { ConfirmationModal } from '~/common/components/modals/ConfirmationModal';
import { useOverlayComponents } from '~/common/layout/overlays/useOverlayComponents';
import { CloseableMenu } from '~/common/components/CloseableMenu';
import { ConfirmationModal } from '~/common/components/ConfirmationModal';
import type { AttachmentDraftId } from '~/common/attachment-drafts/attachment.types';
import type { AttachmentDraftsStoreApi } from '~/common/attachment-drafts/store-attachment-drafts_slice';
import type { DMessageDocPart, DMessageImageRefPart } from '~/common/stores/chat/chat.fragments';
import type { AttachmentDraftsStoreApi } from '~/common/attachment-drafts/store-attachment-drafts-slice';
import { ViewImageRefPartModal } from '../../message/fragments-content/ViewImageRefPartModal';
import type { LLMAttachmentDraft } from './useLLMAttachmentDrafts';
import { LLMAttachmentButtonMemo } from './LLMAttachmentButton';
import type { LLMAttachmentDrafts } from './useLLMAttachmentDrafts';
import { LLMAttachmentItem } from './LLMAttachmentItem';
import { LLMAttachmentMenu } from './LLMAttachmentMenu';
import { LLMAttachmentsPromptsButtonMemo } from './LLMAttachmentsPromptsButton';
import { ViewDocPartModal } from '../../message/fragments-content/ViewDocPartModal';
// destructive actions an attachment draft (or all drafts) can request from the caller
export type LLMAttachmentDraftsAction = 'inline-text' | 'copy-text';

// shared styles for the attachments bar and its overflow-menu button
const _style = {
  // bar container: relative, to anchor the absolute overflow button
  bar: {
    position: 'relative',
  } as const,
  // single-row layout: horizontally scrollable, right padding clears the overflow button
  barScrollX: {
    height: '100%',
    pr: 5,
    overflowX: 'auto',
    display: 'flex',
    alignItems: 'center',
    gap: 1,
  } as const,
  // multi-row layout: items wrap instead of scrolling
  barWraps: {
    display: 'flex',
    flexWrap: 'wrap',
    alignItems: 'center',
    gap: 1,
  } as const,
  // overflow ('expand') button pinned to the bar's top-right corner
  barMenuButton: {
    // borderRadius: 'sm',
    borderRadius: 0,
    position: 'absolute', right: 0, top: 0,
    backgroundColor: 'neutral.softDisabledBg',
  } as const,
} as const;
/**
* Renderer of attachment drafts, with menus, etc.
*/
export function LLMAttachmentsList(props: {
agiAttachmentPrompts?: AgiAttachmentPromptsData,
attachmentDraftsStoreApi: AttachmentDraftsStoreApi,
canInlineSomeFragments: boolean,
llmAttachmentDrafts: LLMAttachmentDraft[],
onAttachmentDraftsAction?: (attachmentDraftId: AttachmentDraftId | null, actionId: LLMAttachmentDraftsAction) => void,
buttonsCanWrap?: boolean,
llmAttachmentDrafts: LLMAttachmentDrafts,
onAttachmentDraftsAction: (attachmentDraftId: AttachmentDraftId | null, actionId: LLMAttachmentDraftsAction) => void,
}) {
// state
const { showPromisedOverlay } = useOverlayComponents();
const [confirmClearAttachmentDrafts, setConfirmClearAttachmentDrafts] = React.useState<boolean>(false);
const [draftMenu, setDraftMenu] = React.useState<{ anchor: HTMLAnchorElement, attachmentDraftId: AttachmentDraftId } | null>(null);
const [overallMenuAnchor, setOverallMenuAnchor] = React.useState<HTMLAnchorElement | null>(null);
const [viewerDocPart, setViewerDocPart] = React.useState<DMessageDocPart | null>(null);
const [viewerImageRefPart, setViewerImageRefPart] = React.useState<DMessageImageRefPart | null>(null);
// derived state
const { agiAttachmentPrompts, canInlineSomeFragments, llmAttachmentDrafts } = props;
const { llmAttachmentDrafts, canInlineSomeFragments } = props.llmAttachmentDrafts;
const hasAttachments = llmAttachmentDrafts.length >= 1;
// derived item menu state
@@ -100,34 +55,28 @@ export function LLMAttachmentsList(props: {
const handleOverallMenuHide = React.useCallback(() => setOverallMenuAnchor(null), []);
const handleOverallMenuToggle = React.useCallback((event: React.MouseEvent<HTMLAnchorElement>) => {
event.shiftKey && console.log('llmAttachmentDrafts', llmAttachmentDrafts);
event.shiftKey && console.log(llmAttachmentDrafts);
event.preventDefault(); // added for the Right mouse click (to prevent the menu)
setOverallMenuAnchor(anchor => anchor ? null : event.currentTarget);
}, [llmAttachmentDrafts]);
const handleOverallCopyText = React.useCallback(() => {
handleOverallMenuHide();
onAttachmentDraftsAction?.(null, 'copy-text');
onAttachmentDraftsAction(null, 'copy-text');
}, [handleOverallMenuHide, onAttachmentDraftsAction]);
const handleOverallInlineText = React.useCallback(() => {
handleOverallMenuHide();
onAttachmentDraftsAction?.(null, 'inline-text');
onAttachmentDraftsAction(null, 'inline-text');
}, [handleOverallMenuHide, onAttachmentDraftsAction]);
const handleOverallClear = React.useCallback(async () => {
if (await showPromisedOverlay('chat-attachments-clear', { rejectWithValue: false }, ({ onResolve, onUserReject }) =>
<ConfirmationModal
open onClose={onUserReject} onPositive={() => onResolve(true)}
title='Confirm Removal'
positiveActionText='Remove All'
confirmationText={`This action will remove all (${llmAttachmentDrafts.length}) attachments. Do you want to proceed?`}
/>,
)) {
handleOverallMenuHide();
props.attachmentDraftsStoreApi.getState().removeAllAttachmentDrafts();
}
}, [handleOverallMenuHide, llmAttachmentDrafts.length, props.attachmentDraftsStoreApi, showPromisedOverlay]);
const handleOverallClear = React.useCallback(() => setConfirmClearAttachmentDrafts(true), []);
const handleOverallClearConfirmed = React.useCallback(() => {
handleOverallMenuHide();
setConfirmClearAttachmentDrafts(false);
props.attachmentDraftsStoreApi.getState().removeAllAttachmentDrafts();
}, [handleOverallMenuHide, props.attachmentDraftsStoreApi]);
// item menu
@@ -142,25 +91,9 @@ export function LLMAttachmentsList(props: {
const handleDraftAction = React.useCallback((attachmentDraftId: AttachmentDraftId, actionId: LLMAttachmentDraftsAction) => {
// pass-through, but close the menu as well, as the action is destructive for the caller
handleDraftMenuHide();
onAttachmentDraftsAction?.(attachmentDraftId, actionId);
onAttachmentDraftsAction(attachmentDraftId, actionId);
}, [handleDraftMenuHide, onAttachmentDraftsAction]);
const handleViewImageRefPart = React.useCallback((imageRefPart: DMessageImageRefPart) => {
setViewerImageRefPart(imageRefPart);
}, []);
const handleCloseImageViewer = React.useCallback(() => {
setViewerImageRefPart(null);
}, []);
const handleViewDocPart = React.useCallback((docPart: DMessageDocPart) => {
setViewerDocPart(docPart);
}, []);
const handleCloseDocPartViewer = React.useCallback(() => {
setViewerDocPart(null);
}, []);
// no components without attachments
if (!hasAttachments)
@@ -169,55 +102,38 @@ export function LLMAttachmentsList(props: {
return <>
{/* Attachment Drafts bar */}
<Box sx={_style.bar}>
<Box sx={{ position: 'relative' }}>
{/* Horizontally scrollable */}
<Box sx={!props.buttonsCanWrap ? _style.barScrollX : _style.barWraps}>
{/* AI Suggestion Button */}
{(!!agiAttachmentPrompts && (agiAttachmentPrompts.isVisible || agiAttachmentPrompts.hasData)) && (
<LLMAttachmentsPromptsButtonMemo data={agiAttachmentPrompts} />
)}
{/* Attachment Buttons */}
{/* Horizontally scrollable Attachments */}
<Box sx={{ display: 'flex', overflowX: 'auto', gap: 1, height: '100%', pr: 5 }}>
{llmAttachmentDrafts.map((llmAttachment) =>
<LLMAttachmentButtonMemo
<LLMAttachmentItem
key={llmAttachment.attachmentDraft.id}
llmAttachment={llmAttachment}
menuShown={llmAttachment.attachmentDraft.id === itemMenuAttachmentDraftId}
onToggleMenu={handleDraftMenuToggle}
onViewImageRefPart={handleViewImageRefPart}
/>,
)}
</Box>
{/* Overall Menu button */}
{!_style.barWraps && (
<IconButton
onClick={handleOverallMenuToggle}
onContextMenu={handleOverallMenuToggle}
sx={_style.barMenuButton}
>
<ExpandLessIcon />
</IconButton>
)}
<IconButton
onClick={handleOverallMenuToggle}
onContextMenu={handleOverallMenuToggle}
sx={{
// borderRadius: 'sm',
borderRadius: 0,
position: 'absolute', right: 0, top: 0,
backgroundColor: 'neutral.softDisabledBg',
}}
>
<ExpandLessIcon />
</IconButton>
</Box>
{/* Image Viewer Modal - when opening attachment images */}
{!!viewerImageRefPart && (
<ViewImageRefPartModal imageRefPart={viewerImageRefPart} onClose={handleCloseImageViewer} />
)}
{/* Text Viewer Modal */}
{!!viewerDocPart && (
<ViewDocPartModal docPart={viewerDocPart} onClose={handleCloseDocPartViewer} />
)}
{/* Single LLM Attachment Draft Menu */}
{/* LLM Draft Menu */}
{!!itemMenuAnchor && !!itemMenuAttachmentDraft && !!props.attachmentDraftsStoreApi && (
<LLMAttachmentMenu
attachmentDraftsStoreApi={props.attachmentDraftsStoreApi}
@@ -225,46 +141,43 @@ export function LLMAttachmentsList(props: {
menuAnchor={itemMenuAnchor}
isPositionFirst={itemMenuIndex === 0}
isPositionLast={itemMenuIndex === llmAttachmentDrafts.length - 1}
onDraftAction={handleDraftAction}
onClose={handleDraftMenuHide}
onDraftAction={!onAttachmentDraftsAction ? undefined : handleDraftAction}
onViewDocPart={handleViewDocPart}
onViewImageRefPart={handleViewImageRefPart}
/>
)}
{/* All Drafts Menu */}
{!!overallMenuAnchor && (
<CloseablePopup
menu anchorEl={overallMenuAnchor} onClose={handleOverallMenuHide}
dense
minWidth={200}
placement='top-start'
<CloseableMenu
dense placement='top-start'
open anchorEl={overallMenuAnchor} onClose={handleOverallMenuHide}
sx={{ minWidth: 200 }}
>
{/* uses the agiAttachmentPrompts to imagine what the user will ask aboud those */}
{!!agiAttachmentPrompts && (
<MenuItem color='primary' variant='soft' onClick={agiAttachmentPrompts.refetch} disabled={!hasAttachments || agiAttachmentPrompts.isFetching}>
<ListItemDecorator>{agiAttachmentPrompts.isFetching ? <CircularProgress size='sm' /> : <AutoFixHighIcon />}</ListItemDecorator>
What can I do?
</MenuItem>
)}
{!!agiAttachmentPrompts && <ListDivider />}
{!!onAttachmentDraftsAction && <MenuItem onClick={handleOverallInlineText} disabled={!canInlineSomeFragments}>
<MenuItem onClick={handleOverallInlineText} disabled={!canInlineSomeFragments}>
<ListItemDecorator><VerticalAlignBottomIcon /></ListItemDecorator>
Inline all text
</MenuItem>}
{!!onAttachmentDraftsAction && <MenuItem onClick={handleOverallCopyText} disabled={!canInlineSomeFragments}>
</MenuItem>
<MenuItem onClick={handleOverallCopyText} disabled={!canInlineSomeFragments}>
<ListItemDecorator><ContentCopyIcon /></ListItemDecorator>
Copy all text
</MenuItem>}
{!!onAttachmentDraftsAction && <ListDivider />}
</MenuItem>
<ListDivider />
<MenuItem onClick={handleOverallClear}>
<ListItemDecorator><ClearIcon /></ListItemDecorator>
Remove All{llmAttachmentDrafts.length > 5 ? <span style={{ opacity: 0.5 }}> {llmAttachmentDrafts.length} attachments</span> : null}
</MenuItem>
</CloseablePopup>
</CloseableMenu>
)}
{/* 'Clear' Confirmation */}
{confirmClearAttachmentDrafts && (
<ConfirmationModal
open onClose={() => setConfirmClearAttachmentDrafts(false)} onPositive={handleOverallClearConfirmed}
title='Confirm Removal'
positiveActionText='Remove All'
confirmationText={`This action will remove all (${llmAttachmentDrafts.length}) attachments. Do you want to proceed?`}
/>
)}
</>;
@@ -1,71 +0,0 @@
import * as React from 'react';
import type { SxProps } from '@mui/joy/styles/types';
import { Box, CircularProgress, IconButton, Tooltip } from '@mui/joy';
import AutoFixHighIcon from '@mui/icons-material/AutoFixHigh';
import type { AgiAttachmentPromptsData } from '~/modules/aifn/agiattachmentprompts/useAgiAttachmentPrompts';
import { AgiSquircleIcon } from '~/common/components/icons/AgiSquircleIcon';
import { AGI_SUGGESTIONS_COLOR } from '../textarea/ComposerTextAreaActions';
export const LLMAttachmentsPromptsButtonMemo = React.memo(LLMAttachmentsPromptsButton);
// base style for the prompt-suggestions icon button: inset tinted glow,
// pill shape with a squared bottom-left corner, solid-color hover
const promptGenIconButtonSx: SxProps = {
  // minWidth: 40,
  backgroundColor: 'background.level1',
  boxShadow: `inset 0 4px 6px -4px rgb(var(--joy-palette-${AGI_SUGGESTIONS_COLOR}-darkChannel) / 40%)`,
  borderRadius: '2rem',
  borderBottomLeftRadius: 0,
  // borderColor: `${AGI_SUGGESTIONS_COLOR}.outlinedBorder`,
  // '&:hover': {
  //   backgroundColor: 'background.level1',
  // },
  '&:hover': {
    backgroundColor: `${AGI_SUGGESTIONS_COLOR}.solidBg`,
    borderColor: `${AGI_SUGGESTIONS_COLOR}.solidBg`,
    color: `${AGI_SUGGESTIONS_COLOR}.solidColor`,
  },
};
// Variant of promptGenIconButtonSx used once data has loaded: brighter (popup)
// background and a light outer shadow instead of the inset one.
const brightenSx: SxProps = {
  ...promptGenIconButtonSx,
  backgroundColor: 'background.popup',
  boxShadow: 'xs',
};
/**
 * Icon button that triggers (re)generation of AGI prompt suggestions for the current attachments.
 *
 * @param data - fetch-state object from useAgiAttachmentPrompts: drives the tooltip,
 *               color/variant, disabled state, and the spinner-vs-icon content.
 *
 * States rendered:
 *  - error:    danger color, tooltip shows the error message
 *  - fetching: button disabled, spinner shown, no tooltip
 *  - pending:  invitation tooltip ("What can I do?")
 *  - hasData:  brightened style, tooltip offers "Give me more ideas"
 */
function LLMAttachmentsPromptsButton({ data }: { data: AgiAttachmentPromptsData }) {

  // tooltip content by state; null while fetching suppresses the tooltip entirely
  const tooltipTitle =
    data.error ? (data.error.message || 'Error guessing actions')
      : data.isFetching ? null
        : data.isPending ? <Box sx={{ display: 'flex', gap: 1 }}><AgiSquircleIcon inverted sx={{ color: 'white', borderRadius: '1rem' }} /> What can I do?</Box>
          : 'Give me more ideas';

  const button = (
    <IconButton
      variant={data.error ? 'soft' : data.hasData ? 'outlined' : 'soft'}
      // FIX: both non-error branches of the former ternary resolved to AGI_SUGGESTIONS_COLOR,
      // so the `data.hasData` check was a dead branch — collapsed to a single ternary.
      color={data.error ? 'danger' : AGI_SUGGESTIONS_COLOR}
      size='sm'
      disabled={data.isFetching}
      onClick={data.refetch}
      // onClick={data.hasData ? data.clear : data.refetch}
      sx={(data.hasData && !data.isFetching) ? brightenSx : promptGenIconButtonSx}
    >
      {data.isFetching ? (
        <CircularProgress size='sm' color='neutral' />
      ) : (
        <AutoFixHighIcon fontSize='small' />
      )}
    </IconButton>
  );

  // only wrap in a Tooltip when there is something to show (i.e., not fetching)
  return !tooltipTitle ? button : (
    <Tooltip variant='outlined' disableInteractive placement='left' arrow title={tooltipTitle}>
      {button}
    </Tooltip>
  );
}
@@ -1,12 +1,13 @@
import * as React from 'react';
import { DLLM, LLM_IF_OAI_Vision } from '~/modules/llms/store-llms';
import type { AttachmentDraft } from '~/common/attachment-drafts/attachment.types';
import type { DLLM } from '~/common/stores/llms/llms.types';
import type { DMessageAttachmentFragment } from '~/common/stores/chat/chat.fragments';
import { estimateTokensForFragments } from '~/common/stores/chat/chat.tokens';
export interface LLMAttachmentDraftsCollection {
export interface LLMAttachmentDrafts {
llmAttachmentDrafts: LLMAttachmentDraft[];
canAttachAllFragments: boolean;
canInlineSomeFragments: boolean;
@@ -22,52 +23,23 @@ export interface LLMAttachmentDraft {
}
export function useLLMAttachmentDrafts(attachmentDrafts: AttachmentDraft[], chatLLM: DLLM | null, chatLLMSupportsImages: boolean): LLMAttachmentDraftsCollection {
/* [Optimization] Use a Ref to store the previous state of llmAttachmentDrafts and chatLLM
*
* Note that this works on 2 levels:
* - 1. avoids recomputation, but more importantly,
* - 2. avoids re-rendering by keeping those llmAttachmentDrafts objects stable.
*
* Important to notice that the attachmentDraft objects[] are stable to start with, so we can
* safely use reference equality to check if internal properties (or order) have changed.
*/
const prevStateRef = React.useRef<{
chatLLM: DLLM | null;
llmAttachmentDrafts: LLMAttachmentDraft[];
}>({ llmAttachmentDrafts: [], chatLLM: null });
export function useLLMAttachmentDrafts(attachmentDrafts: AttachmentDraft[], chatLLM: DLLM | null): LLMAttachmentDrafts {
return React.useMemo(() => {
// [Optimization]
const equalChatLLM = chatLLM === prevStateRef.current.chatLLM;
// LLM-dependent multi-modal enablement
const supportedTypes: DMessageAttachmentFragment['part']['pt'][] = chatLLMSupportsImages ? ['image_ref', 'doc'] : ['doc'];
const supportsImages = !!chatLLM?.interfaces?.includes(LLM_IF_OAI_Vision);
const supportedTypes: DMessageAttachmentFragment['part']['pt'][] = supportsImages ? ['image_ref', 'doc'] : ['doc'];
const supportedTextTypes: DMessageAttachmentFragment['part']['pt'][] = supportedTypes.filter(pt => pt === 'doc');
// Add LLM-specific properties to each attachment draft
const llmAttachmentDrafts = attachmentDrafts.map((a, index) => {
// [Optimization] If not change in LLM and the attachmentDraft is the same object reference, reuse the previous LLMAttachmentDraft
let prevDraft: LLMAttachmentDraft | undefined = prevStateRef.current.llmAttachmentDrafts[index];
// if not found, search by id
if (!prevDraft)
prevDraft = prevStateRef.current.llmAttachmentDrafts.find(_pd => _pd.attachmentDraft.id === a.id);
if (equalChatLLM && prevDraft && prevDraft.attachmentDraft === a)
return prevDraft;
// Otherwise, create a new LLMAttachmentDraft
return {
attachmentDraft: a,
llmSupportsAllFragments: !a.outputFragments ? false : a.outputFragments.every(op => supportedTypes.includes(op.part.pt)),
llmSupportsTextFragments: !a.outputFragments ? false : a.outputFragments.some(op => supportedTextTypes.includes(op.part.pt)),
llmTokenCountApprox: chatLLM
? estimateTokensForFragments(chatLLM, 'user', a.outputFragments, true, 'useLLMAttachmentDrafts')
: null,
};
});
const llmAttachmentDrafts = attachmentDrafts.map((a): LLMAttachmentDraft => ({
attachmentDraft: a,
llmSupportsAllFragments: !a.outputFragments ? false : a.outputFragments.every(op => supportedTypes.includes(op.part.pt)),
llmSupportsTextFragments: !a.outputFragments ? false : a.outputFragments.some(op => supportedTextTypes.includes(op.part.pt)),
llmTokenCountApprox: chatLLM
? estimateTokensForFragments(a.outputFragments, chatLLM, true, 'useLLMAttachmentDrafts')
: null,
}));
// Calculate the overall properties
const canAttachAllFragments = llmAttachmentDrafts.every(a => a.llmSupportsAllFragments);
@@ -76,15 +48,11 @@ export function useLLMAttachmentDrafts(attachmentDrafts: AttachmentDraft[], chat
? llmAttachmentDrafts.reduce((acc, a) => acc + (a.llmTokenCountApprox || 0), 0)
: null;
// [Optimization] Update the ref with the new state
prevStateRef.current = { llmAttachmentDrafts, chatLLM };
return {
llmAttachmentDrafts,
canAttachAllFragments,
canInlineSomeFragments,
llmTokenCountApprox,
};
}, [attachmentDrafts, chatLLM, chatLLMSupportsImages]); // Dependencies for the outer useMemo
}, [attachmentDrafts, chatLLM]);
}

Some files were not shown because too many files have changed in this diff Show More