mirror of
https://github.com/enricoros/big-AGI.git
synced 2026-05-11 06:00:15 -07:00
Compare commits
141 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 13f502bd76 | |||
| 11055b12ca | |||
| d0ea96eec0 | |||
| 02eafc03f1 | |||
| 33d07a0313 | |||
| 763b852148 | |||
| d5b0617fd7 | |||
| e3ce83674c | |||
| 5cc5df6909 | |||
| 11d8cf8996 | |||
| eae578970e | |||
| e076953c6a | |||
| 5c455591ea | |||
| 19b3dcd927 | |||
| 702e27edbf | |||
| 7c872de9af | |||
| 53b18143e7 | |||
| d812813aac | |||
| 9505b7fd7f | |||
| 9e07822598 | |||
| 6d6604a043 | |||
| 64d5071eb4 | |||
| 4a29ff0b19 | |||
| 6acab83ac5 | |||
| a3391b46ec | |||
| 9d021a0ea9 | |||
| 5b35435136 | |||
| 38b1cd1e4b | |||
| 50e4bf30f2 | |||
| 6f8d6462b9 | |||
| 596bb1ccc6 | |||
| 8023d4fd7e | |||
| 5808c5ae27 | |||
| 0945bc1e74 | |||
| c82ea978da | |||
| 9184e28691 | |||
| 59784af72c | |||
| 8feb1881b9 | |||
| 62747e07f1 | |||
| 934511a21f | |||
| e36b71db9c | |||
| 924cd7018f | |||
| d5e91f9ce7 | |||
| f1ad8cd55e | |||
| d177c73642 | |||
| 011bcf8ccd | |||
| 7d0e5809e1 | |||
| b369148057 | |||
| 2e0105b5ed | |||
| 3f24ade8e6 | |||
| 9cdaf26174 | |||
| 3b2c604615 | |||
| 223689316b | |||
| 6456a0de0c | |||
| 57458fb32f | |||
| b2521060cc | |||
| 13b6a1ba7e | |||
| ec81d802d5 | |||
| f6eca257d6 | |||
| e744b1afcd | |||
| bfcae972f7 | |||
| 360f886c37 | |||
| 305c278e1c | |||
| ccfcf6235f | |||
| 62f7d92bb2 | |||
| f8915141c8 | |||
| 7e1e4af19b | |||
| 439c462a9b | |||
| 95aa71abd6 | |||
| 3c829cbf97 | |||
| 29a31d5ca3 | |||
| 4a8bb24c0f | |||
| 6b6c3afe0c | |||
| fd41388584 | |||
| b418b69dc3 | |||
| e1e2962a02 | |||
| f1662e174f | |||
| a73c55fc1f | |||
| 0aa923a99d | |||
| b75160bb2b | |||
| 3d515102a1 | |||
| b857cc18d8 | |||
| 4737d962db | |||
| 7ba71078a8 | |||
| bee0fa8751 | |||
| 5916dfb08d | |||
| 9d13b03923 | |||
| 48e6385ac7 | |||
| cf664ff486 | |||
| 5ccf8ba128 | |||
| 3cd5917207 | |||
| e2dcca274f | |||
| 7369e898af | |||
| 1e2c12fddb | |||
| 4f7369b940 | |||
| f566049890 | |||
| fbc2da8b09 | |||
| af70b39515 | |||
| e080d72e8a | |||
| fd24e3676a | |||
| 942cd461f5 | |||
| 9567e1cbaa | |||
| 2d5d31268e | |||
| b376608709 | |||
| 551e502caf | |||
| 9fb7fcd22f | |||
| 1cda7d195b | |||
| 4a02923dda | |||
| a8a45631c2 | |||
| eaa755d4ce | |||
| 872396a90e | |||
| 6b3a2772cc | |||
| f378733abe | |||
| 0cf8f0439d | |||
| ab53087b3a | |||
| b50923a3b7 | |||
| 1b4a8da313 | |||
| 31684c2fee | |||
| fedd4b1fda | |||
| a41667f427 | |||
| 021fa3b313 | |||
| b7ca69aa0e | |||
| 1efcadbf46 | |||
| 598a6a8e0b | |||
| 1cd441a2f5 | |||
| 783dc55d02 | |||
| 88418d1ed0 | |||
| 6a74d1900f | |||
| 5566e29bcc | |||
| 1f49195251 | |||
| c5e15ece14 | |||
| 7ceb176d70 | |||
| b93bd1bd0b | |||
| 088133ec37 | |||
| 784766442d | |||
| e014a7c828 | |||
| 224e745a71 | |||
| 89f3e6f955 | |||
| e79b429c5e | |||
| c240f6bd5b | |||
| 33312e0fd9 |
@@ -15,12 +15,14 @@ assignees: enricoros
|
||||
- [ ] Assign all the shipped roadmap Issues
|
||||
- [ ] Assign the relevant [recently closed Issues](https://github.com/enricoros/big-agi/issues?q=is%3Aclosed+sort%3Aupdated-desc)
|
||||
- Code changes:
|
||||
- [ ] Create a release branch 'release-x.y.z', and commit:
|
||||
- [ ] Create a release branch 'release-x.y.z': `git checkout -b release-1.2.3`
|
||||
- [ ] Create a temporary tag `git tag v1.2.3 && git push opensource --tags`
|
||||
- [ ] Create a [New Draft GitHub Release](https://github.com/enricoros/big-agi/releases/new), and generate the automated changelog (for new contributors)
|
||||
- [ ] Update the release version in package.json, and `npm i`
|
||||
- [ ] Update in-app News [src/apps/news/news.data.tsx](src/apps/news/news.data.tsx)
|
||||
- [ ] Update in-app News [src/apps/news/news.data.tsx](/src/apps/news/news.data.tsx)
|
||||
- [ ] Update the in-app News version number
|
||||
- [ ] Update the readme with the new release
|
||||
- [ ] Copy the highlights to the [changelog](docs/changelog.md)
|
||||
- [ ] Copy the highlights to the [docs/changelog.md](/docs/changelog.md)
|
||||
- Release:
|
||||
- [ ] merge onto main
|
||||
- [ ] verify deployment on Vercel
|
||||
@@ -32,20 +34,32 @@ assignees: enricoros
|
||||
- [ ] Discord announcement
|
||||
- [ ] Twitter announcement
|
||||
|
||||
## Artifacts
|
||||
|
||||
1) first copy and paste the former release `discord announcement`, `news.data.ts`, `changelog.md`, `README.md`
|
||||
2) then copy and paste the milestone and each individual issue (content will be downloaded)
|
||||
3) then paste the git changelog 1.2.2...1.2.3
|
||||
## Links
|
||||
Milestone:
|
||||
Former release task:
|
||||
GitHub release:
|
||||
|
||||
### news.data.tsx
|
||||
|
||||
## Artifacts Generation
|
||||
|
||||
1) The following is my opensource application
|
||||
- paste README.md
|
||||
2) I am announcing a new version, 1.7.0. The following were the announcements for 1.6.0. Discord announcement, GitHub Release, in-app news.data.tsx, changelog.md.
|
||||
- paste the former: `discord announcement`, `GitHub release`, `news.data.tsx`, `changelog.md`
|
||||
3) The following is the new data I have for 1.7.0
|
||||
- paste the link to the milestone (closed) and each individual issue (content will be downloaded)
|
||||
- paste the git changelog `git log v1.6.0..v1.7.0 | clip`
|
||||
|
||||
|
||||
### news.data.TSX
|
||||
|
||||
```markdown
|
||||
I need the following from you:
|
||||
|
||||
1. a table summarizing all the new features in 1.2.3, which will be used for the artifacts later
|
||||
1. a table summarizing all the new features in 1.2.3 (description, significance, usefulness, do not link the commit, but have the issue number), which will be used for the artifacts later
|
||||
2. after the table score each feature from a user impact and magnitude point of view
|
||||
3. Improve the table, in decreasing order of importance for features, fixing any detail that's missing
|
||||
3. Improve the table, in decreasing order of importance for features, fixing any detail that's missing, in particular check if there are commits of significance from a user or developer point of view, which are not contained in the table
|
||||
4. I want you then to update the news.data.tsx for the new release
|
||||
```
|
||||
|
||||
|
||||
@@ -7,11 +7,15 @@
|
||||
# To get a newer version, you will need to update the SHA.
|
||||
# You can also reference a tag or branch, but the action may change without warning.
|
||||
|
||||
name: Create and publish a Docker image
|
||||
name: Create and publish Docker images
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ['main']
|
||||
branches:
|
||||
- main
|
||||
- main-stable # Trigger on pushes to the main-stable branch
|
||||
tags:
|
||||
- 'v*' # Trigger on version tags (e.g., v1.7.0)
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
@@ -26,7 +30,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Log in to the Container registry
|
||||
uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
|
||||
@@ -40,11 +44,17 @@ jobs:
|
||||
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
tags: |
|
||||
type=raw,value=development,enable=${{ github.ref == 'refs/heads/main' }}
|
||||
type=raw,value=stable,enable=${{ github.ref == 'refs/heads/main-stable' }}
|
||||
type=ref,event=tag # Use the tag name as a tag for tag builds
|
||||
type=semver,pattern={{version}} # Generate semantic versioning tags for tag builds
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
@@ -1,6 +1,6 @@
|
||||
# BIG-AGI 🧠✨
|
||||
|
||||
Welcome to big-AGI 👋, the GPT application for Pro users that combines utility,
|
||||
Welcome to big-AGI 👋, the GPT application for professionals that need form, function,
|
||||
simplicity, and speed. Powered by the latest models from 7 vendors, including
|
||||
open-source, `big-AGI` offers best-in-class Voice and Chat with AI Personas,
|
||||
visualizations, coding, drawing, calling, and quite more -- all in a polished UX.
|
||||
@@ -21,7 +21,19 @@ shows the current developments and future ideas.
|
||||
- Got a suggestion? [_Add your roadmap ideas_](https://github.com/enricoros/big-agi/issues/new?&template=roadmap-request.md)
|
||||
- Want to contribute? [_Pick up a task!_](https://github.com/users/enricoros/projects/4/views/4) - _easy_ to _pro_
|
||||
|
||||
### What's New in 1.6.0 - Nov 28, 2023 🌟
|
||||
### What's New in 1.7.1 · Dec 11, 2023 · Attachment Theory 🌟
|
||||
|
||||
- **Attachments System Overhaul**: Drag, paste, link, snap, text, images, PDFs and more. [#251](https://github.com/enricoros/big-agi/issues/251)
|
||||
- **Desktop Webcam Capture**: Image capture now available as Labs feature. [#253](https://github.com/enricoros/big-agi/issues/253)
|
||||
- **Independent Browsing**: Full browsing support with Browserless. [Learn More](https://github.com/enricoros/big-agi/blob/main/docs/config-browse.md)
|
||||
- **Overheat LLMs**: Push the creativity with higher LLM temperatures. [#256](https://github.com/enricoros/big-agi/issues/256)
|
||||
- **Model Options Shortcut**: Quick adjust with `Ctrl+Shift+O`
|
||||
- Optimized Voice Input and Performance
|
||||
- Latest Ollama and Oobabooga models
|
||||
- For developers: **Password Protection**: HTTP Basic Auth. [Learn How](https://github.com/enricoros/big-agi/blob/main/docs/deploy-authentication.md)
|
||||
- [1.7.1]: Improved Ollama chats. [#270](https://github.com/enricoros/big-agi/issues/270)
|
||||
|
||||
### What's New in 1.6.0 - Nov 28, 2023
|
||||
|
||||
- **Web Browsing**: Download web pages within chats - [browsing guide](https://github.com/enricoros/big-agi/blob/main/docs/config-browse.md)
|
||||
- **Branching Discussions**: Create new conversations from any message
|
||||
@@ -82,7 +94,8 @@ the [past releases changelog](docs/changelog.md).
|
||||

|
||||

|
||||
|
||||
Clone this repo, install the dependencies, and run the development server:
|
||||
Clone this repo, install the dependencies (all locally), and run the development server (which auto-watches the
|
||||
files for changes):
|
||||
|
||||
```bash
|
||||
git clone https://github.com/enricoros/big-agi.git
|
||||
@@ -91,15 +104,23 @@ npm install
|
||||
npm run dev
|
||||
```
|
||||
|
||||
The app will be running on `http://localhost:3000`
|
||||
The development app will be running on `http://localhost:3000`. Development builds have the advantage of not requiring
|
||||
a build step, but can be slower than production builds. Also, development builds won't have timeout on edge functions.
|
||||
|
||||
Integrations:
|
||||
## 🌐 Deploy manually
|
||||
|
||||
* Local models: Ollama, Oobabooga, LocalAi, etc.
|
||||
* [ElevenLabs](https://elevenlabs.io/) Voice Synthesis (bring your own voice too) - Settings > Text To Speech
|
||||
* [Helicone](https://www.helicone.ai/) LLM Observability Platform - Models > OpenAI > Advanced > API Host: 'oai.hconeai.com'
|
||||
* [Paste.gg](https://paste.gg/) Paste Sharing - Chat Menu > Share via paste.gg
|
||||
* [Prodia](https://prodia.com/) Image Generation - Settings > Image Generation > Api Key & Model
|
||||
The _production_ build of the application is optimized for performance and is performed by the `npm run build` command,
|
||||
after installing the required dependencies.
|
||||
|
||||
```bash
|
||||
# .. repeat the steps above up to `npm install`, then:
|
||||
npm run build
|
||||
npm run start --port 3000
|
||||
```
|
||||
|
||||
The app will be running on the specified port, e.g. `http://localhost:3000`.
|
||||
|
||||
Want to deploy with username/password? See the [Authentication](docs/deploy-authentication.md) guide.
|
||||
|
||||
## 🐳 Deploy with Docker
|
||||
|
||||
@@ -115,7 +136,7 @@ docker run -d -p 3000:3000 big-agi
|
||||
Or run the official container:
|
||||
|
||||
- manually: `docker run -d -p 3000:3000 ghcr.io/enricoros/big-agi`
|
||||
- or, with docker-compose: `docker-compose up`
|
||||
- or, with docker-compose: `docker-compose up` or see [the documentation](docs/deploy-docker.md) for a composer file with integrated browsing
|
||||
|
||||
## ☁️ Deploy on Cloudflare Pages
|
||||
|
||||
@@ -127,7 +148,13 @@ Create your GitHub fork, create a Vercel project over that fork, and deploy it.
|
||||
|
||||
[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-agi&env=OPENAI_API_KEY,OPENAI_API_HOST&envDescription=OpenAI%20KEY%20for%20your%20deployment.%20Set%20HOST%20only%20if%20non-default.)
|
||||
|
||||
## Integrations:
|
||||
|
||||
* Local models: Ollama, Oobabooga, LocalAi, etc.
|
||||
* [ElevenLabs](https://elevenlabs.io/) Voice Synthesis (bring your own voice too) - Settings > Text To Speech
|
||||
* [Helicone](https://www.helicone.ai/) LLM Observability Platform - Models > OpenAI > Advanced > API Host: 'oai.hconeai.com'
|
||||
* [Paste.gg](https://paste.gg/) Paste Sharing - Chat Menu > Share via paste.gg
|
||||
* [Prodia](https://prodia.com/) Image Generation - Settings > Image Generation > Api Key & Model
|
||||
|
||||
<br/>
|
||||
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
# Very simple docker-compose file to run the app on http://localhost:3000 (or http://127.0.0.1:3000).
|
||||
#
|
||||
# For more examples, such as running big-AGI alongside a web browsing service, see the `docs/docker` folder.
|
||||
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
|
||||
+16
-4
@@ -5,12 +5,24 @@ by release.
|
||||
|
||||
- For the live roadmap, please see [the GitHub project](https://github.com/users/enricoros/projects/4/views/2)
|
||||
|
||||
### 1.7.0 - Dec 2023
|
||||
### 1.8.0 - Dec 2023
|
||||
|
||||
- work in progress: [big-AGI open roadmap](https://github.com/users/enricoros/projects/4/views/2), [help here](https://github.com/users/enricoros/projects/4/views/4)
|
||||
- milestone: [1.7.0](https://github.com/enricoros/big-agi/milestone/7)
|
||||
- milestone: [1.8.0](https://github.com/enricoros/big-agi/milestone/8)
|
||||
|
||||
### ✨ What's New in 1.6.0 👊 - Nov 28, 2023
|
||||
### What's New in 1.7.1 · Dec 11, 2023 · Attachment Theory 🌟
|
||||
|
||||
- **Attachments System Overhaul**: Drag, paste, link, snap, text, images, PDFs and more. [#251](https://github.com/enricoros/big-agi/issues/251)
|
||||
- **Desktop Webcam Capture**: Image capture now available as Labs feature. [#253](https://github.com/enricoros/big-agi/issues/253)
|
||||
- **Independent Browsing**: Full browsing support with Browserless. [Learn More](https://github.com/enricoros/big-agi/blob/main/docs/config-browse.md)
|
||||
- **Overheat LLMs**: Push the creativity with higher LLM temperatures. [#256](https://github.com/enricoros/big-agi/issues/256)
|
||||
- **Model Options Shortcut**: Quick adjust with `Ctrl+Shift+O`
|
||||
- Optimized Voice Input and Performance
|
||||
- Latest Ollama and Oobabooga models
|
||||
- For developers: **Password Protection**: HTTP Basic Auth. [Learn How](https://github.com/enricoros/big-agi/blob/main/docs/deploy-authentication.md)
|
||||
- [1.7.1]: Improved Ollama chats. [#270](https://github.com/enricoros/big-agi/issues/270)
|
||||
|
||||
### What's New in 1.6.0 - Nov 28, 2023 · Surf's Up
|
||||
|
||||
- **Web Browsing**: Download web pages within chats - [browsing guide](https://github.com/enricoros/big-agi/blob/main/docs/config-browse.md)
|
||||
- **Branching Discussions**: Create new conversations from any message
|
||||
@@ -20,7 +32,7 @@ by release.
|
||||
- **New Features**: Anthropic Claude 2.1, `/help` command, and Flattener tool
|
||||
- **For Developers**: Code quality upgrades and snackbar notifications
|
||||
|
||||
### What's New in 1.5.0 - Nov 19, 2023
|
||||
### What's New in 1.5.0 - Nov 19, 2023 · Loaded
|
||||
|
||||
- **Continued Voice**: Engage with hands-free interaction for a seamless experience
|
||||
- **Visualization Tool**: Create data representations with our new visualization capabilities
|
||||
|
||||
+54
-31
@@ -3,25 +3,68 @@
|
||||
Allows users to load web pages across various components of `big-AGI`. This feature is supported by Puppeteer-based
|
||||
browsing services, which are the most common way to render web pages in a headless environment.
|
||||
|
||||
Once configured, the Browsing service provides this functionality:
|
||||
|
||||
- **Paste a URL**: Simply paste/drag a URL into the chat, and `big-AGI` will load and attach the page (very effective)
|
||||
- **Use /browse**: Type `/browse [URL]` in the chat to command `big-AGI` to load the specified web page
|
||||
- **ReAct**: ReAct will automatically use the `loadURL()` function whenever a URL is encountered
|
||||
|
||||
First of all, you need to procure a Puppeteer web browsing service endpoint. `big-AGI` supports services like:
|
||||
|
||||
- [BrightData](https://brightdata.com/products/scraping-browser) Scraping Browser
|
||||
- [Cloudflare](https://developers.cloudflare.com/browser-rendering/) Browser Rendering, or
|
||||
- any other Puppeteer-based service that provides a WebSocket endpoint (WSS)
|
||||
- **including [your own browser](#your-own-chrome-browser)**
|
||||
| Service | Working | Type | Location | Special Features |
|
||||
|--------------------------------------------------------------------------------------|---------|-------------|----------------|---------------------------------------------|
|
||||
| [BrightData Scraping Browser](https://brightdata.com/products/scraping-browser) | Yes | Proprietary | Cloud | Advanced scraping tools, global IP pool |
|
||||
| [Cloudflare Browser Rendering](https://developers.cloudflare.com/browser-rendering/) | ? | Proprietary | Cloud | Integrated CDN, optimized browser rendering |
|
||||
| ⬇️ [Browserless 2.0](#-browserless-20) | Okay | OpenSource | Local (Docker) | Parallelism, debug viewer, advanced APIs |
|
||||
| ⬇️ [Your Chrome Browser (ALPHA)](#-your-own-chrome-browser) | Alpha | Proprietary | Local (Chrome) | Personal, experimental use (ALPHA!) |
|
||||
| other Puppeteer-based WSS Services | ? | Varied | Cloud/Local | Service-specific features |
|
||||
|
||||
## Configuration
|
||||
|
||||
1. **Procure an Endpoint**: Ensure that your browsing service is running and has a WebSocket endpoint available:
|
||||
- this must be in the form: `wss://${auth}@{some host}:{port}`
|
||||
1. **Procure an Endpoint**
|
||||
- Ensure that your browsing service is running (remote or local) and has a WebSocket endpoint available
|
||||
- Write down the address: `wss://${auth}@{some host}:{port}`, or ws:// for local services on your machine
|
||||
|
||||
2. **Configure `big-AGI`**: navigate to **Preferences** > **Tools** > **Browse** and enter the 'wss://...' connection
|
||||
string provided by your browsing service
|
||||
2. **Configure `big-AGI`**
|
||||
- navigate to **Preferences** > **Tools** > **Browse**
|
||||
- Enter the 'wss://...' connection string provided by your browsing service
|
||||
|
||||
3. **Enable Features**: Choose which browse-related features you want to enable:
|
||||
- **Attach URLs**: Automatically load and attach a page when pasting a URL into the composer
|
||||
- **/browse Command**: Use the `/browse` command in the chat to load a web page
|
||||
- **ReAct**: Enable the `loadURL()` function in ReAct for advanced interactions
|
||||
- **Attach URLs**: Automatically load and attach a page when pasting a URL into the composer
|
||||
- **/browse Command**: Use the `/browse` command in the chat to load a web page
|
||||
- **ReAct**: Enable the `loadURL()` function in ReAct for advanced interactions
|
||||
|
||||
### 🌐 Browserless 2.0
|
||||
|
||||
[Browserless 2.0](https://github.com/browserless/browserless) is a Docker-based service that provides a headless
|
||||
browsing experience compatible with `big-AGI`. An open-source solution that simplifies web automation tasks,
|
||||
in a scalable manner.
|
||||
|
||||
Launch Browserless with:
|
||||
|
||||
```bash
|
||||
docker run -p 9222:3000 browserless/chrome:latest
|
||||
```
|
||||
|
||||
Now you can use the following connection string in `big-AGI`: `ws://127.0.0.1:9222`.
|
||||
You can also browse to [http://127.0.0.1:9222](http://127.0.0.1:9222) to see the Browserless debug viewer
|
||||
and configure some options.
|
||||
|
||||
Note: if you are using `docker-compose`, please see the
|
||||
[docker/docker-compose-browserless.yaml](docker/docker-compose-browserless.yaml) file for an example
|
||||
on how to run `big-AGI` and Browserless simultaneously in a single application.
|
||||
|
||||
### 🌐 Your own Chrome browser
|
||||
|
||||
***EXPERIMENTAL - UNTESTED*** - You can use your own Chrome browser as a browsing service, by configuring it to expose
|
||||
a WebSocket endpoint.
|
||||
|
||||
- close all the Chrome instances (on Windows, check the Task Manager if still running)
|
||||
- start Chrome with the following command line options (on Windows, you can edit the shortcut properties):
|
||||
- `--remote-debugging-port=9222`
|
||||
- go to http://localhost:9222/json/version and copy the `webSocketDebuggerUrl` value
|
||||
- it should be something like: `ws://localhost:9222/...`
|
||||
- paste the value into the Endpoint configuration (see point 2 in the configuration)
|
||||
|
||||
### Server-Side Configuration
|
||||
|
||||
@@ -33,26 +76,6 @@ Always deploy your own user authentication, authorization and security solution.
|
||||
route that provides browsing service, shall be secured with a user authentication and authorization solution,
|
||||
to prevent unauthorized access to the browsing service.
|
||||
|
||||
### Your own Chrome browser
|
||||
|
||||
***EXPERIMENTAL - UNTESTED*** - You can use your own Chrome browser as a browsing service, by configuring it to expose
|
||||
a WebSocket endpoint.
|
||||
|
||||
- close all the Chrome instances (on Windows, check the Task Manager if still running)
|
||||
- start Chrome with the following command line options (on Windows, you can edit the shortcut properties):
|
||||
- `--remote-debugging-port=9222`
|
||||
- go to http://localhost:9222/json/version and copy the `webSocketDebuggerUrl` value
|
||||
- it should be something like: `ws://localhost:9222/...`
|
||||
- paste the value into the Endpoint configuration (see point 2 above)
|
||||
|
||||
## Usage
|
||||
|
||||
Once configured, you can start using the browse functionality:
|
||||
|
||||
- **Paste a URL**: Simply paste a URL into the chat, and `big-AGI` will load the page if the Attach URLs feature is enabled
|
||||
- **Use /browse**: Type `/browse [URL]` in the chat to command `big-AGI` to load the specified web page
|
||||
- **ReAct**: ReAct will automatically use the `loadURL()` function whenever a URL is encountered
|
||||
|
||||
## Support
|
||||
|
||||
If you encounter any issues or have questions about configuring the browse functionality, join our community on Discord for support and discussions.
|
||||
|
||||
@@ -4,7 +4,7 @@ Integrate local Large Language Models (LLMs) with
|
||||
[oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui),
|
||||
a specialized interface that includes a custom variant of the OpenAI API for a smooth integration process.
|
||||
|
||||
_Last updated on Nov 7, 2023_
|
||||
_Last updated on Dec 7, 2023_
|
||||
|
||||
### Components
|
||||
|
||||
@@ -20,26 +20,31 @@ This guide assumes that **big-AGI** is already installed on your system. Note th
|
||||
|
||||
### Text-web-ui Installation & Configuration:
|
||||
|
||||
1. Install [text-generation-webui](https://github.com/oobabooga/text-generation-webui#Installation).
|
||||
- Download the one-click installer, extract it, and double-click on "start" - ~10 minutes
|
||||
- Close it afterwards as we need to modify the startup flags
|
||||
1. Install [text-generation-webui](https://github.com/oobabooga/text-generation-webui#Installation):
|
||||
- Follow the instructions in the official page (basically clone the repo and run a script) [~10 minutes]
|
||||
- Stop the Web UI as we need to modify the startup flags to enable the OpenAI API
|
||||
2. Enable the **openai extension**
|
||||
- Edit `CMD_FLAGS.txt`
|
||||
- Make sure that `--listen --extensions openai` is present and uncommented
|
||||
- Make sure that `--listen --api` is present and uncommented
|
||||
3. Restart text-generation-webui
|
||||
- Double-click on "start"
|
||||
- You should see something like:
|
||||
```
|
||||
2023-11-07 21:24:26 INFO:Loading the extension "openai"...
|
||||
2023-11-07 21:24:27 INFO:OpenAI compatible API URL:
|
||||
2023-12-07 21:51:21 INFO:Loading the extension "openai"...
|
||||
2023-12-07 21:51:21 INFO:OpenAI-compatible API URL:
|
||||
|
||||
http://0.0.0.0:5000/v1
|
||||
http://0.0.0.0:5000
|
||||
...
|
||||
INFO: Uvicorn running on http://0.0.0.0:5000 (Press CTRL+C to quit)
|
||||
Running on local URL: http://0.0.0.0:7860
|
||||
```
|
||||
- The OpenAI API is now running on port 5000, on both localhost (127.0.0.1) and your network IP address
|
||||
- This shows that:
|
||||
- The Web UI is running on port 7860: http://127.0.0.1:7860
|
||||
- **The OpenAI API is running on port 5000: http://127.0.0.1:5000**
|
||||
4. Load your first model
|
||||
- Open the text-generation-webui at [127.0.0.1:7860](http://127.0.0.1:7860/)
|
||||
- Switch to the **Model** tab
|
||||
- Download, for instance, `TheBloke/Llama-2-7b-Chat-GPTQ:gptq-4bit-32g-actorder_True` - 4.3 GB
|
||||
- Download, for instance, `TheBloke/Llama-2-7B-Chat-GPTQ`
|
||||
- Select the model once it's loaded
|
||||
|
||||
### Integrating text-web-ui with big-AGI:
|
||||
@@ -51,4 +56,6 @@ This guide assumes that **big-AGI** is already installed on your system. Note th
|
||||
- The active model must be selected and LOADED on the text-generation-webui as it doesn't support model switching or parallel requests.
|
||||
- Select model & Chat
|
||||
|
||||

|
||||
|
||||
Enjoy the privacy and flexibility of local LLMs with `big-AGI` and `text-generation-webui`!
|
||||
+10
-5
@@ -5,15 +5,20 @@ This guide helps you connect [Ollama](https://ollama.ai) [models](https://ollama
|
||||
experience. The integration brings the popular big-AGI features to Ollama, including: voice chats,
|
||||
editing tools, models switching, personas, and more.
|
||||
|
||||
_Last updated Dec 11, 2023_
|
||||
|
||||

|
||||
|
||||
## Quick Integration Guide
|
||||
|
||||
1. **Ensure Ollama API Server is Running**: Before starting, make sure your Ollama API server is up and running.
|
||||
2. **Add Ollama as a Model Source**: In `big-AGI`, navigate to the **Models** section, select **Add a model source**, and choose **Ollama**.
|
||||
3. **Enter Ollama Host URL**: Provide the Ollama Host URL where the API server is accessible (e.g., `http://localhost:11434`).
|
||||
4. **Refresh Model List**: Once connected, refresh the list of available models to include the Ollama models.
|
||||
5. **Start Using AI Personas**: Select an Ollama model and begin interacting with AI personas tailored to your needs.
|
||||
1. **Ensure Ollama API Server is Running**: Follow the official instructions to get Ollama up and running on your machine
|
||||
2. **Add Ollama as a Model Source**: In `big-AGI`, navigate to the **Models** section, select **Add a model source**, and choose **Ollama**
|
||||
3. **Enter Ollama Host URL**: Provide the Ollama Host URL where the API server is accessible (e.g., `http://localhost:11434`)
|
||||
4. **Refresh Model List**: Once connected, refresh the list of available models to include the Ollama models
|
||||
> Optional: use the Ollama Admin interface to see which models are available and 'Pull' them in your local machine. Note
|
||||
that this operation will likely timeout due to Edge Functions timeout on the big-AGI server while pulling, and
|
||||
you'll have to press the 'Pull' button again, until a green message appears.
|
||||
5. **Chat with Ollama models**: select an Ollama model and begin chatting with AI personas
|
||||
|
||||
### Ollama: installation and Setup
|
||||
|
||||
|
||||
@@ -0,0 +1,45 @@
|
||||
# Authentication
|
||||
|
||||
`big-AGI` does not come with built-in authentication. To secure your deployment, you can implement authentication
|
||||
in one of the following ways:
|
||||
|
||||
1. Build `big-AGI` with support for ⬇️ [HTTP Authentication](#http-authentication)
|
||||
2. Utilize user authentication features provided by your ⬇️ [cloud deployment platform](#cloud-deployments-authentication)
|
||||
3. Develop a custom authentication solution
|
||||
|
||||
<br/>
|
||||
|
||||
### HTTP Authentication
|
||||
|
||||
[HTTP Basic Authentication](https://developer.mozilla.org/en-US/docs/Web/HTTP/Authentication) is a simple method
|
||||
to secure your application.
|
||||
|
||||
To enable it in `big-AGI`, you **must manually build the application**:
|
||||
|
||||
- Build `big-AGI` with HTTP authentication enabled:
|
||||
- Clone the repository
|
||||
- Rename `middleware_BASIC_AUTH.ts` to `middleware.ts`
|
||||
- Build: usual simple build procedure (e.g. [Deploy manually](../README.md#-deploy-manually) or [Deploying with Docker](deploy-docker.md))
|
||||
|
||||
- Configure the following [environment variables](environment-variables.md) before launching `big-AGI`:
|
||||
```dotenv
|
||||
HTTP_BASIC_AUTH_USERNAME=<your username>
|
||||
HTTP_BASIC_AUTH_PASSWORD=<your password>
|
||||
```
|
||||
|
||||
- Start the application 🔒
|
||||
|
||||
<br/>
|
||||
|
||||
### Cloud Deployments Authentication
|
||||
|
||||
> This approach allows you to enable authentication without rebuilding the application by using the features
|
||||
> provided by your cloud platform to manage user accounts and access.
|
||||
|
||||
Many cloud deployment platforms offer built-in authentication mechanisms. Refer to the platform's documentation
|
||||
for setup instructions:
|
||||
|
||||
1. [CloudFlare Access / Zero Trust](https://www.cloudflare.com/zero-trust/products/access/)
|
||||
2. [Vercel Authentication](https://vercel.com/docs/security/deployment-protection/methods-to-protect-deployments/vercel-authentication)
|
||||
3. [Vercel Password Protection](https://vercel.com/docs/security/deployment-protection/methods-to-protect-deployments/password-protection)
|
||||
4. Let us know when you test more solutions (Heroku, AWS IAM, Google IAP, etc.)
|
||||
@@ -0,0 +1,31 @@
|
||||
# This file is used to run `big-AGI` and `browserless` with Docker Compose.
|
||||
#
|
||||
# The two containers are linked together and `big-AGI` is configured to use `browserless`
|
||||
# as its Puppeteer endpoint (from the containers intranet, it is available browserless:3000).
|
||||
#
|
||||
# From your host, you can access big-AGI on http://127.0.0.1:3000 and browserless on http://127.0.0.1:9222.
|
||||
#
|
||||
# To start the containers, run:
|
||||
# docker-compose -f docs/docker/docker-compose-browserless.yaml up
|
||||
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
big-agi:
|
||||
image: ghcr.io/enricoros/big-agi:main
|
||||
ports:
|
||||
- "3000:3000"
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
- PUPPETEER_WSS_ENDPOINT=ws://browserless:3000
|
||||
command: [ "next", "start", "-p", "3000" ]
|
||||
depends_on:
|
||||
- browserless
|
||||
|
||||
browserless:
|
||||
image: browserless/chrome:latest
|
||||
ports:
|
||||
- "9222:3000" # Map host's port 9222 to container's port 3000
|
||||
environment:
|
||||
- MAX_CONCURRENT_SESSIONS=10
|
||||
@@ -9,10 +9,6 @@ which take place over _defaults_. This file is kept in sync with [`../src/server
|
||||
|
||||
Environment variables can be set by creating a `.env` file in the root directory of the project.
|
||||
|
||||
> For Docker deployment, ensure all necessary environment variables are set **both during build and run**.
|
||||
> If the Docker container is built without setting environment variables, the frontend UI will be unaware
|
||||
> of them, despite the backend being able to use them at runtime.
|
||||
|
||||
The following is an example `.env` for copy-paste convenience:
|
||||
|
||||
```bash
|
||||
@@ -45,6 +41,13 @@ GOOGLE_CLOUD_API_KEY=
|
||||
GOOGLE_CSE_ID=
|
||||
# Browse
|
||||
PUPPETEER_WSS_ENDPOINT=
|
||||
|
||||
# Backend Analytics
|
||||
BACKEND_ANALYTICS=
|
||||
|
||||
# Backend HTTP Basic Authentication
|
||||
HTTP_BASIC_AUTH_USERNAME=
|
||||
HTTP_BASIC_AUTH_PASSWORD=
|
||||
```
|
||||
|
||||
## Variables Documentation
|
||||
@@ -95,19 +98,23 @@ It is currently supported for:
|
||||
|
||||
Enable the app to Talk, Draw, and Google things up.
|
||||
|
||||
| Variable | Description |
|
||||
|:-------------------------|:------------------------------------------------------------------------------------------------------------------------|
|
||||
| **Text-To-Speech** | [ElevenLabs](https://elevenlabs.io/) is a high quality speech synthesis service |
|
||||
| `ELEVENLABS_API_KEY` | ElevenLabs API Key - used for calls, etc. |
|
||||
| `ELEVENLABS_API_HOST` | Custom host for ElevenLabs |
|
||||
| `ELEVENLABS_VOICE_ID` | Default voice ID for ElevenLabs |
|
||||
| **Google Custom Search** | [Google Programmable Search Engine](https://programmablesearchengine.google.com/about/) produces links to pages |
|
||||
| `GOOGLE_CLOUD_API_KEY` | Google Cloud API Key, used with the '/react' command - [Link to GCP](https://console.cloud.google.com/apis/credentials) |
|
||||
| `GOOGLE_CSE_ID` | Google Custom/Programmable Search Engine ID - [Link to PSE](https://programmablesearchengine.google.com/) |
|
||||
| **Text-To-Image** | [Prodia](https://prodia.com/) is a reliable image generation service |
|
||||
| `PRODIA_API_KEY` | Prodia API Key - used with '/imagine ...' |
|
||||
| **Browse** | |
|
||||
| `PUPPETEER_WSS_ENDPOINT` | Puppeteer WebSocket endpoint - used for browsing, etc. |
|
||||
| Variable | Description |
|
||||
|:---------------------------|:------------------------------------------------------------------------------------------------------------------------|
|
||||
| **Text-To-Speech** | [ElevenLabs](https://elevenlabs.io/) is a high quality speech synthesis service |
|
||||
| `ELEVENLABS_API_KEY` | ElevenLabs API Key - used for calls, etc. |
|
||||
| `ELEVENLABS_API_HOST` | Custom host for ElevenLabs |
|
||||
| `ELEVENLABS_VOICE_ID` | Default voice ID for ElevenLabs |
|
||||
| **Google Custom Search** | [Google Programmable Search Engine](https://programmablesearchengine.google.com/about/) produces links to pages |
|
||||
| `GOOGLE_CLOUD_API_KEY` | Google Cloud API Key, used with the '/react' command - [Link to GCP](https://console.cloud.google.com/apis/credentials) |
|
||||
| `GOOGLE_CSE_ID` | Google Custom/Programmable Search Engine ID - [Link to PSE](https://programmablesearchengine.google.com/) |
|
||||
| **Text-To-Image** | [Prodia](https://prodia.com/) is a reliable image generation service |
|
||||
| `PRODIA_API_KEY` | Prodia API Key - used with '/imagine ...' |
|
||||
| **Browse** | |
|
||||
| `PUPPETEER_WSS_ENDPOINT` | Puppeteer WebSocket endpoint - used for browsing, etc. |
|
||||
| **Backend** | |
|
||||
| `BACKEND_ANALYTICS` | Semicolon-separated list of analytics flags (see backend.analytics.ts). Flags: `domain` logs the responding domain. |
|
||||
| `HTTP_BASIC_AUTH_USERNAME` | Username for HTTP Basic Authentication. See the [Authentication](deploy-authentication.md) guide. |
|
||||
| `HTTP_BASIC_AUTH_PASSWORD` | Password for HTTP Basic Authentication. |
|
||||
|
||||
---
|
||||
|
||||
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 730 KiB |
@@ -0,0 +1,59 @@
|
||||
/**
|
||||
* Middleware to protect `big-AGI` with HTTP Basic Authentication
|
||||
*
|
||||
* For more information on how to deploy with HTTP Basic Authentication, see:
|
||||
* - [deploy-authentication.md](docs/deploy-authentication.md)
|
||||
*/
|
||||
|
||||
import type { NextRequest } from 'next/server';
|
||||
import { NextResponse } from 'next/server';
|
||||
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export function middleware(request: NextRequest) {
|
||||
|
||||
// Validate deployment configuration
|
||||
if (!process.env.HTTP_BASIC_AUTH_USERNAME || !process.env.HTTP_BASIC_AUTH_PASSWORD) {
|
||||
console.warn('HTTP Basic Authentication is enabled but not configured');
|
||||
return new Response('Unauthorized/Unconfigured', unauthResponse);
|
||||
}
|
||||
|
||||
// Request client authentication if no credentials are provided
|
||||
const authHeader = request.headers.get('authorization');
|
||||
if (!authHeader?.startsWith('Basic '))
|
||||
return new Response('Unauthorized', unauthResponse);
|
||||
|
||||
// Request authentication if credentials are invalid
|
||||
const base64Credentials = authHeader.split(' ')[1];
|
||||
const credentials = Buffer.from(base64Credentials, 'base64').toString('ascii');
|
||||
const [username, password] = credentials.split(':');
|
||||
if (
|
||||
!username || !password ||
|
||||
username !== process.env.HTTP_BASIC_AUTH_USERNAME ||
|
||||
password !== process.env.HTTP_BASIC_AUTH_PASSWORD
|
||||
)
|
||||
return new Response('Unauthorized', unauthResponse);
|
||||
|
||||
return NextResponse.next();
|
||||
}
|
||||
|
||||
|
||||
// Response to send when authentication is required
|
||||
const unauthResponse: ResponseInit = {
|
||||
status: 401,
|
||||
headers: {
|
||||
'WWW-Authenticate': 'Basic realm="Secure big-AGI"',
|
||||
},
|
||||
};
|
||||
|
||||
export const config = {
|
||||
matcher: [
|
||||
// Include root
|
||||
'/',
|
||||
// Include pages
|
||||
'/(call|index|news|personas|link)(.*)',
|
||||
// Include API routes
|
||||
'/api(.*)',
|
||||
// Note: this excludes _next, /images etc..
|
||||
],
|
||||
};
|
||||
Generated
+2
-2
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "big-agi",
|
||||
"version": "1.6.0",
|
||||
"version": "1.7.1",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "big-agi",
|
||||
"version": "1.6.0",
|
||||
"version": "1.7.1",
|
||||
"hasInstallScript": true,
|
||||
"dependencies": {
|
||||
"@dqbd/tiktoken": "^1.0.7",
|
||||
|
||||
+1
-1
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "big-agi",
|
||||
"version": "1.6.0",
|
||||
"version": "1.7.1",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"dev": "next dev",
|
||||
|
||||
@@ -75,11 +75,11 @@ function AppShareTarget() {
|
||||
if (intentURL) {
|
||||
setIsDownloading(true);
|
||||
callBrowseFetchPage(intentURL)
|
||||
.then(pageContent => {
|
||||
if (pageContent)
|
||||
queueComposerTextAndLaunchApp('\n\n```' + intentURL + '\n' + pageContent + '\n```\n');
|
||||
.then(page => {
|
||||
if (page.stopReason !== 'error')
|
||||
queueComposerTextAndLaunchApp('\n\n```' + intentURL + '\n' + page.content + '\n```\n');
|
||||
else
|
||||
setErrorMessage('Could not read any data');
|
||||
setErrorMessage('Could not read any data' + page.error ? ': ' + page.error : '');
|
||||
})
|
||||
.catch(error => setErrorMessage(error?.message || error || 'Unknown error'))
|
||||
.finally(() => setIsDownloading(false));
|
||||
|
||||
@@ -113,7 +113,7 @@ export function CallUI(props: {
|
||||
setCallMessages(messages => [...messages, createDMessage('user', transcribed)]);
|
||||
}
|
||||
}, []);
|
||||
const { isSpeechEnabled, isRecording, isRecordingAudio, isRecordingSpeech, startRecording, stopRecording, toggleRecording } = useSpeechRecognition(onSpeechResultCallback, 1000, false);
|
||||
const { isSpeechEnabled, isRecording, isRecordingAudio, isRecordingSpeech, startRecording, stopRecording, toggleRecording } = useSpeechRecognition(onSpeechResultCallback, 1000);
|
||||
|
||||
// derived state
|
||||
const isRinging = stage === 'ring';
|
||||
|
||||
+45
-13
@@ -12,15 +12,16 @@ import { TradeConfig, TradeModal } from '~/modules/trade/TradeModal';
|
||||
import { imaginePromptFromText } from '~/modules/aifn/imagine/imaginePromptFromText';
|
||||
import { speakText } from '~/modules/elevenlabs/elevenlabs.client';
|
||||
import { useBrowseStore } from '~/modules/browse/store-module-browsing';
|
||||
import { useModelsStore } from '~/modules/llms/store-llms';
|
||||
import { useChatLLM, useModelsStore } from '~/modules/llms/store-llms';
|
||||
|
||||
import { ConfirmationModal } from '~/common/components/ConfirmationModal';
|
||||
import { GlobalShortcutItem, ShortcutKeyName, useGlobalShortcuts } from '~/common/components/useGlobalShortcut';
|
||||
import { addSnackbar, removeSnackbar } from '~/common/components/useSnackbarsStore';
|
||||
import { createDMessage, DConversationId, DMessage, getConversation, useConversation } from '~/common/state/store-chats';
|
||||
import { GlobalShortcutItem, ShortcutKeyName, useGlobalShortcuts } from '~/common/components/useGlobalShortcut';
|
||||
import { useLayoutPluggable } from '~/common/layout/store-applayout';
|
||||
import { openLayoutLLMOptions, useLayoutPluggable } from '~/common/layout/store-applayout';
|
||||
import { useUXLabsStore } from '~/common/state/store-ux-labs';
|
||||
|
||||
import type { ComposerOutputMultiPart } from './components/composer/composer.types';
|
||||
import { ChatDrawerItemsMemo } from './components/applayout/ChatDrawerItems';
|
||||
import { ChatDropdowns } from './components/applayout/ChatDropdowns';
|
||||
import { ChatMenuItems } from './components/applayout/ChatMenuItems';
|
||||
@@ -57,6 +58,8 @@ export function AppChat() {
|
||||
const composerTextAreaRef = React.useRef<HTMLTextAreaElement>(null);
|
||||
|
||||
// external state
|
||||
const { chatLLM } = useChatLLM();
|
||||
|
||||
const {
|
||||
chatPanes,
|
||||
focusedConversationId,
|
||||
@@ -180,13 +183,33 @@ export function AppChat() {
|
||||
setMessages(conversationId, history);
|
||||
}, [focusedSystemPurposeId, setMessages]);
|
||||
|
||||
const handleComposerNewMessage = async (chatModeId: ChatModeId, conversationId: DConversationId, userText: string) => {
|
||||
const handleComposerAction = (chatModeId: ChatModeId, conversationId: DConversationId, multiPartMessage: ComposerOutputMultiPart): boolean => {
|
||||
|
||||
// validate inputs
|
||||
if (multiPartMessage.length !== 1 || multiPartMessage[0].type !== 'text-block') {
|
||||
addSnackbar({
|
||||
key: 'chat-composer-action-invalid',
|
||||
message: 'Only a single text part is supported for now.',
|
||||
type: 'issue',
|
||||
overrides: {
|
||||
autoHideDuration: 2000,
|
||||
},
|
||||
});
|
||||
return false;
|
||||
}
|
||||
const userText = multiPartMessage[0].text;
|
||||
|
||||
// find conversation
|
||||
const conversation = getConversation(conversationId);
|
||||
if (conversation)
|
||||
return await _handleExecute(chatModeId, conversationId, [
|
||||
...conversation.messages,
|
||||
createDMessage('user', userText),
|
||||
]);
|
||||
if (!conversation)
|
||||
return false;
|
||||
|
||||
// start execution (async)
|
||||
void _handleExecute(chatModeId, conversationId, [
|
||||
...conversation.messages,
|
||||
createDMessage('user', userText),
|
||||
]);
|
||||
return true;
|
||||
};
|
||||
|
||||
const handleConversationExecuteHistory = async (conversationId: DConversationId, history: DMessage[]) =>
|
||||
@@ -291,7 +314,14 @@ export function AppChat() {
|
||||
|
||||
// Shortcuts
|
||||
|
||||
const handleOpenChatLlmOptions = React.useCallback(() => {
|
||||
const { chatLLMId } = useModelsStore.getState();
|
||||
if (!chatLLMId) return;
|
||||
openLayoutLLMOptions(chatLLMId);
|
||||
}, []);
|
||||
|
||||
const shortcuts = React.useMemo((): GlobalShortcutItem[] => [
|
||||
['o', true, true, false, handleOpenChatLlmOptions],
|
||||
['r', true, true, false, handleMessageRegenerateLast],
|
||||
['n', true, false, true, handleConversationNew],
|
||||
['b', true, false, true, () => isFocusedChatEmpty || focusedConversationId && handleConversationBranch(focusedConversationId, null)],
|
||||
@@ -299,7 +329,7 @@ export function AppChat() {
|
||||
['d', true, false, true, () => focusedConversationId && handleConversationDelete(focusedConversationId, false)],
|
||||
[ShortcutKeyName.Left, true, false, true, () => handleNavigateHistory('back')],
|
||||
[ShortcutKeyName.Right, true, false, true, () => handleNavigateHistory('forward')],
|
||||
], [focusedConversationId, handleConversationBranch, handleConversationDelete, handleConversationNew, handleMessageRegenerateLast, handleNavigateHistory, isFocusedChatEmpty]);
|
||||
], [focusedConversationId, handleConversationBranch, handleConversationDelete, handleConversationNew, handleMessageRegenerateLast, handleNavigateHistory, handleOpenChatLlmOptions, isFocusedChatEmpty]);
|
||||
useGlobalShortcuts(shortcuts);
|
||||
|
||||
|
||||
@@ -357,6 +387,7 @@ export function AppChat() {
|
||||
|
||||
<ChatMessageList
|
||||
conversationId={_conversationId}
|
||||
chatLLMContextTokens={chatLLM?.contextTokens}
|
||||
isMessageSelectionMode={isMessageSelectionMode}
|
||||
setIsMessageSelectionMode={setIsMessageSelectionMode}
|
||||
onConversationBranch={handleConversationBranch}
|
||||
@@ -395,10 +426,11 @@ export function AppChat() {
|
||||
</Box>
|
||||
|
||||
<Composer
|
||||
chatLLM={chatLLM}
|
||||
composerTextAreaRef={composerTextAreaRef}
|
||||
conversationId={focusedConversationId}
|
||||
isDeveloperMode={focusedSystemPurposeId === 'Developer'}
|
||||
composerTextAreaRef={composerTextAreaRef}
|
||||
onNewMessage={handleComposerNewMessage}
|
||||
onAction={handleComposerAction}
|
||||
sx={{
|
||||
zIndex: 21, // position: 'sticky', bottom: 0,
|
||||
backgroundColor: 'background.surface',
|
||||
@@ -427,7 +459,7 @@ export function AppChat() {
|
||||
{/* [confirmation] Reset Conversation */}
|
||||
{!!clearConversationId && <ConfirmationModal
|
||||
open onClose={() => setClearConversationId(null)} onPositive={handleConfirmedClearConversation}
|
||||
confirmationText={'Are you sure you want to discard all the messages?'} positiveActionText={'Clear conversation'}
|
||||
confirmationText={'Are you sure you want to discard all messages?'} positiveActionText={'Clear conversation'}
|
||||
/>}
|
||||
|
||||
{/* [confirmation] Delete All */}
|
||||
|
||||
@@ -4,8 +4,7 @@ import { shallow } from 'zustand/shallow';
|
||||
import { Box, List } from '@mui/joy';
|
||||
import { SxProps } from '@mui/joy/styles/types';
|
||||
|
||||
import { DiagramConfig } from '~/modules/aifn/digrams/DiagramsModal';
|
||||
import { useChatLLM } from '~/modules/llms/store-llms';
|
||||
import type { DiagramConfig } from '~/modules/aifn/digrams/DiagramsModal';
|
||||
|
||||
import { ShortcutKeyName, useGlobalShortcut } from '~/common/components/useGlobalShortcut';
|
||||
import { InlineError } from '~/common/components/InlineError';
|
||||
@@ -24,6 +23,7 @@ import { useChatShowSystemMessages } from '../store-app-chat';
|
||||
*/
|
||||
export function ChatMessageList(props: {
|
||||
conversationId: DConversationId | null,
|
||||
chatLLMContextTokens?: number,
|
||||
isMessageSelectionMode: boolean, setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void,
|
||||
onConversationBranch: (conversationId: DConversationId, messageId: string) => void,
|
||||
onConversationExecuteHistory: (conversationId: DConversationId, history: DMessage[]) => void,
|
||||
@@ -40,20 +40,21 @@ export function ChatMessageList(props: {
|
||||
|
||||
// external state
|
||||
const [showSystemMessages] = useChatShowSystemMessages();
|
||||
const { conversationMessages, editMessage, deleteMessage, historyTokenCount } = useChatStore(state => {
|
||||
const { conversationMessages, historyTokenCount, editMessage, deleteMessage, setMessages } = useChatStore(state => {
|
||||
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
|
||||
return {
|
||||
conversationMessages: conversation ? conversation.messages : [],
|
||||
editMessage: state.editMessage, deleteMessage: state.deleteMessage,
|
||||
historyTokenCount: conversation ? conversation.tokenCount : 0,
|
||||
deleteMessage: state.deleteMessage,
|
||||
editMessage: state.editMessage,
|
||||
setMessages: state.setMessages,
|
||||
};
|
||||
}, shallow);
|
||||
const { chatLLM } = useChatLLM();
|
||||
const { mayWork: isImaginable } = useCapabilityProdia();
|
||||
const { mayWork: isSpeakable } = useCapabilityElevenLabs();
|
||||
|
||||
// derived state
|
||||
const { conversationId, onConversationExecuteHistory, onConversationBranch, onTextDiagram, onTextImagine, onTextSpeak } = props;
|
||||
const { conversationId, onConversationBranch, onConversationExecuteHistory, onTextDiagram, onTextImagine, onTextSpeak } = props;
|
||||
|
||||
|
||||
// text actions
|
||||
@@ -76,6 +77,14 @@ export function ChatMessageList(props: {
|
||||
}
|
||||
}, [conversationId, onConversationExecuteHistory]);
|
||||
|
||||
const handleConversationTruncate = React.useCallback((messageId: string) => {
|
||||
const messages = getConversation(conversationId)?.messages;
|
||||
if (conversationId && messages) {
|
||||
const truncatedHistory = messages.slice(0, messages.findIndex(m => m.id === messageId) + 1);
|
||||
setMessages(conversationId, truncatedHistory);
|
||||
}
|
||||
}, [conversationId, setMessages]);
|
||||
|
||||
const handleMessageDelete = React.useCallback((messageId: string) => {
|
||||
conversationId && deleteMessage(conversationId, messageId);
|
||||
}, [conversationId, deleteMessage]);
|
||||
@@ -178,7 +187,7 @@ export function ChatMessageList(props: {
|
||||
<CleanerMessage
|
||||
key={'sel-' + message.id}
|
||||
message={message}
|
||||
isBottom={idx === 0} remainingTokens={(chatLLM ? chatLLM.contextTokens : 0) - historyTokenCount}
|
||||
isBottom={idx === 0} remainingTokens={(props.chatLLMContextTokens || 0) - historyTokenCount}
|
||||
selected={selectedMessages.has(message.id)} onToggleSelected={handleSelectMessage}
|
||||
/>
|
||||
|
||||
@@ -192,6 +201,7 @@ export function ChatMessageList(props: {
|
||||
isImagining={isImagining} isSpeaking={isSpeaking}
|
||||
onConversationBranch={handleConversationBranch}
|
||||
onConversationRestartFrom={handleConversationRestartFrom}
|
||||
onConversationTruncate={handleConversationTruncate}
|
||||
onMessageDelete={handleMessageDelete}
|
||||
onMessageEdit={handleMessageEdit}
|
||||
onTextDiagram={handleTextDiagram}
|
||||
|
||||
@@ -14,8 +14,8 @@ import { openLayoutLLMOptions, openLayoutModelsSetup } from '~/common/layout/sto
|
||||
|
||||
function AppBarLLMDropdown(props: {
|
||||
llms: DLLM[],
|
||||
llmId: DLLMId | null,
|
||||
setLlmId: (llmId: DLLMId | null) => void,
|
||||
chatLlmId: DLLMId | null,
|
||||
setChatLlmId: (llmId: DLLMId | null) => void,
|
||||
placeholder?: string,
|
||||
}) {
|
||||
|
||||
@@ -23,7 +23,7 @@ function AppBarLLMDropdown(props: {
|
||||
const llmItems: DropdownItems = {};
|
||||
let prevSourceId: DModelSourceId | null = null;
|
||||
for (const llm of props.llms) {
|
||||
if (!llm.hidden || llm.id === props.llmId) {
|
||||
if (!llm.hidden || llm.id === props.chatLlmId) {
|
||||
if (!prevSourceId || llm.sId !== prevSourceId) {
|
||||
if (prevSourceId)
|
||||
llmItems[`sep-${llm.id}`] = { type: 'separator', title: llm.sId };
|
||||
@@ -33,22 +33,25 @@ function AppBarLLMDropdown(props: {
|
||||
}
|
||||
}
|
||||
|
||||
const handleChatLLMChange = (_event: any, value: DLLMId | null) => value && props.setLlmId(value);
|
||||
const handleChatLLMChange = (_event: any, value: DLLMId | null) => value && props.setChatLlmId(value);
|
||||
|
||||
const handleOpenLLMOptions = () => props.llmId && openLayoutLLMOptions(props.llmId);
|
||||
const handleOpenLLMOptions = () => props.chatLlmId && openLayoutLLMOptions(props.chatLlmId);
|
||||
|
||||
|
||||
return (
|
||||
<AppBarDropdown
|
||||
items={llmItems}
|
||||
value={props.llmId} onChange={handleChatLLMChange}
|
||||
value={props.chatLlmId} onChange={handleChatLLMChange}
|
||||
placeholder={props.placeholder || 'Models …'}
|
||||
appendOption={<>
|
||||
|
||||
{props.llmId && (
|
||||
{props.chatLlmId && (
|
||||
<ListItemButton key='menu-opt' onClick={handleOpenLLMOptions}>
|
||||
<ListItemDecorator><SettingsIcon color='success' /></ListItemDecorator>
|
||||
Options
|
||||
<Box sx={{ flexGrow: 1, display: 'flex', justifyContent: 'space-between', gap: 1 }}>
|
||||
Options
|
||||
<KeyStroke combo='Ctrl + Shift + O' />
|
||||
</Box>
|
||||
</ListItemButton>
|
||||
)}
|
||||
|
||||
@@ -74,7 +77,7 @@ export function useChatLLMDropdown() {
|
||||
}), shallow);
|
||||
|
||||
const chatLLMDropdown = React.useMemo(
|
||||
() => <AppBarLLMDropdown llms={llms} llmId={chatLLMId} setLlmId={setChatLLMId} />,
|
||||
() => <AppBarLLMDropdown llms={llms} chatLlmId={chatLLMId} setChatLlmId={setChatLLMId} />,
|
||||
[llms, chatLLMId, setChatLLMId],
|
||||
);
|
||||
|
||||
|
||||
@@ -0,0 +1,47 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
|
||||
import AddAPhotoIcon from '@mui/icons-material/AddAPhoto';
|
||||
|
||||
import { CameraCaptureModal } from './CameraCaptureModal';
|
||||
|
||||
|
||||
const attachCameraLegend = (isMobile: boolean) =>
|
||||
<Box sx={{ px: 1, py: 0.75, lineHeight: '1.5rem' }}>
|
||||
<b>Attach photo</b><br />
|
||||
{isMobile ? 'Auto-OCR to read text' : 'See the world, on the go'}
|
||||
</Box>;
|
||||
|
||||
|
||||
export const ButtonAttachCameraMemo = React.memo(ButtonAttachCamera);
|
||||
|
||||
function ButtonAttachCamera(props: { isMobile?: boolean, onAttachImage: (file: File) => void }) {
|
||||
// state
|
||||
const [open, setOpen] = React.useState(false);
|
||||
|
||||
return <>
|
||||
|
||||
{/* The Button */}
|
||||
{props.isMobile ? (
|
||||
<IconButton variant='plain' color='neutral' onClick={() => setOpen(true)}>
|
||||
<AddAPhotoIcon />
|
||||
</IconButton>
|
||||
) : (
|
||||
<Tooltip variant='solid' placement='top-start' title={attachCameraLegend(!!props.isMobile)}>
|
||||
<Button fullWidth variant='plain' color='neutral' onClick={() => setOpen(true)} startDecorator={<AddAPhotoIcon />}
|
||||
sx={{ justifyContent: 'flex-start' }}>
|
||||
Camera
|
||||
</Button>
|
||||
</Tooltip>
|
||||
)}
|
||||
|
||||
{/* The actual capture dialog, which will stream the video */}
|
||||
{open && (
|
||||
<CameraCaptureModal
|
||||
onCloseModal={() => setOpen(false)}
|
||||
onAttachImage={props.onAttachImage}
|
||||
/>
|
||||
)}
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -0,0 +1,32 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
|
||||
import ContentPasteGoIcon from '@mui/icons-material/ContentPasteGo';
|
||||
|
||||
import { KeyStroke } from '~/common/components/KeyStroke';
|
||||
|
||||
|
||||
const pasteClipboardLegend =
|
||||
<Box sx={{ px: 1, py: 0.75, lineHeight: '1.5rem' }}>
|
||||
<b>Attach clipboard 📚</b><br />
|
||||
Auto-converts to the best types<br />
|
||||
<KeyStroke combo='Ctrl + Shift + V' sx={{ mt: 1, mb: 0.5 }} />
|
||||
</Box>;
|
||||
|
||||
|
||||
export const ButtonAttachClipboardMemo = React.memo(ButtonAttachClipboard);
|
||||
|
||||
function ButtonAttachClipboard(props: { isMobile?: boolean, onClick: () => void }) {
|
||||
return props.isMobile ? (
|
||||
<IconButton onClick={props.onClick}>
|
||||
<ContentPasteGoIcon />
|
||||
</IconButton>
|
||||
) : (
|
||||
<Tooltip variant='solid' placement='top-start' title={pasteClipboardLegend}>
|
||||
<Button fullWidth variant='plain' color='neutral' startDecorator={<ContentPasteGoIcon />} onClick={props.onClick}
|
||||
sx={{ justifyContent: 'flex-start' }}>
|
||||
Paste
|
||||
</Button>
|
||||
</Tooltip>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,29 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
|
||||
import AttachFileOutlinedIcon from '@mui/icons-material/AttachFileOutlined';
|
||||
|
||||
|
||||
const attachFileLegend =
|
||||
<Box sx={{ px: 1, py: 0.75, lineHeight: '1.5rem' }}>
|
||||
<b>Attach files</b><br />
|
||||
Drag & drop in chat for faster loads ⚡
|
||||
</Box>;
|
||||
|
||||
|
||||
export const ButtonAttachFileMemo = React.memo(ButtonAttachFile);
|
||||
|
||||
function ButtonAttachFile(props: { isMobile?: boolean, onAttachFilePicker: () => void }) {
|
||||
return props.isMobile ? (
|
||||
<IconButton onClick={props.onAttachFilePicker}>
|
||||
<AttachFileOutlinedIcon />
|
||||
</IconButton>
|
||||
) : (
|
||||
<Tooltip variant='solid' placement='top-start' title={attachFileLegend}>
|
||||
<Button fullWidth variant='plain' color='neutral' onClick={props.onAttachFilePicker} startDecorator={<AttachFileOutlinedIcon />}
|
||||
sx={{ justifyContent: 'flex-start' }}>
|
||||
File
|
||||
</Button>
|
||||
</Tooltip>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,25 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
|
||||
import { SxProps } from '@mui/joy/styles/types';
|
||||
import CallIcon from '@mui/icons-material/Call';
|
||||
|
||||
|
||||
const callConversationLegend =
|
||||
<Box sx={{ px: 1, py: 0.75, lineHeight: '1.5rem' }}>
|
||||
Quick call regarding this chat
|
||||
</Box>;
|
||||
|
||||
export function ButtonCall(props: { isMobile?: boolean, disabled?: boolean, onClick: () => void, sx?: SxProps }) {
|
||||
return props.isMobile ? (
|
||||
<IconButton variant='soft' color='primary' disabled={props.disabled} onClick={props.onClick} sx={props.sx}>
|
||||
<CallIcon />
|
||||
</IconButton>
|
||||
) : (
|
||||
<Tooltip variant='solid' arrow placement='right' title={callConversationLegend}>
|
||||
<Button variant='soft' color='primary' disabled={props.disabled} onClick={props.onClick} endDecorator={<CallIcon />} sx={props.sx}>
|
||||
Call
|
||||
</Button>
|
||||
</Tooltip>
|
||||
);
|
||||
}
|
||||
@@ -1,34 +0,0 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Button, IconButton } from '@mui/joy';
|
||||
import AddAPhotoIcon from '@mui/icons-material/AddAPhoto';
|
||||
|
||||
import { CameraCaptureModal } from './CameraCaptureModal';
|
||||
|
||||
const CAMERA_ENABLE_ON_DESKTOP = false;
|
||||
|
||||
|
||||
export function ButtonCameraCapture(props: { isMobile: boolean, onOCR: (ocrText: string) => void }) {
|
||||
// state
|
||||
const [open, setOpen] = React.useState(false);
|
||||
|
||||
return <>
|
||||
|
||||
{/* The Button */}
|
||||
{props.isMobile ? (
|
||||
<IconButton variant='plain' color='neutral' onClick={() => setOpen(true)}>
|
||||
<AddAPhotoIcon />
|
||||
</IconButton>
|
||||
) : CAMERA_ENABLE_ON_DESKTOP ? (
|
||||
<Button
|
||||
fullWidth variant='plain' color='neutral' onClick={() => setOpen(true)} startDecorator={<AddAPhotoIcon />}
|
||||
sx={{ justifyContent: 'flex-start' }}>
|
||||
OCR
|
||||
</Button>
|
||||
) : undefined}
|
||||
|
||||
{/* The actual capture dialog, which will stream the video */}
|
||||
{open && <CameraCaptureModal onCloseModal={() => setOpen(false)} onOCR={props.onOCR} />}
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
|
||||
import ContentPasteGoIcon from '@mui/icons-material/ContentPasteGo';
|
||||
|
||||
import { KeyStroke } from '~/common/components/KeyStroke';
|
||||
|
||||
|
||||
const pasteClipboardLegend =
|
||||
<Box sx={{ p: 1, lineHeight: 2 }}>
|
||||
<b>Paste as 📚 Markdown attachment</b><br />
|
||||
Also converts Code and Tables<br />
|
||||
<KeyStroke combo='Ctrl + Shift + V' />
|
||||
</Box>;
|
||||
|
||||
export function ButtonClipboardPaste(props: { isMobile: boolean, isDeveloperMode: boolean, onPaste: () => void }) {
|
||||
return props.isMobile ? (
|
||||
<IconButton onClick={props.onPaste}>
|
||||
<ContentPasteGoIcon />
|
||||
</IconButton>
|
||||
) : (
|
||||
<Tooltip
|
||||
variant='solid' placement='top-start'
|
||||
title={pasteClipboardLegend}>
|
||||
<Button fullWidth variant='plain' color='neutral' startDecorator={<ContentPasteGoIcon />} onClick={props.onPaste}
|
||||
sx={{ justifyContent: 'flex-start' }}>
|
||||
{props.isDeveloperMode ? 'Paste code' : 'Paste'}
|
||||
</Button>
|
||||
</Tooltip>
|
||||
);
|
||||
}
|
||||
@@ -1,70 +0,0 @@
|
||||
import { Box, Button, IconButton, Stack, Tooltip } from '@mui/joy';
|
||||
import * as React from 'react';
|
||||
import AttachFileOutlinedIcon from '@mui/icons-material/AttachFileOutlined';
|
||||
|
||||
const attachFileLegend =
|
||||
<Stack sx={{ p: 1, gap: 1 }}>
|
||||
<Box sx={{ mb: 1 }}>
|
||||
<b>Attach a file</b>
|
||||
</Box>
|
||||
<table>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><b>Text</b></td>
|
||||
<td align='center' style={{ opacity: 0.5 }}>→</td>
|
||||
<td>📝 As-is</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><b>Code</b></td>
|
||||
<td align='center' style={{ opacity: 0.5 }}>→</td>
|
||||
<td>📚 Markdown</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><b>PDF</b></td>
|
||||
<td width={36} align='center' style={{ opacity: 0.5 }}>→</td>
|
||||
<td>📝 Text (summarized)</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<Box sx={{ mt: 1, fontSize: '14px' }}>
|
||||
Drag & drop in chat for faster loads ⚡
|
||||
</Box>
|
||||
</Stack>;
|
||||
|
||||
|
||||
export function ButtonFileAttach(props: { isMobile: boolean, onAttachFiles: (files: FileList) => Promise<void> }) {
|
||||
|
||||
// state
|
||||
const attachmentFileInputRef = React.useRef<HTMLInputElement>(null);
|
||||
|
||||
const handleShowFilePicker = () => attachmentFileInputRef.current?.click();
|
||||
|
||||
const handleLoadAttachment = (event: React.ChangeEvent<HTMLInputElement>) => {
|
||||
// NOTE: resetting the target value allows for the selector dialog to pop-up again
|
||||
const files = event.target?.files;
|
||||
if (files && files.length >= 1)
|
||||
props.onAttachFiles(files).finally(() => event.target.value = '');
|
||||
else
|
||||
event.target.value = '';
|
||||
};
|
||||
|
||||
return <>
|
||||
|
||||
{/* Mobile icon or Desktop button */}
|
||||
{props.isMobile ? (
|
||||
<IconButton onClick={handleShowFilePicker}>
|
||||
<AttachFileOutlinedIcon />
|
||||
</IconButton>
|
||||
) : (
|
||||
<Tooltip variant='solid' placement='top-start' title={attachFileLegend}>
|
||||
<Button fullWidth variant='plain' color='neutral' onClick={handleShowFilePicker} startDecorator={<AttachFileOutlinedIcon />}
|
||||
sx={{ justifyContent: 'flex-start' }}>
|
||||
Attach
|
||||
</Button>
|
||||
</Tooltip>
|
||||
)}
|
||||
|
||||
<input type='file' multiple hidden ref={attachmentFileInputRef} onChange={handleLoadAttachment} />
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -0,0 +1,26 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, IconButton } from '@mui/joy';
|
||||
import { ColorPaletteProp, VariantProp } from '@mui/joy/styles/types';
|
||||
import MicIcon from '@mui/icons-material/Mic';
|
||||
|
||||
import { GoodTooltip } from '~/common/components/GoodTooltip';
|
||||
import { KeyStroke } from '~/common/components/KeyStroke';
|
||||
|
||||
|
||||
const micLegend =
|
||||
<Box sx={{ px: 1, py: 0.75, lineHeight: '1.5rem' }}>
|
||||
Voice input<br />
|
||||
<KeyStroke combo='Ctrl + M' sx={{ mt: 1, mb: 0.5 }} />
|
||||
</Box>;
|
||||
|
||||
|
||||
export const ButtonMicMemo = React.memo(ButtonMic);
|
||||
|
||||
function ButtonMic(props: { variant: VariantProp, color: ColorPaletteProp, noBackground?: boolean, onClick: () => void }) {
|
||||
return <GoodTooltip placement='top' title={micLegend}>
|
||||
<IconButton variant={props.variant} color={props.color} onClick={props.onClick} sx={props.noBackground ? { background: 'none' } : {}}>
|
||||
<MicIcon />
|
||||
</IconButton>
|
||||
</GoodTooltip>;
|
||||
}
|
||||
@@ -0,0 +1,22 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, IconButton, Tooltip } from '@mui/joy';
|
||||
import { ColorPaletteProp, SxProps, VariantProp } from '@mui/joy/styles/types';
|
||||
import AutoModeIcon from '@mui/icons-material/AutoMode';
|
||||
|
||||
|
||||
const micContinuationLegend =
|
||||
<Box sx={{ px: 1, py: 0.75, lineHeight: '1.5rem' }}>
|
||||
Voice Continuation
|
||||
</Box>;
|
||||
|
||||
|
||||
export const ButtonMicContinuationMemo = React.memo(ButtonMicContinuation);
|
||||
|
||||
function ButtonMicContinuation(props: { variant: VariantProp, color: ColorPaletteProp, onClick: () => void, sx?: SxProps }) {
|
||||
return <Tooltip placement='bottom' title={micContinuationLegend}>
|
||||
<IconButton variant={props.variant} color={props.color} onClick={props.onClick} sx={props.sx}>
|
||||
<AutoModeIcon />
|
||||
</IconButton>
|
||||
</Tooltip>;
|
||||
}
|
||||
@@ -0,0 +1,18 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Button, IconButton } from '@mui/joy';
|
||||
import { SxProps } from '@mui/joy/styles/types';
|
||||
import FormatPaintIcon from '@mui/icons-material/FormatPaint';
|
||||
|
||||
|
||||
export function ButtonOptionsDraw(props: { isMobile?: boolean, onClick: () => void, sx?: SxProps }) {
|
||||
return props.isMobile ? (
|
||||
<IconButton variant='soft' color='warning' onClick={props.onClick} sx={props.sx}>
|
||||
<FormatPaintIcon />
|
||||
</IconButton>
|
||||
) : (
|
||||
<Button variant='soft' color='warning' onClick={props.onClick} endDecorator={<FormatPaintIcon />} sx={props.sx}>
|
||||
Options
|
||||
</Button>
|
||||
);
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, CircularProgress, IconButton, LinearProgress, Modal, ModalClose, Option, Select, Sheet, Typography } from '@mui/joy';
|
||||
import { Box, Button, IconButton, Modal, ModalClose, Option, Select, Sheet, Typography } from '@mui/joy';
|
||||
import CameraAltIcon from '@mui/icons-material/CameraAlt';
|
||||
import DownloadIcon from '@mui/icons-material/Download';
|
||||
import InfoIcon from '@mui/icons-material/Info';
|
||||
import KeyboardArrowDownIcon from '@mui/icons-material/KeyboardArrowDown';
|
||||
@@ -9,6 +10,12 @@ import { InlineError } from '~/common/components/InlineError';
|
||||
import { useCameraCapture } from '~/common/components/useCameraCapture';
|
||||
|
||||
|
||||
function prettyFileName(renderedFrame: HTMLCanvasElement) {
|
||||
const prettyDate = new Date().toISOString().replace(/[:-]/g, '').replace('T', '-').replace('Z', '');
|
||||
const prettyResolution = `${renderedFrame.width}x${renderedFrame.height}`;
|
||||
return `camera-${prettyDate}-${prettyResolution}.png`;
|
||||
}
|
||||
|
||||
function renderVideoFrameToCanvas(videoElement: HTMLVideoElement): HTMLCanvasElement {
|
||||
// paint the video on a canvas, to save it
|
||||
const canvas = document.createElement('canvas');
|
||||
@@ -19,6 +26,19 @@ function renderVideoFrameToCanvas(videoElement: HTMLVideoElement): HTMLCanvasEle
|
||||
return canvas;
|
||||
}
|
||||
|
||||
function renderVideoFrameToFile(videoElement: HTMLVideoElement, callback: (file: File) => void) {
|
||||
// video to canvas
|
||||
const renderedFrame = renderVideoFrameToCanvas(videoElement);
|
||||
|
||||
// canvas to blob to file to callback
|
||||
renderedFrame.toBlob((blob) => {
|
||||
if (blob) {
|
||||
const file = new File([blob], prettyFileName(renderedFrame), { type: blob.type });
|
||||
callback(file);
|
||||
}
|
||||
}, 'image/png');
|
||||
}
|
||||
|
||||
function downloadVideoFrameAsPNG(videoElement: HTMLVideoElement) {
|
||||
// video to canvas to png
|
||||
const renderedFrame = renderVideoFrameToCanvas(videoElement);
|
||||
@@ -26,15 +46,19 @@ function downloadVideoFrameAsPNG(videoElement: HTMLVideoElement) {
|
||||
|
||||
// auto-download
|
||||
const link = document.createElement('a');
|
||||
link.download = 'image.png';
|
||||
link.download = prettyFileName(renderedFrame);
|
||||
link.href = imageDataURL;
|
||||
link.click();
|
||||
}
|
||||
|
||||
|
||||
export function CameraCaptureModal(props: { onCloseModal: () => void, onOCR: (ocrText: string) => void }) {
|
||||
export function CameraCaptureModal(props: {
|
||||
onCloseModal: () => void,
|
||||
onAttachImage: (file: File) => void
|
||||
// onOCR: (ocrText: string) => void }
|
||||
}) {
|
||||
// state
|
||||
const [ocrProgress, setOCRProgress] = React.useState<number | null>(null);
|
||||
// const [ocrProgress/*, setOCRProgress*/] = React.useState<number | null>(null);
|
||||
const [showInfo, setShowInfo] = React.useState(false);
|
||||
|
||||
// camera operations
|
||||
@@ -51,7 +75,7 @@ export function CameraCaptureModal(props: { onCloseModal: () => void, onOCR: (oc
|
||||
props.onCloseModal();
|
||||
};
|
||||
|
||||
const handleVideoOCRClicked = async () => {
|
||||
/*const handleVideoOCRClicked = async () => {
|
||||
if (!videoRef.current) return;
|
||||
const renderedFrame = renderVideoFrameToCanvas(videoRef.current);
|
||||
|
||||
@@ -68,6 +92,14 @@ export function CameraCaptureModal(props: { onCloseModal: () => void, onOCR: (oc
|
||||
setOCRProgress(null);
|
||||
stopAndClose();
|
||||
props.onOCR(result.data.text);
|
||||
};*/
|
||||
|
||||
const handleVideoSnapClicked = () => {
|
||||
if (!videoRef.current) return;
|
||||
renderVideoFrameToFile(videoRef.current, (file) => {
|
||||
props.onAttachImage(file);
|
||||
stopAndClose();
|
||||
});
|
||||
};
|
||||
|
||||
const handleVideoDownloadClicked = () => {
|
||||
@@ -111,7 +143,7 @@ export function CameraCaptureModal(props: { onCloseModal: () => void, onOCR: (oc
|
||||
ref={videoRef} autoPlay playsInline
|
||||
style={{
|
||||
display: 'block', width: '100%', maxHeight: 'calc(100vh - 200px)',
|
||||
background: '#8888', opacity: ocrProgress !== null ? 0.5 : 1,
|
||||
background: '#8888', //opacity: ocrProgress !== null ? 0.5 : 1,
|
||||
}}
|
||||
/>
|
||||
|
||||
@@ -124,7 +156,7 @@ export function CameraCaptureModal(props: { onCloseModal: () => void, onOCR: (oc
|
||||
{info}
|
||||
</Typography>}
|
||||
|
||||
{ocrProgress !== null && <CircularProgress sx={{ position: 'absolute', top: 'calc(50% - 34px / 2)', left: 'calc(50% - 34px / 2)', zIndex: 2 }} />}
|
||||
{/*{ocrProgress !== null && <CircularProgress sx={{ position: 'absolute', top: 'calc(50% - 34px / 2)', left: 'calc(50% - 34px / 2)', zIndex: 2 }} />}*/}
|
||||
</Box>
|
||||
|
||||
{/* Bottom controls (zoom, ocr, download) & progress */}
|
||||
@@ -134,16 +166,30 @@ export function CameraCaptureModal(props: { onCloseModal: () => void, onOCR: (oc
|
||||
|
||||
{zoomControl}
|
||||
|
||||
{ocrProgress !== null && <LinearProgress color='primary' determinate value={100 * ocrProgress} sx={{ px: 2 }} />}
|
||||
{/*{ocrProgress !== null && <LinearProgress color='primary' determinate value={100 * ocrProgress} sx={{ px: 2 }} />}*/}
|
||||
|
||||
<Box sx={{ display: 'flex', gap: 1, justifyContent: 'space-between' }}>
|
||||
<IconButton disabled={!info} variant='soft' color='neutral' size='lg' onClick={() => setShowInfo(info => !info)} sx={{ zIndex: 30 }}>
|
||||
{/* Info */}
|
||||
<IconButton disabled={!info} variant='soft' color='neutral' onClick={() => setShowInfo(info => !info)} sx={{ zIndex: 30 }}>
|
||||
<InfoIcon />
|
||||
</IconButton>
|
||||
<Button disabled={ocrProgress !== null} fullWidth variant='solid' size='lg' onClick={handleVideoOCRClicked} sx={{ flex: 1, maxWidth: 260 }}>
|
||||
Extract Text
|
||||
{/*<Button disabled={ocrProgress !== null} fullWidth variant='solid' size='lg' onClick={handleVideoOCRClicked} sx={{ flex: 1, maxWidth: 260 }}>*/}
|
||||
{/* Extract Text*/}
|
||||
{/*</Button>*/}
|
||||
|
||||
{/* Capture */}
|
||||
<Button
|
||||
fullWidth
|
||||
variant='solid' color='neutral'
|
||||
onClick={handleVideoSnapClicked}
|
||||
endDecorator={<CameraAltIcon />}
|
||||
sx={{ flex: 1, maxWidth: 200, py: 2, borderRadius: '3rem' }}
|
||||
>
|
||||
Capture
|
||||
</Button>
|
||||
<IconButton variant='soft' color='neutral' size='lg' onClick={handleVideoDownloadClicked}>
|
||||
|
||||
{/* Download */}
|
||||
<IconButton variant='soft' color='neutral' onClick={handleVideoDownloadClicked}>
|
||||
<DownloadIcon />
|
||||
</IconButton>
|
||||
</Box>
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,11 +1,18 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Badge, ColorPaletteProp, Tooltip } from '@mui/joy';
|
||||
import { SxProps } from '@mui/joy/styles/types';
|
||||
import { Badge, Box, ColorPaletteProp, Tooltip } from '@mui/joy';
|
||||
|
||||
|
||||
export function tokensPrettyMath(tokenLimit: number | 0, directTokens: number, indirectTokens?: number) {
|
||||
const usedTokens = directTokens + (indirectTokens || 0);
|
||||
function alignRight(value: number, columnSize: number = 7) {
|
||||
const str = value.toLocaleString();
|
||||
return str.padStart(columnSize);
|
||||
}
|
||||
|
||||
|
||||
export function tokensPrettyMath(tokenLimit: number | 0, directTokens: number, historyTokens?: number, responseMaxTokens?: number): {
|
||||
color: ColorPaletteProp, message: string, remainingTokens: number
|
||||
} {
|
||||
const usedTokens = directTokens + (historyTokens || 0) + (responseMaxTokens || 0);
|
||||
const remainingTokens = tokenLimit - usedTokens;
|
||||
const gteLimit = (remainingTokens <= 0 && tokenLimit > 0);
|
||||
|
||||
@@ -17,23 +24,24 @@ export function tokensPrettyMath(tokenLimit: number | 0, directTokens: number, i
|
||||
message += `Requested: ${usedTokens.toLocaleString()} tokens`;
|
||||
}
|
||||
// has full information (d + i < l)
|
||||
else if (indirectTokens) {
|
||||
else if (historyTokens || responseMaxTokens) {
|
||||
message +=
|
||||
`${Math.abs(remainingTokens).toLocaleString()} ${remainingTokens > 0 ? 'available' : 'excess'} tokens\n\n` +
|
||||
` = Model max tokens: ${tokenLimit.toLocaleString()}\n` +
|
||||
` - Chat Message: ${directTokens.toLocaleString()}` +
|
||||
(indirectTokens ? `\n- History + Response: ${indirectTokens?.toLocaleString()}` : '');
|
||||
`${Math.abs(remainingTokens).toLocaleString()} ${remainingTokens >= 0 ? 'available' : 'excess'} message tokens\n\n` +
|
||||
` = Model max tokens: ${alignRight(tokenLimit)}\n` +
|
||||
` - This message: ${alignRight(directTokens)}\n` +
|
||||
` - History: ${alignRight(historyTokens || 0)}\n` +
|
||||
` - Max response: ${alignRight(responseMaxTokens || 0)}`;
|
||||
}
|
||||
// Cleaner mode: d + ? < R (total is the remaining in this case)
|
||||
else {
|
||||
message +=
|
||||
`${(tokenLimit + usedTokens).toLocaleString()} available tokens after deleting this\n\n` +
|
||||
` = Currently free: ${tokenLimit.toLocaleString()}\n` +
|
||||
` + This message: ${usedTokens.toLocaleString()}`;
|
||||
` = Currently free: ${alignRight(tokenLimit)}\n` +
|
||||
` + This message: ${alignRight(usedTokens)}`;
|
||||
}
|
||||
|
||||
const color: ColorPaletteProp =
|
||||
(tokenLimit && remainingTokens < 1)
|
||||
(tokenLimit && remainingTokens < 0)
|
||||
? 'danger'
|
||||
: remainingTokens < tokenLimit / 4
|
||||
? 'warning'
|
||||
@@ -43,35 +51,61 @@ export function tokensPrettyMath(tokenLimit: number | 0, directTokens: number, i
|
||||
}
|
||||
|
||||
|
||||
export const TokenTooltip = (props: { message: string | null, color: ColorPaletteProp, placement?: 'top' | 'top-end', children: React.JSX.Element }) =>
|
||||
<Tooltip
|
||||
placement={props.placement}
|
||||
variant={props.color !== 'primary' ? 'solid' : 'soft'} color={props.color}
|
||||
title={props.message
|
||||
? <Box sx={{ p: 2, whiteSpace: 'pre' }}>
|
||||
{props.message}
|
||||
</Box>
|
||||
: null
|
||||
}
|
||||
sx={{
|
||||
fontFamily: 'code',
|
||||
boxShadow: 'xl',
|
||||
}}
|
||||
>
|
||||
{props.children}
|
||||
</Tooltip>;
|
||||
|
||||
|
||||
/**
|
||||
* Simple little component to show the token count (and a tooltip on hover)
|
||||
*/
|
||||
export function TokenBadge({ directTokens, indirectTokens, tokenLimit, showExcess, absoluteBottomRight, inline, sx }: { directTokens: number, indirectTokens?: number, tokenLimit: number, showExcess?: boolean, absoluteBottomRight?: boolean, inline?: boolean, sx?: SxProps }) {
|
||||
export const TokenBadgeMemo = React.memo(TokenBadge);
|
||||
|
||||
const fontSx: SxProps = { fontFamily: 'code', ...(sx || {}) };
|
||||
const outerSx: SxProps = absoluteBottomRight ? { position: 'absolute', bottom: 8, right: 8 } : {};
|
||||
const innerSx: SxProps = (absoluteBottomRight || inline) ? { position: 'static', transform: 'none', ...fontSx } : fontSx;
|
||||
function TokenBadge(props: {
|
||||
direct: number, history?: number, responseMax?: number, limit: number,
|
||||
showExcess?: boolean, absoluteBottomRight?: boolean, inline?: boolean,
|
||||
}) {
|
||||
|
||||
const { message, color, remainingTokens } = tokensPrettyMath(tokenLimit, directTokens, indirectTokens);
|
||||
const { message, color, remainingTokens } = tokensPrettyMath(props.limit, props.direct, props.history, props.responseMax);
|
||||
|
||||
// show the direct tokens, unless we exceed the limit and 'showExcess' is enabled
|
||||
const value = (showExcess && (tokenLimit && remainingTokens <= 0))
|
||||
const value = (props.showExcess && (props.limit && remainingTokens <= 0))
|
||||
? Math.abs(remainingTokens)
|
||||
: directTokens;
|
||||
: props.direct;
|
||||
|
||||
return (
|
||||
<Badge
|
||||
variant='solid' color={color} max={100000}
|
||||
invisible={!directTokens && remainingTokens >= 0}
|
||||
invisible={!props.direct && remainingTokens >= 0}
|
||||
badgeContent={
|
||||
<Tooltip title={<span style={{ whiteSpace: 'pre' }}>{message}</span>} color={color} sx={fontSx}>
|
||||
<TokenTooltip color={color} message={message}>
|
||||
<span>{value.toLocaleString()}</span>
|
||||
</Tooltip>
|
||||
</TokenTooltip>
|
||||
}
|
||||
sx={outerSx}
|
||||
sx={{
|
||||
...((props.absoluteBottomRight) && { position: 'absolute', bottom: 8, right: 8 }),
|
||||
cursor: 'help',
|
||||
}}
|
||||
slotProps={{
|
||||
badge: {
|
||||
sx: innerSx,
|
||||
sx: {
|
||||
fontFamily: 'code',
|
||||
...((props.absoluteBottomRight || props.inline) && { position: 'static', transform: 'none' }),
|
||||
},
|
||||
},
|
||||
}}
|
||||
/>
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Tooltip, useTheme } from '@mui/joy';
|
||||
import { Box, useTheme } from '@mui/joy';
|
||||
|
||||
import { tokensPrettyMath } from './TokenBadge';
|
||||
import { tokensPrettyMath, TokenTooltip } from './TokenBadge';
|
||||
|
||||
|
||||
/**
|
||||
@@ -10,15 +10,17 @@ import { tokensPrettyMath } from './TokenBadge';
|
||||
*
|
||||
* The Textarea contains it within the Composer (at least).
|
||||
*/
|
||||
export function TokenProgressbar(props: { history: number, response: number, direct: number, limit: number }) {
|
||||
export const TokenProgressbarMemo = React.memo(TokenProgressbar);
|
||||
|
||||
function TokenProgressbar(props: { direct: number, history: number, responseMax: number, limit: number }) {
|
||||
// external state
|
||||
const theme = useTheme();
|
||||
|
||||
if (!(props.limit > 0) || (!props.direct && !props.history && !props.response)) return null;
|
||||
if (!(props.limit > 0) || (!props.direct && !props.history && !props.responseMax)) return null;
|
||||
|
||||
// compute percentages
|
||||
let historyPct = 100 * props.history / props.limit;
|
||||
let responsePct = 100 * props.response / props.limit;
|
||||
let responsePct = 100 * props.responseMax / props.limit;
|
||||
let directPct = 100 * props.direct / props.limit;
|
||||
const totalPct = historyPct + responsePct + directPct;
|
||||
const isOverflow = totalPct >= 100;
|
||||
@@ -38,7 +40,7 @@ export function TokenProgressbar(props: { history: number, response: number, dir
|
||||
const overflowColor = theme.palette.danger.softColor;
|
||||
|
||||
// tooltip message/color
|
||||
const { message, color } = tokensPrettyMath(props.limit, props.direct, props.history + props.response);
|
||||
const { message, color } = tokensPrettyMath(props.limit, props.direct, props.history, props.responseMax);
|
||||
|
||||
// sizes
|
||||
const containerHeight = 8;
|
||||
@@ -46,11 +48,11 @@ export function TokenProgressbar(props: { history: number, response: number, dir
|
||||
|
||||
return (
|
||||
|
||||
<Tooltip title={<span style={{ whiteSpace: 'pre' }}>{message}</span>} color={color} sx={{ fontFamily: 'code' }}>
|
||||
<TokenTooltip color={color} message={props.direct ? null : message}>
|
||||
|
||||
<Box sx={{
|
||||
position: 'absolute', left: 1, right: 1, bottom: 1, height: containerHeight,
|
||||
overflow: 'hidden', borderBottomLeftRadius: 7, borderBottomRightRadius: 7,
|
||||
overflow: 'hidden', borderBottomLeftRadius: 5, borderBottomRightRadius: 5,
|
||||
}}>
|
||||
|
||||
{/* History */}
|
||||
@@ -79,6 +81,6 @@ export function TokenProgressbar(props: { history: number, response: number, dir
|
||||
|
||||
</Box>
|
||||
|
||||
</Tooltip>
|
||||
</TokenTooltip>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,201 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, CircularProgress, ColorPaletteProp, Sheet, Typography } from '@mui/joy';
|
||||
import AbcIcon from '@mui/icons-material/Abc';
|
||||
import CodeIcon from '@mui/icons-material/Code';
|
||||
import ImageOutlinedIcon from '@mui/icons-material/ImageOutlined';
|
||||
import PictureAsPdfIcon from '@mui/icons-material/PictureAsPdf';
|
||||
import PivotTableChartIcon from '@mui/icons-material/PivotTableChart';
|
||||
import TextFieldsIcon from '@mui/icons-material/TextFields';
|
||||
import TextureIcon from '@mui/icons-material/Texture';
|
||||
import WarningRoundedIcon from '@mui/icons-material/WarningRounded';
|
||||
|
||||
import { GoodTooltip } from '~/common/components/GoodTooltip';
|
||||
import { ellipsizeFront, ellipsizeMiddle } from '~/common/util/textUtils';
|
||||
|
||||
import type { Attachment, AttachmentConverterType, AttachmentId } from './store-attachments';
|
||||
import type { LLMAttachment } from './useLLMAttachments';
|
||||
|
||||
|
||||
// default attachment width
|
||||
const ATTACHMENT_MIN_STYLE = {
|
||||
height: '100%',
|
||||
minHeight: '40px',
|
||||
minWidth: '64px',
|
||||
};
|
||||
|
||||
|
||||
const ellipsizeLabel = (label?: string) => {
|
||||
if (!label)
|
||||
return '';
|
||||
return ellipsizeMiddle((label || '')
|
||||
.replace(/https?:\/\/(?:www\.)?/, ''), 30)
|
||||
.replace(/\/$/, '')
|
||||
.replace('…', '…\n…');
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* Displayed while a source is loading
|
||||
*/
|
||||
const LoadingIndicator = React.forwardRef((props: { label: string }, _ref) =>
|
||||
<Sheet
|
||||
color='success' variant='soft'
|
||||
sx={{
|
||||
border: '1px solid',
|
||||
borderColor: 'success.solidBg',
|
||||
borderRadius: 'sm',
|
||||
display: 'flex', alignItems: 'center', justifyContent: 'center', gap: 1,
|
||||
...ATTACHMENT_MIN_STYLE,
|
||||
boxSizing: 'border-box',
|
||||
px: 1,
|
||||
py: 0.5,
|
||||
}}
|
||||
>
|
||||
<CircularProgress color='success' size='sm' />
|
||||
<Typography level='title-sm' sx={{ whiteSpace: 'nowrap' }}>
|
||||
{ellipsizeLabel(props.label)}
|
||||
</Typography>
|
||||
</Sheet>,
|
||||
);
|
||||
LoadingIndicator.displayName = 'LoadingIndicator';
|
||||
|
||||
|
||||
const InputErrorIndicator = () =>
|
||||
<WarningRoundedIcon sx={{ color: 'danger.solidBg' }} />;
|
||||
|
||||
|
||||
const converterTypeToIconMap: { [key in AttachmentConverterType]: React.ComponentType<any> } = {
|
||||
'text': TextFieldsIcon,
|
||||
'rich-text': CodeIcon,
|
||||
'rich-text-table': PivotTableChartIcon,
|
||||
'pdf-text': PictureAsPdfIcon,
|
||||
'pdf-images': PictureAsPdfIcon,
|
||||
'image': ImageOutlinedIcon,
|
||||
'image-ocr': AbcIcon,
|
||||
'unhandled': TextureIcon,
|
||||
};
|
||||
|
||||
function attachmentConverterIcon(attachment: Attachment) {
|
||||
const converter = attachment.converterIdx !== null ? attachment.converters[attachment.converterIdx] ?? null : null;
|
||||
if (converter && converter.id) {
|
||||
const Icon = converterTypeToIconMap[converter.id] ?? null;
|
||||
if (Icon)
|
||||
return <Icon sx={{ width: 24, height: 24 }} />;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function attachmentLabelText(attachment: Attachment): string {
|
||||
return ellipsizeFront(attachment.label, 24);
|
||||
}
|
||||
|
||||
|
||||
export function AttachmentItem(props: {
|
||||
llmAttachment: LLMAttachment,
|
||||
menuShown: boolean,
|
||||
onItemMenuToggle: (attachmentId: AttachmentId, anchor: HTMLAnchorElement) => void,
|
||||
}) {
|
||||
|
||||
// derived state
|
||||
|
||||
const { onItemMenuToggle } = props;
|
||||
|
||||
const {
|
||||
attachment,
|
||||
isUnconvertible,
|
||||
isOutputMissing,
|
||||
isOutputAttachable,
|
||||
} = props.llmAttachment;
|
||||
|
||||
const {
|
||||
inputError,
|
||||
inputLoading: isInputLoading,
|
||||
outputsConverting: isOutputLoading,
|
||||
} = attachment;
|
||||
|
||||
const isInputError = !!inputError;
|
||||
const showWarning = isUnconvertible || isOutputMissing || !isOutputAttachable;
|
||||
|
||||
|
||||
const handleToggleMenu = React.useCallback((event: React.MouseEvent<HTMLAnchorElement>) => {
|
||||
event.stopPropagation();
|
||||
onItemMenuToggle(attachment.id, event.currentTarget);
|
||||
}, [attachment, onItemMenuToggle]);
|
||||
|
||||
|
||||
// compose tooltip
|
||||
let tooltip: string | null = '';
|
||||
if (attachment.source.media !== 'text')
|
||||
tooltip += attachment.source.media + ': ';
|
||||
tooltip += attachment.label;
|
||||
// if (hasInput)
|
||||
// tooltip += `\n(${aInput.mimeType}: ${aInput.dataSize.toLocaleString()} bytes)`;
|
||||
// if (aOutputs && aOutputs.length >= 1)
|
||||
// tooltip += `\n\n${JSON.stringify(aOutputs)}`;
|
||||
|
||||
// choose variants and color
|
||||
let color: ColorPaletteProp;
|
||||
let variant: 'soft' | 'outlined' | 'contained' = 'soft';
|
||||
if (isInputLoading || isOutputLoading) {
|
||||
color = 'success';
|
||||
} else if (isInputError) {
|
||||
tooltip = `Issue loading the attachment: ${attachment.inputError}\n\n${tooltip}`;
|
||||
color = 'danger';
|
||||
} else if (showWarning) {
|
||||
tooltip = props.menuShown
|
||||
? null
|
||||
: isUnconvertible
|
||||
? `Attachments of type '${attachment.input?.mimeType}' are not supported yet. You can open a feature request on GitHub.\n\n${tooltip}`
|
||||
: `Not compatible with the selected LLM or not supported. Please select another format.\n\n${tooltip}`;
|
||||
color = 'warning';
|
||||
} else {
|
||||
// all good
|
||||
tooltip = null;
|
||||
color = /*props.menuShown ? 'primary' :*/ 'neutral';
|
||||
variant = 'outlined';
|
||||
}
|
||||
|
||||
|
||||
return <Box>
|
||||
|
||||
<GoodTooltip
|
||||
title={tooltip}
|
||||
isError={isInputError}
|
||||
isWarning={showWarning}
|
||||
sx={{ p: 1, whiteSpace: 'break-spaces' }}
|
||||
>
|
||||
{isInputLoading
|
||||
? <LoadingIndicator label={attachment.label} />
|
||||
: (
|
||||
<Button
|
||||
size='sm'
|
||||
variant={variant} color={color}
|
||||
onClick={handleToggleMenu}
|
||||
sx={{
|
||||
backgroundColor: props.menuShown ? `${color}.softActiveBg` : variant === 'outlined' ? 'background.popup' : undefined,
|
||||
border: variant === 'soft' ? '1px solid' : undefined,
|
||||
borderColor: variant === 'soft' ? `${color}.solidBg` : undefined,
|
||||
borderRadius: 'sm',
|
||||
fontWeight: 'normal',
|
||||
...ATTACHMENT_MIN_STYLE,
|
||||
px: 1, py: 0.5,
|
||||
display: 'flex', flexDirection: 'row', gap: 1,
|
||||
}}
|
||||
>
|
||||
{isInputError
|
||||
? <InputErrorIndicator />
|
||||
: <>
|
||||
{attachmentConverterIcon(attachment)}
|
||||
{isOutputLoading
|
||||
? <>Converting <CircularProgress color='success' size='sm' /></>
|
||||
: <Typography level='title-sm' sx={{ whiteSpace: 'nowrap' }}>
|
||||
{attachmentLabelText(attachment)}
|
||||
</Typography>}
|
||||
</>}
|
||||
</Button>
|
||||
)}
|
||||
</GoodTooltip>
|
||||
|
||||
</Box>;
|
||||
}
|
||||
@@ -0,0 +1,186 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, ListDivider, ListItemDecorator, MenuItem, Radio, Typography } from '@mui/joy';
|
||||
import ClearIcon from '@mui/icons-material/Clear';
|
||||
import ContentCopyIcon from '@mui/icons-material/ContentCopy';
|
||||
import KeyboardArrowLeftIcon from '@mui/icons-material/KeyboardArrowLeft';
|
||||
import KeyboardArrowRightIcon from '@mui/icons-material/KeyboardArrowRight';
|
||||
import VerticalAlignBottomIcon from '@mui/icons-material/VerticalAlignBottom';
|
||||
|
||||
import { CloseableMenu } from '~/common/components/CloseableMenu';
|
||||
import { copyToClipboard } from '~/common/util/clipboardUtils';
|
||||
|
||||
import type { LLMAttachment } from './useLLMAttachments';
|
||||
import { useAttachmentsStore } from './store-attachments';
|
||||
|
||||
|
||||
// enable for debugging
|
||||
export const DEBUG_ATTACHMENTS = true;
|
||||
|
||||
|
||||
export function AttachmentMenu(props: {
|
||||
llmAttachment: LLMAttachment,
|
||||
menuAnchor: HTMLAnchorElement,
|
||||
isPositionFirst: boolean,
|
||||
isPositionLast: boolean,
|
||||
onAttachmentInlineText: (attachmentId: string) => void,
|
||||
onClose: () => void,
|
||||
}) {
|
||||
|
||||
// derived state
|
||||
|
||||
const isPositionFixed = props.isPositionFirst && props.isPositionLast;
|
||||
|
||||
const {
|
||||
attachment,
|
||||
attachmentOutputs,
|
||||
isUnconvertible,
|
||||
isOutputMissing,
|
||||
isOutputTextInlineable,
|
||||
tokenCountApprox,
|
||||
} = props.llmAttachment;
|
||||
|
||||
const {
|
||||
id: aId,
|
||||
input: aInput,
|
||||
converters: aConverters,
|
||||
converterIdx: aConverterIdx,
|
||||
outputs: aOutputs,
|
||||
} = attachment;
|
||||
|
||||
|
||||
// operations
|
||||
|
||||
const { onClose, onAttachmentInlineText } = props;
|
||||
|
||||
const handleInlineText = React.useCallback(() => {
|
||||
onClose();
|
||||
onAttachmentInlineText(aId);
|
||||
}, [aId, onAttachmentInlineText, onClose]);
|
||||
|
||||
const handleMoveUp = React.useCallback(() => {
|
||||
useAttachmentsStore.getState().moveAttachment(aId, -1);
|
||||
}, [aId]);
|
||||
|
||||
const handleMoveDown = React.useCallback(() => {
|
||||
useAttachmentsStore.getState().moveAttachment(aId, 1);
|
||||
}, [aId]);
|
||||
|
||||
const handleRemove = React.useCallback(() => {
|
||||
onClose();
|
||||
useAttachmentsStore.getState().removeAttachment(aId);
|
||||
}, [aId, onClose]);
|
||||
|
||||
const handleSetConverterIdx = React.useCallback(async (converterIdx: number | null) => {
|
||||
return useAttachmentsStore.getState().setConverterIdx(aId, converterIdx);
|
||||
}, [aId]);
|
||||
|
||||
// const handleSummarizeText = React.useCallback(() => {
|
||||
// onAttachmentSummarizeText(aId);
|
||||
// }, [aId, onAttachmentSummarizeText]);
|
||||
|
||||
const handleCopyOutputToClipboard = React.useCallback(() => {
|
||||
if (attachmentOutputs.length >= 1) {
|
||||
const concat = attachmentOutputs.map(output => {
|
||||
if (output.type === 'text-block')
|
||||
return output.text;
|
||||
else if (output.type === 'image-part')
|
||||
return output.base64Url;
|
||||
else
|
||||
return null;
|
||||
}).join('\n\n---\n\n');
|
||||
copyToClipboard(concat.trim(), 'Converted attachment');
|
||||
}
|
||||
}, [attachmentOutputs]);
|
||||
|
||||
|
||||
return (
|
||||
<CloseableMenu
|
||||
dense placement='top' sx={{ minWidth: 200 }}
|
||||
open anchorEl={props.menuAnchor} onClose={props.onClose}
|
||||
noTopPadding noBottomPadding
|
||||
>
|
||||
|
||||
{/* Move Arrows */}
|
||||
{!isPositionFixed && <Box sx={{ display: 'flex', alignItems: 'center' }}>
|
||||
<MenuItem
|
||||
disabled={props.isPositionFirst}
|
||||
onClick={handleMoveUp}
|
||||
sx={{ flex: 1, display: 'flex', justifyContent: 'center' }}
|
||||
>
|
||||
<KeyboardArrowLeftIcon />
|
||||
</MenuItem>
|
||||
<MenuItem
|
||||
disabled={props.isPositionLast}
|
||||
onClick={handleMoveDown}
|
||||
sx={{ flex: 1, display: 'flex', justifyContent: 'center' }}
|
||||
>
|
||||
<KeyboardArrowRightIcon />
|
||||
</MenuItem>
|
||||
</Box>}
|
||||
{!isPositionFixed && <ListDivider sx={{ mt: 0 }} />}
|
||||
|
||||
{/* Render Converters as menu items */}
|
||||
{/*{!isUnconvertible && <ListItem>*/}
|
||||
{/* <Typography level='body-md'>*/}
|
||||
{/* Attach as:*/}
|
||||
{/* </Typography>*/}
|
||||
{/*</ListItem>}*/}
|
||||
{!isUnconvertible && aConverters.map((c, idx) =>
|
||||
<MenuItem
|
||||
disabled={c.disabled}
|
||||
key={'c-' + c.id}
|
||||
onClick={async () => idx !== aConverterIdx && await handleSetConverterIdx(idx)}
|
||||
>
|
||||
<ListItemDecorator>
|
||||
<Radio checked={idx === aConverterIdx} />
|
||||
</ListItemDecorator>
|
||||
{c.unsupported
|
||||
? <Box>Unsupported 🤔 <Typography level='body-xs'>{c.name}</Typography></Box>
|
||||
: c.name}
|
||||
</MenuItem>,
|
||||
)}
|
||||
{!isUnconvertible && <ListDivider />}
|
||||
|
||||
{DEBUG_ATTACHMENTS && !!aInput && (
|
||||
<MenuItem onClick={handleCopyOutputToClipboard} disabled={!isOutputTextInlineable}>
|
||||
<ListItemDecorator><ContentCopyIcon /></ListItemDecorator>
|
||||
<Box>
|
||||
{!!aInput && <Typography level='body-xs'>
|
||||
🡐 {aInput.mimeType}, {aInput.dataSize.toLocaleString()} bytes
|
||||
</Typography>}
|
||||
{/*<Typography level='body-xs'>*/}
|
||||
{/* Converters: {aConverters.map(((converter, idx) => ` ${converter.id}${(idx === aConverterIdx) ? '*' : ''}`)).join(', ')}*/}
|
||||
{/*</Typography>*/}
|
||||
<Typography level='body-xs'>
|
||||
🡒 {isOutputMissing ? 'empty' : aOutputs.map(output => `${output.type}, ${output.type === 'text-block' ? output.text.length.toLocaleString() : '(base64 image)'} bytes`).join(' · ')}
|
||||
</Typography>
|
||||
{!!tokenCountApprox && <Typography level='body-xs'>
|
||||
🡒 {tokenCountApprox.toLocaleString()} tokens
|
||||
</Typography>}
|
||||
</Box>
|
||||
</MenuItem>
|
||||
)}
|
||||
{DEBUG_ATTACHMENTS && !!aInput && <ListDivider />}
|
||||
|
||||
{/* Destructive Operations */}
|
||||
{/*<MenuItem onClick={handleCopyOutputToClipboard} disabled={!isOutputTextInlineable}>*/}
|
||||
{/* <ListItemDecorator><ContentCopyIcon /></ListItemDecorator>*/}
|
||||
{/* Copy*/}
|
||||
{/*</MenuItem>*/}
|
||||
{/*<MenuItem onClick={handleSummarizeText} disabled={!isOutputTextInlineable}>*/}
|
||||
{/* <ListItemDecorator><CompressIcon color='success' /></ListItemDecorator>*/}
|
||||
{/* Shrink*/}
|
||||
{/*</MenuItem>*/}
|
||||
<MenuItem onClick={handleInlineText} disabled={!isOutputTextInlineable}>
|
||||
<ListItemDecorator><VerticalAlignBottomIcon /></ListItemDecorator>
|
||||
Inline text
|
||||
</MenuItem>
|
||||
<MenuItem onClick={handleRemove}>
|
||||
<ListItemDecorator><ClearIcon /></ListItemDecorator>
|
||||
Remove
|
||||
</MenuItem>
|
||||
|
||||
</CloseableMenu>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,170 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, IconButton, ListItemDecorator, MenuItem } from '@mui/joy';
|
||||
import ClearIcon from '@mui/icons-material/Clear';
|
||||
import ExpandLessIcon from '@mui/icons-material/ExpandLess';
|
||||
import VerticalAlignBottomIcon from '@mui/icons-material/VerticalAlignBottom';
|
||||
|
||||
import { CloseableMenu } from '~/common/components/CloseableMenu';
|
||||
import { ConfirmationModal } from '~/common/components/ConfirmationModal';
|
||||
|
||||
import type { AttachmentId } from './store-attachments';
|
||||
import type { LLMAttachments } from './useLLMAttachments';
|
||||
import { AttachmentItem } from './AttachmentItem';
|
||||
import { AttachmentMenu } from './AttachmentMenu';
|
||||
|
||||
|
||||
/**
|
||||
* Renderer of attachments, with menus, etc.
|
||||
*/
|
||||
export function Attachments(props: {
|
||||
llmAttachments: LLMAttachments,
|
||||
onAttachmentInlineText: (attachmentId: AttachmentId) => void,
|
||||
onAttachmentsClear: () => void,
|
||||
onAttachmentsInlineText: () => void,
|
||||
}) {
|
||||
|
||||
// state
|
||||
const [confirmClearAttachments, setConfirmClearAttachments] = React.useState<boolean>(false);
|
||||
const [itemMenu, setItemMenu] = React.useState<{ anchor: HTMLAnchorElement, attachmentId: AttachmentId } | null>(null);
|
||||
const [overallMenuAnchor, setOverallMenuAnchor] = React.useState<HTMLAnchorElement | null>(null);
|
||||
|
||||
// derived state
|
||||
const { llmAttachments, onAttachmentsClear, onAttachmentInlineText, onAttachmentsInlineText } = props;
|
||||
|
||||
const { attachments, isOutputTextInlineable } = llmAttachments;
|
||||
|
||||
const hasAttachments = attachments.length >= 1;
|
||||
|
||||
// derived item menu state
|
||||
|
||||
const itemMenuAnchor = itemMenu?.anchor;
|
||||
const itemMenuAttachmentId = itemMenu?.attachmentId;
|
||||
const itemMenuAttachment = itemMenuAttachmentId ? attachments.find(la => la.attachment.id === itemMenu.attachmentId) : undefined;
|
||||
const itemMenuIndex = itemMenuAttachment ? attachments.indexOf(itemMenuAttachment) : -1;
|
||||
|
||||
|
||||
// item menu
|
||||
|
||||
const handleItemMenuToggle = React.useCallback((attachmentId: AttachmentId, anchor: HTMLAnchorElement) => {
|
||||
handleOverallMenuHide();
|
||||
setItemMenu(prev => prev?.attachmentId === attachmentId ? null : { anchor, attachmentId });
|
||||
}, []);
|
||||
|
||||
const handleItemMenuHide = React.useCallback(() => {
|
||||
setItemMenu(null);
|
||||
}, []);
|
||||
|
||||
|
||||
// item menu operations
|
||||
|
||||
const handleAttachmentInlineText = React.useCallback((attachmentId: string) => {
|
||||
handleItemMenuHide();
|
||||
onAttachmentInlineText(attachmentId);
|
||||
}, [handleItemMenuHide, onAttachmentInlineText]);
|
||||
|
||||
|
||||
// menu
|
||||
|
||||
const handleOverallMenuHide = () => setOverallMenuAnchor(null);
|
||||
|
||||
const handleOverallMenuToggle = (event: React.MouseEvent<HTMLAnchorElement>) =>
|
||||
setOverallMenuAnchor(anchor => anchor ? null : event.currentTarget);
|
||||
|
||||
|
||||
// overall operations
|
||||
|
||||
const handleAttachmentsInlineText = React.useCallback(() => {
|
||||
handleOverallMenuHide();
|
||||
onAttachmentsInlineText();
|
||||
}, [onAttachmentsInlineText]);
|
||||
|
||||
const handleClearAttachments = () => setConfirmClearAttachments(true);
|
||||
|
||||
const handleClearAttachmentsConfirmed = React.useCallback(() => {
|
||||
handleOverallMenuHide();
|
||||
setConfirmClearAttachments(false);
|
||||
onAttachmentsClear();
|
||||
}, [onAttachmentsClear]);
|
||||
|
||||
|
||||
// no components without attachments
|
||||
if (!hasAttachments)
|
||||
return null;
|
||||
|
||||
return <>
|
||||
|
||||
{/* Attachments bar */}
|
||||
<Box sx={{ position: 'relative' }}>
|
||||
|
||||
{/* Horizontally scrollable Attachments */}
|
||||
<Box sx={{ display: 'flex', overflowX: 'auto', gap: 1, height: '100%', pr: 5 }}>
|
||||
{attachments.map((llmAttachment) =>
|
||||
<AttachmentItem
|
||||
key={llmAttachment.attachment.id}
|
||||
llmAttachment={llmAttachment}
|
||||
menuShown={llmAttachment.attachment.id === itemMenuAttachmentId}
|
||||
onItemMenuToggle={handleItemMenuToggle}
|
||||
/>,
|
||||
)}
|
||||
</Box>
|
||||
|
||||
{/* Overall Menu button */}
|
||||
<IconButton
|
||||
variant='plain' onClick={handleOverallMenuToggle}
|
||||
sx={{
|
||||
// borderRadius: 'sm',
|
||||
borderRadius: 0,
|
||||
position: 'absolute', right: 0, top: 0,
|
||||
backgroundColor: 'neutral.softDisabledBg',
|
||||
}}
|
||||
>
|
||||
<ExpandLessIcon />
|
||||
</IconButton>
|
||||
|
||||
</Box>
|
||||
|
||||
|
||||
{/* Attachment Menu */}
|
||||
{!!itemMenuAnchor && !!itemMenuAttachment && (
|
||||
<AttachmentMenu
|
||||
llmAttachment={itemMenuAttachment}
|
||||
menuAnchor={itemMenuAnchor}
|
||||
isPositionFirst={itemMenuIndex === 0}
|
||||
isPositionLast={itemMenuIndex === attachments.length - 1}
|
||||
onAttachmentInlineText={handleAttachmentInlineText}
|
||||
onClose={handleItemMenuHide}
|
||||
/>
|
||||
)}
|
||||
|
||||
|
||||
{/* Overall Menu */}
|
||||
{!!overallMenuAnchor && (
|
||||
<CloseableMenu
|
||||
placement='top-start'
|
||||
open anchorEl={overallMenuAnchor} onClose={handleOverallMenuHide}
|
||||
noTopPadding noBottomPadding
|
||||
>
|
||||
<MenuItem onClick={handleAttachmentsInlineText} disabled={!isOutputTextInlineable}>
|
||||
<ListItemDecorator><VerticalAlignBottomIcon /></ListItemDecorator>
|
||||
Inline <span style={{ opacity: 0.5 }}>text attachments</span>
|
||||
</MenuItem>
|
||||
<MenuItem onClick={handleClearAttachments}>
|
||||
<ListItemDecorator><ClearIcon /></ListItemDecorator>
|
||||
Clear
|
||||
</MenuItem>
|
||||
</CloseableMenu>
|
||||
)}
|
||||
|
||||
{/* 'Clear' Confirmation */}
|
||||
{confirmClearAttachments && (
|
||||
<ConfirmationModal
|
||||
open onClose={() => setConfirmClearAttachments(false)} onPositive={handleClearAttachmentsConfirmed}
|
||||
title='Confirm Removal'
|
||||
positiveActionText='Remove All'
|
||||
confirmationText={`This action will remove all (${attachments.length}) attachments. Do you want to proceed?`}
|
||||
/>
|
||||
)}
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -0,0 +1,335 @@
|
||||
import { callBrowseFetchPage } from '~/modules/browse/browse.client';
|
||||
|
||||
import { createBase36Uid } from '~/common/util/textUtils';
|
||||
import { htmlTableToMarkdown } from '~/common/util/htmlTableToMarkdown';
|
||||
import { pdfToText } from '~/common/util/pdfUtils';
|
||||
|
||||
import type { Attachment, AttachmentConverter, AttachmentId, AttachmentInput, AttachmentSource } from './store-attachments';
|
||||
import type { ComposerOutputMultiPart } from '../composer.types';
|
||||
|
||||
|
||||
// extensions to treat as plain text
|
||||
const PLAIN_TEXT_EXTENSIONS: string[] = ['.ts', '.tsx'];
|
||||
|
||||
/**
|
||||
* Creates a new Attachment object.
|
||||
*/
|
||||
export function attachmentCreate(source: AttachmentSource, checkDuplicates: AttachmentId[]): Attachment {
|
||||
return {
|
||||
id: createBase36Uid(checkDuplicates),
|
||||
source: source,
|
||||
label: 'Loading...',
|
||||
ref: '',
|
||||
inputLoading: false,
|
||||
inputError: null,
|
||||
input: undefined,
|
||||
converters: [],
|
||||
converterIdx: null,
|
||||
outputsConverting: false,
|
||||
outputs: [],
|
||||
// metadata: {},
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously loads the input for an Attachment object.
|
||||
*
|
||||
* @param {Readonly<AttachmentSource>} source - The source of the attachment.
|
||||
* @param {(changes: Partial<Attachment>) => void} edit - A function to edit the Attachment object.
|
||||
*/
|
||||
export async function attachmentLoadInputAsync(source: Readonly<AttachmentSource>, edit: (changes: Partial<Attachment>) => void) {
|
||||
edit({ inputLoading: true });
|
||||
|
||||
switch (source.media) {
|
||||
|
||||
// Download URL (page, file, ..) and attach as input
|
||||
case 'url':
|
||||
edit({ label: source.refUrl, ref: source.refUrl });
|
||||
try {
|
||||
const page = await callBrowseFetchPage(source.url);
|
||||
if (page.content) {
|
||||
edit({
|
||||
input: {
|
||||
mimeType: 'text/plain',
|
||||
data: page.content,
|
||||
dataSize: page.content.length,
|
||||
},
|
||||
});
|
||||
} else
|
||||
edit({ inputError: 'No content found at this link' });
|
||||
} catch (error: any) {
|
||||
edit({ inputError: `Issue downloading page: ${error?.message || (typeof error === 'string' ? error : JSON.stringify(error))}` });
|
||||
}
|
||||
break;
|
||||
|
||||
// Attach file as input
|
||||
case 'file':
|
||||
edit({ label: source.refPath, ref: source.refPath });
|
||||
|
||||
// fix missing/wrong mimetypes
|
||||
let mimeType = source.fileWithHandle.type;
|
||||
if (!mimeType) {
|
||||
// see note on 'attachAppendDataTransfer'; this is a fallback for drag/drop missing Mimes sometimes
|
||||
console.warn('Assuming the attachment is text/plain. From:', source.origin, ', name:', source.refPath);
|
||||
mimeType = 'text/plain';
|
||||
} else {
|
||||
// possibly fix wrongly assigned mimetypes (from the extension alone)
|
||||
if (!mimeType.startsWith('text/') && PLAIN_TEXT_EXTENSIONS.some(ext => source.refPath.endsWith(ext)))
|
||||
mimeType = 'text/plain';
|
||||
}
|
||||
|
||||
// UX: just a hint of a loading state
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
|
||||
try {
|
||||
const fileArrayBuffer = await source.fileWithHandle.arrayBuffer();
|
||||
edit({
|
||||
input: {
|
||||
mimeType,
|
||||
data: fileArrayBuffer,
|
||||
dataSize: fileArrayBuffer.byteLength,
|
||||
},
|
||||
});
|
||||
} catch (error: any) {
|
||||
edit({ inputError: `Issue loading file: ${error?.message || (typeof error === 'string' ? error : JSON.stringify(error))}` });
|
||||
}
|
||||
break;
|
||||
|
||||
case 'text':
|
||||
if (source.textHtml && source.textPlain) {
|
||||
edit({
|
||||
label: 'Rich Text',
|
||||
ref: '',
|
||||
input: {
|
||||
mimeType: 'text/plain',
|
||||
data: source.textPlain,
|
||||
dataSize: source.textPlain!.length,
|
||||
altMimeType: 'text/html',
|
||||
altData: source.textHtml,
|
||||
},
|
||||
});
|
||||
} else {
|
||||
const text = source.textHtml || source.textPlain || '';
|
||||
edit({
|
||||
label: 'Text',
|
||||
ref: '',
|
||||
input: {
|
||||
mimeType: 'text/plain',
|
||||
data: text,
|
||||
dataSize: text.length,
|
||||
},
|
||||
});
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
edit({ inputLoading: false });
|
||||
}
|
||||
|
||||
/**
|
||||
* Defines the possible converters for an Attachment object based on its input type.
|
||||
*
|
||||
* @param {AttachmentSource['media']} sourceType - The media type of the attachment source.
|
||||
* @param {Readonly<AttachmentInput>} input - The input of the attachment.
|
||||
* @param {(changes: Partial<Attachment>) => void} edit - A function to edit the Attachment object.
|
||||
*/
|
||||
export function attachmentDefineConverters(sourceType: AttachmentSource['media'], input: Readonly<AttachmentInput>, edit: (changes: Partial<Attachment>) => void) {
|
||||
|
||||
// return all the possible converters for the input
|
||||
const converters: AttachmentConverter[] = [];
|
||||
|
||||
switch (true) {
|
||||
|
||||
// plain text types
|
||||
case ['text/plain', 'text/html', 'text/markdown', 'text/csv', 'application/json'].includes(input.mimeType):
|
||||
// handle a secondary layer of HTML 'text' origins: drop, paste, and clipboard-read
|
||||
const textOriginHtml = sourceType === 'text' && input.altMimeType === 'text/html' && !!input.altData;
|
||||
const isHtmlTable = !!input.altData?.startsWith('<table');
|
||||
|
||||
// p1: Tables
|
||||
if (textOriginHtml && isHtmlTable) {
|
||||
converters.push({
|
||||
id: 'rich-text-table',
|
||||
name: 'Markdown Table',
|
||||
});
|
||||
}
|
||||
|
||||
// p2: Text
|
||||
converters.push({
|
||||
id: 'text',
|
||||
name: 'Text',
|
||||
});
|
||||
|
||||
// p3: Html
|
||||
if (textOriginHtml) {
|
||||
converters.push({
|
||||
id: 'rich-text',
|
||||
name: 'HTML',
|
||||
});
|
||||
}
|
||||
break;
|
||||
|
||||
// PDF
|
||||
case ['application/pdf', 'application/x-pdf', 'application/acrobat'].includes(input.mimeType):
|
||||
converters.push({ id: 'pdf-text', name: `PDF To Text` });
|
||||
converters.push({ id: 'pdf-images', name: `PDF To Images`, disabled: true });
|
||||
break;
|
||||
|
||||
// images
|
||||
case input.mimeType.startsWith('image/'):
|
||||
converters.push({ id: 'image', name: `Image (coming soon)` });
|
||||
converters.push({ id: 'image-ocr', name: 'As Text (OCR)' });
|
||||
break;
|
||||
|
||||
// catch-all
|
||||
default:
|
||||
converters.push({ id: 'unhandled', name: `${input.mimeType}`, unsupported: true });
|
||||
converters.push({ id: 'text', name: 'As Text' });
|
||||
break;
|
||||
}
|
||||
|
||||
edit({ converters });
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts the input of an Attachment object based on the selected converter.
|
||||
*
|
||||
* @param {Readonly<Attachment>} attachment - The Attachment object to convert.
|
||||
* @param {number | null} converterIdx - The index of the selected conversion in the Attachment object's converters array.
|
||||
* @param {(changes: Partial<Attachment>) => void} edit - A function to edit the Attachment object.
|
||||
*/
|
||||
export async function attachmentPerformConversion(attachment: Readonly<Attachment>, converterIdx: number | null, edit: (changes: Partial<Attachment>) => void) {
|
||||
|
||||
// set converter index
|
||||
converterIdx = (converterIdx !== null && converterIdx >= 0 && converterIdx < attachment.converters.length) ? converterIdx : null;
|
||||
edit({
|
||||
converterIdx: converterIdx,
|
||||
outputs: [],
|
||||
});
|
||||
|
||||
// get converter
|
||||
const { ref, input } = attachment;
|
||||
const converter = converterIdx !== null ? attachment.converters[converterIdx] : null;
|
||||
if (!converter || !input)
|
||||
return;
|
||||
|
||||
edit({
|
||||
outputsConverting: true,
|
||||
});
|
||||
|
||||
// input datacould be a string or an ArrayBuffer
|
||||
function inputDataToString(data: string | ArrayBuffer | null | undefined): string {
|
||||
if (typeof data === 'string')
|
||||
return data;
|
||||
if (data instanceof ArrayBuffer)
|
||||
return new TextDecoder().decode(data);
|
||||
return '';
|
||||
}
|
||||
|
||||
// apply converter to the input
|
||||
const outputs: ComposerOutputMultiPart = [];
|
||||
switch (converter.id) {
|
||||
|
||||
// text as-is
|
||||
case 'text':
|
||||
outputs.push({
|
||||
type: 'text-block',
|
||||
text: inputDataToString(input.data),
|
||||
title: ref,
|
||||
collapsible: true,
|
||||
});
|
||||
break;
|
||||
|
||||
// html as-is
|
||||
case 'rich-text':
|
||||
outputs.push({
|
||||
type: 'text-block',
|
||||
text: input.altData!,
|
||||
title: ref,
|
||||
collapsible: true,
|
||||
});
|
||||
break;
|
||||
|
||||
// html to markdown table
|
||||
case 'rich-text-table':
|
||||
let mdTable: string;
|
||||
try {
|
||||
mdTable = htmlTableToMarkdown(input.altData!);
|
||||
} catch (error) {
|
||||
// fallback to text/plain
|
||||
mdTable = inputDataToString(input.data);
|
||||
}
|
||||
outputs.push({
|
||||
type: 'text-block',
|
||||
text: mdTable,
|
||||
title: ref,
|
||||
collapsible: true,
|
||||
});
|
||||
break;
|
||||
|
||||
case 'pdf-text':
|
||||
if (!(input.data instanceof ArrayBuffer)) {
|
||||
console.log('Expected ArrayBuffer for PDF converter, got:', typeof input.data);
|
||||
break;
|
||||
}
|
||||
// duplicate the ArrayBuffer to avoid mutation
|
||||
const pdfData = new Uint8Array(input.data.slice(0));
|
||||
const pdfText = await pdfToText(pdfData);
|
||||
outputs.push({
|
||||
type: 'text-block',
|
||||
text: pdfText,
|
||||
title: ref,
|
||||
collapsible: true,
|
||||
});
|
||||
break;
|
||||
|
||||
case 'pdf-images':
|
||||
// TODO: extract all pages as individual images
|
||||
break;
|
||||
|
||||
case 'image':
|
||||
// TODO: continue here
|
||||
/*outputs.push({
|
||||
type: 'image-part',
|
||||
base64Url: `data:notImplemented.yet:)`,
|
||||
collapsible: false,
|
||||
});*/
|
||||
break;
|
||||
|
||||
case 'image-ocr':
|
||||
if (!(input.data instanceof ArrayBuffer)) {
|
||||
console.log('Expected ArrayBuffer for Image OCR converter, got:', typeof input.data);
|
||||
break;
|
||||
}
|
||||
try {
|
||||
const { recognize } = await import('tesseract.js');
|
||||
const buffer = Buffer.from(input.data);
|
||||
const result = await recognize(buffer, undefined, {
|
||||
errorHandler: e => console.error(e),
|
||||
logger: (message) => {
|
||||
if (message.status === 'recognizing text')
|
||||
console.log('OCR progress:', message.progress);
|
||||
},
|
||||
});
|
||||
outputs.push({
|
||||
type: 'text-block',
|
||||
text: result.data.text,
|
||||
title: ref,
|
||||
collapsible: true,
|
||||
});
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
}
|
||||
break;
|
||||
|
||||
case 'unhandled':
|
||||
// force the user to explicitly select 'as text' if they want to proceed
|
||||
break;
|
||||
}
|
||||
|
||||
// update
|
||||
edit({
|
||||
outputsConverting: false,
|
||||
outputs,
|
||||
});
|
||||
}
|
||||
@@ -0,0 +1,42 @@
|
||||
/*
|
||||
|
||||
/// REDUCER
|
||||
|
||||
import { ContentReducer } from '~/modules/aifn/summarize/ContentReducer';
|
||||
|
||||
const [reducerText, setReducerText] = React.useState('');
|
||||
const [reducerTextTokens, setReducerTextTokens] = React.useState(0);
|
||||
|
||||
{reducerText?.length >= 1 &&
|
||||
<ContentReducer
|
||||
initialText={reducerText} initialTokens={reducerTextTokens} tokenLimit={remainingTokens}
|
||||
onReducedText={handleReducedText} onClose={handleReducerClose}
|
||||
/>
|
||||
}
|
||||
const handleReducerClose = () => setReducerText('');
|
||||
|
||||
const handleReducedText = (text: string) => {
|
||||
handleReducerClose();
|
||||
setComposeText(_t => _t + text);
|
||||
};
|
||||
|
||||
const handleAttachFiles = async (files: FileList, overrideFileNames?: string[]): Promise<void> => {
|
||||
|
||||
// see how we fare on budget
|
||||
if (chatLLMId) {
|
||||
const newTextTokens = countModelTokens(newText, chatLLMId, 'reducer trigger');
|
||||
|
||||
// simple trigger for the reduction dialog
|
||||
if (newTextTokens > remainingTokens) {
|
||||
setReducerTextTokens(newTextTokens);
|
||||
setReducerText(newText);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// within the budget, so just append
|
||||
setComposeText(text => expandPromptTemplate(PromptTemplates.Concatenate, { text: newText })(text));
|
||||
|
||||
|
||||
|
||||
*/
|
||||
@@ -0,0 +1,201 @@
|
||||
import { create } from 'zustand';
|
||||
import type { FileWithHandle } from 'browser-fs-access';
|
||||
|
||||
import type { ComposerOutputMultiPart } from '../composer.types';
|
||||
import { attachmentPerformConversion, attachmentCreate, attachmentDefineConverters, attachmentLoadInputAsync } from './pipeline';
|
||||
|
||||
|
||||
// Attachment Types
|
||||
|
||||
// Origins of DataTransfer-based attachments (drag-and-drop, clipboard paste)
export type AttachmentSourceOriginDTO = 'drop' | 'paste';
// All origins that can produce a File attachment
export type AttachmentSourceOriginFile = 'camera' | 'file-open' | 'clipboard-read' | AttachmentSourceOriginDTO;

// Discriminated union (on 'media') describing where an attachment came from:
// a URL to download, a local file handle, or raw text (plain and/or HTML)
export type AttachmentSource = {
  media: 'url';
  url: string;
  refUrl: string;
} | {
  media: 'file';
  origin: AttachmentSourceOriginFile,
  fileWithHandle: FileWithHandle;
  refPath: string;
} | {
  media: 'text';
  method: 'clipboard-read' | AttachmentSourceOriginDTO;
  textPlain?: string;
  textHtml?: string;
};


// The loaded raw data of an attachment, before conversion
export type AttachmentInput = {
  mimeType: string; // Original MIME type of the file
  data: string | ArrayBuffer; // The original data of the attachment
  dataSize: number; // Size of the original data in bytes
  altMimeType?: string; // Alternative MIME type for the input
  altData?: string; // Alternative data for the input
  // preview?: AttachmentPreview; // Preview of the input
};


// Identifiers for the input -> output conversion strategies (see pipeline.ts)
export type AttachmentConverterType =
  | 'text' | 'rich-text' | 'rich-text-table'
  | 'pdf-text' | 'pdf-images'
  | 'image' | 'image-ocr'
  | 'unhandled';

// One selectable conversion option offered to the user for an attachment
export type AttachmentConverter = {
  id: AttachmentConverterType;
  name: string;
  disabled?: boolean; // shown but not selectable
  unsupported?: boolean; // the input type itself is not handled
  // outputType: ComposerOutputPartType; // The type of the output after conversion
  // isAutonomous: boolean; // Whether the conversion does not require user input
  // isAsync: boolean; // Whether the conversion is asynchronous
  // progress: number; // Conversion progress percentage (0..1)
  // errorMessage?: string; // Error message if the conversion failed
}


export type AttachmentId = string;

// Full lifecycle state of an attachment: source -> input -> converters -> outputs
export type Attachment = {
  readonly id: AttachmentId;
  readonly source: AttachmentSource,
  label: string; // user-visible name
  ref: string; // reference string (path/url) used as the title of converted outputs

  // input loading state
  inputLoading: boolean;
  inputError: string | null;
  input?: AttachmentInput;

  // options to convert the input
  converters: AttachmentConverter[]; // List of available converters for this attachment
  converterIdx: number | null; // Index of the selected converter

  // conversion results
  outputsConverting: boolean;
  outputs: ComposerOutputMultiPart; // undefined: not yet converted, []: conversion failed, [ {}+ ]: conversion succeeded

  // metadata: {
  //   size?: number; // Size of the attachment in bytes
  //   creationDate?: Date; // Creation date of the file
  //   modifiedDate?: Date; // Last modified date of the file
  //   altText?: string; // Alternative text for images for screen readers
  // };
};


/*export type AttachmentPreview = {
  renderer: 'noPreview',
  title: string; // A title for the preview
} | {
  renderer: 'textPreview'
  fileName: string; // The name of the file
  snippet: string; // A text snippet for documents
  tooltip?: string; // A tooltip for the preview
} | {
  renderer: 'imagePreview'
  thumbnail: string; // A thumbnail preview for images, videos, etc.
  tooltip?: string; // A tooltip for the preview
};*/


/// Store

// Zustand store shape; underscore-prefixed members are internal helpers
interface AttachmentsStore {

  attachments: Attachment[];

  createAttachment: (source: AttachmentSource) => Promise<void>;
  clearAttachments: () => void;
  removeAttachment: (attachmentId: AttachmentId) => void;
  moveAttachment: (attachmentId: AttachmentId, delta: 1 | -1) => void;
  setConverterIdx: (attachmentId: AttachmentId, converterIdx: number | null) => Promise<void>;

  _editAttachment: (attachmentId: AttachmentId, update: Partial<Attachment> | ((attachment: Attachment) => Partial<Attachment>)) => void;
  _getAttachment: (attachmentId: AttachmentId) => Attachment | undefined;

}
|
||||
|
||||
export const useAttachmentsStore = create<AttachmentsStore>()(
|
||||
(_set, _get) => ({
|
||||
|
||||
attachments: [],
|
||||
|
||||
createAttachment: async (source: AttachmentSource) => {
|
||||
const { attachments, _getAttachment, _editAttachment, setConverterIdx } = _get();
|
||||
|
||||
const attachment = attachmentCreate(source, attachments.map(a => a.id));
|
||||
|
||||
_set({
|
||||
attachments: [...attachments, attachment],
|
||||
});
|
||||
|
||||
const editFn = (changes: Partial<Attachment>) => _editAttachment(attachment.id, changes);
|
||||
|
||||
// 1.Resolve the Input
|
||||
await attachmentLoadInputAsync(source, editFn);
|
||||
const loaded = _getAttachment(attachment.id);
|
||||
if (!loaded || !loaded.input)
|
||||
return;
|
||||
|
||||
// 2. Define the I->O Converters
|
||||
attachmentDefineConverters(source.media, loaded.input, editFn);
|
||||
const defined = _getAttachment(attachment.id);
|
||||
if (!defined || !defined.converters.length || defined.converterIdx !== null)
|
||||
return;
|
||||
|
||||
// 3. Select the first Converter
|
||||
const firstEnabledIndex = defined.converters.findIndex(_c => !_c.disabled);
|
||||
await setConverterIdx(attachment.id, firstEnabledIndex > -1 ? firstEnabledIndex : 0);
|
||||
},
|
||||
|
||||
clearAttachments: () => _set({
|
||||
attachments: [],
|
||||
}),
|
||||
|
||||
removeAttachment: (attachmentId: AttachmentId) =>
|
||||
_set(state => ({
|
||||
attachments: state.attachments.filter(attachment => attachment.id !== attachmentId),
|
||||
})),
|
||||
|
||||
moveAttachment: (attachmentId: AttachmentId, delta: 1 | -1) =>
|
||||
_set(state => {
|
||||
const attachments = [...state.attachments];
|
||||
const currentIdx = attachments.findIndex(a => a.id === attachmentId);
|
||||
|
||||
// If the attachment is not found, or if trying to move beyond the array boundaries, no move is needed
|
||||
if (currentIdx === -1 || (currentIdx === 0 && delta === -1) || (currentIdx === attachments.length - 1 && delta === 1))
|
||||
return state;
|
||||
|
||||
// Swap the attachment with the adjacent one in the direction of delta
|
||||
const targetIdx = currentIdx + delta;
|
||||
[attachments[currentIdx], attachments[targetIdx]] = [attachments[targetIdx], attachments[currentIdx]];
|
||||
|
||||
return { attachments };
|
||||
}),
|
||||
|
||||
setConverterIdx: async (attachmentId: AttachmentId, converterIdx: number | null) => {
|
||||
const { _getAttachment, _editAttachment } = _get();
|
||||
const attachment = _getAttachment(attachmentId);
|
||||
if (!attachment || attachment.converterIdx === converterIdx)
|
||||
return;
|
||||
|
||||
const editFn = (changes: Partial<Attachment>) => _editAttachment(attachmentId, changes);
|
||||
|
||||
await attachmentPerformConversion(attachment, converterIdx, editFn);
|
||||
},
|
||||
|
||||
_editAttachment: (attachmentId: AttachmentId, update: Partial<Attachment> | ((attachment: Attachment) => Partial<Attachment>)) =>
|
||||
_set(state => ({
|
||||
attachments: state.attachments.map((attachment: Attachment): Attachment =>
|
||||
attachment.id === attachmentId
|
||||
? { ...attachment, ...(typeof update === 'function' ? update(attachment) : update) }
|
||||
: attachment,
|
||||
),
|
||||
})),
|
||||
|
||||
_getAttachment: (attachmentId: AttachmentId) =>
|
||||
_get().attachments.find(a => a.id === attachmentId),
|
||||
|
||||
}),
|
||||
);
|
||||
@@ -0,0 +1,165 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
import type { FileWithHandle } from 'browser-fs-access';
|
||||
|
||||
import { addSnackbar } from '~/common/components/useSnackbarsStore';
|
||||
import { asValidURL } from '~/common/util/urlUtils';
|
||||
import { extractFilePathsWithCommonRadix } from '~/common/util/dropTextUtils';
|
||||
import { getClipboardItems } from '~/common/util/clipboardUtils';
|
||||
|
||||
import { AttachmentSourceOriginDTO, AttachmentSourceOriginFile, useAttachmentsStore } from './store-attachments';
|
||||
|
||||
|
||||
export const useAttachments = (enableLoadURLs: boolean) => {
|
||||
|
||||
// state
|
||||
|
||||
const { attachments, clearAttachments, createAttachment, removeAttachment } = useAttachmentsStore(state => ({
|
||||
attachments: state.attachments,
|
||||
clearAttachments: state.clearAttachments,
|
||||
createAttachment: state.createAttachment,
|
||||
removeAttachment: state.removeAttachment,
|
||||
}), shallow);
|
||||
|
||||
|
||||
// Creation helpers
|
||||
|
||||
const attachAppendFile = React.useCallback((origin: AttachmentSourceOriginFile, fileWithHandle: FileWithHandle, overrideFileName?: string) =>
|
||||
createAttachment({
|
||||
media: 'file', origin, fileWithHandle, refPath: overrideFileName || fileWithHandle.name,
|
||||
})
|
||||
, [createAttachment]);
|
||||
|
||||
|
||||
const attachAppendDataTransfer = React.useCallback((dt: DataTransfer, method: AttachmentSourceOriginDTO, attachText: boolean): 'as_files' | 'as_url' | 'as_text' | false => {
|
||||
|
||||
// attach File(s)
|
||||
if (dt.files.length >= 1) {
|
||||
// rename files from a common prefix, to better relate them (if the transfer contains a list of paths)
|
||||
let overrideFileNames: string[] = [];
|
||||
if (dt.types.includes('text/plain')) {
|
||||
const plainText = dt.getData('text/plain');
|
||||
overrideFileNames = extractFilePathsWithCommonRadix(plainText);
|
||||
}
|
||||
const overrideNames = overrideFileNames.length === dt.files.length;
|
||||
|
||||
// attach as Files (paste and drop keep the original filename)
|
||||
for (let i = 0; i < dt.files.length; i++) {
|
||||
const file = dt.files[i];
|
||||
// drag/drop of folders (or .tsx from IntelliJ) will have no type
|
||||
if (!file.type) {
|
||||
// NOTE: we are fixing it in attachmentLoadInputAsync, but would be better to do it here
|
||||
}
|
||||
void attachAppendFile(method, file, overrideNames ? overrideFileNames[i] || undefined : undefined);
|
||||
}
|
||||
return 'as_files';
|
||||
}
|
||||
|
||||
// attach as URL
|
||||
const textPlain = dt.getData('text/plain') || '';
|
||||
if (textPlain && enableLoadURLs) {
|
||||
const textPlainUrl = asValidURL(textPlain);
|
||||
if (textPlainUrl && textPlainUrl) {
|
||||
void createAttachment({
|
||||
media: 'url', url: textPlainUrl, refUrl: textPlain,
|
||||
});
|
||||
return 'as_url';
|
||||
}
|
||||
}
|
||||
|
||||
// attach as Text/Html (further conversion, e.g. to markdown is done later)
|
||||
const textHtml = dt.getData('text/html') || '';
|
||||
if (attachText && (textHtml || textPlain)) {
|
||||
void createAttachment({
|
||||
media: 'text', method, textPlain, textHtml,
|
||||
});
|
||||
return 'as_text';
|
||||
}
|
||||
|
||||
if (attachText)
|
||||
console.warn(`Unhandled '${method}' attachment: `, dt.types?.map(t => `${t}: ${dt.getData(t)}`));
|
||||
|
||||
// did not attach anything from this data transfer
|
||||
return false;
|
||||
}, [attachAppendFile, createAttachment, enableLoadURLs]);
|
||||
|
||||
|
||||
const attachAppendClipboardItems = React.useCallback(async () => {
|
||||
|
||||
// if there's an issue accessing the clipboard, show it passively
|
||||
const clipboardItems = await getClipboardItems();
|
||||
if (clipboardItems === null) {
|
||||
addSnackbar({
|
||||
key: 'clipboard-issue',
|
||||
type: 'issue',
|
||||
message: 'Clipboard empty or access denied',
|
||||
overrides: {
|
||||
autoHideDuration: 2000,
|
||||
},
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// loop on all the possible attachments
|
||||
for (const clipboardItem of clipboardItems) {
|
||||
|
||||
// attach as image
|
||||
let imageAttached = false;
|
||||
for (const mimeType of clipboardItem.types) {
|
||||
if (mimeType.startsWith('image/')) {
|
||||
try {
|
||||
const imageBlob = await clipboardItem.getType(mimeType);
|
||||
const imageName = mimeType.replace('image/', 'clipboard.').replaceAll('/', '.') || 'clipboard.png';
|
||||
const imageFile = new File([imageBlob], imageName, { type: mimeType });
|
||||
void attachAppendFile('clipboard-read', imageFile);
|
||||
imageAttached = true;
|
||||
} catch (error) {
|
||||
// ignore getType error..
|
||||
}
|
||||
}
|
||||
}
|
||||
if (imageAttached)
|
||||
continue;
|
||||
|
||||
// get the Plain text
|
||||
const textPlain = clipboardItem.types.includes('text/plain') ? await clipboardItem.getType('text/plain').then(blob => blob.text()) : '';
|
||||
|
||||
// attach as URL
|
||||
if (textPlain && enableLoadURLs) {
|
||||
const textPlainUrl = asValidURL(textPlain);
|
||||
if (textPlainUrl && textPlainUrl.trim()) {
|
||||
void createAttachment({
|
||||
media: 'url', url: textPlainUrl.trim(), refUrl: textPlain,
|
||||
});
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// attach as Text
|
||||
const textHtml = clipboardItem.types.includes('text/html') ? await clipboardItem.getType('text/html').then(blob => blob.text()) : '';
|
||||
if (textHtml || textPlain) {
|
||||
void createAttachment({
|
||||
media: 'text', method: 'clipboard-read', textPlain, textHtml,
|
||||
});
|
||||
continue;
|
||||
}
|
||||
|
||||
console.warn('Clipboard item has no text/html or text/plain item.', clipboardItem.types, clipboardItem);
|
||||
}
|
||||
}, [attachAppendFile, createAttachment, enableLoadURLs]);
|
||||
|
||||
|
||||
return {
|
||||
// state
|
||||
attachments,
|
||||
|
||||
// create attachments
|
||||
attachAppendClipboardItems,
|
||||
attachAppendDataTransfer,
|
||||
attachAppendFile,
|
||||
|
||||
// manage attachments
|
||||
clearAttachments,
|
||||
removeAttachment,
|
||||
};
|
||||
};
|
||||
@@ -0,0 +1,147 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import type { DLLMId } from '~/modules/llms/store-llms';
|
||||
|
||||
import { countModelTokens } from '~/common/util/token-counter';
|
||||
|
||||
import type { Attachment, AttachmentId } from './store-attachments';
|
||||
import type { ComposerOutputMultiPart, ComposerOutputPartType } from '../composer.types';
|
||||
|
||||
|
||||
// Aggregated, LLM-aware view over all attachments, computed per-render by useLLMAttachments
export interface LLMAttachments {
  attachments: LLMAttachment[];
  // collapses the outputs of one attachment, optionally prepending an initial text block
  getAttachmentOutputs: (initialTextBlockText: string | null, attachmentId: AttachmentId) => ComposerOutputMultiPart;
  // collapses the outputs of all attachments, optionally prepending an initial text block
  getAttachmentsOutputs: (initialTextBlockText: string | null) => ComposerOutputMultiPart;
  // true only when every attachment is attachable
  // NOTE(review): spelled 'Attacheable' here but 'Attachable' in LLMAttachment — consider unifying
  isOutputAttacheable: boolean;
  // true only when every attachment can be inlined as text
  isOutputTextInlineable: boolean;
  // sum of the per-attachment approximate token counts
  tokenCountApprox: number;
}

// Per-attachment view, pairing the raw Attachment with its LLM-related flags
export interface LLMAttachment {
  attachment: Attachment;
  attachmentOutputs: ComposerOutputMultiPart;
  isUnconvertible: boolean;
  isOutputMissing: boolean;
  isOutputAttachable: boolean;
  isOutputTextInlineable: boolean;
  // null when the count is unavailable (e.g. no text output or no LLM selected)
  tokenCountApprox: number | null;
}
|
||||
|
||||
|
||||
export function useLLMAttachments(attachments: Attachment[], chatLLMId: DLLMId | null): LLMAttachments {
|
||||
return React.useMemo(() => {
|
||||
|
||||
// HACK: in the future, switch to LLM capabilities (LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, etc.)
|
||||
const supportsImages = !!chatLLMId?.endsWith('-vision-preview');
|
||||
const supportedOutputPartTypes: ComposerOutputPartType[] = supportsImages ? ['text-block', 'image-part'] : ['text-block'];
|
||||
|
||||
const llmAttachments = attachments.map(attachment => toLLMAttachment(attachment, supportedOutputPartTypes, chatLLMId));
|
||||
|
||||
const getAttachmentOutputs = (initialTextBlockText: string | null, attachmentId: AttachmentId): ComposerOutputMultiPart => {
|
||||
// get outputs of a specific attachment
|
||||
const outputs = attachments.find(a => a.id === attachmentId)?.outputs || [];
|
||||
return attachmentCollapseOutputs(initialTextBlockText, outputs);
|
||||
};
|
||||
|
||||
const getAttachmentsOutputs = (initialTextBlockText: string | null): ComposerOutputMultiPart => {
|
||||
// accumulate all outputs of all attachments
|
||||
const allOutputs = llmAttachments.reduce((acc, a) => acc.concat(a.attachment.outputs), [] as ComposerOutputMultiPart);
|
||||
return attachmentCollapseOutputs(initialTextBlockText, allOutputs);
|
||||
};
|
||||
|
||||
return {
|
||||
attachments: llmAttachments,
|
||||
getAttachmentOutputs,
|
||||
getAttachmentsOutputs,
|
||||
isOutputAttacheable: llmAttachments.every(a => a.isOutputAttachable),
|
||||
isOutputTextInlineable: llmAttachments.every(a => a.isOutputTextInlineable),
|
||||
tokenCountApprox: llmAttachments.reduce((acc, a) => acc + (a.tokenCountApprox || 0), 0),
|
||||
};
|
||||
}, [attachments, chatLLMId]);
|
||||
}
|
||||
|
||||
export function getTextBlockText(outputs: ComposerOutputMultiPart): string | null {
|
||||
const textOutputs = outputs.filter(part => part.type === 'text-block');
|
||||
return (textOutputs.length === 1 && textOutputs[0].type === 'text-block') ? textOutputs[0].text : null;
|
||||
}
|
||||
|
||||
|
||||
function toLLMAttachment(attachment: Attachment, supportedOutputPartTypes: ComposerOutputPartType[], llmForTokenCount: DLLMId | null): LLMAttachment {
|
||||
const { converters, outputs } = attachment;
|
||||
|
||||
const isUnconvertible = converters.length === 0;
|
||||
const isOutputMissing = outputs.length === 0;
|
||||
const isOutputAttachable = areAllOutputsSupported(outputs, supportedOutputPartTypes);
|
||||
const isOutputTextInlineable = areAllOutputsSupported(outputs, supportedOutputPartTypes.filter(pt => pt === 'text-block'));
|
||||
|
||||
const attachmentOutputs = attachmentCollapseOutputs(null, outputs);
|
||||
const tokenCountApprox = llmForTokenCount
|
||||
? attachmentOutputs.reduce((acc, output) => {
|
||||
if (output.type === 'text-block')
|
||||
return acc + countModelTokens(output.text, llmForTokenCount, 'attachments tokens count');
|
||||
console.warn('Unhandled token preview for output type:', output.type);
|
||||
return acc;
|
||||
}, 0)
|
||||
: null;
|
||||
|
||||
return {
|
||||
attachment,
|
||||
attachmentOutputs,
|
||||
isUnconvertible,
|
||||
isOutputMissing,
|
||||
isOutputAttachable,
|
||||
isOutputTextInlineable,
|
||||
tokenCountApprox,
|
||||
};
|
||||
}
|
||||
|
||||
function areAllOutputsSupported(outputs: ComposerOutputMultiPart, supportedOutputPartTypes: ComposerOutputPartType[]) {
|
||||
return outputs.length
|
||||
? outputs.every(output => supportedOutputPartTypes.includes(output.type))
|
||||
: false;
|
||||
}
|
||||
|
||||
function attachmentCollapseOutputs(initialTextBlockText: string | null, outputs: ComposerOutputMultiPart): ComposerOutputMultiPart {
|
||||
const accumulatedOutputs: ComposerOutputMultiPart = [];
|
||||
|
||||
// if there's initial text, make it a collapsible default (unquited) text block
|
||||
if (initialTextBlockText !== null) {
|
||||
accumulatedOutputs.push({
|
||||
type: 'text-block',
|
||||
text: initialTextBlockText,
|
||||
title: null,
|
||||
collapsible: true,
|
||||
});
|
||||
}
|
||||
|
||||
// Accumulate attachment outputs of the same type and 'collapsible' into a single object of that type.
|
||||
for (const output of outputs) {
|
||||
const last = accumulatedOutputs[accumulatedOutputs.length - 1];
|
||||
|
||||
// accumulationg over an existing part of the same type
|
||||
if (last && last.type === output.type && output.collapsible) {
|
||||
switch (last.type) {
|
||||
case 'text-block':
|
||||
last.text += `\n\n\`\`\`${output.title}\n${output.text}\n\`\`\``;
|
||||
break;
|
||||
default:
|
||||
console.warn('Unhandled collapsing for output type:', output.type);
|
||||
}
|
||||
}
|
||||
// start a new part
|
||||
else {
|
||||
if (output.type === 'text-block') {
|
||||
accumulatedOutputs.push({
|
||||
type: 'text-block',
|
||||
text: `\n\n\`\`\`${output.title}\n${output.text}\n\`\`\``,
|
||||
title: null,
|
||||
collapsible: false,
|
||||
});
|
||||
} else {
|
||||
accumulatedOutputs.push(output);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return accumulatedOutputs;
|
||||
}
|
||||
@@ -0,0 +1,15 @@
|
||||
export type ComposerOutputPartType = 'text-block' | 'image-part';
|
||||
|
||||
export type ComposerOutputPart = {
|
||||
type: 'text-block',
|
||||
text: string,
|
||||
title: string | null,
|
||||
collapsible: boolean,
|
||||
} | {
|
||||
// TODO: not implemented yet
|
||||
type: 'image-part',
|
||||
base64Url: string,
|
||||
collapsible: false,
|
||||
};
|
||||
|
||||
export type ComposerOutputMultiPart = ComposerOutputPart[];
|
||||
@@ -20,6 +20,7 @@ import ReplayIcon from '@mui/icons-material/Replay';
|
||||
import SettingsSuggestIcon from '@mui/icons-material/SettingsSuggest';
|
||||
import SmartToyOutlinedIcon from '@mui/icons-material/SmartToyOutlined';
|
||||
import TelegramIcon from '@mui/icons-material/Telegram';
|
||||
import VerticalAlignBottomIcon from '@mui/icons-material/VerticalAlignBottom';
|
||||
|
||||
import { CloseableMenu } from '~/common/components/CloseableMenu';
|
||||
import { DMessage } from '~/common/state/store-chats';
|
||||
@@ -205,6 +206,7 @@ export function ChatMessage(props: {
|
||||
isImagining?: boolean, isSpeaking?: boolean,
|
||||
onConversationBranch?: (messageId: string) => void,
|
||||
onConversationRestartFrom?: (messageId: string, offset: number) => void,
|
||||
onConversationTruncate?: (messageId: string) => void,
|
||||
onMessageDelete?: (messageId: string) => void,
|
||||
onMessageEdit?: (messageId: string, text: string) => void,
|
||||
onTextDiagram?: (messageId: string, text: string) => Promise<void>
|
||||
@@ -326,7 +328,12 @@ export function ChatMessage(props: {
|
||||
}
|
||||
};
|
||||
|
||||
const handleOpsDelete = (e: React.MouseEvent) => {
|
||||
const handleOpsTruncate = (_e: React.MouseEvent) => {
|
||||
props.onConversationTruncate && props.onConversationTruncate(messageId);
|
||||
closeOperationsMenu();
|
||||
};
|
||||
|
||||
const handleOpsDelete = (_e: React.MouseEvent) => {
|
||||
props.onMessageDelete && props.onMessageDelete(messageId);
|
||||
};
|
||||
|
||||
@@ -518,7 +525,7 @@ export function ChatMessage(props: {
|
||||
? <RenderLatex key={'latex-' + index} latexBlock={block} />
|
||||
: block.type === 'diff'
|
||||
? <RenderTextDiff key={'latex-' + index} diffBlock={block} />
|
||||
: (renderMarkdown && props.noMarkdown !== true && !fromSystem)
|
||||
: (renderMarkdown && props.noMarkdown !== true && !fromSystem && !(fromUser && block.content.startsWith('/')))
|
||||
? <RenderMarkdown key={'text-md-' + index} textBlock={block} />
|
||||
: <RenderText key={'text-' + index} textBlock={block} />)}
|
||||
|
||||
@@ -554,7 +561,7 @@ export function ChatMessage(props: {
|
||||
{/* Operations Menu (3 dots) */}
|
||||
{!!opsMenuAnchor && (
|
||||
<CloseableMenu
|
||||
placement='bottom-end' sx={{ minWidth: 280 }}
|
||||
dense placement='bottom-end' sx={{ minWidth: 280 }}
|
||||
open anchorEl={opsMenuAnchor} onClose={closeOperationsMenu}
|
||||
>
|
||||
<Box sx={{ display: 'flex', alignItems: 'center' }}>
|
||||
@@ -615,10 +622,16 @@ export function ChatMessage(props: {
|
||||
Speak
|
||||
</MenuItem>}
|
||||
{!!props.onConversationRestartFrom && <ListDivider />}
|
||||
{!!props.onConversationTruncate && (
|
||||
<MenuItem onClick={handleOpsTruncate} disabled={props.isBottom}>
|
||||
<ListItemDecorator><VerticalAlignBottomIcon /></ListItemDecorator>
|
||||
Truncate <span style={{ opacity: 0.5 }}>after</span>
|
||||
</MenuItem>
|
||||
)}
|
||||
{!!props.onMessageDelete && (
|
||||
<MenuItem onClick={handleOpsDelete} disabled={false /*fromSystem*/}>
|
||||
<ListItemDecorator><ClearIcon /></ListItemDecorator>
|
||||
Delete
|
||||
Delete <span style={{ opacity: 0.5 }}>message</span>
|
||||
</MenuItem>
|
||||
)}
|
||||
</CloseableMenu>
|
||||
@@ -627,12 +640,12 @@ export function ChatMessage(props: {
|
||||
{/* Selection (Contextual) Menu */}
|
||||
{!!selMenuAnchor && (
|
||||
<CloseableMenu
|
||||
placement='bottom-start' sx={{ minWidth: 220 }}
|
||||
dense placement='bottom-start' sx={{ minWidth: 220 }}
|
||||
open anchorEl={selMenuAnchor} onClose={closeSelectionMenu}
|
||||
>
|
||||
<MenuItem onClick={handleOpsCopy} sx={{ flex: 1 }}>
|
||||
<ListItemDecorator><ContentCopyIcon /></ListItemDecorator>
|
||||
Copy selection
|
||||
Copy <span style={{ opacity: 0.5 }}>selection</span>
|
||||
</MenuItem>
|
||||
{!!props.onTextDiagram && <MenuItem onClick={handleOpsDiagram} disabled={!couldDiagram || props.isImagining}>
|
||||
<ListItemDecorator><AccountTreeIcon color='success' /></ListItemDecorator>
|
||||
|
||||
@@ -6,7 +6,7 @@ import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
|
||||
|
||||
import { DMessage } from '~/common/state/store-chats';
|
||||
|
||||
import { TokenBadge } from '../composer/TokenBadge';
|
||||
import { TokenBadgeMemo } from '../composer/TokenBadge';
|
||||
import { makeAvatar, messageBackground } from './ChatMessage';
|
||||
|
||||
|
||||
@@ -94,7 +94,7 @@ export function CleanerMessage(props: { message: DMessage, isBottom: boolean, se
|
||||
</Typography>
|
||||
|
||||
{props.remainingTokens !== undefined && <Box sx={{ display: 'flex', minWidth: { xs: 32, sm: 45 }, justifyContent: 'flex-end' }}>
|
||||
<TokenBadge directTokens={messageTokenCount} tokenLimit={props.remainingTokens} inline />
|
||||
<TokenBadgeMemo direct={messageTokenCount} limit={props.remainingTokens} inline />
|
||||
</Box>}
|
||||
|
||||
<Typography level='body-md' sx={{
|
||||
|
||||
@@ -18,13 +18,13 @@ export const runBrowseUpdatingState = async (conversationId: string, url: string
|
||||
|
||||
try {
|
||||
|
||||
const text = await callBrowseFetchPage(url);
|
||||
if (!text) {
|
||||
const page = await callBrowseFetchPage(url);
|
||||
if (!page.content) {
|
||||
// noinspection ExceptionCaughtLocallyJS
|
||||
throw new Error('No text found.');
|
||||
}
|
||||
updateAssistantMessage({
|
||||
text: text,
|
||||
text: page.content,
|
||||
typing: false,
|
||||
});
|
||||
|
||||
|
||||
@@ -22,6 +22,9 @@ interface AppChatStore {
|
||||
autoTitleChat: boolean;
|
||||
setAutoTitleChat: (autoTitleChat: boolean) => void;
|
||||
|
||||
micTimeoutMs: number;
|
||||
setMicTimeoutMs: (micTimeoutMs: number) => void;
|
||||
|
||||
showTextDiff: boolean;
|
||||
setShowTextDiff: (showTextDiff: boolean) => void;
|
||||
|
||||
@@ -46,6 +49,9 @@ const useAppChatStore = create<AppChatStore>()(persist(
|
||||
autoTitleChat: true,
|
||||
setAutoTitleChat: (autoTitleChat: boolean) => _set({ autoTitleChat }),
|
||||
|
||||
micTimeoutMs: 2000,
|
||||
setMicTimeoutMs: (micTimeoutMs: number) => _set({ micTimeoutMs }),
|
||||
|
||||
showTextDiff: false,
|
||||
setShowTextDiff: (showTextDiff: boolean) => _set({ showTextDiff }),
|
||||
|
||||
@@ -91,6 +97,12 @@ export const getChatAutoAI = (): {
|
||||
autoTitleChat: boolean,
|
||||
} => useAppChatStore.getState();
|
||||
|
||||
export const useChatMicTimeoutMsValue = (): number =>
|
||||
useAppChatStore(state => state.micTimeoutMs);
|
||||
|
||||
export const useChatMicTimeoutMs = (): [number, (micTimeoutMs: number) => void] =>
|
||||
useAppChatStore(state => [state.micTimeoutMs, state.setMicTimeoutMs], shallow);
|
||||
|
||||
export const useChatShowTextDiff = (): [boolean, (showDiff: boolean) => void] =>
|
||||
useAppChatStore(state => [state.showTextDiff, state.setShowTextDiff], shallow);
|
||||
|
||||
|
||||
+37
-14
@@ -1,19 +1,39 @@
|
||||
import * as React from 'react';
|
||||
import { keyframes } from '@emotion/react';
|
||||
import TimeAgo from 'react-timeago';
|
||||
|
||||
import { Box, Button, Card, CardContent, Container, IconButton, Typography } from '@mui/joy';
|
||||
import ExpandMoreIcon from '@mui/icons-material/ExpandMore';
|
||||
|
||||
import { Brand } from '~/common/app.config';
|
||||
import { GoodTooltip } from '~/common/components/GoodTooltip';
|
||||
import { Link } from '~/common/components/Link';
|
||||
import { ROUTE_INDEX } from '~/common/app.routes';
|
||||
import { capitalizeFirstLetter } from '~/common/util/textUtils';
|
||||
|
||||
import { newsCallout, NewsItems } from './news.data';
|
||||
|
||||
// number of news items to show by default, before the expander
|
||||
const DEFAULT_NEWS_COUNT = 2;
|
||||
|
||||
export const cssColorKeyframes = keyframes`
|
||||
0%, 100% {
|
||||
color: #636B74; /* Neutral main color (500) */
|
||||
}
|
||||
25% {
|
||||
color: #12467B; /* Primary darker shade (700) */
|
||||
}
|
||||
50% {
|
||||
color: #0B6BCB; /* Primary main color (500) */
|
||||
}
|
||||
75% {
|
||||
color: #97C3F0; /* Primary lighter shade (300) */
|
||||
}`;
|
||||
|
||||
|
||||
export function AppNews() {
|
||||
// state
|
||||
const [lastNewsIdx, setLastNewsIdx] = React.useState<number>(0);
|
||||
const [lastNewsIdx, setLastNewsIdx] = React.useState<number>(DEFAULT_NEWS_COUNT - 1);
|
||||
|
||||
// news selection
|
||||
const news = NewsItems.filter((_, idx) => idx <= lastNewsIdx);
|
||||
@@ -36,11 +56,11 @@ export function AppNews() {
|
||||
}}>
|
||||
|
||||
<Typography level='h1' sx={{ fontSize: '3rem' }}>
|
||||
Welcome to {Brand.Title.Base} {firstNews?.versionName}!
|
||||
Welcome to {Brand.Title.Base} <Box component='span' sx={{ animation: `${cssColorKeyframes} 10s infinite` }}>{firstNews?.versionCode}</Box>!
|
||||
</Typography>
|
||||
|
||||
<Typography>
|
||||
{capitalizeFirstLetter(Brand.Title.Base)} has been updated to version {firstNews?.versionName}.
|
||||
{capitalizeFirstLetter(Brand.Title.Base)} has been updated to version {firstNews?.versionCode}
|
||||
</Typography>
|
||||
|
||||
<Box>
|
||||
@@ -61,27 +81,30 @@ export function AppNews() {
|
||||
const firstCard = idx === 0;
|
||||
const hasCardAfter = news.length < NewsItems.length;
|
||||
const showExpander = hasCardAfter && (idx === news.length - 1);
|
||||
const addPadding = !firstCard; // || showExpander;
|
||||
const addPadding = false; //!firstCard; // || showExpander;
|
||||
return <Card key={'news-' + idx} sx={{ mb: 2, minHeight: 32 }}>
|
||||
<CardContent sx={{ position: 'relative', pr: addPadding ? 4 : 0 }}>
|
||||
{!!ni.text && <Typography level='title-lg' component='div'>
|
||||
{ni.text}
|
||||
</Typography>}
|
||||
<Box sx={{ display: 'flex', alignItems: 'center', justifyContent: 'space-between', gap: 1 }}>
|
||||
<GoodTooltip title={ni.versionName || null} placement='top-start'>
|
||||
<Typography level='title-sm' component='div' sx={{ flexGrow: 1 }}>
|
||||
{ni.text ? ni.text : ni.versionName ? `${ni.versionCode} · ${ni.versionName}` : `Version ${ni.versionCode}:`}
|
||||
</Typography>
|
||||
</GoodTooltip>
|
||||
{/*!firstCard &&*/ (
|
||||
<Typography level='body-sm'>
|
||||
{!!ni.versionDate && <TimeAgo date={ni.versionDate} />}
|
||||
</Typography>
|
||||
)}
|
||||
</Box>
|
||||
|
||||
{!!ni.items && (ni.items.length > 0) && <ul style={{ marginTop: 8, marginBottom: 8, paddingInlineStart: 24 }}>
|
||||
{ni.items.filter(item => item.dev !== true).map((item, idx) => <li key={idx}>
|
||||
<Typography component='div'>
|
||||
<Typography component='div' level='body-sm'>
|
||||
{item.text}
|
||||
</Typography>
|
||||
</li>)}
|
||||
</ul>}
|
||||
|
||||
{/*!firstCard &&*/ (
|
||||
<Typography level='body-sm' sx={{ position: 'absolute', right: 0, top: 0 }}>
|
||||
{ni.versionName}
|
||||
</Typography>
|
||||
)}
|
||||
|
||||
{showExpander && (
|
||||
<IconButton
|
||||
variant='plain' size='sm'
|
||||
|
||||
+34
-12
@@ -6,10 +6,11 @@ import LaunchIcon from '@mui/icons-material/Launch';
|
||||
import { Brand } from '~/common/app.config';
|
||||
import { Link } from '~/common/components/Link';
|
||||
import { clientUtmSource } from '~/common/util/pwaUtils';
|
||||
import { platformAwareKeystrokes } from '~/common/components/KeyStroke';
|
||||
|
||||
|
||||
// update this variable every time you want to broadcast a new version to clients
|
||||
export const incrementalVersion: number = 7;
|
||||
export const incrementalVersion: number = 8;
|
||||
|
||||
const B = (props: { href?: string, children: React.ReactNode }) => {
|
||||
const boldText = <Typography color={!!props.href ? 'primary' : 'warning'} sx={{ fontWeight: 600 }}>{props.children}</Typography>;
|
||||
@@ -26,7 +27,7 @@ const RIssues = `${OpenRepo}/issues`;
|
||||
export const newsCallout =
|
||||
<Card>
|
||||
<CardContent sx={{ gap: 2 }}>
|
||||
<Typography level='h3'>
|
||||
<Typography level='h4'>
|
||||
Open Roadmap
|
||||
</Typography>
|
||||
<Typography>
|
||||
@@ -66,12 +67,30 @@ export const NewsItems: NewsItem[] = [
|
||||
],
|
||||
},*/
|
||||
{
|
||||
versionName: '1.6.0',
|
||||
text: 'Surf\'s Up in Chat Waves:',
|
||||
versionCode: '1.7.1',
|
||||
versionName: 'Attachment Theory',
|
||||
versionDate: new Date('2023-12-11T06:00:00Z'), // new Date().toISOString()
|
||||
// versionDate: new Date('2023-12-10T12:00:00Z'), // 1.7.0
|
||||
items: [
|
||||
{ text: <>Redesigned <B href={RIssues + '/251'}>attachments system</B>: drag, paste, link, snap, images, text, pdfs</> },
|
||||
{ text: <>Desktop <B href={RIssues + '/253'}>webcam access</B> for direct image capture (Labs option)</> },
|
||||
{ text: <>Independent browsing with <B href={RCode + '/docs/config-browse.md'}>Browserless</B> support</> },
|
||||
{ text: <><B href={RIssues + '/256'}>Overheat</B> LLMs with higher temperature limits</> },
|
||||
{ text: <>Enhanced security via <B href={RCode + '/docs/deploy-authentication.md'}>password protection</B></> },
|
||||
{ text: <>{platformAwareKeystrokes('Ctrl+Shift+O')}: quick access to model options</> },
|
||||
{ text: <>Optimized voice input and performance</> },
|
||||
{ text: <>Latest Ollama and Oobabooga models</> },
|
||||
{ text: <>1.7.1: Improved <B href={RIssues + '/270'}>Ollama chats</B></> },
|
||||
],
|
||||
},
|
||||
{
|
||||
versionCode: '1.6.0',
|
||||
versionName: 'Surf\'s Up',
|
||||
versionDate: new Date('2023-11-28T21:00:00Z'),
|
||||
items: [
|
||||
{ text: <><B href={RIssues + '/237'}>Web Browsing</B> support, see the <B href={RCode + '/docs/config-browse.md'}>browsing user guide</B></> },
|
||||
{ text: <><B href={RIssues + '/235'}>Branching Discussions</B> at any message</> },
|
||||
{ text: <><B href={RIssues + '/207'}>Keyboard Navigation</B>: use Ctrl+Shift+Left/Right to navigate chats</> },
|
||||
{ text: <><B href={RIssues + '/207'}>Keyboard Navigation</B>: use {platformAwareKeystrokes('Ctrl+Shift+Left/Right')} to navigate chats</> },
|
||||
{ text: <><B href={RIssues + '/236'}>UI fixes</B> (thanks to the first sponsor)</> },
|
||||
{ text: <>Added support for Anthropic Claude 2.1</> },
|
||||
{ text: <>Large rendering performance optimization</> },
|
||||
@@ -80,8 +99,9 @@ export const NewsItems: NewsItem[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
versionName: '1.5.0',
|
||||
text: 'Enjoy what\'s new:',
|
||||
versionCode: '1.5.0',
|
||||
versionName: 'Loaded!',
|
||||
versionDate: new Date('2023-11-19T21:00:00Z'),
|
||||
items: [
|
||||
{ text: <><B href={RIssues + '/190'}>Continued Voice</B> for hands-free interaction</> },
|
||||
{ text: <><B href={RIssues + '/192'}>Visualization</B> Tool for data representations</> },
|
||||
@@ -95,7 +115,7 @@ export const NewsItems: NewsItem[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
versionName: '1.4.0',
|
||||
versionCode: '1.4.0',
|
||||
items: [
|
||||
{ text: <><B>Share and clone</B> conversations, with public links</> },
|
||||
{ text: <><B href={RCode + '/docs/config-azure-openai.md'}>Azure</B> models, incl. gpt-4-32k</> },
|
||||
@@ -105,7 +125,7 @@ export const NewsItems: NewsItem[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
versionName: '1.3.5',
|
||||
versionCode: '1.3.5',
|
||||
items: [
|
||||
{ text: <>AI in the real world with <B>Camera OCR</B> - MOBILE-ONLY</> },
|
||||
{ text: <><B>Anthropic</B> models full support</> },
|
||||
@@ -116,7 +136,7 @@ export const NewsItems: NewsItem[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
versionName: '1.3.1',
|
||||
versionCode: '1.3.1',
|
||||
items: [
|
||||
{ text: <><B>Flattener</B> - 4-mode conversations summarizer</> },
|
||||
{ text: <><B>Forking</B> - branch your conversations</> },
|
||||
@@ -126,7 +146,7 @@ export const NewsItems: NewsItem[] = [
|
||||
],
|
||||
},
|
||||
{
|
||||
versionName: '1.2.1',
|
||||
versionCode: '1.2.1',
|
||||
// text: '',
|
||||
items: [
|
||||
{ text: <>New home page: <b><Link href={Brand.URIs.Home + clientUtmSource()} target='_blank'>{Brand.URIs.Home.replace('https://', '')}</Link></b></> },
|
||||
@@ -138,7 +158,9 @@ export const NewsItems: NewsItem[] = [
|
||||
|
||||
|
||||
interface NewsItem {
|
||||
versionName: string;
|
||||
versionCode: string;
|
||||
versionName?: string;
|
||||
versionDate?: Date;
|
||||
text?: string | React.JSX.Element;
|
||||
items?: {
|
||||
text: string | React.JSX.Element;
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
|
||||
import { Button, FormControl, Radio, RadioGroup, Switch } from '@mui/joy';
|
||||
import { Button, FormControl, Switch } from '@mui/joy';
|
||||
import BuildCircleIcon from '@mui/icons-material/BuildCircle';
|
||||
import WidthNormalIcon from '@mui/icons-material/WidthNormal';
|
||||
import WidthWideIcon from '@mui/icons-material/WidthWide';
|
||||
|
||||
import { FormLabelStart } from '~/common/components/forms/FormLabelStart';
|
||||
import { FormRadioControl } from '~/common/components/forms/FormRadioControl';
|
||||
import { isPwa } from '~/common/util/pwaUtils';
|
||||
import { openLayoutModelsSetup } from '~/common/layout/store-applayout';
|
||||
import { useIsMobile } from '~/common/components/useMatchMedia';
|
||||
@@ -50,14 +51,10 @@ export function AppChatSettingsUI() {
|
||||
zenMode: state.zenMode, setZenMode: state.setZenMode,
|
||||
}), shallow);
|
||||
|
||||
const handleCenterModeChange = (event: React.ChangeEvent<HTMLInputElement>) => setCenterMode(event.target.value as 'narrow' | 'wide' | 'full' || 'wide');
|
||||
|
||||
const handleEnterIsNewlineChange = (event: React.ChangeEvent<HTMLInputElement>) => setEnterIsNewline(!event.target.checked);
|
||||
|
||||
const handleDoubleClickToEditChange = (event: React.ChangeEvent<HTMLInputElement>) => setDoubleClickToEdit(event.target.checked);
|
||||
|
||||
const handleZenModeChange = (event: React.ChangeEvent<HTMLInputElement>) => setZenMode(event.target.value as 'clean' | 'cleaner');
|
||||
|
||||
const handleRenderMarkdownChange = (event: React.ChangeEvent<HTMLInputElement>) => setRenderMarkdown(event.target.checked);
|
||||
|
||||
const handleShowSearchBarChange = (event: React.ChangeEvent<HTMLInputElement>) => setShowPurposeFinder(event.target.checked);
|
||||
@@ -102,25 +99,27 @@ export function AppChatSettingsUI() {
|
||||
slotProps={{ endDecorator: { sx: { minWidth: 26 } } }} />
|
||||
</FormControl>}
|
||||
|
||||
<FormControl orientation='horizontal' sx={{ alignItems: 'center', justifyContent: 'space-between' }}>
|
||||
<FormLabelStart title='Appearance'
|
||||
description={zenMode === 'clean' ? 'Show senders' : 'Minimal UI'} />
|
||||
<RadioGroup orientation='horizontal' value={zenMode} onChange={handleZenModeChange}>
|
||||
{/*<Radio value='clean' label={<Face6Icon sx={{ width: 24, height: 24, mt: -0.25 }} />} />*/}
|
||||
<Radio value='clean' label='Clean' />
|
||||
<Radio value='cleaner' label='Zen' />
|
||||
</RadioGroup>
|
||||
</FormControl>
|
||||
<FormRadioControl
|
||||
title='Appearance'
|
||||
description={zenMode === 'clean' ? 'Show senders' : 'Minimal UI'}
|
||||
options={[
|
||||
{ label: 'Clean', value: 'clean' },
|
||||
{ label: 'Zen', value: 'cleaner' },
|
||||
]}
|
||||
value={zenMode} onChange={setZenMode} />
|
||||
|
||||
{!isPwa() && !isMobile && <FormControl orientation='horizontal' sx={{ alignItems: 'center', justifyContent: 'space-between' }}>
|
||||
<FormLabelStart title='Page Size'
|
||||
description={centerMode === 'full' ? 'Full screen chat' : centerMode === 'narrow' ? 'Narrow chat' : 'Wide'} />
|
||||
<RadioGroup orientation='horizontal' value={centerMode} onChange={handleCenterModeChange}>
|
||||
<Radio value='narrow' label={<WidthNormalIcon sx={{ width: 25, height: 24, mt: -0.25 }} />} />
|
||||
<Radio value='wide' label={<WidthWideIcon sx={{ width: 25, height: 24, mt: -0.25 }} />} />
|
||||
<Radio value='full' label='Full' />
|
||||
</RadioGroup>
|
||||
</FormControl>}
|
||||
{!isPwa() && !isMobile && (
|
||||
<FormRadioControl
|
||||
title='Page Size'
|
||||
description={centerMode === 'full' ? 'Full screen chat' : centerMode === 'narrow' ? 'Narrow chat' : 'Wide'}
|
||||
options={[
|
||||
{ value: 'narrow', label: <WidthNormalIcon sx={{ width: 25, height: 24, mt: -0.25 }} /> },
|
||||
{ value: 'wide', label: <WidthWideIcon sx={{ width: 25, height: 24, mt: -0.25 }} /> },
|
||||
{ value: 'full', label: 'Full' },
|
||||
]}
|
||||
value={centerMode} onChange={setCenterMode}
|
||||
/>
|
||||
)}
|
||||
|
||||
</>;
|
||||
}
|
||||
|
||||
@@ -26,8 +26,9 @@ const shortcutsMd = `
|
||||
| Ctrl + Alt + D | **Delete** chat |
|
||||
| Ctrl + Alt + B | **Branch** chat |
|
||||
| **Settings** | |
|
||||
| Ctrl + Shift + M | 🧠 Models |
|
||||
| Ctrl + Shift + P | ⚙️ Preferences |
|
||||
| Ctrl + Shift + M | 🧠 Models |
|
||||
| Ctrl + Shift + O | Options (current Chat Model) |
|
||||
| Ctrl + Shift + ? | Shortcuts |
|
||||
|
||||
`.trim();
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { FormControl, Typography } from '@mui/joy';
|
||||
import AddAPhotoIcon from '@mui/icons-material/AddAPhoto';
|
||||
import CallIcon from '@mui/icons-material/Call';
|
||||
import FormatPaintIcon from '@mui/icons-material/FormatPaint';
|
||||
import VerticalSplitIcon from '@mui/icons-material/VerticalSplit';
|
||||
@@ -9,36 +10,43 @@ import YouTubeIcon from '@mui/icons-material/YouTube';
|
||||
import { FormLabelStart } from '~/common/components/forms/FormLabelStart';
|
||||
import { FormSwitchControl } from '~/common/components/forms/FormSwitchControl';
|
||||
import { Link } from '~/common/components/Link';
|
||||
import { useIsMobile } from '~/common/components/useMatchMedia';
|
||||
import { useUXLabsStore } from '~/common/state/store-ux-labs';
|
||||
|
||||
|
||||
export function UxLabsSettings() {
|
||||
|
||||
// external state
|
||||
const isMobile = useIsMobile();
|
||||
const {
|
||||
labsCalling, /*labsEnhancedUI,*/ labsMagicDraw, labsPersonaYTCreator, labsSplitBranching,
|
||||
setLabsCalling, /*setLabsEnhancedUI,*/ setLabsMagicDraw, setLabsPersonaYTCreator, setLabsSplitBranching,
|
||||
labsCalling, labsCameraDesktop, /*labsEnhancedUI,*/ labsMagicDraw, labsPersonaYTCreator, labsSplitBranching,
|
||||
setLabsCalling, setLabsCameraDesktop, /*setLabsEnhancedUI,*/ setLabsMagicDraw, setLabsPersonaYTCreator, setLabsSplitBranching,
|
||||
} = useUXLabsStore();
|
||||
|
||||
return <>
|
||||
|
||||
<FormSwitchControl
|
||||
title={<><YouTubeIcon /> YouTube Personas</>} description={labsPersonaYTCreator ? 'Creator Enabled' : 'Disabled'}
|
||||
title={<><YouTubeIcon color={labsPersonaYTCreator ? 'primary' : undefined} sx={{ mr: 0.25 }} /> YouTube Personas</>} description={labsPersonaYTCreator ? 'Creator Enabled' : 'Disabled'}
|
||||
checked={labsPersonaYTCreator} onChange={setLabsPersonaYTCreator}
|
||||
/>
|
||||
|
||||
<FormSwitchControl
|
||||
title={<><FormatPaintIcon />Assisted Draw</>} description={labsMagicDraw ? 'Enabled' : 'Disabled'}
|
||||
title={<><FormatPaintIcon color={labsMagicDraw ? 'primary' : undefined} sx={{ mr: 0.25 }} />Assisted Draw</>} description={labsMagicDraw ? 'Enabled' : 'Disabled'}
|
||||
checked={labsMagicDraw} onChange={setLabsMagicDraw}
|
||||
/>
|
||||
|
||||
<FormSwitchControl
|
||||
title={<><CallIcon /> Voice Calls</>} description={labsCalling ? 'Call AGI' : 'Disabled'}
|
||||
title={<><CallIcon color={labsCalling ? 'primary' : undefined} sx={{ mr: 0.25 }} /> Voice Calls</>} description={labsCalling ? 'Call AGI' : 'Disabled'}
|
||||
checked={labsCalling} onChange={setLabsCalling}
|
||||
/>
|
||||
|
||||
{!isMobile && <FormSwitchControl
|
||||
title={<><AddAPhotoIcon color={labsCameraDesktop ? 'primary' : undefined} sx={{ mr: 0.25 }} /> Webcam</>} description={labsCameraDesktop ? 'Enabled' : 'Disabled'}
|
||||
checked={labsCameraDesktop} onChange={setLabsCameraDesktop}
|
||||
/>}
|
||||
|
||||
<FormSwitchControl
|
||||
title={<><VerticalSplitIcon /> Split Branching</>} description={labsSplitBranching ? 'Enabled' : 'Disabled'} disabled
|
||||
title={<><VerticalSplitIcon color={labsSplitBranching ? 'primary' : undefined} sx={{ mr: 0.25 }} /> Split Branching</>} description={labsSplitBranching ? 'Enabled' : 'Disabled'} disabled
|
||||
checked={labsSplitBranching} onChange={setLabsSplitBranching}
|
||||
/>
|
||||
|
||||
@@ -50,7 +58,7 @@ export function UxLabsSettings() {
|
||||
<FormControl orientation='horizontal' sx={{ justifyContent: 'space-between', alignItems: 'center' }}>
|
||||
<FormLabelStart title='Graduated' />
|
||||
<Typography level='body-xs'>
|
||||
<Link href='https://github.com/enricoros/big-agi/issues/192' target='_blank'>Auto Diagrams</Link> · Relative chat size · Text Tools
|
||||
<Link href='https://github.com/enricoros/big-agi/issues/192' target='_blank'>Auto Diagrams</Link> · Relative chat size · Text Tools · LLM Overheat
|
||||
</Typography>
|
||||
</FormControl>
|
||||
|
||||
|
||||
@@ -1,23 +1,29 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { FormControl, Radio, RadioGroup } from '@mui/joy';
|
||||
import { FormControl } from '@mui/joy';
|
||||
|
||||
import { ChatAutoSpeakType, useChatAutoAI } from '../chat/store-app-chat';
|
||||
import { useChatAutoAI, useChatMicTimeoutMs } from '../chat/store-app-chat';
|
||||
|
||||
import { useElevenLabsVoices } from '~/modules/elevenlabs/useElevenLabsVoiceDropdown';
|
||||
|
||||
import { FormLabelStart } from '~/common/components/forms/FormLabelStart';
|
||||
import { FormRadioControl } from '~/common/components/forms/FormRadioControl';
|
||||
import { LanguageSelect } from '~/common/components/LanguageSelect';
|
||||
import { useIsMobile } from '~/common/components/useMatchMedia';
|
||||
|
||||
|
||||
export function VoiceSettings() {
|
||||
|
||||
// external state
|
||||
const isMobile = useIsMobile();
|
||||
const { autoSpeak, setAutoSpeak } = useChatAutoAI();
|
||||
const { hasVoices } = useElevenLabsVoices();
|
||||
const [chatTimeoutMs, setChatTimeoutMs] = useChatMicTimeoutMs();
|
||||
|
||||
|
||||
const handleAutoSpeakChange = (e: React.ChangeEvent<HTMLInputElement>) => setAutoSpeak((e.target.value || 'off') as ChatAutoSpeakType);
|
||||
// this converts from string keys to numbers and vice versa
|
||||
const chatTimeoutValue: string = '' + chatTimeoutMs;
|
||||
const setChatTimeoutValue = (value: string) => value && setChatTimeoutMs(parseInt(value));
|
||||
|
||||
return <>
|
||||
|
||||
@@ -29,16 +35,29 @@ export function VoiceSettings() {
|
||||
<LanguageSelect />
|
||||
</FormControl>
|
||||
|
||||
<FormControl orientation='horizontal' sx={{ alignItems: 'center', justifyContent: 'space-between' }}>
|
||||
<FormLabelStart title='Speak Responses'
|
||||
description={autoSpeak === 'off' ? 'Off' : 'First paragraph'}
|
||||
tooltip={!hasVoices ? 'No voices available, please configure a voice synthesis service' : undefined} />
|
||||
<RadioGroup orientation='horizontal' value={autoSpeak} onChange={handleAutoSpeakChange}>
|
||||
<Radio disabled={!hasVoices} value='off' label='Off' />
|
||||
<Radio disabled={!hasVoices} value='firstLine' label='Start' />
|
||||
<Radio disabled={!hasVoices} value='all' label='Full' />
|
||||
</RadioGroup>
|
||||
</FormControl>
|
||||
{!isMobile && <FormRadioControl
|
||||
title='Mic Timeout'
|
||||
description={chatTimeoutMs < 1000 ? 'Best for quick calls' : chatTimeoutMs > 5000 ? 'Best for thinking' : 'Standard'}
|
||||
options={[
|
||||
{ value: '600', label: '.6s' },
|
||||
{ value: '2000', label: '2s' },
|
||||
{ value: '15000', label: '15s' },
|
||||
]}
|
||||
value={chatTimeoutValue} onChange={setChatTimeoutValue}
|
||||
/>}
|
||||
|
||||
<FormRadioControl
|
||||
title='Speak Responses'
|
||||
description={autoSpeak === 'off' ? 'Off' : 'First paragraph'}
|
||||
tooltip={!hasVoices ? 'No voices available, please configure a voice synthesis service' : undefined}
|
||||
disabled={!hasVoices}
|
||||
options={[
|
||||
{ value: 'off', label: 'Off' },
|
||||
{ value: 'firstLine', label: 'Start' },
|
||||
{ value: 'all', label: 'Full' },
|
||||
]}
|
||||
value={autoSpeak} onChange={setAutoSpeak}
|
||||
/>
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -46,6 +46,7 @@ export const appTheme = extendTheme({
|
||||
text: {
|
||||
icon: 'var(--joy-palette-neutral-700)', // <IconButton color='neutral' /> icon color
|
||||
secondary: 'var(--joy-palette-neutral-800)', // increase contrast a bit
|
||||
// tertiary: 'var(--joy-palette-neutral-700)', // increase contrast a bit
|
||||
},
|
||||
// popup [white] > surface [50] > level1 [100] > level2 [200] > level3 [300] > body [white -> 400]
|
||||
background: {
|
||||
|
||||
@@ -2,7 +2,7 @@ import * as React from 'react';
|
||||
import { KeyboardEvent } from 'react';
|
||||
|
||||
import { ClickAwayListener, Popper, PopperPlacementType } from '@mui/base';
|
||||
import { MenuList, styled, VariantProp } from '@mui/joy';
|
||||
import { MenuList, styled } from '@mui/joy';
|
||||
import { SxProps } from '@mui/joy/styles/types';
|
||||
|
||||
|
||||
@@ -23,7 +23,8 @@ const Popup = styled(Popper)({
|
||||
*/
|
||||
export function CloseableMenu(props: {
|
||||
open: boolean, anchorEl: HTMLElement | null, onClose: () => void,
|
||||
variant?: VariantProp,
|
||||
dense?: boolean,
|
||||
// variant?: VariantProp,
|
||||
// color?: ColorPaletteProp,
|
||||
// size?: 'sm' | 'md' | 'lg',
|
||||
placement?: PopperPlacementType,
|
||||
@@ -71,13 +72,12 @@ export function CloseableMenu(props: {
|
||||
>
|
||||
<ClickAwayListener onClickAway={handleClose}>
|
||||
<MenuList
|
||||
variant={props.variant}
|
||||
// color={props.color}
|
||||
// variant={props.variant} color={props.color}
|
||||
onKeyDown={handleListKeyDown}
|
||||
sx={{
|
||||
'--Icon-fontSize': 'var(--joy-fontSize-xl2)',
|
||||
'--ListItem-minHeight': '3rem',
|
||||
'--ListItemDecorator-size': '2.75rem',
|
||||
'--ListItem-minHeight': props.dense ? '2.5rem' : '3rem',
|
||||
'--ListItemDecorator-size': '2.75rem', // icon width
|
||||
backgroundColor: 'background.popup',
|
||||
boxShadow: 'md',
|
||||
...(props.maxHeightGapPx !== undefined ? { maxHeight: `calc(100dvh - ${props.maxHeightGapPx}px)`, overflowY: 'auto' } : {}),
|
||||
|
||||
@@ -1,39 +1,41 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, Divider, Modal, ModalDialog, Typography } from '@mui/joy';
|
||||
import { Box, Button, Divider, Typography } from '@mui/joy';
|
||||
import WarningRoundedIcon from '@mui/icons-material/WarningRounded';
|
||||
|
||||
import { GoodModal } from '~/common/components/GoodModal';
|
||||
|
||||
|
||||
/**
|
||||
* A confirmation dialog (Joy Modal)
|
||||
* Pass the question and the positive answer, and get called when it's time to close the dialog, or when the positive action is taken
|
||||
*/
|
||||
export function ConfirmationModal(props: {
|
||||
open: boolean, onClose: () => void, onPositive: () => void,
|
||||
open?: boolean, onClose: () => void, onPositive: () => void,
|
||||
title?: string | React.JSX.Element,
|
||||
confirmationText: string | React.JSX.Element,
|
||||
positiveActionText: string
|
||||
}) {
|
||||
return (
|
||||
<Modal open={props.open} onClose={props.onClose}>
|
||||
<ModalDialog variant='outlined' color='neutral'>
|
||||
<Typography component='h2' startDecorator={<WarningRoundedIcon />}>
|
||||
{props.title || 'Confirmation'}
|
||||
</Typography>
|
||||
{/*<ModalClose/>*/}
|
||||
<Divider sx={{ my: 2 }} />
|
||||
<Typography level='body-md'>
|
||||
{props.confirmationText}
|
||||
</Typography>
|
||||
<Box sx={{ display: 'flex', gap: 1, justifyContent: 'flex-end', mt: 2 }}>
|
||||
<Button variant='plain' color='neutral' onClick={props.onClose}>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button variant='solid' color='danger' onClick={props.onPositive} sx={{ lineHeight: '1.5em' }}>
|
||||
{props.positiveActionText}
|
||||
</Button>
|
||||
</Box>
|
||||
</ModalDialog>
|
||||
</Modal>
|
||||
<GoodModal
|
||||
open={props.open === undefined ? true : props.open}
|
||||
title={props.title || 'Confirmation'}
|
||||
titleStartDecorator={<WarningRoundedIcon sx={{ color: 'danger.solidBg' }} />}
|
||||
onClose={props.onClose}
|
||||
hideBottomClose
|
||||
>
|
||||
<Divider />
|
||||
<Typography level='body-md'>
|
||||
{props.confirmationText}
|
||||
</Typography>
|
||||
<Box sx={{ display: 'flex', gap: 1, justifyContent: 'flex-end', mt: 2 }}>
|
||||
<Button autoFocus variant='plain' color='neutral' onClick={props.onClose}>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button variant='solid' color='danger' onClick={props.onPositive} sx={{ lineHeight: '1.5em' }}>
|
||||
{props.positiveActionText}
|
||||
</Button>
|
||||
</Box>
|
||||
</GoodModal>
|
||||
);
|
||||
}
|
||||
@@ -9,14 +9,18 @@ import { SxProps } from '@mui/joy/styles/types';
|
||||
*/
|
||||
export function GoodModal(props: {
|
||||
title?: string | React.JSX.Element,
|
||||
strongerTitle?: boolean, noTitleBar?: boolean,
|
||||
titleStartDecorator?: React.JSX.Element,
|
||||
strongerTitle?: boolean,
|
||||
noTitleBar?: boolean,
|
||||
dividers?: boolean,
|
||||
open: boolean,
|
||||
onClose?: () => void,
|
||||
hideBottomClose?: boolean,
|
||||
startButton?: React.JSX.Element,
|
||||
sx?: SxProps,
|
||||
children: React.ReactNode,
|
||||
}) {
|
||||
const showBottomClose = !!props.onClose && props.hideBottomClose !== true;
|
||||
return (
|
||||
<Modal open={props.open} onClose={props.onClose}>
|
||||
<ModalOverflow>
|
||||
@@ -29,7 +33,7 @@ export function GoodModal(props: {
|
||||
}}>
|
||||
|
||||
{!props.noTitleBar && <Box sx={{ mb: -1, display: 'flex', flexDirection: 'row', alignItems: 'center', justifyContent: 'space-between' }}>
|
||||
<Typography level={props.strongerTitle !== true ? 'title-md' : 'title-lg'}>
|
||||
<Typography level={props.strongerTitle !== true ? 'title-md' : 'title-lg'} startDecorator={props.titleStartDecorator}>
|
||||
{props.title || ''}
|
||||
</Typography>
|
||||
{!!props.onClose && <ModalClose sx={{ position: 'static', mr: -1 }} />}
|
||||
@@ -41,9 +45,9 @@ export function GoodModal(props: {
|
||||
|
||||
{props.dividers === true && <Divider />}
|
||||
|
||||
{(!!props.startButton || !!props.onClose) && <Box sx={{ mt: 'auto', display: 'flex', flexWrap: 'wrap', gap: 1, justifyContent: 'space-between' }}>
|
||||
{(!!props.startButton || showBottomClose) && <Box sx={{ mt: 'auto', display: 'flex', flexWrap: 'wrap', gap: 1, justifyContent: 'space-between' }}>
|
||||
{props.startButton}
|
||||
{!!props.onClose && <Button variant='solid' color='neutral' onClick={props.onClose} sx={{ ml: 'auto', minWidth: 100 }}>
|
||||
{showBottomClose && <Button variant='solid' color='neutral' onClick={props.onClose} sx={{ ml: 'auto', minWidth: 100 }}>
|
||||
Close
|
||||
</Button>}
|
||||
</Box>}
|
||||
|
||||
@@ -1,12 +1,25 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Tooltip } from '@mui/joy';
|
||||
import { SxProps } from '@mui/joy/styles/types';
|
||||
|
||||
|
||||
/**
|
||||
* Tooltip with text that wraps to multiple lines (doesn't go too long)
|
||||
*/
|
||||
export const GoodTooltip = (props: { title: string | React.JSX.Element, children: React.JSX.Element }) =>
|
||||
<Tooltip title={props.title} sx={{ maxWidth: { sm: '50vw', md: '25vw' } }}>
|
||||
export const GoodTooltip = (props: {
|
||||
title: string | React.JSX.Element | null,
|
||||
placement?: 'top' | 'bottom' | 'top-start',
|
||||
isError?: boolean, isWarning?: boolean,
|
||||
children: React.JSX.Element,
|
||||
sx?: SxProps
|
||||
}) =>
|
||||
<Tooltip
|
||||
title={props.title}
|
||||
placement={props.placement}
|
||||
variant={(props.isError || props.isWarning) ? 'soft' : undefined}
|
||||
color={props.isError ? 'danger' : props.isWarning ? 'warning' : undefined}
|
||||
sx={{ maxWidth: { sm: '50vw', md: '25vw' }, ...props.sx }}
|
||||
>
|
||||
{props.children}
|
||||
</Tooltip>;
|
||||
|
||||
@@ -23,7 +23,7 @@ export const FormLabelStart = (props: {
|
||||
<FormLabel
|
||||
onClick={props.onClick}
|
||||
sx={{
|
||||
width: settingsCol1Width,
|
||||
minWidth: settingsCol1Width,
|
||||
...(!!props.onClick && { cursor: 'pointer', textDecoration: 'underline' }),
|
||||
...props.sx,
|
||||
}}
|
||||
|
||||
@@ -0,0 +1,36 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { FormControl, Radio, RadioGroup } from '@mui/joy';
|
||||
|
||||
|
||||
import { FormLabelStart } from './FormLabelStart';
|
||||
|
||||
|
||||
export type FormRadioOption<T extends string> = {
|
||||
value: T,
|
||||
label: string | React.JSX.Element,
|
||||
disabled?: boolean
|
||||
};
|
||||
|
||||
|
||||
export const FormRadioControl = <TValue extends string>(props: {
|
||||
title: string | React.JSX.Element,
|
||||
description?: string | React.JSX.Element,
|
||||
tooltip?: string | React.JSX.Element,
|
||||
disabled?: boolean;
|
||||
options: FormRadioOption<TValue>[];
|
||||
value: TValue;
|
||||
onChange: (value: TValue) => void;
|
||||
}) =>
|
||||
<FormControl orientation='horizontal' disabled={props.disabled} sx={{ justifyContent: 'space-between', alignItems: 'center' }}>
|
||||
{(!!props.title || !!props.description) && <FormLabelStart title={props.title} description={props.description} tooltip={props.tooltip} />}
|
||||
<RadioGroup
|
||||
orientation='horizontal'
|
||||
value={props.value}
|
||||
onChange={(event: React.ChangeEvent<HTMLInputElement>) => event.target.value && props.onChange(event.target.value as TValue)}
|
||||
>
|
||||
{props.options.map((option) =>
|
||||
<Radio key={'opt-' + option.value} value={option.value} label={option.label} disabled={option.disabled || props.disabled} />,
|
||||
)}
|
||||
</RadioGroup>
|
||||
</FormControl>;
|
||||
@@ -13,6 +13,7 @@ export function FormSliderControl(props: {
|
||||
min?: number, max?: number, step?: number, defaultValue?: number,
|
||||
valueLabelDisplay?: 'on' | 'auto' | 'off',
|
||||
value: number, onChange: (value: number) => void,
|
||||
endAdornment?: React.ReactNode,
|
||||
}) {
|
||||
return (
|
||||
<FormControl orientation='horizontal' sx={{ justifyContent: 'space-between', alignItems: 'center' }}>
|
||||
@@ -24,6 +25,7 @@ export function FormSliderControl(props: {
|
||||
valueLabelDisplay={props.valueLabelDisplay}
|
||||
// sx={{ py: 1, mt: 1.1 }}
|
||||
/>
|
||||
{props.endAdornment}
|
||||
</FormControl>
|
||||
);
|
||||
}
|
||||
@@ -2,8 +2,7 @@ import * as React from 'react';
|
||||
|
||||
import { FormControl, FormLabel, Radio, RadioGroup } from '@mui/joy';
|
||||
|
||||
|
||||
export type FormRadioOption<T extends string> = { label: string, value: T, disabled?: boolean };
|
||||
import { FormRadioOption } from './FormRadioControl';
|
||||
|
||||
|
||||
/**
|
||||
|
||||
@@ -3,7 +3,8 @@ import { shallow } from 'zustand/shallow';
|
||||
|
||||
import { DLLM, useModelsStore } from '~/modules/llms/store-llms';
|
||||
|
||||
import { FormRadioOption, useFormRadio } from '~/common/components/forms/useFormRadio';
|
||||
import { FormRadioOption } from './FormRadioControl';
|
||||
import { useFormRadio } from './useFormRadio';
|
||||
|
||||
|
||||
type LlmType = 'chat' | 'fast';
|
||||
|
||||
@@ -3,7 +3,6 @@ import * as React from 'react';
|
||||
import { isBrowser, isChromeDesktop, isIPhoneUser } from '~/common/util/pwaUtils';
|
||||
|
||||
import { CapabilityBrowserSpeechRecognition } from './useCapabilities';
|
||||
import { useGlobalShortcut } from './useGlobalShortcut';
|
||||
import { useUIPreferencesStore } from '../state/store-ui';
|
||||
|
||||
|
||||
@@ -13,7 +12,8 @@ type DoneReason =
|
||||
| 'continuous-deadline' // we hit our `softStopTimeout` while listening continuously
|
||||
| 'api-unknown-timeout' // a timeout has occurred
|
||||
| 'api-error' // underlying .onerror
|
||||
| 'api-no-speech'; // underlying .onerror, user did not speak
|
||||
| 'api-no-speech' // underlying .onerror, user did not speak
|
||||
| 'react-unmount'; // the component is unmounting - the App shall never see this (set on unmount and not transmitted)
|
||||
|
||||
export interface SpeechResult {
|
||||
transcript: string; // the portion of the transcript that is finalized (or all the transcript if done)
|
||||
@@ -43,36 +43,51 @@ export const browserSpeechRecognitionCapability = (): CapabilityBrowserSpeechRec
|
||||
* We use a hook to default to 'false/null' and dynamically create the engine and update the UI.
|
||||
* @param onResultCallback - the callback to invoke when a result is received
|
||||
* @param softStopTimeout - FOR INTERIM LISTENING, on desktop: delay since the last word before sending the final result
|
||||
* @param useShortcutCtrlKey - the key to use as a shortcut to start/stop the speech recognition (e.g. 'm' for "Ctrl + M")
|
||||
*/
|
||||
export const useSpeechRecognition = (onResultCallback: (result: SpeechResult) => void, softStopTimeout: number, useShortcutCtrlKey: string | false) => {
|
||||
export const useSpeechRecognition = (onResultCallback: (result: SpeechResult) => void, softStopTimeout: number) => {
|
||||
|
||||
// external state (will update this function when changed)
|
||||
const preferredLanguage = useUIPreferencesStore(state => state.preferredLanguage);
|
||||
|
||||
// enablers
|
||||
const refRecognition = React.useRef<SpeechRecoControls | null>(null);
|
||||
const onResultCallbackRef = React.useRef(onResultCallback);
|
||||
const softStopTimeoutRef = React.useRef<number>(softStopTimeout);
|
||||
const speechControlsRef = React.useRef<SpeechRecoControls | null>(null);
|
||||
const preferredLanguageRef = React.useRef<string>(preferredLanguage);
|
||||
|
||||
// session
|
||||
const [isSpeechEnabled, setIsSpeechEnabled] = React.useState<boolean>(false);
|
||||
const refStarted = React.useRef<boolean>(false);
|
||||
const [isSpeechEnabled, setIsSpeechEnabled] = React.useState<boolean>(false);
|
||||
const [isRecording, setIsRecording] = React.useState<boolean>(false);
|
||||
const [isRecordingAudio, setIsRecordingAudio] = React.useState<boolean>(false);
|
||||
const [isRecordingSpeech, setIsRecordingSpeech] = React.useState<boolean>(false);
|
||||
const [isSpeechError, setIsSpeechError] = React.useState<boolean>(false);
|
||||
|
||||
// external state (will update this function when changed)
|
||||
const preferredLanguage = useUIPreferencesStore(state => state.preferredLanguage);
|
||||
|
||||
// Update the ref each time the component calling the hook re-renders with a new callback
|
||||
// Sync refs with external state (callback, soft timeout, language)
|
||||
React.useEffect(() => {
|
||||
// update callback
|
||||
onResultCallbackRef.current = onResultCallback;
|
||||
}, [onResultCallback]);
|
||||
|
||||
// update soft stop timeout
|
||||
softStopTimeoutRef.current = softStopTimeout;
|
||||
|
||||
// update language on the running instance (requires an instance method invocation)
|
||||
if (preferredLanguageRef.current !== preferredLanguage) {
|
||||
preferredLanguageRef.current = preferredLanguage;
|
||||
if (speechControlsRef.current)
|
||||
speechControlsRef.current.setLang(preferredLanguage);
|
||||
}
|
||||
}, [onResultCallback, softStopTimeout, preferredLanguage]);
|
||||
|
||||
|
||||
// create the Recognition engine
|
||||
React.useEffect(() => {
|
||||
if (!isBrowser) return;
|
||||
|
||||
// do not re-initialize, just update the language (if we're here there's a high chance the language has changed)
|
||||
if (refRecognition.current) {
|
||||
refRecognition.current.setLang(preferredLanguage);
|
||||
if (speechControlsRef.current) {
|
||||
console.warn('useSpeechRecognition: [dev-warn]: Speech recognition is already initialized.');
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -86,7 +101,15 @@ export const useSpeechRecognition = (onResultCallback: (result: SpeechResult) =>
|
||||
if (!webSpeechAPI)
|
||||
return;
|
||||
|
||||
// local memory within a session
|
||||
|
||||
// create the SpeechRecognition instance
|
||||
const instance = new webSpeechAPI();
|
||||
instance.lang = preferredLanguageRef.current;
|
||||
instance.interimResults = isChromeDesktop && softStopTimeoutRef.current > 0;
|
||||
instance.maxAlternatives = 1;
|
||||
instance.continuous = true;
|
||||
|
||||
// result within closure
|
||||
const speechResult: SpeechResult = {
|
||||
transcript: '',
|
||||
interimTranscript: '',
|
||||
@@ -94,13 +117,6 @@ export const useSpeechRecognition = (onResultCallback: (result: SpeechResult) =>
|
||||
doneReason: undefined,
|
||||
};
|
||||
|
||||
const instance = new webSpeechAPI();
|
||||
instance.lang = preferredLanguage;
|
||||
instance.interimResults = isChromeDesktop && softStopTimeout > 0;
|
||||
instance.maxAlternatives = 1;
|
||||
instance.continuous = true;
|
||||
|
||||
// soft inactivity timer
|
||||
let inactivityTimeoutId: any | null = null;
|
||||
|
||||
const clearInactivityTimeout = () => {
|
||||
@@ -119,6 +135,7 @@ export const useSpeechRecognition = (onResultCallback: (result: SpeechResult) =>
|
||||
}, timeoutMs);
|
||||
};
|
||||
|
||||
|
||||
instance.onaudiostart = () => setIsRecordingAudio(true);
|
||||
|
||||
instance.onaudioend = () => setIsRecordingAudio(false);
|
||||
@@ -137,7 +154,7 @@ export const useSpeechRecognition = (onResultCallback: (result: SpeechResult) =>
|
||||
onResultCallbackRef.current(speechResult);
|
||||
// let the system handle the first stop (as long as possible)
|
||||
// if (instance.interimResults)
|
||||
// reloadInactivityTimeout(2 * softStopTimeout);
|
||||
// reloadInactivityTimeout(2 * softStopTimeoutRef.current);
|
||||
};
|
||||
|
||||
instance.onend = () => {
|
||||
@@ -197,35 +214,66 @@ export const useSpeechRecognition = (onResultCallback: (result: SpeechResult) =>
|
||||
|
||||
// auto-stop
|
||||
if (instance.interimResults)
|
||||
reloadInactivityTimeout(softStopTimeout, 'continuous-deadline');
|
||||
reloadInactivityTimeout(softStopTimeoutRef.current, 'continuous-deadline');
|
||||
};
|
||||
|
||||
|
||||
// store the control interface
|
||||
refRecognition.current = {
|
||||
speechControlsRef.current = {
|
||||
setLang: (lang: string) => instance.lang = lang,
|
||||
start: () => instance.start(),
|
||||
stop: (reason: DoneReason) => {
|
||||
speechResult.doneReason = reason;
|
||||
instance.stop();
|
||||
},
|
||||
unmount: () => {
|
||||
// Clear any inactivity timeout to prevent it from running after unmount
|
||||
clearInactivityTimeout();
|
||||
|
||||
// Explicitly remove event listeners
|
||||
instance.onaudiostart = undefined;
|
||||
instance.onaudioend = undefined;
|
||||
instance.onspeechstart = undefined;
|
||||
instance.onspeechend = undefined;
|
||||
instance.onstart = undefined;
|
||||
instance.onend = undefined;
|
||||
instance.onerror = undefined;
|
||||
instance.onresult = undefined;
|
||||
|
||||
// Stop the recognition if it's running
|
||||
if (refStarted.current) {
|
||||
speechResult.doneReason = 'react-unmount';
|
||||
instance.stop();
|
||||
refStarted.current = false;
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
refStarted.current = false;
|
||||
setIsSpeechEnabled(true);
|
||||
|
||||
}, [preferredLanguage, softStopTimeout]);
|
||||
// Cleanup function to play well when the component unmounts
|
||||
return () => {
|
||||
if (speechControlsRef.current) {
|
||||
speechControlsRef.current.unmount();
|
||||
speechControlsRef.current = null;
|
||||
}
|
||||
setIsSpeechEnabled(false);
|
||||
};
|
||||
}, []);
|
||||
|
||||
|
||||
// ACTIONS: start/stop recording
|
||||
|
||||
const startRecording = React.useCallback(() => {
|
||||
if (!refRecognition.current)
|
||||
if (!speechControlsRef.current)
|
||||
return console.error('startRecording: Speech recognition is not supported or not initialized.');
|
||||
if (refStarted.current)
|
||||
return console.error('startRecording: Start recording called while already recording.');
|
||||
|
||||
setIsSpeechError(false);
|
||||
try {
|
||||
refRecognition.current.start();
|
||||
speechControlsRef.current.start();
|
||||
} catch (error: any) {
|
||||
setIsSpeechError(true);
|
||||
console.log('Speech recognition error - clicking too quickly?', error?.message);
|
||||
@@ -233,12 +281,12 @@ export const useSpeechRecognition = (onResultCallback: (result: SpeechResult) =>
|
||||
}, []);
|
||||
|
||||
const stopRecording = React.useCallback(() => {
|
||||
if (!refRecognition.current)
|
||||
if (!speechControlsRef.current)
|
||||
return console.error('stopRecording: Speech recognition is not supported or not initialized.');
|
||||
if (!refStarted.current)
|
||||
return console.error('stopRecording: Stop recording called while not recording.');
|
||||
|
||||
refRecognition.current.stop('manual');
|
||||
speechControlsRef.current.stop('manual');
|
||||
}, []);
|
||||
|
||||
const toggleRecording = React.useCallback(() => {
|
||||
@@ -248,7 +296,6 @@ export const useSpeechRecognition = (onResultCallback: (result: SpeechResult) =>
|
||||
startRecording();
|
||||
}, [startRecording, stopRecording]);
|
||||
|
||||
useGlobalShortcut(useShortcutCtrlKey, true, false, false, toggleRecording);
|
||||
|
||||
return {
|
||||
isRecording,
|
||||
@@ -288,17 +335,17 @@ interface ISpeechRecognition extends EventTarget {
|
||||
stop: () => void;
|
||||
// abort: () => void;
|
||||
|
||||
onaudiostart: (event: any) => void;
|
||||
// onsoundstart: (event: any) => void;
|
||||
onspeechstart: (event: any) => void;
|
||||
onspeechend: (event: any) => void;
|
||||
// onsoundend: (event: any) => void;
|
||||
onaudioend: (event: any) => void;
|
||||
onresult: (event: ISpeechRecognitionEvent) => void;
|
||||
// onnomatch: (event: any) => void;
|
||||
onerror: (event: any) => void;
|
||||
onstart: (event: any) => void;
|
||||
onend: (event: any) => void;
|
||||
onaudiostart?: (event: any) => void;
|
||||
// onsoundstart?: (event: any) => void;
|
||||
onspeechstart?: (event: any) => void;
|
||||
onspeechend?: (event: any) => void;
|
||||
// onsoundend?: (event: any) => void;
|
||||
onaudioend?: (event: any) => void;
|
||||
onresult?: (event: ISpeechRecognitionEvent) => void;
|
||||
// onnomatch?: (event: any) => void;
|
||||
onerror?: (event: any) => void;
|
||||
onstart?: (event: any) => void;
|
||||
onend?: (event: any) => void;
|
||||
}
|
||||
|
||||
interface ISpeechRecognitionEvent extends Event {
|
||||
@@ -310,4 +357,5 @@ interface SpeechRecoControls {
|
||||
setLang: (lang: string) => void;
|
||||
start: () => void;
|
||||
stop: (reason: DoneReason) => void;
|
||||
unmount: () => void;
|
||||
}
|
||||
@@ -15,6 +15,9 @@ interface UXLabsStore {
|
||||
labsCalling: boolean;
|
||||
setLabsCalling: (labsCalling: boolean) => void;
|
||||
|
||||
labsCameraDesktop: boolean;
|
||||
setLabsCameraDesktop: (labsCameraDesktop: boolean) => void;
|
||||
|
||||
labsEnhancedUI: boolean;
|
||||
setLabsEnhancedUI: (labsEnhancedUI: boolean) => void;
|
||||
|
||||
@@ -36,6 +39,9 @@ export const useUXLabsStore = create<UXLabsStore>()(
|
||||
labsCalling: false,
|
||||
setLabsCalling: (labsCalling: boolean) => set({ labsCalling }),
|
||||
|
||||
labsCameraDesktop: false,
|
||||
setLabsCameraDesktop: (labsCameraDesktop: boolean) => set({ labsCameraDesktop }),
|
||||
|
||||
labsEnhancedUI: false,
|
||||
setLabsEnhancedUI: (labsEnhancedUI: boolean) => set({ labsEnhancedUI }),
|
||||
|
||||
|
||||
@@ -24,8 +24,13 @@ export function copyToClipboard(text: string, typeLabel: string) {
|
||||
// NOTE: this could be implemented in a platform-agnostic manner with !!.read, but we call it out here for clarity
|
||||
export const supportsClipboardRead = !isFirefox;
|
||||
|
||||
export async function getClipboardItems(): Promise<ClipboardItem[]> {
|
||||
export async function getClipboardItems(): Promise<ClipboardItem[] | null> {
|
||||
if (!isBrowser || !window.navigator.clipboard?.read)
|
||||
return [];
|
||||
return await window.navigator.clipboard.read();
|
||||
try {
|
||||
return await window.navigator.clipboard.read();
|
||||
} catch (error: any) {
|
||||
console.warn('Failed to read clipboard: ', error);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
@@ -1,8 +1,3 @@
|
||||
// Type guard to check if an item has a 'str' property
|
||||
function isTextItem(item: any): item is { str: string } {
|
||||
return 'str' in item && typeof item.str === 'string';
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts text from a PDF file
|
||||
*
|
||||
@@ -12,17 +7,16 @@ function isTextItem(item: any): item is { str: string } {
|
||||
* is called. This is useful because the 'pdfjs-dist' library is quite large,
|
||||
* and we don't want to load it unless we need to. [Faster startup time!]
|
||||
*
|
||||
* @param file - The PDF file to extract text from
|
||||
* @param pdfBuffer The content of a PDF file
|
||||
*/
|
||||
export const pdfToText = async (file: File): Promise<string> => {
|
||||
export async function pdfToText(pdfBuffer: ArrayBuffer): Promise<string> {
|
||||
// Dynamically import the 'pdfjs-dist' library [nextjs]
|
||||
const { getDocument, GlobalWorkerOptions } = await import('pdfjs-dist');
|
||||
|
||||
// Set the worker script path
|
||||
GlobalWorkerOptions.workerSrc = '/workers/pdf.worker.min.js';
|
||||
|
||||
const arrayBuffer = await file.arrayBuffer();
|
||||
const pdf = await getDocument(arrayBuffer).promise;
|
||||
const pdf = await getDocument(pdfBuffer).promise;
|
||||
const textPages: string[] = []; // Initialize an array to hold text from all pages
|
||||
|
||||
for (let i = 1; i <= pdf.numPages; i++) {
|
||||
@@ -35,4 +29,9 @@ export const pdfToText = async (file: File): Promise<string> => {
|
||||
}
|
||||
|
||||
return textPages.join(''); // Join all the page texts at the end
|
||||
};
|
||||
}
|
||||
|
||||
// Type guard to check if an item has a 'str' property
|
||||
function isTextItem(item: any): item is { str: string } {
|
||||
return 'str' in item && typeof item.str === 'string';
|
||||
}
|
||||
@@ -1,3 +1,24 @@
|
||||
export function capitalizeFirstLetter(string: string) {
|
||||
return string.charAt(0).toUpperCase() + string.slice(1);
|
||||
}
|
||||
|
||||
export function createBase36Uid(checkDuplicates: string[]): string {
|
||||
let id = '';
|
||||
do {
|
||||
id = Math.random().toString(36).substring(2, 10);
|
||||
} while (checkDuplicates.includes(id));
|
||||
return id;
|
||||
}
|
||||
|
||||
export function ellipsizeFront(text: string, maxLength: number) {
|
||||
if (text.length <= maxLength)
|
||||
return text;
|
||||
return '…' + text.slice(-(maxLength - 1));
|
||||
}
|
||||
|
||||
export function ellipsizeMiddle(text: string, maxLength: number) {
|
||||
if (text.length <= maxLength)
|
||||
return text;
|
||||
const half = Math.floor(maxLength / 2);
|
||||
return text.slice(0, half) + '…' + text.slice(-(maxLength - half - 1));
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
import type { VChatMessageIn } from '~/modules/llms/transports/chatGenerate';
|
||||
|
||||
import type { FormRadioOption } from '~/common/components/forms/useFormRadio';
|
||||
import type { FormRadioOption } from '~/common/components/forms/FormRadioControl';
|
||||
|
||||
|
||||
export type DiagramType = 'auto' | 'mind';
|
||||
|
||||
@@ -187,8 +187,8 @@ async function search(query: string): Promise<string> {
|
||||
|
||||
async function browse(url: string): Promise<string> {
|
||||
try {
|
||||
const data = await callBrowseFetchPage(url);
|
||||
return JSON.stringify(data ? { text: data } : { error: 'Issue reading the page' });
|
||||
const page = await callBrowseFetchPage(url);
|
||||
return JSON.stringify(page.content ? { text: page.content } : { error: 'Issue reading the page' });
|
||||
} catch (error) {
|
||||
console.error('Error browsing:', (error as Error).message);
|
||||
return 'An error occurred while browsing to the URL. Missing WSS Key?';
|
||||
|
||||
@@ -5,7 +5,7 @@ import { Alert, Box, Button, CircularProgress, Divider, FormControl, Option, Sel
|
||||
|
||||
import { DLLM, DLLMId, useModelsStore } from '~/modules/llms/store-llms';
|
||||
|
||||
import { TokenBadge } from '../../../apps/chat/components/composer/TokenBadge';
|
||||
import { TokenBadgeMemo } from '../../../apps/chat/components/composer/TokenBadge';
|
||||
|
||||
import { FormLabelStart } from '~/common/components/forms/FormLabelStart';
|
||||
import { GoodModal } from '~/common/components/GoodModal';
|
||||
@@ -145,7 +145,7 @@ export function ContentReducer(props: {
|
||||
lineHeight: 1.75,
|
||||
}} />
|
||||
|
||||
<TokenBadge directTokens={reducedTokens} tokenLimit={props.tokenLimit} absoluteBottomRight />
|
||||
<TokenBadgeMemo direct={reducedTokens} limit={props.tokenLimit} absoluteBottomRight />
|
||||
|
||||
{/* indicator we're processing */}
|
||||
{processing && (
|
||||
|
||||
@@ -0,0 +1,23 @@
|
||||
import { track } from '@vercel/analytics/server';
|
||||
|
||||
import { env } from '~/server/env.mjs';
|
||||
|
||||
|
||||
// all the backend analytics flags
|
||||
type BackendAnalyticsFlag =
|
||||
| 'domain'; // logs which domain the initial (capabilities) request is sent to
|
||||
|
||||
|
||||
const checkAnalyticsFlag = (flag: BackendAnalyticsFlag): boolean =>
|
||||
env.BACKEND_ANALYTICS?.includes(flag) || false;
|
||||
|
||||
|
||||
export function analyticsListCapabilities(backendHostName: string) {
|
||||
if (checkAnalyticsFlag('domain')) {
|
||||
// Note: fire-and-forget
|
||||
void track('backend-domain', {
|
||||
hostname: backendHostName,
|
||||
vercel_url: process.env.VERCEL_URL || 'no-vercel',
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,8 @@
|
||||
import { createTRPCRouter, publicProcedure } from '~/server/api/trpc.server';
|
||||
import { env } from '~/server/env.mjs';
|
||||
|
||||
import { analyticsListCapabilities } from './backend.analytics';
|
||||
|
||||
|
||||
/**
|
||||
* This is the primary router for the backend. Mainly, this deals with letting
|
||||
@@ -12,7 +14,8 @@ export const backendRouter = createTRPCRouter({
|
||||
|
||||
/* List server-side capabilities (pre-configured by the deployer) */
|
||||
listCapabilities: publicProcedure
|
||||
.query(async () => {
|
||||
.query(async ({ ctx }) => {
|
||||
analyticsListCapabilities(ctx.hostName);
|
||||
return {
|
||||
hasDB: !!env.POSTGRES_PRISMA_URL && !!env.POSTGRES_URL_NON_POOLING,
|
||||
hasBrowsing: !!env.PUPPETEER_WSS_ENDPOINT,
|
||||
|
||||
@@ -27,16 +27,16 @@ export function BrowseSettings() {
|
||||
<FormHelperText sx={{ display: 'block' }}>
|
||||
Configure a browsing service to enable loading links and pages. See the <Link
|
||||
href='https://github.com/enricoros/big-agi/blob/main/docs/config-browse.md' target='_blank' noLinkStyle>
|
||||
browse functionality guide</Link> for more information.
|
||||
browse configuration guide</Link> for more information.
|
||||
</FormHelperText>
|
||||
|
||||
{!isServerConfig && <FormInputKey
|
||||
id='browse-wss' label='WSS Endpoint' noKey
|
||||
<FormInputKey
|
||||
id='browse-wss' label='Puppeteer Endpoint' noKey
|
||||
value={wssEndpoint} onChange={setWssEndpoint}
|
||||
rightLabel={!isServerConfig ? 'must be valid' : '✔️ already set in server'}
|
||||
required={!isServerConfig} isError={!isClientValid}
|
||||
rightLabel={!isServerConfig ? 'required' : '✔️ already set in server'}
|
||||
required={!isServerConfig} isError={!isClientValid && !isServerConfig}
|
||||
placeholder='wss://...'
|
||||
/>}
|
||||
/>
|
||||
|
||||
<FormControl disabled={!mayWork}>
|
||||
<Checkbox variant='outlined' label='Attach URLs' checked={inComposer} onChange={(event) => setEnableComposerAttach(event.target.checked)} />
|
||||
|
||||
@@ -3,40 +3,60 @@ import { useBrowseStore } from '~/modules/browse/store-module-browsing';
|
||||
import { apiAsyncNode } from '~/common/util/trpc.client';
|
||||
|
||||
|
||||
// show the screenshot in the dom
|
||||
const DEBUG_SHOW_SCREENSHOT = false;
|
||||
|
||||
|
||||
export const CmdRunBrowse: string[] = ['/browse'];
|
||||
|
||||
|
||||
export async function callBrowseFetchPage(url: string): Promise<string | null> {
|
||||
export async function callBrowseFetchPage(url: string) {
|
||||
|
||||
// thow if no URL is provided
|
||||
url = url?.trim() || '';
|
||||
if (!url)
|
||||
throw new Error('Invalid URL');
|
||||
throw new Error('Browsing error: Invalid URL');
|
||||
|
||||
// assume https if no protocol is provided
|
||||
// noinspection HttpUrlsUsage
|
||||
if (!url.startsWith('http://') && !url.startsWith('https://'))
|
||||
url = 'https://' + url;
|
||||
|
||||
try {
|
||||
const clientWssEndpoint = useBrowseStore.getState().wssEndpoint;
|
||||
|
||||
const clientWssEndpoint = useBrowseStore.getState().wssEndpoint;
|
||||
const { pages } = await apiAsyncNode.browse.fetchPages.mutate({
|
||||
access: {
|
||||
dialect: 'browse-wss',
|
||||
...(!!clientWssEndpoint && { wssEndpoint: clientWssEndpoint }),
|
||||
},
|
||||
subjects: [{ url }],
|
||||
screenshot: DEBUG_SHOW_SCREENSHOT ? {
|
||||
width: 512,
|
||||
height: 512,
|
||||
// quality: 100,
|
||||
} : undefined,
|
||||
});
|
||||
|
||||
const results = await apiAsyncNode.browse.fetchPages.mutate({
|
||||
access: {
|
||||
dialect: 'browse-wss',
|
||||
...(!!clientWssEndpoint && { wssEndpoint: clientWssEndpoint }),
|
||||
},
|
||||
subjects: [{ url }],
|
||||
});
|
||||
if (pages.length !== 1)
|
||||
throw new Error(`Browsing error: expected 1 result, got ${pages.length}`);
|
||||
|
||||
if (results.objects.length !== 1)
|
||||
return `Browsing error: expected 1 result, got ${results.objects.length}`;
|
||||
const page = pages[0];
|
||||
|
||||
const firstResult = results.objects[0];
|
||||
return !firstResult.error ? firstResult.content : `Browsing service error: ${JSON.stringify(firstResult)}`;
|
||||
|
||||
} catch (error: any) {
|
||||
return `Browsing error: ${error?.message || error?.toString() || 'Unknown fetch error'}`;
|
||||
// DEBUG: if there's a screenshot, append it to the dom
|
||||
if (DEBUG_SHOW_SCREENSHOT && page.screenshot) {
|
||||
const img = document.createElement('img');
|
||||
img.src = page.screenshot.imageDataUrl;
|
||||
img.style.width = `${page.screenshot.width}px`;
|
||||
img.style.height = `${page.screenshot.height}px`;
|
||||
document.body.appendChild(img);
|
||||
}
|
||||
|
||||
// throw if there's an error
|
||||
if (page.error) {
|
||||
console.warn('Browsing service error:', page.error);
|
||||
if (!page.content)
|
||||
throw new Error(page.error);
|
||||
}
|
||||
|
||||
return page;
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { z } from 'zod';
|
||||
import { TRPCError } from '@trpc/server';
|
||||
import { connect, Page, TimeoutError } from '@cloudflare/puppeteer';
|
||||
import { BrowserContext, connect, ScreenshotOptions, TimeoutError } from '@cloudflare/puppeteer';
|
||||
|
||||
import { createTRPCRouter, publicProcedure } from '~/server/api/trpc.server';
|
||||
import { env } from '~/server/env.mjs';
|
||||
@@ -22,6 +22,11 @@ const fetchPageInputSchema = z.object({
|
||||
subjects: z.array(z.object({
|
||||
url: z.string().url(),
|
||||
})),
|
||||
screenshot: z.object({
|
||||
width: z.number(),
|
||||
height: z.number(),
|
||||
quality: z.number().optional(),
|
||||
}).optional(),
|
||||
});
|
||||
|
||||
|
||||
@@ -33,14 +38,15 @@ const fetchPageWorkerOutputSchema = z.object({
|
||||
error: z.string().optional(),
|
||||
stopReason: z.enum(['end', 'timeout', 'error']),
|
||||
screenshot: z.object({
|
||||
base64: z.string(),
|
||||
imageDataUrl: z.string().startsWith('data:image/'),
|
||||
mimeType: z.string().startsWith('image/'),
|
||||
width: z.number(),
|
||||
height: z.number(),
|
||||
}).optional(),
|
||||
});
|
||||
|
||||
const fetchPagesOutputSchema = z.object({
|
||||
objects: z.array(fetchPageWorkerOutputSchema),
|
||||
pages: z.array(fetchPageWorkerOutputSchema),
|
||||
});
|
||||
|
||||
|
||||
@@ -49,14 +55,14 @@ export const browseRouter = createTRPCRouter({
|
||||
fetchPages: publicProcedure
|
||||
.input(fetchPageInputSchema)
|
||||
.output(fetchPagesOutputSchema)
|
||||
.mutation(async ({ input: { access, subjects } }) => {
|
||||
const results: FetchPageWorkerOutputSchema[] = [];
|
||||
.mutation(async ({ input: { access, subjects, screenshot } }) => {
|
||||
const pages: FetchPageWorkerOutputSchema[] = [];
|
||||
|
||||
for (const subject of subjects) {
|
||||
try {
|
||||
results.push(await workerPuppeteer(access, subject.url));
|
||||
pages.push(await workerPuppeteer(access, subject.url, screenshot?.width, screenshot?.height, screenshot?.quality));
|
||||
} catch (error: any) {
|
||||
results.push({
|
||||
pages.push({
|
||||
url: subject.url,
|
||||
content: '',
|
||||
error: error?.message || JSON.stringify(error) || 'Unknown fetch error',
|
||||
@@ -65,7 +71,7 @@ export const browseRouter = createTRPCRouter({
|
||||
}
|
||||
}
|
||||
|
||||
return { objects: results };
|
||||
return { pages };
|
||||
}),
|
||||
|
||||
});
|
||||
@@ -74,11 +80,12 @@ export const browseRouter = createTRPCRouter({
|
||||
type BrowseAccessSchema = z.infer<typeof browseAccessSchema>;
|
||||
type FetchPageWorkerOutputSchema = z.infer<typeof fetchPageWorkerOutputSchema>;
|
||||
|
||||
async function workerPuppeteer(access: BrowseAccessSchema, targetUrl: string): Promise<FetchPageWorkerOutputSchema> {
|
||||
async function workerPuppeteer(access: BrowseAccessSchema, targetUrl: string, ssWidth: number | undefined, ssHeight: number | undefined, ssQuality: number | undefined): Promise<FetchPageWorkerOutputSchema> {
|
||||
|
||||
// access
|
||||
const browserWSEndpoint = (access.wssEndpoint || env.PUPPETEER_WSS_ENDPOINT || '').trim();
|
||||
if (!browserWSEndpoint || !(browserWSEndpoint.startsWith('wss://') || browserWSEndpoint.startsWith('ws://')))
|
||||
const isLocalBrowser = browserWSEndpoint.startsWith('ws://');
|
||||
if (!browserWSEndpoint || (!browserWSEndpoint.startsWith('wss://') && !isLocalBrowser))
|
||||
throw new TRPCError({
|
||||
code: 'BAD_REQUEST',
|
||||
message: 'Invalid wss:// endpoint',
|
||||
@@ -86,7 +93,7 @@ async function workerPuppeteer(access: BrowseAccessSchema, targetUrl: string): P
|
||||
|
||||
const result: FetchPageWorkerOutputSchema = {
|
||||
url: targetUrl,
|
||||
content: '(no content)',
|
||||
content: '',
|
||||
error: undefined,
|
||||
stopReason: 'error',
|
||||
screenshot: undefined,
|
||||
@@ -96,26 +103,27 @@ async function workerPuppeteer(access: BrowseAccessSchema, targetUrl: string): P
|
||||
const browser = await connect({ browserWSEndpoint });
|
||||
|
||||
// for local testing, open an incognito context, to seaparate cookies
|
||||
let page: Page;
|
||||
if (browserWSEndpoint.startsWith('ws://')) {
|
||||
const context = await browser.createIncognitoBrowserContext();
|
||||
page = await context.newPage();
|
||||
} else {
|
||||
page = await browser.newPage();
|
||||
}
|
||||
let incognitoContext: BrowserContext | null = null;
|
||||
if (isLocalBrowser)
|
||||
incognitoContext = await browser.createIncognitoBrowserContext();
|
||||
const page = incognitoContext ? await incognitoContext.newPage() : await browser.newPage();
|
||||
page.setDefaultNavigationTimeout(WORKER_TIMEOUT);
|
||||
|
||||
// open url
|
||||
try {
|
||||
page.setDefaultNavigationTimeout(WORKER_TIMEOUT);
|
||||
await page.goto(targetUrl);
|
||||
result.stopReason = 'end';
|
||||
const response = await page.goto(targetUrl);
|
||||
const contentType = response?.headers()?.['content-type'];
|
||||
const isWebPage = contentType?.startsWith('text/html') || contentType?.startsWith('text/plain') || false;
|
||||
if (!isWebPage) {
|
||||
// noinspection ExceptionCaughtLocallyJS
|
||||
throw new Error(`Invalid content-type: ${contentType}`);
|
||||
} else
|
||||
result.stopReason = 'end';
|
||||
} catch (error: any) {
|
||||
const isExpected: boolean = error instanceof TimeoutError;
|
||||
result.stopReason = isExpected ? 'timeout' : 'error';
|
||||
if (!isExpected) {
|
||||
result.error = '[Puppeteer] Loading issue: ' + error?.message || error?.toString() || 'Unknown error';
|
||||
console.error('workerPuppeteer: page.goto', error);
|
||||
}
|
||||
const isTimeout: boolean = error instanceof TimeoutError;
|
||||
result.stopReason = isTimeout ? 'timeout' : 'error';
|
||||
if (!isTimeout)
|
||||
result.error = '[Puppeteer] ' + error?.message || error?.toString() || 'Unknown goto error';
|
||||
}
|
||||
|
||||
// transform the content of the page as text
|
||||
@@ -129,26 +137,30 @@ async function workerPuppeteer(access: BrowseAccessSchema, targetUrl: string): P
|
||||
});
|
||||
}
|
||||
} catch (error: any) {
|
||||
console.error('workerPuppeteer: page.evaluate', error);
|
||||
result.error = '[Puppeteer] ' + error?.message || error?.toString() || 'Unknown evaluate error';
|
||||
}
|
||||
|
||||
// get a screenshot of the page
|
||||
try {
|
||||
const width = 100;
|
||||
const height = 100;
|
||||
const scale = 0.1; // 10%
|
||||
if (ssWidth && ssHeight) {
|
||||
const width = ssWidth;
|
||||
const height = ssHeight;
|
||||
const scale = Math.round(100 * ssWidth / 1024) / 100;
|
||||
|
||||
await page.setViewport({ width: width / scale, height: height / scale, deviceScaleFactor: scale });
|
||||
await page.setViewport({ width: width / scale, height: height / scale, deviceScaleFactor: scale });
|
||||
|
||||
result.screenshot = {
|
||||
base64: await page.screenshot({
|
||||
type: 'webp',
|
||||
clip: { x: 0, y: 0, width: width / scale, height: height / scale },
|
||||
const imageType: ScreenshotOptions['type'] = 'webp';
|
||||
const mimeType = `image/${imageType}`;
|
||||
|
||||
const dataString = await page.screenshot({
|
||||
type: imageType,
|
||||
encoding: 'base64',
|
||||
}) as string,
|
||||
width,
|
||||
height,
|
||||
};
|
||||
clip: { x: 0, y: 0, width: width / scale, height: height / scale },
|
||||
...(ssQuality && { quality: ssQuality }),
|
||||
}) as string;
|
||||
|
||||
result.screenshot = { imageDataUrl: `data:${mimeType};base64,${dataString}`, mimeType, width, height };
|
||||
}
|
||||
} catch (error: any) {
|
||||
console.error('workerPuppeteer: page.screenshot', error);
|
||||
}
|
||||
@@ -160,11 +172,22 @@ async function workerPuppeteer(access: BrowseAccessSchema, targetUrl: string): P
|
||||
console.error('workerPuppeteer: page.close', error);
|
||||
}
|
||||
|
||||
// close the incognito context
|
||||
if (incognitoContext) {
|
||||
try {
|
||||
await incognitoContext.close();
|
||||
} catch (error: any) {
|
||||
console.error('workerPuppeteer: incognitoContext.close', error);
|
||||
}
|
||||
}
|
||||
|
||||
// close the browse (important!)
|
||||
try {
|
||||
await browser.close();
|
||||
} catch (error: any) {
|
||||
console.error('workerPuppeteer: browser.close', error);
|
||||
if (!isLocalBrowser) {
|
||||
try {
|
||||
await browser.close();
|
||||
} catch (error: any) {
|
||||
console.error('workerPuppeteer: browser.close', error);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
|
||||
@@ -252,7 +252,7 @@ export function useChatLLM() {
|
||||
return useModelsStore(state => {
|
||||
const { chatLLMId } = state;
|
||||
const chatLLM = chatLLMId ? state.llms.find(llm => llm.id === chatLLMId) ?? null : null;
|
||||
return { chatLLMId, chatLLM };
|
||||
return { chatLLM };
|
||||
}, shallow);
|
||||
}
|
||||
|
||||
|
||||
@@ -3,44 +3,57 @@
|
||||
* descriptions for the models.
|
||||
* (nor does it reliably provide context window sizes) - TODO: open a bug upstream
|
||||
*
|
||||
* from: https://ollama.ai/library?sort=popular
|
||||
* from: https://ollama.ai/library?sort=featured
|
||||
*/
|
||||
export const OLLAMA_BASE_MODELS: { [key: string]: string } = {
|
||||
'mistral': 'The Mistral 7B model released by Mistral AI',
|
||||
'llama2': 'The most popular model for general use.',
|
||||
'codellama': 'A large language model that can use text prompts to generate and discuss code.',
|
||||
'vicuna': 'General use chat model based on Llama and Llama 2 with 2K to 16K context sizes.',
|
||||
'llama2-uncensored': 'Uncensored Llama 2 model by George Sung and Jarrad Hope.',
|
||||
'orca-mini': 'A general-purpose model ranging from 3 billion parameters to 70 billion, suitable for entry-level hardware.',
|
||||
'wizard-vicuna-uncensored': 'Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on Llama 2 uncensored by Eric Hartford.',
|
||||
'nous-hermes': 'General use models based on Llama and Llama 2 from Nous Research.',
|
||||
'phind-codellama': 'Code generation model based on CodeLlama.',
|
||||
'mistral-openorca': 'Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the Mistral 7B model using the OpenOrca dataset.',
|
||||
'wizardcoder': 'Llama based code generation model focused on Python.',
|
||||
'wizard-math': 'Model focused on math and logic problems',
|
||||
'llama2-chinese': 'Llama 2 based model fine tuned to improve Chinese dialogue ability.',
|
||||
'stable-beluga': 'Llama 2 based model fine tuned on an Orca-style dataset. Originally called Free Willy.',
|
||||
'zephyr': 'Zephyr beta is a fine-tuned 7B version of mistral that was trained on on a mix of publicly available, synthetic datasets.',
|
||||
'codeup': 'Great code generation model based on Llama2.',
|
||||
'falcon': 'A large language model built by the Technology Innovation Institute (TII) for use in summarization, text generation, and chat bots.',
|
||||
'everythinglm': 'Uncensored Llama2 based model with 16k context size.',
|
||||
'wizardlm-uncensored': 'Uncensored version of Wizard LM model',
|
||||
'medllama2': 'Fine-tuned Llama 2 model to answer medical questions based on an open source medical dataset.',
|
||||
'wizard-vicuna': 'Wizard Vicuna is a 13B parameter model based on Llama 2 trained by MelodysDreamj.',
|
||||
'open-orca-platypus2': 'Merge of the Open Orca OpenChat model and the Garage-bAInd Platypus 2 model. Designed for chat and code generation.',
|
||||
'starcoder': 'StarCoder is a code generation model trained on 80+ programming languages.',
|
||||
'samantha-mistral': 'A companion assistant trained in philosophy, psychology, and personal relationships. Based on Mistral.',
|
||||
'openhermes2-mistral': 'OpenHermes 2 Mistral is a 7B model fine-tuned on Mistral with 900,000 entries of primarily GPT-4 generated data from open datasets.',
|
||||
'wizardlm': 'General use 70 billion parameter model based on Llama 2.',
|
||||
'sqlcoder': 'SQLCoder is a code completion model fined-tuned on StarCoder for SQL generation tasks',
|
||||
'dolphin2.2-mistral': 'An instruct-tuned model based on Mistral. Version 2.2 is fine-tuned for improved conversation and empathy.',
|
||||
'dolphin2.1-mistral': 'An instruct-tuned model based on Mistral and trained on a dataset filtered to remove alignment and bias.',
|
||||
'yarn-mistral': 'An extension of Mistral to support a context of up to 128k tokens.',
|
||||
'codebooga': 'A high-performing code instruct model created by merging two existing code models.',
|
||||
'openhermes2.5-mistral': 'OpenHermes 2.5 Mistral 7B is a Mistral 7B fine-tune, a continuation of OpenHermes 2 model, which trained on additional code datasets.',
|
||||
'mistrallite': 'MistralLite is a fine-tuned model based on Mistral with enhanced capabilities of processing long contexts.',
|
||||
'nexusraven': 'Nexus Raven is a 13B instruction tuned model for function calling tasks.',
|
||||
'yarn-llama2': 'An extension of Llama 2 that supports a context of up to 128k tokens.',
|
||||
'xwinlm': 'Conversational model based on Llama 2 that performs competitively on various benchmarks.',
|
||||
'yi': 'A high-performing, bilingual base model.',
|
||||
};
|
||||
export const OLLAMA_BASE_MODELS: { [key: string]: { description: string, pulls: number, added?: string } } = {
|
||||
'starling-lm': { description: 'Starling is a large language model trained by reinforcement learning from AI feedback focused on improving chatbot helpfulness.', pulls: 2353, added: '20231129' },
|
||||
'neural-chat': { description: 'A fine-tuned model based on Mistral with good coverage of domain and language.', pulls: 3089, added: '20231129' },
|
||||
'mistral': { description: 'The Mistral 7B model released by Mistral AI', pulls: 70300 },
|
||||
'yi': { description: 'A high-performing, bilingual base model.', pulls: 2673 },
|
||||
'llama2': { description: 'The most popular model for general use.', pulls: 141000 },
|
||||
'codellama': { description: 'A large language model that can use text prompts to generate and discuss code.', pulls: 71400 },
|
||||
'llama2-uncensored': { description: 'Uncensored Llama 2 model by George Sung and Jarrad Hope.', pulls: 30900 },
|
||||
'orca-mini': { description: 'A general-purpose model ranging from 3 billion parameters to 70 billion, suitable for entry-level hardware.', pulls: 26000 },
|
||||
'vicuna': { description: 'General use chat model based on Llama and Llama 2 with 2K to 16K context sizes.', pulls: 21800 },
|
||||
'wizard-vicuna-uncensored': { description: 'Wizard Vicuna Uncensored is a 7B, 13B, and 30B parameter model based on Llama 2 uncensored by Eric Hartford.', pulls: 13700 },
|
||||
'phind-codellama': { description: 'Code generation model based on CodeLlama.', pulls: 10600 },
|
||||
'zephyr': { description: 'Zephyr beta is a fine-tuned 7B version of mistral that was trained on on a mix of publicly available, synthetic datasets.', pulls: 10200 },
|
||||
'wizardcoder': { description: 'Llama based code generation model focused on Python.', pulls: 9895 },
|
||||
'mistral-openorca': { description: 'Mistral OpenOrca is a 7 billion parameter model, fine-tuned on top of the Mistral 7B model using the OpenOrca dataset.', pulls: 9256 },
|
||||
'nous-hermes': { description: 'General use models based on Llama and Llama 2 from Nous Research.', pulls: 8827 },
|
||||
'wizard-math': { description: 'Model focused on math and logic problems', pulls: 7849 },
|
||||
'llama2-chinese': { description: 'Llama 2 based model fine tuned to improve Chinese dialogue ability.', pulls: 7375 },
|
||||
'deepseek-coder': { description: 'DeepSeek Coder is trained from scratch on both 87% code and 13% natural language in English and Chinese. Each of the models are pre-trained on 2 trillion tokens.', pulls: 7335, added: '20231129' },
|
||||
'falcon': { description: 'A large language model built by the Technology Innovation Institute (TII) for use in summarization, text generation, and chat bots.', pulls: 6726 },
|
||||
'stable-beluga': { description: 'Llama 2 based model fine tuned on an Orca-style dataset. Originally called Free Willy.', pulls: 6272 },
|
||||
'codeup': { description: 'Great code generation model based on Llama2.', pulls: 5978 },
|
||||
'orca2': { description: 'Orca 2 is built by Microsoft research, and are a fine-tuned version of Meta\'s Llama 2 models. The model is designed to excel particularly in reasoning.', pulls: 5854, added: '20231129' },
|
||||
'everythinglm': { description: 'Uncensored Llama2 based model with 16k context size.', pulls: 5040 },
|
||||
'medllama2': { description: 'Fine-tuned Llama 2 model to answer medical questions based on an open source medical dataset.', pulls: 4648 },
|
||||
'wizardlm-uncensored': { description: 'Uncensored version of Wizard LM model.', pulls: 4536 },
|
||||
'dolphin2.2-mistral': { description: 'An instruct-tuned model based on Mistral. Version 2.2 is fine-tuned for improved conversation and empathy.', pulls: 3638 },
|
||||
'starcoder': { description: 'StarCoder is a code generation model trained on 80+ programming languages.', pulls: 3638 },
|
||||
'wizard-vicuna': { description: 'Wizard Vicuna is a 13B parameter model based on Llama 2 trained by MelodysDreamj.', pulls: 3485 },
|
||||
'openchat': { description: 'A family of open-source models trained on a wide variety of data, surpassing ChatGPT on various benchmarks.', pulls: 3438, added: '20231129' },
|
||||
'open-orca-platypus2': { description: 'Merge of the Open Orca OpenChat model and the Garage-bAInd Platypus 2 model. Designed for chat and code generation.', pulls: 3145 },
|
||||
'openhermes2.5-mistral': { description: 'OpenHermes 2.5 Mistral 7B is a Mistral 7B fine-tune, a continuation of OpenHermes 2 model, which trained on additional code datasets.', pulls: 3023 },
|
||||
'yarn-mistral': { description: 'An extension of Mistral to support a context of up to 128k tokens.', pulls: 2775 },
|
||||
'samantha-mistral': { description: 'A companion assistant trained in philosophy, psychology, and personal relationships. Based on Mistral.', pulls: 2192 },
|
||||
'sqlcoder': { description: 'SQLCoder is a code completion model fined-tuned on StarCoder for SQL generation tasks', pulls: 1973 },
|
||||
'yarn-llama2': { description: 'An extension of Llama 2 that supports a context of up to 128k tokens.', pulls: 1915 },
|
||||
'openhermes2-mistral': { description: 'OpenHermes 2 Mistral is a 7B model fine-tuned on Mistral with 900,000 entries of primarily GPT-4 generated data from open datasets.', pulls: 1690 },
|
||||
'meditron': { description: 'Open-source medical large language model adapted from Llama 2 to the medical domain.', pulls: 1667, added: '20231129' },
|
||||
'wizardlm': { description: 'General use 70 billion parameter model based on Llama 2.', pulls: 1379 },
|
||||
'mistrallite': { description: 'MistralLite is a fine-tuned model based on Mistral with enhanced capabilities of processing long contexts.', pulls: 1345 },
|
||||
'deepseek-llm': { description: 'An advanced language model crafted with 2 trillion bilingual tokens.', pulls: 1318, added: '20231129' },
|
||||
'dolphin2.1-mistral': { description: 'An instruct-tuned model based on Mistral and trained on a dataset filtered to remove alignment and bias.', pulls: 1302 },
|
||||
'codebooga': { description: 'A high-performing code instruct model created by merging two existing code models.', pulls: 1254 },
|
||||
'goliath': { description: 'A language model created by combining two fine-tuned Llama 2 70B models into one.', pulls: 946, added: '20231129' },
|
||||
'stablelm-zephyr': { description: 'A lightweight chat model allowing accurate, and responsive output without requiring high-end hardware.', pulls: 945, added: '20231210' },
|
||||
'nexusraven': { description: 'Nexus Raven is a 13B instruction tuned model for function calling tasks.', pulls: 860 },
|
||||
'magicoder': { description: '🎩 Magicoder is a family of 7B parameter models trained on 75K synthetic instruction data using OSS-Instruct, a novel approach to enlightening LLMs with open-source code snippets.', pulls: 816, added: '20231210' },
|
||||
'alfred': { description: 'A robust conversational model designed to be used for both chat and instruct use cases.', pulls: 804, added: '20231129' },
|
||||
'xwinlm': { description: 'Conversational model based on Llama 2 that performs competitively on various benchmarks.', pulls: 706 },
|
||||
};
|
||||
// export const OLLAMA_LAST_UPDATE: string = '20231210';
|
||||
export const OLLAMA_PREV_UPDATE: string = '20231129';
|
||||
@@ -11,12 +11,15 @@ import { capitalizeFirstLetter } from '~/common/util/textUtils';
|
||||
import { fixupHost, openAIChatGenerateOutputSchema, OpenAIHistorySchema, openAIHistorySchema, OpenAIModelSchema, openAIModelSchema } from '../openai/openai.router';
|
||||
import { listModelsOutputSchema, ModelDescriptionSchema } from '../server.schemas';
|
||||
|
||||
import { OLLAMA_BASE_MODELS } from './ollama.models';
|
||||
import { wireOllamaGenerationSchema } from './ollama.wiretypes';
|
||||
import { OLLAMA_BASE_MODELS, OLLAMA_PREV_UPDATE } from './ollama.models';
|
||||
import { WireOllamaChatCompletionInput, wireOllamaChunkedOutputSchema } from './ollama.wiretypes';
|
||||
|
||||
|
||||
// Default hosts
|
||||
const DEFAULT_OLLAMA_HOST = 'http://127.0.0.1:11434';
|
||||
export const OLLAMA_PATH_CHAT = '/api/chat';
|
||||
const OLLAMA_PATH_TAGS = '/api/tags';
|
||||
const OLLAMA_PATH_SHOW = '/api/show';
|
||||
|
||||
|
||||
// Mappers
|
||||
@@ -34,7 +37,23 @@ export function ollamaAccess(access: OllamaAccessSchema, apiPath: string): { hea
|
||||
|
||||
}
|
||||
|
||||
export function ollamaChatCompletionPayload(model: OpenAIModelSchema, history: OpenAIHistorySchema, stream: boolean) {
|
||||
|
||||
export const ollamaChatCompletionPayload = (model: OpenAIModelSchema, history: OpenAIHistorySchema, stream: boolean): WireOllamaChatCompletionInput => ({
|
||||
model: model.id,
|
||||
messages: history,
|
||||
options: {
|
||||
...(model.temperature && { temperature: model.temperature }),
|
||||
},
|
||||
// n: ...
|
||||
// functions: ...
|
||||
// function_call: ...
|
||||
stream,
|
||||
});
|
||||
|
||||
|
||||
/* Unused: switched to the Chat endpoint (above). The implementation is left here for reference.
|
||||
https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion
|
||||
export function ollamaCompletionPayload(model: OpenAIModelSchema, history: OpenAIHistorySchema, stream: boolean) {
|
||||
|
||||
// if the first message is the system prompt, extract it
|
||||
let systemPrompt: string | undefined = undefined;
|
||||
@@ -62,7 +81,7 @@ export function ollamaChatCompletionPayload(model: OpenAIModelSchema, history: O
|
||||
...(systemPrompt && { system: systemPrompt }),
|
||||
stream,
|
||||
};
|
||||
}
|
||||
}*/
|
||||
|
||||
async function ollamaGET<TOut extends object>(access: OllamaAccessSchema, apiPath: string /*, signal?: AbortSignal*/): Promise<TOut> {
|
||||
const { headers, url } = ollamaAccess(access, apiPath);
|
||||
@@ -104,6 +123,8 @@ const listPullableOutputSchema = z.object({
|
||||
label: z.string(),
|
||||
tag: z.string(),
|
||||
description: z.string(),
|
||||
pulls: z.number(),
|
||||
isNew: z.boolean(),
|
||||
})),
|
||||
});
|
||||
|
||||
@@ -116,11 +137,13 @@ export const llmOllamaRouter = createTRPCRouter({
|
||||
.output(listPullableOutputSchema)
|
||||
.query(async ({}) => {
|
||||
return {
|
||||
pullable: Object.entries(OLLAMA_BASE_MODELS).map(([model, description]) => ({
|
||||
id: model,
|
||||
label: capitalizeFirstLetter(model),
|
||||
pullable: Object.entries(OLLAMA_BASE_MODELS).map(([model_id, model]) => ({
|
||||
id: model_id,
|
||||
label: capitalizeFirstLetter(model_id),
|
||||
tag: 'latest',
|
||||
description,
|
||||
description: model.description,
|
||||
pulls: model.pulls,
|
||||
isNew: !!model.added && model.added >= OLLAMA_PREV_UPDATE,
|
||||
})),
|
||||
};
|
||||
}),
|
||||
@@ -158,6 +181,7 @@ export const llmOllamaRouter = createTRPCRouter({
|
||||
throw new Error('Ollama delete issue: ' + deleteOutput);
|
||||
}),
|
||||
|
||||
|
||||
/* Ollama: List the Models available */
|
||||
listModels: publicProcedure
|
||||
.input(accessOnlySchema)
|
||||
@@ -165,7 +189,7 @@ export const llmOllamaRouter = createTRPCRouter({
|
||||
.query(async ({ input }) => {
|
||||
|
||||
// get the models
|
||||
const wireModels = await ollamaGET(input.access, '/api/tags');
|
||||
const wireModels = await ollamaGET(input.access, OLLAMA_PATH_TAGS);
|
||||
const wireOllamaListModelsSchema = z.object({
|
||||
models: z.array(z.object({
|
||||
name: z.string(),
|
||||
@@ -178,7 +202,7 @@ export const llmOllamaRouter = createTRPCRouter({
|
||||
|
||||
// retrieve info for each of the models (/api/show, post call, in parallel)
|
||||
const detailedModels = await Promise.all(models.map(async model => {
|
||||
const wireModelInfo = await ollamaPOST(input.access, { 'name': model.name }, '/api/show');
|
||||
const wireModelInfo = await ollamaPOST(input.access, { 'name': model.name }, OLLAMA_PATH_SHOW);
|
||||
const wireOllamaModelInfoSchema = z.object({
|
||||
license: z.string().optional(),
|
||||
modelfile: z.string(),
|
||||
@@ -196,7 +220,7 @@ export const llmOllamaRouter = createTRPCRouter({
|
||||
|
||||
// pretty label and description
|
||||
const label = capitalizeFirstLetter(modelName) + ((modelTag && modelTag !== 'latest') ? ` · ${modelTag}` : '');
|
||||
const description = OLLAMA_BASE_MODELS[modelName] ?? 'Model unknown';
|
||||
const description = OLLAMA_BASE_MODELS[modelName]?.description ?? 'Model unknown';
|
||||
|
||||
// console.log('>>> ollama model', model.name, model.template, model.modelfile, '\n');
|
||||
|
||||
@@ -219,12 +243,15 @@ export const llmOllamaRouter = createTRPCRouter({
|
||||
.output(openAIChatGenerateOutputSchema)
|
||||
.mutation(async ({ input: { access, history, model } }) => {
|
||||
|
||||
const wireGeneration = await ollamaPOST(access, ollamaChatCompletionPayload(model, history, false), '/api/generate');
|
||||
const generation = wireOllamaGenerationSchema.parse(wireGeneration);
|
||||
const wireGeneration = await ollamaPOST(access, ollamaChatCompletionPayload(model, history, false), OLLAMA_PATH_CHAT);
|
||||
const generation = wireOllamaChunkedOutputSchema.parse(wireGeneration);
|
||||
|
||||
if (!generation.message?.content)
|
||||
throw new Error('Ollama chat generation (non-stream) issue: ' + JSON.stringify(wireGeneration));
|
||||
|
||||
return {
|
||||
role: 'assistant',
|
||||
content: generation.response,
|
||||
content: generation.message.content,
|
||||
finish_reason: generation.done ? 'stop' : null,
|
||||
};
|
||||
}),
|
||||
|
||||
@@ -1,16 +1,69 @@
|
||||
import { z } from 'zod';
|
||||
|
||||
export const wireOllamaGenerationSchema = z.object({
|
||||
|
||||
/**
|
||||
* Chat Completion API - Request
|
||||
* https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-chat-completion
|
||||
*/
|
||||
const wireOllamaChatCompletionInputSchema = z.object({
|
||||
|
||||
// required
|
||||
model: z.string(),
|
||||
messages: z.array(z.object({
|
||||
role: z.enum(['assistant', 'system', 'user']),
|
||||
content: z.string(),
|
||||
})),
|
||||
|
||||
// optional
|
||||
format: z.enum(['json']).optional(),
|
||||
options: z.object({
|
||||
// https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md
|
||||
// Maximum number of tokens to predict when generating text.
|
||||
num_predict: z.number().int().optional(),
|
||||
// Sets the random number seed to use for generation
|
||||
seed: z.number().int().optional(),
|
||||
// The temperature of the model
|
||||
temperature: z.number().positive().optional(),
|
||||
// Reduces the probability of generating nonsense (Default: 40)
|
||||
top_k: z.number().positive().optional(),
|
||||
// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text. (Default 0.9)
|
||||
top_p: z.number().positive().optional(),
|
||||
}).optional(),
|
||||
template: z.string().optional(), // overrides what is defined in the Modelfile
|
||||
stream: z.boolean().optional(), // default: true
|
||||
|
||||
// Future Improvements?
|
||||
// n: z.number().int().optional(), // number of completions to generate
|
||||
// functions: ...
|
||||
// function_call: ...
|
||||
});
|
||||
export type WireOllamaChatCompletionInput = z.infer<typeof wireOllamaChatCompletionInputSchema>;
|
||||
|
||||
|
||||
/**
|
||||
* Chat Completion or Generation APIs - Streaming Response
|
||||
*/
|
||||
export const wireOllamaChunkedOutputSchema = z.object({
|
||||
model: z.string(),
|
||||
// created_at: z.string(), // commented because unused
|
||||
response: z.string(),
|
||||
|
||||
// [Chat Completion] (exclusive with 'response')
|
||||
message: z.object({
|
||||
role: z.enum(['assistant' /*, 'system', 'user' Disabled on purpose, to validate the response */]),
|
||||
content: z.string(),
|
||||
}).optional(), // optional on the last message
|
||||
|
||||
// [Generation] (non-chat, exclusive with 'message')
|
||||
//response: z.string().optional(),
|
||||
|
||||
done: z.boolean(),
|
||||
|
||||
// only on the last message
|
||||
// context: z.array(z.number()),
|
||||
// context: z.array(z.number()), // non-chat endpoint
|
||||
// total_duration: z.number(),
|
||||
// load_duration: z.number(),
|
||||
// eval_duration: z.number(),
|
||||
// prompt_eval_count: z.number(),
|
||||
// prompt_eval_duration: z.number(),
|
||||
// eval_count: z.number(),
|
||||
});
|
||||
// eval_duration: z.number(),
|
||||
|
||||
});
|
||||
@@ -207,7 +207,8 @@ export function localAIModelToModelDescription(modelId: string): ModelDescriptio
|
||||
const _knownOobaboogaChatModels: ManualMappings = [];
|
||||
|
||||
const _knownOobaboogaNonChatModels: string[] = [
|
||||
'None', 'text-curie-001', 'text-davinci-002', 'all-mpnet-base-v2', 'gpt-3.5-turbo', 'text-embedding-ada-002',
|
||||
'None', 'text-curie-001', 'text-davinci-002', 'all-mpnet-base-v2', 'text-embedding-ada-002',
|
||||
/* 'gpt-3.5-turbo' // used to be here, but now it's the way to select the activly loaded ooababooga model */
|
||||
];
|
||||
|
||||
export function oobaboogaModelToModelDescription(modelId: string, created: number): ModelDescriptionSchema {
|
||||
@@ -215,6 +216,10 @@ export function oobaboogaModelToModelDescription(modelId: string, created: numbe
|
||||
if (label.endsWith('.bin'))
|
||||
label = label.slice(0, -4);
|
||||
|
||||
// special case for the default (and only 'chat') model
|
||||
if (modelId === 'gpt-3.5-turbo')
|
||||
label = 'Oobabooga Model';
|
||||
|
||||
return fromManualMapping(_knownOobaboogaChatModels, modelId, created, undefined, {
|
||||
idPrefix: modelId,
|
||||
label: label,
|
||||
@@ -240,50 +245,63 @@ export function oobaboogaModelToModelDescription(modelId: string, created: numbe
|
||||
* - cc: cost per 1k completion tokens
|
||||
* - old: if true, this is an older model that has been superseded by a newer one
|
||||
*/
|
||||
const orModelMap: { [id: string]: { name: string; cw: number; cp: number; cc: number; old?: boolean; unfilt?: boolean; } } = {
|
||||
const orModelMap: { [id: string]: { name: string; cw: number; cp?: number; cc?: number; old?: boolean; unfilt?: boolean; } } = {
|
||||
// 'openrouter/auto': { name: 'Auto (best for prompt)', cw: 128000, cp: undefined, cc: undefined, unfilt: undefined },
|
||||
'mistralai/mistral-7b-instruct': { name: 'Mistral 7B Instruct (beta)', cw: 8192, cp: 0, cc: 0, unfilt: true },
|
||||
'huggingfaceh4/zephyr-7b-beta': { name: 'Hugging Face: Zephyr 7B (beta)', cw: 4096, cp: 0, cc: 0, unfilt: true },
|
||||
'mistralai/mistral-7b-instruct': { name: 'Mistral 7B Instruct v0.1 (beta)', cw: 8192, cp: 0, cc: 0, unfilt: true },
|
||||
'openai/gpt-3.5-turbo': { name: 'OpenAI: GPT-3.5 Turbo', cw: 4095, cp: 0.0015, cc: 0.002 },
|
||||
'openai/gpt-3.5-turbo-1106': { name: 'OpenAI: GPT-3.5 Turbo 16k (preview)', cw: 16385, cp: 0.001, cc: 0.002 },
|
||||
'openai/gpt-3.5-turbo-16k': { name: 'OpenAI: GPT-3.5 Turbo 16k', cw: 16383, cp: 0.003, cc: 0.004 },
|
||||
'openai/gpt-4-1106-preview': { name: 'OpenAI: GPT-4 Turbo (preview)', cw: 128000, cp: 0.01, cc: 0.03 },
|
||||
'openai/gpt-4': { name: 'OpenAI: GPT-4', cw: 8191, cp: 0.03, cc: 0.06 },
|
||||
'openai/gpt-4-32k': { name: 'OpenAI: GPT-4 32k', cw: 32767, cp: 0.06, cc: 0.12 },
|
||||
'openai/gpt-3.5-turbo-instruct': { name: 'OpenAI: GPT-3.5 Turbo Instruct', cw: 4095, cp: 0.0015, cc: 0.002 },
|
||||
'openchat/openchat-7b': { name: 'OpenChat 7B (beta)', cw: 8192, cp: 0, cc: 0, unfilt: true },
|
||||
'undi95/toppy-m-7b': { name: 'Toppy M 7B (beta)', cw: 32768, cp: 0, cc: 0, unfilt: true },
|
||||
'gryphe/mythomist-7b': { name: 'MythoMist 7B (beta)', cw: 32768, cp: 0, cc: 0, unfilt: true },
|
||||
'nousresearch/nous-hermes-llama2-13b': { name: 'Nous: Hermes 13B (beta)', cw: 4096, cp: 0.000155, cc: 0.000155, unfilt: true },
|
||||
'meta-llama/codellama-34b-instruct': { name: 'Meta: CodeLlama 34B Instruct (beta)', cw: 8192, cp: 0.00045, cc: 0.00045, unfilt: true },
|
||||
'phind/phind-codellama-34b': { name: 'Phind: CodeLlama 34B v2 (beta)', cw: 4096, cp: 0.00045, cc: 0.00045, unfilt: true },
|
||||
'intel/neural-chat-7b': { name: 'Neural Chat 7B v3.1 (beta)', cw: 32768, cp: 0.005, cc: 0.005, unfilt: true },
|
||||
'haotian-liu/llava-13b': { name: 'Llava 13B (beta)', cw: 2048, cp: 0.005, cc: 0.005, unfilt: true },
|
||||
'meta-llama/llama-2-13b-chat': { name: 'Meta: Llama v2 13B Chat (beta)', cw: 4096, cp: 0.000234533, cc: 0.000234533, unfilt: true },
|
||||
'alpindale/goliath-120b': { name: 'Goliath 120B (beta)', cw: 6144, cp: 0.00703125, cc: 0.00703125, unfilt: true },
|
||||
'lizpreciatior/lzlv-70b-fp16-hf': { name: 'lzlv 70B (beta)', cw: 4096, cp: 0.000562, cc: 0.000762, unfilt: true },
|
||||
'openai/gpt-3.5-turbo': { name: 'OpenAI: GPT-3.5 Turbo', cw: 4095, cp: 0.001, cc: 0.002, unfilt: false },
|
||||
'openai/gpt-3.5-turbo-1106': { name: 'OpenAI: GPT-3.5 Turbo 16k (preview)', cw: 16385, cp: 0.001, cc: 0.002, unfilt: false },
|
||||
'openai/gpt-3.5-turbo-16k': { name: 'OpenAI: GPT-3.5 Turbo 16k', cw: 16385, cp: 0.003, cc: 0.004, unfilt: false },
|
||||
'openai/gpt-4-1106-preview': { name: 'OpenAI: GPT-4 Turbo (preview)', cw: 128000, cp: 0.01, cc: 0.03, unfilt: false },
|
||||
'openai/gpt-4': { name: 'OpenAI: GPT-4', cw: 8191, cp: 0.03, cc: 0.06, unfilt: false },
|
||||
'openai/gpt-4-32k': { name: 'OpenAI: GPT-4 32k', cw: 32767, cp: 0.06, cc: 0.12, unfilt: false },
|
||||
'openai/gpt-4-vision-preview': { name: 'OpenAI: GPT-4 Vision (preview)', cw: 128000, cp: 0.01, cc: 0.03, unfilt: false },
|
||||
'openai/gpt-3.5-turbo-instruct': { name: 'OpenAI: GPT-3.5 Turbo Instruct', cw: 4095, cp: 0.0015, cc: 0.002, unfilt: false },
|
||||
'google/palm-2-chat-bison': { name: 'Google: PaLM 2 Chat', cw: 9216, cp: 0.0005, cc: 0.0005, unfilt: true },
|
||||
'google/palm-2-codechat-bison': { name: 'Google: PaLM 2 Code Chat', cw: 7168, cp: 0.0005, cc: 0.0005, unfilt: true },
|
||||
'google/palm-2-chat-bison-32k': { name: 'Google: PaLM 2 Chat 32k', cw: 32000, cp: 0.0005, cc: 0.0005, unfilt: true },
|
||||
'google/palm-2-codechat-bison-32k': { name: 'Google: PaLM 2 Code Chat 32k', cw: 32000, cp: 0.0005, cc: 0.0005, unfilt: true },
|
||||
'meta-llama/llama-2-13b-chat': { name: 'Meta: Llama v2 13B Chat (beta)', cw: 4096, cp: 0.0002345, cc: 0.0002345, unfilt: true },
|
||||
'meta-llama/llama-2-70b-chat': { name: 'Meta: Llama v2 70B Chat (beta)', cw: 4096, cp: 0.0007, cc: 0.00095, unfilt: true },
|
||||
'nousresearch/nous-hermes-llama2-13b': { name: 'Nous: Hermes Llama2 13B (beta)', cw: 4096, cp: 0.0002, cc: 0.0002, unfilt: true },
|
||||
'nousresearch/nous-hermes-llama2-70b': { name: 'Nous: Hermes Llama2 70B (beta)', cw: 4096, cp: 0.001, cc: 0.001, unfilt: true },
|
||||
'meta-llama/codellama-34b-instruct': { name: 'Meta: CodeLlama 34B Instruct (beta)', cw: 8192, cp: 0.0004, cc: 0.0004, unfilt: true },
|
||||
'phind/phind-codellama-34b': { name: 'Phind: CodeLlama 34B v2 (beta)', cw: 4096, cp: 0.0004, cc: 0.0004, unfilt: true },
|
||||
'jondurbin/airoboros-l2-70b': { name: 'Airoboros L2 70B (beta)', cw: 4096, cp: 0.0007, cc: 0.00095, unfilt: true },
|
||||
'nousresearch/nous-hermes-llama2-70b': { name: 'Nous: Hermes 70B (beta)', cw: 4096, cp: 0.0009, cc: 0.0009, unfilt: true },
|
||||
'nousresearch/nous-capybara-34b': { name: 'Nous: Capybara 34B (beta)', cw: 32000, cp: 0.02, cc: 0.02, unfilt: true },
|
||||
'jondurbin/airoboros-l2-70b': { name: 'Airoboros 70B (beta)', cw: 4096, cp: 0.0007, cc: 0.00095, unfilt: true },
|
||||
'migtissera/synthia-70b': { name: 'Synthia 70B (beta)', cw: 8192, cp: 0.009375, cc: 0.009375, unfilt: true },
|
||||
'open-orca/mistral-7b-openorca': { name: 'Mistral OpenOrca 7B (beta)', cw: 8192, cp: 0.0002, cc: 0.0002, unfilt: true },
|
||||
'teknium/openhermes-2-mistral-7b': { name: 'Mistral OpenHermes 7B (beta)', cw: 4096, cp: 0.0002, cc: 0.0002, unfilt: true },
|
||||
'teknium/openhermes-2-mistral-7b': { name: 'OpenHermes 2 Mistral 7B (beta)', cw: 4096, cp: 0.0002, cc: 0.0002, unfilt: true },
|
||||
'teknium/openhermes-2.5-mistral-7b': { name: 'OpenHermes 2.5 Mistral 7B (beta)', cw: 4096, cp: 0.0002, cc: 0.0002, unfilt: true },
|
||||
'pygmalionai/mythalion-13b': { name: 'Pygmalion: Mythalion 13B (beta)', cw: 8192, cp: 0.001125, cc: 0.001125, unfilt: true },
|
||||
'undi95/remm-slerp-l2-13b': { name: 'ReMM SLERP L2 13B (beta)', cw: 6144, cp: 0.001125, cc: 0.001125, unfilt: true },
|
||||
'gryphe/mythomax-l2-13b': { name: 'MythoMax L2 13B', cw: 4096, cp: 0.0008, cc: 0.0008, unfilt: true },
|
||||
'undi95/remm-slerp-l2-13b': { name: 'ReMM SLERP 13B (beta)', cw: 6144, cp: 0.001125, cc: 0.001125, unfilt: true },
|
||||
'xwin-lm/xwin-lm-70b': { name: 'Xwin 70B (beta)', cw: 8192, cp: 0.009375, cc: 0.009375, unfilt: true },
|
||||
'gryphe/mythomax-l2-13b-8k': { name: 'MythoMax L2 13B 8k (beta)', cw: 8192, cp: 0.001125, cc: 0.001125, unfilt: true },
|
||||
'anthropic/claude-2': { name: 'Anthropic: Claude v2', cw: 100000, cp: 0.01102, cc: 0.03268 },
|
||||
'anthropic/claude-instant-v1': { name: 'Anthropic: Claude Instant v1', cw: 100000, cp: 0.00163, cc: 0.00551 },
|
||||
'mancer/weaver': { name: 'Mancer: Weaver 12k (alpha)', cw: 8000, cp: 0.0045, cc: 0.0045, unfilt: true },
|
||||
'openai/gpt-3.5-turbo-0301': { name: 'OpenAI: GPT-3.5 Turbo (older v0301)', cw: 4095, cp: 0.0015, cc: 0.002, old: true },
|
||||
'gryphe/mythomax-l2-13b-8k': { name: 'MythoMax 13B 8k (beta)', cw: 8192, cp: 0.001125, cc: 0.001125, unfilt: true },
|
||||
'neversleep/noromaid-20b': { name: 'Noromaid 20B (beta)', cw: 8192, cp: 0.00225, cc: 0.00225, unfilt: true },
|
||||
'anthropic/claude-2': { name: 'Anthropic: Claude v2.1', cw: 200000, cp: 0.008, cc: 0.024, unfilt: false },
|
||||
'anthropic/claude-2.0': { name: 'Anthropic: Claude v2.0', cw: 100000, cp: 0.008, cc: 0.024, unfilt: false },
|
||||
'anthropic/claude-instant-v1': { name: 'Anthropic: Claude Instant v1', cw: 100000, cp: 0.00163, cc: 0.00551, unfilt: false },
|
||||
'mancer/weaver': { name: 'Mancer: Weaver (alpha)', cw: 8000, cp: 0.0045, cc: 0.0045, unfilt: true },
|
||||
'gryphe/mythomax-l2-13b': { name: 'MythoMax 13B', cw: 4096, cp: 0.0006, cc: 0.0006, unfilt: true },
|
||||
'openai/gpt-3.5-turbo-0301': { name: 'OpenAI: GPT-3.5 Turbo (older v0301)', cw: 4095, cp: 0.001, cc: 0.002, old: true },
|
||||
'openai/gpt-4-0314': { name: 'OpenAI: GPT-4 (older v0314)', cw: 8191, cp: 0.03, cc: 0.06, old: true },
|
||||
'openai/gpt-4-32k-0314': { name: 'OpenAI: GPT-4 32k (older v0314)', cw: 32767, cp: 0.06, cc: 0.12, old: true },
|
||||
'openai/text-davinci-002': { name: 'OpenAI: Davinci 2', cw: 4095, cp: 0.02, cc: 0.02, old: true },
|
||||
'anthropic/claude-v1': { name: 'Anthropic: Claude v1', cw: 9000, cp: 0.01102, cc: 0.03268, old: true },
|
||||
'anthropic/claude-1.2': { name: 'Anthropic: Claude (older v1)', cw: 9000, cp: 0.01102, cc: 0.03268, old: true },
|
||||
'anthropic/claude-v1': { name: 'Anthropic: Claude v1', cw: 9000, cp: 0.008, cc: 0.024, old: true },
|
||||
'anthropic/claude-1.2': { name: 'Anthropic: Claude (older v1)', cw: 9000, cp: 0.008, cc: 0.024, old: true },
|
||||
'anthropic/claude-instant-v1-100k': { name: 'Anthropic: Claude Instant 100k v1', cw: 100000, cp: 0.00163, cc: 0.00551, old: true },
|
||||
'anthropic/claude-v1-100k': { name: 'Anthropic: Claude 100k v1', cw: 100000, cp: 0.01102, cc: 0.03268, old: true },
|
||||
'anthropic/claude-v1-100k': { name: 'Anthropic: Claude 100k v1', cw: 100000, cp: 0.008, cc: 0.024, old: true },
|
||||
'anthropic/claude-instant-1.0': { name: 'Anthropic: Claude Instant (older v1)', cw: 9000, cp: 0.00163, cc: 0.00551, old: true },
|
||||
};
|
||||
|
||||
const orModelFamilyOrder = ['mistralai/', 'huggingfaceh4/', 'openai/', 'anthropic/', 'google/', 'meta-llama/', 'phind/'];
|
||||
const orModelFamilyOrder = ['mistralai/', 'huggingfaceh4/', 'undi95/', 'openchat/', 'anthropic/', 'google/', 'openai/', 'meta-llama/', 'phind/', 'openrouter/'];
|
||||
|
||||
export function openRouterModelFamilySortFn(a: { id: string }, b: { id: string }): number {
|
||||
const aPrefixIndex = orModelFamilyOrder.findIndex(prefix => a.id.startsWith(prefix));
|
||||
@@ -348,6 +366,6 @@ function fromManualMapping(mappings: ManualMappings, id: string, created?: numbe
|
||||
contextWindow: known.contextWindow,
|
||||
...(!!known.maxCompletionTokens && { maxCompletionTokens: known.maxCompletionTokens }),
|
||||
interfaces: known.interfaces,
|
||||
...(!!known.hidden && { hidden: known.hidden })
|
||||
...(!!known.hidden && { hidden: known.hidden }),
|
||||
};
|
||||
}
|
||||
@@ -28,7 +28,7 @@ export type OpenAIAccessSchema = z.infer<typeof openAIAccessSchema>;
|
||||
|
||||
export const openAIModelSchema = z.object({
|
||||
id: z.string(),
|
||||
temperature: z.number().min(0).max(1).optional(),
|
||||
temperature: z.number().min(0).max(2).optional(),
|
||||
maxTokens: z.number().min(1).max(1000000),
|
||||
});
|
||||
export type OpenAIModelSchema = z.infer<typeof openAIModelSchema>;
|
||||
|
||||
@@ -6,10 +6,10 @@ import { createEmptyReadableStream, debugGenerateCurlCommand, safeErrorString, S
|
||||
|
||||
import type { AnthropicWire } from '../anthropic/anthropic.wiretypes';
|
||||
import type { OpenAIWire } from './openai.wiretypes';
|
||||
import { OLLAMA_PATH_CHAT, ollamaAccess, ollamaAccessSchema, ollamaChatCompletionPayload } from '../ollama/ollama.router';
|
||||
import { anthropicAccess, anthropicAccessSchema, anthropicChatCompletionPayload } from '../anthropic/anthropic.router';
|
||||
import { ollamaAccess, ollamaAccessSchema, ollamaChatCompletionPayload } from '../ollama/ollama.router';
|
||||
import { openAIAccess, openAIAccessSchema, openAIChatCompletionPayload, openAIHistorySchema, openAIModelSchema } from './openai.router';
|
||||
import { wireOllamaGenerationSchema } from '../ollama/ollama.wiretypes';
|
||||
import { wireOllamaChunkedOutputSchema } from '../ollama/ollama.wiretypes';
|
||||
|
||||
|
||||
/**
|
||||
@@ -59,10 +59,10 @@ export async function openaiStreamingRelayHandler(req: NextRequest): Promise<Res
|
||||
break;
|
||||
|
||||
case 'ollama':
|
||||
headersUrl = ollamaAccess(access, '/api/generate');
|
||||
headersUrl = ollamaAccess(access, OLLAMA_PATH_CHAT);
|
||||
body = ollamaChatCompletionPayload(model, history, true);
|
||||
eventStreamFormat = 'json-nl';
|
||||
vendorStreamParser = createOllamaStreamParser();
|
||||
vendorStreamParser = createOllamaChatCompletionStreamParser();
|
||||
break;
|
||||
|
||||
case 'azure':
|
||||
@@ -135,30 +135,35 @@ function createAnthropicStreamParser(): AIStreamParser {
|
||||
};
|
||||
}
|
||||
|
||||
function createOllamaStreamParser(): AIStreamParser {
|
||||
function createOllamaChatCompletionStreamParser(): AIStreamParser {
|
||||
let hasBegun = false;
|
||||
|
||||
return (data: string) => {
|
||||
|
||||
let wireGeneration: any;
|
||||
// parse the JSON chunk
|
||||
let wireJsonChunk: any;
|
||||
try {
|
||||
wireGeneration = JSON.parse(data);
|
||||
wireJsonChunk = JSON.parse(data);
|
||||
} catch (error: any) {
|
||||
// log the malformed data to the console, and rethrow to transmit as 'error'
|
||||
console.log(`/api/llms/stream: Ollama parsing issue: ${error?.message || error}`, data);
|
||||
throw error;
|
||||
}
|
||||
const generation = wireOllamaGenerationSchema.parse(wireGeneration);
|
||||
let text = generation.response;
|
||||
|
||||
// validate chunk
|
||||
const chunk = wireOllamaChunkedOutputSchema.parse(wireJsonChunk);
|
||||
|
||||
// process output
|
||||
let text = chunk.message?.content || /*chunk.response ||*/ '';
|
||||
|
||||
// hack: prepend the model name to the first packet
|
||||
if (!hasBegun) {
|
||||
if (!hasBegun && chunk.model) {
|
||||
hasBegun = true;
|
||||
const firstPacket: ChatStreamFirstPacketSchema = { model: generation.model };
|
||||
const firstPacket: ChatStreamFirstPacketSchema = { model: chunk.model };
|
||||
text = JSON.stringify(firstPacket) + text;
|
||||
}
|
||||
|
||||
return { text, close: generation.done };
|
||||
return { text, close: chunk.done };
|
||||
};
|
||||
}
|
||||
|
||||
@@ -248,7 +253,8 @@ function createEventStreamTransformer(vendorTextParser: AIStreamParser, inputFor
|
||||
if (close)
|
||||
controller.terminate();
|
||||
} catch (error: any) {
|
||||
// console.log(`/api/llms/stream: parse issue: ${error?.message || error}`);
|
||||
if (SERVER_DEBUG_WIRE)
|
||||
console.log(' - E: parse issue:', event.data, error?.message || error);
|
||||
controller.enqueue(textEncoder.encode(`[Stream Issue] ${dialectLabel}: ${safeErrorString(error) || 'Unknown stream parsing error'}`));
|
||||
controller.terminate();
|
||||
}
|
||||
|
||||
+61
-20
@@ -1,24 +1,29 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, FormControl, Input, Option, Select, Stack, Typography } from '@mui/joy';
|
||||
import { Box, Button, Chip, FormControl, IconButton, Input, Option, Select, Stack, Typography } from '@mui/joy';
|
||||
import LaunchIcon from '@mui/icons-material/Launch';
|
||||
import FormatListNumberedRtlIcon from '@mui/icons-material/FormatListNumberedRtl';
|
||||
|
||||
import { FormLabelStart } from '~/common/components/forms/FormLabelStart';
|
||||
import { GoodModal } from '~/common/components/GoodModal';
|
||||
import { GoodTooltip } from '~/common/components/GoodTooltip';
|
||||
import { InlineError } from '~/common/components/InlineError';
|
||||
import { Link } from '~/common/components/Link';
|
||||
import { apiQuery } from '~/common/util/trpc.client';
|
||||
import { settingsGap } from '~/common/app.theme';
|
||||
|
||||
import type { OllamaAccessSchema } from '../../transports/server/ollama/ollama.router';
|
||||
import { InlineError } from '~/common/components/InlineError';
|
||||
|
||||
|
||||
export function OllamaAdmin(props: { access: OllamaAccessSchema, onClose: () => void }) {
|
||||
export function OllamaAdministration(props: { access: OllamaAccessSchema, onClose: () => void }) {
|
||||
|
||||
// state
|
||||
const [sortByPulls, setSortByPulls] = React.useState<boolean>(false);
|
||||
const [modelName, setModelName] = React.useState<string | null>('llama2');
|
||||
const [modelTag, setModelTag] = React.useState<string>('');
|
||||
|
||||
// external state
|
||||
const { data: pullable } = apiQuery.llmOllama.adminListPullable.useQuery({ access: props.access }, {
|
||||
const { data: pullableData } = apiQuery.llmOllama.adminListPullable.useQuery({ access: props.access }, {
|
||||
staleTime: 1000 * 60,
|
||||
refetchOnWindowFocus: false,
|
||||
});
|
||||
@@ -26,7 +31,11 @@ export function OllamaAdmin(props: { access: OllamaAccessSchema, onClose: () =>
|
||||
const { isLoading: isDeleting, status: deleteStatus, error: deleteError, mutate: deleteMutate, reset: deleteReset } = apiQuery.llmOllama.adminDelete.useMutation();
|
||||
|
||||
// derived state
|
||||
const pullModelDescription = pullable?.pullable.find(p => p.id === modelName)?.description ?? null;
|
||||
let pullable = pullableData?.pullable || [];
|
||||
if (sortByPulls)
|
||||
pullable = pullable.toSorted((a, b) => b.pulls - a.pulls);
|
||||
const pullModelDescription = pullable.find(p => p.id === modelName)?.description ?? null;
|
||||
|
||||
|
||||
const handleModelPull = () => {
|
||||
deleteReset();
|
||||
@@ -38,6 +47,7 @@ export function OllamaAdmin(props: { access: OllamaAccessSchema, onClose: () =>
|
||||
modelName && deleteMutate({ access: props.access, name: modelName + (modelTag ? ':' + modelTag : '') });
|
||||
};
|
||||
|
||||
|
||||
return (
|
||||
<GoodModal title='Ollama Administration' dividers open onClose={props.onClose}>
|
||||
|
||||
@@ -47,23 +57,48 @@ export function OllamaAdmin(props: { access: OllamaAccessSchema, onClose: () =>
|
||||
However we provide a way to pull models from the Ollama host, for convenience.
|
||||
</Typography>
|
||||
|
||||
<Box sx={{ display: 'flex', gap: 1 }}>
|
||||
<FormControl sx={{ flexGrow: 1 }}>
|
||||
<Box sx={{ display: 'flex', flexFlow: 'row wrap', gap: 1 }}>
|
||||
<FormControl sx={{ flexGrow: 1, flexBasis: 0.55 }}>
|
||||
<FormLabelStart title='Name' />
|
||||
<Select value={modelName || ''} onChange={(_event: any, value: string | null) => setModelName(value)}>
|
||||
{pullable?.pullable.map(p =>
|
||||
<Option key={p.id} value={p.id}>{p.id}</Option>,
|
||||
)}
|
||||
</Select>
|
||||
<Box sx={{ display: 'flex', gap: 1 }}>
|
||||
<Select
|
||||
value={modelName || ''}
|
||||
onChange={(_event: any, value: string | null) => setModelName(value)}
|
||||
sx={{ flexGrow: 1 }}
|
||||
>
|
||||
{pullable.map(p =>
|
||||
<Option key={p.id} value={p.id}>
|
||||
{p.isNew === true && <Chip size='sm' variant='outlined'>NEW</Chip>} {p.label}{sortByPulls && ` (${p.pulls.toLocaleString()})`}
|
||||
</Option>,
|
||||
)}
|
||||
</Select>
|
||||
<GoodTooltip title='Sort by Downloads'>
|
||||
<IconButton
|
||||
variant={sortByPulls ? 'solid' : 'outlined'}
|
||||
onClick={() => setSortByPulls(!sortByPulls)}
|
||||
>
|
||||
<FormatListNumberedRtlIcon />
|
||||
</IconButton>
|
||||
</GoodTooltip>
|
||||
</Box>
|
||||
</FormControl>
|
||||
<FormControl sx={{ flexGrow: 1 }}>
|
||||
<FormControl sx={{ flexGrow: 1, flexBasis: 0.45 }}>
|
||||
<FormLabelStart title='Tag' />
|
||||
<Input
|
||||
variant='outlined' placeholder='latest'
|
||||
value={modelTag || ''} onChange={event => setModelTag(event.target.value)}
|
||||
sx={{ minWidth: 100 }}
|
||||
slotProps={{ input: { size: 10 } }} // halve the min width
|
||||
/>
|
||||
<Box sx={{ display: 'flex', gap: 1 }}>
|
||||
<Input
|
||||
variant='outlined' placeholder='latest'
|
||||
value={modelTag || ''} onChange={event => setModelTag(event.target.value)}
|
||||
sx={{ minWidth: 80, flexGrow: 1 }}
|
||||
slotProps={{ input: { size: 10 } }} // halve the min width
|
||||
/>
|
||||
{!!modelName && (
|
||||
<IconButton
|
||||
component={Link} href={`https://ollama.ai/library/${modelName}`} target='_blank'
|
||||
>
|
||||
<LaunchIcon />
|
||||
</IconButton>
|
||||
)}
|
||||
</Box>
|
||||
</FormControl>
|
||||
</Box>
|
||||
|
||||
@@ -83,7 +118,7 @@ export function OllamaAdmin(props: { access: OllamaAccessSchema, onClose: () =>
|
||||
{pullModelDescription}
|
||||
</Typography>
|
||||
|
||||
<Box sx={{ display: 'flex', gap: 1 }}>
|
||||
<Box sx={{ display: 'flex', flexWrap: 1, gap: 1 }}>
|
||||
<Button
|
||||
variant='outlined'
|
||||
color={deleteStatus === 'error' ? 'danger' : deleteStatus === 'success' ? 'success' : 'primary'}
|
||||
@@ -104,6 +139,12 @@ export function OllamaAdmin(props: { access: OllamaAccessSchema, onClose: () =>
|
||||
|
||||
</Box>
|
||||
|
||||
{/* Warnings */}
|
||||
{isPulling && <Typography color='warning' level='body-sm'>
|
||||
Pulling maybe slow and TIME OUT as the operation will download many GBs from the internet. In case of a
|
||||
timeout, the server is still downloading the model. Check back again later and the model should be available.
|
||||
</Typography>}
|
||||
|
||||
</Stack>
|
||||
|
||||
</GoodModal>
|
||||
+2
-2
@@ -11,7 +11,7 @@ import { asValidURL } from '~/common/util/urlUtils';
|
||||
|
||||
import { DModelSourceId, useModelsStore, useSourceSetup } from '../../store-llms';
|
||||
import { ModelVendorOllama } from './ollama.vendor';
|
||||
import { OllamaAdmin } from './OllamaAdmin';
|
||||
import { OllamaAdministration } from './OllamaAdministration';
|
||||
import { modelDescriptionToDLLM } from '../openai/OpenAISourceSetup';
|
||||
|
||||
|
||||
@@ -63,7 +63,7 @@ export function OllamaSourceSetup(props: { sourceId: DModelSourceId }) {
|
||||
|
||||
{isError && <InlineError error={error} />}
|
||||
|
||||
{adminOpen && <OllamaAdmin access={access} onClose={() => setAdminOpen(false)} />}
|
||||
{adminOpen && <OllamaAdministration access={access} onClose={() => setAdminOpen(false)} />}
|
||||
|
||||
</>;
|
||||
}
|
||||
+28
-2
@@ -1,5 +1,8 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { IconButton, Tooltip } from '@mui/joy';
|
||||
import LocalFireDepartmentIcon from '@mui/icons-material/LocalFireDepartment';
|
||||
|
||||
import { FormSliderControl } from '~/common/components/forms/FormSliderControl';
|
||||
|
||||
import { DLLM, useModelsStore } from '../../store-llms';
|
||||
@@ -18,18 +21,41 @@ function normalizeOpenAIOptions(partialOptions?: Partial<LLMOptionsOpenAI>) {
|
||||
|
||||
export function OpenAILLMOptions(props: { llm: DLLM<unknown, LLMOptionsOpenAI> }) {
|
||||
|
||||
// derived state
|
||||
const { id: llmId, maxOutputTokens, options } = props.llm;
|
||||
const { llmResponseTokens, llmTemperature } = normalizeOpenAIOptions(options);
|
||||
|
||||
// state (here because the initial state depends on props)
|
||||
const [overheat, setOverheat] = React.useState(llmTemperature > 1);
|
||||
|
||||
const showOverheatButton = overheat || llmTemperature >= 1;
|
||||
|
||||
const handleOverheatToggle = React.useCallback(() => {
|
||||
if (overheat && llmTemperature > 1)
|
||||
useModelsStore.getState().updateLLMOptions(llmId, { llmTemperature: 1 });
|
||||
setOverheat(!overheat);
|
||||
}, [llmId, llmTemperature, overheat]);
|
||||
|
||||
|
||||
return <>
|
||||
|
||||
<FormSliderControl
|
||||
title='Temperature' ariaLabel='Model Temperature'
|
||||
description={llmTemperature < 0.33 ? 'More strict' : llmTemperature > 0.67 ? 'Larger freedom' : 'Creativity'}
|
||||
min={0} max={1} step={0.1} defaultValue={0.5}
|
||||
description={llmTemperature < 0.33 ? 'More strict' : llmTemperature > 1 ? 'Extra hot ♨️' : llmTemperature > 0.67 ? 'Larger freedom' : 'Creativity'}
|
||||
min={0} max={overheat ? 2 : 1} step={0.1} defaultValue={0.5}
|
||||
valueLabelDisplay='on'
|
||||
value={llmTemperature}
|
||||
onChange={value => useModelsStore.getState().updateLLMOptions(llmId, { llmTemperature: value })}
|
||||
endAdornment={showOverheatButton &&
|
||||
<Tooltip title={overheat ? 'Disable LLM Overheating' : 'Increase Max LLM Temperature to 2'} sx={{ p: 1 }}>
|
||||
<IconButton
|
||||
variant={overheat ? 'soft' : 'plain'} color={overheat ? 'danger' : 'neutral'}
|
||||
onClick={handleOverheatToggle} sx={{ ml: 2 }}
|
||||
>
|
||||
<LocalFireDepartmentIcon />
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
}
|
||||
/>
|
||||
|
||||
<FormSliderControl
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
|
||||
import { Chip, CircularProgress, FormControl, Input, Option, Radio, RadioGroup, Select, Slider, Switch } from '@mui/joy';
|
||||
import { Chip, CircularProgress, FormControl, Input, Option, Select, Slider, Switch } from '@mui/joy';
|
||||
import CropSquareIcon from '@mui/icons-material/CropSquare';
|
||||
import FormatPaintIcon from '@mui/icons-material/FormatPaint';
|
||||
import KeyboardArrowDownIcon from '@mui/icons-material/KeyboardArrowDown';
|
||||
@@ -12,6 +12,7 @@ import { backendCaps } from '~/modules/backend/state-backend';
|
||||
|
||||
import { FormInputKey } from '~/common/components/forms/FormInputKey';
|
||||
import { FormLabelStart } from '~/common/components/forms/FormLabelStart';
|
||||
import { FormRadioControl } from '~/common/components/forms/FormRadioControl';
|
||||
import { InlineError } from '~/common/components/InlineError';
|
||||
import { apiQuery } from '~/common/util/trpc.client';
|
||||
import { useToggleableBoolean } from '~/common/util/useToggleableBoolean';
|
||||
@@ -163,14 +164,16 @@ export function ProdiaSettings() {
|
||||
</Select>
|
||||
</FormControl>}
|
||||
|
||||
{advanced.on && !selectedIsXL && <FormControl orientation='horizontal' sx={{ justifyContent: 'space-between' }}>
|
||||
<FormLabelStart title='[SD] Aspect Ratio' description={prodiaAspectRatio === 'square' ? 'Square' : prodiaAspectRatio === 'portrait' ? 'Portrait' : 'Landscape'} />
|
||||
<RadioGroup orientation='horizontal' value={prodiaAspectRatio} onChange={(e) => setProdiaAspectRatio(e.target.value as 'square' | 'portrait' | 'landscape')}>
|
||||
<Radio value='square' label={<CropSquareIcon sx={{ width: 25, height: 24, mt: -0.25 }} />} />
|
||||
<Radio value='portrait' label={<StayPrimaryPortraitIcon sx={{ width: 25, height: 24, mt: -0.25 }} />} />
|
||||
<Radio value='landscape' label={<StayPrimaryLandscapeIcon sx={{ width: 25, height: 24, mt: -0.25 }} />} />
|
||||
</RadioGroup>
|
||||
</FormControl>}
|
||||
{advanced.on && !selectedIsXL && <FormRadioControl
|
||||
title='[SD] Aspect Ratio'
|
||||
description={prodiaAspectRatio === 'square' ? 'Square' : prodiaAspectRatio === 'portrait' ? 'Portrait' : 'Landscape'}
|
||||
options={[
|
||||
{ value: 'square', label: <CropSquareIcon sx={{ width: 25, height: 24, mt: -0.25 }} /> },
|
||||
{ value: 'portrait', label: <StayPrimaryPortraitIcon sx={{ width: 25, height: 24, mt: -0.25 }} /> },
|
||||
{ value: 'landscape', label: <StayPrimaryLandscapeIcon sx={{ width: 25, height: 24, mt: -0.25 }} /> },
|
||||
]}
|
||||
value={prodiaAspectRatio} onChange={setProdiaAspectRatio}
|
||||
/>}
|
||||
|
||||
{advanced.on && !selectedIsXL && <FormControl orientation='horizontal' sx={{ justifyContent: 'space-between' }}>
|
||||
<FormLabelStart title='[SD] Upscale'
|
||||
|
||||
@@ -5,11 +5,12 @@ import { Box, Button, FormControl, Input, Sheet, Textarea, Typography } from '@m
|
||||
import FileUploadIcon from '@mui/icons-material/FileUpload';
|
||||
|
||||
import { Brand } from '~/common/app.config';
|
||||
import { FormRadioOption, useFormRadio } from '~/common/components/forms/useFormRadio';
|
||||
import { FormRadioOption } from '~/common/components/forms/FormRadioControl';
|
||||
import { InlineError } from '~/common/components/InlineError';
|
||||
import { OpenAIIcon } from '~/common/components/icons/OpenAIIcon';
|
||||
import { apiAsyncNode } from '~/common/util/trpc.client';
|
||||
import { createDConversation, createDMessage, DConversationId, DMessage, useChatStore } from '~/common/state/store-chats';
|
||||
import { useFormRadio } from '~/common/components/forms/useFormRadio';
|
||||
|
||||
import type { ChatGptSharedChatSchema } from './server/chatgpt';
|
||||
import { loadAllConversationsFromJson } from './trade.client';
|
||||
|
||||
@@ -19,10 +19,13 @@ import { ZodError } from 'zod';
|
||||
* These allow you to access things when processing a request, like the database, the session, etc.
|
||||
*/
|
||||
|
||||
export const createTRPCFetchContext = ({ /*req, resHeaders*/ }: { req: Request; resHeaders: Headers; }) => {
|
||||
export const createTRPCFetchContext = ({ req /*, resHeaders*/ }: { req: Request; resHeaders: Headers; }) => {
|
||||
// const user = { name: req.headers.get('username') ?? 'anonymous' };
|
||||
// return { req, resHeaders };
|
||||
return {};
|
||||
return {
|
||||
// only used by Backend Analytics
|
||||
hostName: req.headers?.get('host') ?? 'localhost',
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
|
||||
+9
-2
@@ -4,7 +4,7 @@ import { z } from 'zod';
|
||||
export const env = createEnv({
|
||||
server: {
|
||||
|
||||
// Postgres, for optional storage via Prisma
|
||||
// Backend Postgres, for optional storage via Prisma
|
||||
POSTGRES_PRISMA_URL: z.string().url().optional(),
|
||||
POSTGRES_URL_NON_POOLING: z.string().url().optional(),
|
||||
|
||||
@@ -44,7 +44,14 @@ export const env = createEnv({
|
||||
|
||||
// Browsing Service
|
||||
PUPPETEER_WSS_ENDPOINT: z.string().url().optional(),
|
||||
|
||||
|
||||
// Backend: Analytics flags (e.g. which hostname responds) for managed installs
|
||||
BACKEND_ANALYTICS: z.string().optional().transform(list => (list || '').split(';').filter(flag => !!flag)),
|
||||
|
||||
// Backend: HTTP Basic Authentication
|
||||
HTTP_BASIC_AUTH_USERNAME: z.string().optional(),
|
||||
HTTP_BASIC_AUTH_PASSWORD: z.string().optional(),
|
||||
|
||||
},
|
||||
|
||||
onValidationError: error => {
|
||||
|
||||
Reference in New Issue
Block a user