Compare commits

..

1 Commits

Author SHA1 Message Date
Enrico Ros f9d30114c5 Auth: Clerk provider 2024-05-13 12:11:26 -07:00
61 changed files with 600 additions and 898 deletions
+1 -9
View File
@@ -21,15 +21,7 @@ Or fork & run on Vercel
[//]: # (big-AGI is an open book; see the **[ready-to-ship and future ideas](https://github.com/users/enricoros/projects/4/views/2)** in our open roadmap)
### What's New in 1.16.2 · Jun 7, 2024 (minor release)
- Improve web downloads, as text, markdown, or HTML
- Proper support for Gemini models
- Added the latest Mistral model
- Tokenizer support for gpt-4o
- Updates to Beam
### What's New in 1.16.1 · May 13, 2024 (minor release)
### What's New in 1.16.1 · May 13, 2024 (minor release, models support)
- Support for the new OpenAI GPT-4o 2024-05-13 model
+1 -9
View File
@@ -10,15 +10,7 @@ by release.
- milestone: [1.17.0](https://github.com/enricoros/big-agi/milestone/17)
- work in progress: [big-AGI open roadmap](https://github.com/users/enricoros/projects/4/views/2), [help here](https://github.com/users/enricoros/projects/4/views/4)
### What's New in 1.16.2 · Jun 7, 2024 (minor release)
- Improve web downloads, as text, markdown, or HTML
- Proper support for Gemini models
- Added the latest Mistral model
- Tokenizer support for gpt-4o
- Updates to Beam
### What's New in 1.16.1 · May 13, 2024 (minor release)
### What's New in 1.16.1 · May 13, 2024 (minor release, models support)
- Support for the new OpenAI GPT-4o 2024-05-13 model
+7
View File
@@ -0,0 +1,7 @@
import { clerkMiddleware } from '@clerk/nextjs/server';

// Apply Clerk authentication to all matched requests.
export default clerkMiddleware();

export const config = {
  // Run the middleware on:
  //  - all routes except static files (paths ending in a file extension) and Next.js internals (_next)
  //    NOTE: the extension-exclusion requires escaped regex tokens: '.+\\.[\\w]+$'
  //    (unescaped, '.+.[w]+$' would only exclude paths ending in runs of the literal letter 'w')
  //  - the root route '/'
  //  - all API and tRPC routes
  matcher: ['/((?!.+\\.[\\w]+$|_next).*)', '/', '/(api|trpc)(.*)'],
};
+229 -182
View File
@@ -9,6 +9,7 @@
"version": "1.16.0",
"hasInstallScript": true,
"dependencies": {
"@clerk/nextjs": "^5.0.8",
"@emotion/cache": "^11.11.0",
"@emotion/react": "^11.11.4",
"@emotion/server": "^11.11.0",
@@ -29,7 +30,6 @@
"@vercel/analytics": "^1.2.2",
"@vercel/speed-insights": "^1.0.10",
"browser-fs-access": "^0.35.0",
"cheerio": "^1.0.0-rc.12",
"eventsource-parser": "^1.1.2",
"idb-keyval": "^6.2.1",
"next": "~14.1.4",
@@ -52,8 +52,7 @@
"sharp": "^0.33.3",
"superjson": "^2.2.1",
"tesseract.js": "^5.1.0",
"tiktoken": "^1.0.15",
"turndown": "^7.2.0",
"tiktoken": "^1.0.14",
"uuid": "^9.0.1",
"zod": "^3.23.8",
"zustand": "^4.5.2"
@@ -70,7 +69,6 @@
"@types/react-dom": "^18.3.0",
"@types/react-katex": "^3.0.4",
"@types/react-timeago": "^4.1.7",
"@types/turndown": "^5.0.4",
"@types/uuid": "^9.0.8",
"eslint": "^8.57.0",
"eslint-config-next": "^14.2.3",
@@ -223,6 +221,116 @@
"node": ">=6.9.0"
}
},
"node_modules/@clerk/backend": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/@clerk/backend/-/backend-1.1.3.tgz",
"integrity": "sha512-PCGolPkTff7aQjAoS+oDhUldviBYM376klclxJO8SK7eyNkxhwfU95T5McEVpEgz5nITKjKk03Zr7a7v82X6Iw==",
"dependencies": {
"@clerk/shared": "2.0.2",
"cookie": "0.5.0",
"snakecase-keys": "5.4.4",
"tslib": "2.4.1"
},
"engines": {
"node": ">=18.17.0"
}
},
"node_modules/@clerk/backend/node_modules/tslib": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.1.tgz",
"integrity": "sha512-tGyy4dAjRIEwI7BzsB0lynWgOpfqjUdq91XXAlIWD2OwKBH7oCl/GZG/HT4BOHrTlPMOASlMQ7veyTqpmRcrNA=="
},
"node_modules/@clerk/clerk-react": {
"version": "5.0.4",
"resolved": "https://registry.npmjs.org/@clerk/clerk-react/-/clerk-react-5.0.4.tgz",
"integrity": "sha512-sVpzLgz/2Muj7W2M3j1g4l296ld+XlEKIQ32sUhFeIM4XiOpBaO8bFu1c4YcdKaetreI6f6dB/K9lYJLGZTtFw==",
"dependencies": {
"@clerk/shared": "2.0.2",
"@clerk/types": "4.2.1",
"tslib": "2.4.1"
},
"engines": {
"node": ">=18.17.0"
},
"peerDependencies": {
"react": ">=18",
"react-dom": ">=18"
}
},
"node_modules/@clerk/clerk-react/node_modules/tslib": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.1.tgz",
"integrity": "sha512-tGyy4dAjRIEwI7BzsB0lynWgOpfqjUdq91XXAlIWD2OwKBH7oCl/GZG/HT4BOHrTlPMOASlMQ7veyTqpmRcrNA=="
},
"node_modules/@clerk/nextjs": {
"version": "5.0.8",
"resolved": "https://registry.npmjs.org/@clerk/nextjs/-/nextjs-5.0.8.tgz",
"integrity": "sha512-ZwTA+nd0EbeFbsrGYriv8t2oaiANTKQGvvPji9TWTwLR+gP9V5XcVsw4xuZ8w3+MjhnKCrc8BE+oCg5wUCn+Bw==",
"dependencies": {
"@clerk/backend": "1.1.3",
"@clerk/clerk-react": "5.0.4",
"@clerk/shared": "2.0.2",
"crypto-js": "4.2.0",
"path-to-regexp": "6.2.1",
"tslib": "2.4.1"
},
"engines": {
"node": ">=18.17.0"
},
"peerDependencies": {
"next": "^13.5.4 || ^14.0.3",
"react": ">=18",
"react-dom": ">=18"
}
},
"node_modules/@clerk/nextjs/node_modules/tslib": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.1.tgz",
"integrity": "sha512-tGyy4dAjRIEwI7BzsB0lynWgOpfqjUdq91XXAlIWD2OwKBH7oCl/GZG/HT4BOHrTlPMOASlMQ7veyTqpmRcrNA=="
},
"node_modules/@clerk/shared": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@clerk/shared/-/shared-2.0.2.tgz",
"integrity": "sha512-uQXbBJyUV6B4y/cKIgKypVmCdCh7pHeQYbASCKI3RzlsC3FPyopy0hTlAIvXJqjulPYBfRUI1w2FDRJzpEk3sw==",
"hasInstallScript": true,
"dependencies": {
"glob-to-regexp": "0.4.1",
"js-cookie": "3.0.1",
"std-env": "^3.7.0",
"swr": "2.2.0"
},
"engines": {
"node": ">=18.17.0"
},
"peerDependencies": {
"react": ">=18",
"react-dom": ">=18"
},
"peerDependenciesMeta": {
"react": {
"optional": true
},
"react-dom": {
"optional": true
}
}
},
"node_modules/@clerk/types": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/@clerk/types/-/types-4.2.1.tgz",
"integrity": "sha512-tR7Qd1P5Fa4n81ZxKA0tFPJd1OgrL7chU/+moq/LSnK8ZRN/DUzh5K7730sdw5F26CKtT6pzmW+irtgpYLYdDQ==",
"dependencies": {
"csstype": "3.1.1"
},
"engines": {
"node": ">=18.17.0"
}
},
"node_modules/@clerk/types/node_modules/csstype": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.1.tgz",
"integrity": "sha512-DJR/VvkAvSZW9bTouZue2sSxDwdTN92uHjqeKVm+0dAqdfNykRzQ95tay8aXMBAAPpUiq4Qcug2L7neoRh2Egw=="
},
"node_modules/@cloudflare/puppeteer": {
"version": "0.0.5",
"resolved": "https://registry.npmjs.org/@cloudflare/puppeteer/-/puppeteer-0.0.5.tgz",
@@ -1027,11 +1135,6 @@
"node-pre-gyp": "bin/node-pre-gyp"
}
},
"node_modules/@mixmark-io/domino": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/@mixmark-io/domino/-/domino-2.2.0.tgz",
"integrity": "sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw=="
},
"node_modules/@mui/base": {
"version": "5.0.0-beta.42",
"resolved": "https://registry.npmjs.org/@mui/base/-/base-5.0.0-beta.42.tgz",
@@ -2090,12 +2193,6 @@
"@types/react": "*"
}
},
"node_modules/@types/turndown": {
"version": "5.0.4",
"resolved": "https://registry.npmjs.org/@types/turndown/-/turndown-5.0.4.tgz",
"integrity": "sha512-28GI33lCCkU4SGH1GvjDhFgOVr+Tym4PXGBIU1buJUa6xQolniPArtUT+kv42RR2N9MsMLInkr904Aq+ESHBJg==",
"dev": true
},
"node_modules/@types/unist": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz",
@@ -2677,11 +2774,6 @@
"resolved": "https://registry.npmjs.org/bmp-js/-/bmp-js-0.1.0.tgz",
"integrity": "sha512-vHdS19CnY3hwiNdkaqk93DvjVLfbEcI8mys4UjuWrlX1haDmroo8o4xCzh4wD6DGV6HxRCyauwhHRqMTfERtjw=="
},
"node_modules/boolbase": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz",
"integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww=="
},
"node_modules/brace-expansion": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
@@ -2847,42 +2939,6 @@
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/cheerio": {
"version": "1.0.0-rc.12",
"resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz",
"integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==",
"dependencies": {
"cheerio-select": "^2.1.0",
"dom-serializer": "^2.0.0",
"domhandler": "^5.0.3",
"domutils": "^3.0.1",
"htmlparser2": "^8.0.1",
"parse5": "^7.0.0",
"parse5-htmlparser2-tree-adapter": "^7.0.0"
},
"engines": {
"node": ">= 6"
},
"funding": {
"url": "https://github.com/cheeriojs/cheerio?sponsor=1"
}
},
"node_modules/cheerio-select": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz",
"integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==",
"dependencies": {
"boolbase": "^1.0.0",
"css-select": "^5.1.0",
"css-what": "^6.1.0",
"domelementtype": "^2.3.0",
"domhandler": "^5.0.3",
"domutils": "^3.0.1"
},
"funding": {
"url": "https://github.com/sponsors/fb55"
}
},
"node_modules/chownr": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz",
@@ -2985,6 +3041,14 @@
"resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz",
"integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A=="
},
"node_modules/cookie": {
"version": "0.5.0",
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz",
"integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/copy-anything": {
"version": "3.0.5",
"resolved": "https://registry.npmjs.org/copy-anything/-/copy-anything-3.0.5.tgz",
@@ -3033,6 +3097,11 @@
"node": ">= 8"
}
},
"node_modules/crypto-js": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/crypto-js/-/crypto-js-4.2.0.tgz",
"integrity": "sha512-KALDyEYgpY+Rlob/iriUtjV6d5Eq+Y191A5g4UqLAi8CyGP9N1+FdVbkc1SxKc2r4YAYqG8JzO2KGL+AizD70Q=="
},
"node_modules/css-box-model": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/css-box-model/-/css-box-model-1.2.1.tgz",
@@ -3041,32 +3110,6 @@
"tiny-invariant": "^1.0.6"
}
},
"node_modules/css-select": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz",
"integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==",
"dependencies": {
"boolbase": "^1.0.0",
"css-what": "^6.1.0",
"domhandler": "^5.0.2",
"domutils": "^3.0.1",
"nth-check": "^2.0.1"
},
"funding": {
"url": "https://github.com/sponsors/fb55"
}
},
"node_modules/css-what": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz",
"integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==",
"engines": {
"node": ">= 6"
},
"funding": {
"url": "https://github.com/sponsors/fb55"
}
},
"node_modules/csstype": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz",
@@ -3295,55 +3338,13 @@
"csstype": "^3.0.2"
}
},
"node_modules/dom-serializer": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz",
"integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==",
"node_modules/dot-case": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz",
"integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==",
"dependencies": {
"domelementtype": "^2.3.0",
"domhandler": "^5.0.2",
"entities": "^4.2.0"
},
"funding": {
"url": "https://github.com/cheeriojs/dom-serializer?sponsor=1"
}
},
"node_modules/domelementtype": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz",
"integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/fb55"
}
]
},
"node_modules/domhandler": {
"version": "5.0.3",
"resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz",
"integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==",
"dependencies": {
"domelementtype": "^2.3.0"
},
"engines": {
"node": ">= 4"
},
"funding": {
"url": "https://github.com/fb55/domhandler?sponsor=1"
}
},
"node_modules/domutils": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz",
"integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==",
"dependencies": {
"dom-serializer": "^2.0.0",
"domelementtype": "^2.3.0",
"domhandler": "^5.0.3"
},
"funding": {
"url": "https://github.com/fb55/domutils?sponsor=1"
"no-case": "^3.0.4",
"tslib": "^2.0.3"
}
},
"node_modules/duplexer": {
@@ -4394,6 +4395,11 @@
"node": ">=10.13.0"
}
},
"node_modules/glob-to-regexp": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz",
"integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw=="
},
"node_modules/glob/node_modules/brace-expansion": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
@@ -4792,24 +4798,6 @@
"url": "https://opencollective.com/unified"
}
},
"node_modules/htmlparser2": {
"version": "8.0.2",
"resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz",
"integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==",
"funding": [
"https://github.com/fb55/htmlparser2?sponsor=1",
{
"type": "github",
"url": "https://github.com/sponsors/fb55"
}
],
"dependencies": {
"domelementtype": "^2.3.0",
"domhandler": "^5.0.3",
"domutils": "^3.0.1",
"entities": "^4.4.0"
}
},
"node_modules/https-proxy-agent": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz",
@@ -5392,6 +5380,14 @@
"@pkgjs/parseargs": "^0.11.0"
}
},
"node_modules/js-cookie": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/js-cookie/-/js-cookie-3.0.1.tgz",
"integrity": "sha512-+0rgsUXZu4ncpPxRL+lNEptWMOWl9etvPHc/koSRp6MPwpRYAhmk0dUG00J4bxVV3r9uUzfo24wW0knS07SKSw==",
"engines": {
"node": ">=12"
}
},
"node_modules/js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
@@ -5565,6 +5561,14 @@
"loose-envify": "cli.js"
}
},
"node_modules/lower-case": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz",
"integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==",
"dependencies": {
"tslib": "^2.0.3"
}
},
"node_modules/lru-cache": {
"version": "10.2.2",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.2.tgz",
@@ -5598,6 +5602,17 @@
"semver": "bin/semver.js"
}
},
"node_modules/map-obj": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz",
"integrity": "sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==",
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/markdown-table": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz",
@@ -6645,6 +6660,15 @@
}
}
},
"node_modules/no-case": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz",
"integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==",
"dependencies": {
"lower-case": "^2.0.2",
"tslib": "^2.0.3"
}
},
"node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
@@ -6696,17 +6720,6 @@
"resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz",
"integrity": "sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA=="
},
"node_modules/nth-check": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz",
"integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==",
"dependencies": {
"boolbase": "^1.0.0"
},
"funding": {
"url": "https://github.com/fb55/nth-check?sponsor=1"
}
},
"node_modules/object-assign": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
@@ -6966,18 +6979,6 @@
"url": "https://github.com/inikulin/parse5?sponsor=1"
}
},
"node_modules/parse5-htmlparser2-tree-adapter": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz",
"integrity": "sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==",
"dependencies": {
"domhandler": "^5.0.2",
"parse5": "^7.0.0"
},
"funding": {
"url": "https://github.com/inikulin/parse5?sponsor=1"
}
},
"node_modules/path-exists": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
@@ -7026,6 +7027,11 @@
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/path-to-regexp": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz",
"integrity": "sha512-JLyh7xT1kizaEvcaXOQwOc2/Yhw6KZOvPf1S8401UyLk86CU79LN3vl7ztXGm/pZ+YjoyAJ4rxmHwbkBXJX+yw=="
},
"node_modules/path-type": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
@@ -7949,6 +7955,39 @@
"node": ">=8"
}
},
"node_modules/snake-case": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/snake-case/-/snake-case-3.0.4.tgz",
"integrity": "sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==",
"dependencies": {
"dot-case": "^3.0.4",
"tslib": "^2.0.3"
}
},
"node_modules/snakecase-keys": {
"version": "5.4.4",
"resolved": "https://registry.npmjs.org/snakecase-keys/-/snakecase-keys-5.4.4.tgz",
"integrity": "sha512-YTywJG93yxwHLgrYLZjlC75moVEX04LZM4FHfihjHe1FCXm+QaLOFfSf535aXOAd0ArVQMWUAe8ZPm4VtWyXaA==",
"dependencies": {
"map-obj": "^4.1.0",
"snake-case": "^3.0.4",
"type-fest": "^2.5.2"
},
"engines": {
"node": ">=12"
}
},
"node_modules/snakecase-keys/node_modules/type-fest": {
"version": "2.19.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz",
"integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==",
"engines": {
"node": ">=12.20"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/source-map": {
"version": "0.5.7",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
@@ -7974,6 +8013,11 @@
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/std-env": {
"version": "3.7.0",
"resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz",
"integrity": "sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg=="
},
"node_modules/stream": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/stream/-/stream-0.0.2.tgz",
@@ -8264,6 +8308,17 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/swr": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/swr/-/swr-2.2.0.tgz",
"integrity": "sha512-AjqHOv2lAhkuUdIiBu9xbuettzAzWXmCEcLONNKJRba87WAefz8Ca9d6ds/SzrPc235n1IxWYdhJ2zF3MNUaoQ==",
"dependencies": {
"use-sync-external-store": "^1.2.0"
},
"peerDependencies": {
"react": "^16.11.0 || ^17.0.0 || ^18.0.0"
}
},
"node_modules/tapable": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz",
@@ -8353,9 +8408,9 @@
}
},
"node_modules/tiktoken": {
"version": "1.0.15",
"resolved": "https://registry.npmjs.org/tiktoken/-/tiktoken-1.0.15.tgz",
"integrity": "sha512-sCsrq/vMWUSEW29CJLNmPvWxlVp7yh2tlkAjpJltIKqp5CKf98ZNpdeHRmAlPVFlGEbswDc6SmI8vz64W/qErw=="
"version": "1.0.14",
"resolved": "https://registry.npmjs.org/tiktoken/-/tiktoken-1.0.14.tgz",
"integrity": "sha512-g5zd5r/DoH8Kw0fiYbYpVhb6WO8BHO1unXqmBBWKwoT17HwSounnDtMDFUKm2Pko8U47sjQarOe+9aUrnqmmTg=="
},
"node_modules/tiny-invariant": {
"version": "1.3.3",
@@ -8442,14 +8497,6 @@
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz",
"integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q=="
},
"node_modules/turndown": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/turndown/-/turndown-7.2.0.tgz",
"integrity": "sha512-eCZGBN4nNNqM9Owkv9HAtWRYfLA4h909E/WGAWWBpmB275ehNhZyk87/Tpvjbp0jjNl9XwCsbe6bm6CqFsgD+A==",
"dependencies": {
"@mixmark-io/domino": "^2.2.0"
}
},
"node_modules/type-check": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz",
+2 -4
View File
@@ -18,6 +18,7 @@
"schema": "src/server/prisma/schema.prisma"
},
"dependencies": {
"@clerk/nextjs": "^5.0.8",
"@emotion/cache": "^11.11.0",
"@emotion/react": "^11.11.4",
"@emotion/server": "^11.11.0",
@@ -38,7 +39,6 @@
"@vercel/analytics": "^1.2.2",
"@vercel/speed-insights": "^1.0.10",
"browser-fs-access": "^0.35.0",
"cheerio": "^1.0.0-rc.12",
"eventsource-parser": "^1.1.2",
"idb-keyval": "^6.2.1",
"next": "~14.1.4",
@@ -61,8 +61,7 @@
"sharp": "^0.33.3",
"superjson": "^2.2.1",
"tesseract.js": "^5.1.0",
"tiktoken": "^1.0.15",
"turndown": "^7.2.0",
"tiktoken": "^1.0.14",
"uuid": "^9.0.1",
"zod": "^3.23.8",
"zustand": "^4.5.2"
@@ -79,7 +78,6 @@
"@types/react-dom": "^18.3.0",
"@types/react-katex": "^3.0.4",
"@types/react-timeago": "^4.1.7",
"@types/turndown": "^5.0.4",
"@types/uuid": "^9.0.8",
"eslint": "^8.57.0",
"eslint-config-next": "^14.2.3",
+15 -12
View File
@@ -13,6 +13,7 @@ import '~/common/styles/GithubMarkdown.css';
import '~/common/styles/NProgress.css';
import '~/common/styles/app.styles.css';
import { ProviderAuth } from '~/common/providers/ProviderAuth';
import { ProviderBackendCapabilities } from '~/common/providers/ProviderBackendCapabilities';
import { ProviderBootstrapLogic } from '~/common/providers/ProviderBootstrapLogic';
import { ProviderSingleTab } from '~/common/providers/ProviderSingleTab';
@@ -32,18 +33,20 @@ const MyApp = ({ Component, emotionCache, pageProps }: MyAppProps) =>
</Head>
<ProviderTheming emotionCache={emotionCache}>
<ProviderSingleTab>
<ProviderTRPCQuerySettings>
<ProviderBackendCapabilities>
{/* ^ SSR boundary */}
<ProviderBootstrapLogic>
<ProviderSnacks>
<Component {...pageProps} />
</ProviderSnacks>
</ProviderBootstrapLogic>
</ProviderBackendCapabilities>
</ProviderTRPCQuerySettings>
</ProviderSingleTab>
<ProviderAuth>
<ProviderSingleTab>
<ProviderTRPCQuerySettings>
<ProviderBackendCapabilities>
{/* ^ SSR boundary */}
<ProviderBootstrapLogic>
<ProviderSnacks>
<Component {...pageProps} />
</ProviderSnacks>
</ProviderBootstrapLogic>
</ProviderBackendCapabilities>
</ProviderTRPCQuerySettings>
</ProviderSingleTab>
</ProviderAuth>
</ProviderTheming>
{isVercelFromFrontend && <VercelAnalytics debug={false} />}
+3 -6
View File
@@ -77,12 +77,9 @@ function AppShareTarget() {
setIsDownloading(true);
callBrowseFetchPage(intentURL)
.then(page => {
if (page.stopReason !== 'error') {
let pageContent = page.content.markdown || page.content.text || page.content.html || '';
if (pageContent)
pageContent = '\n\n```' + intentURL + '\n' + pageContent + '\n```\n';
queueComposerTextAndLaunchApp(pageContent);
} else
if (page.stopReason !== 'error')
queueComposerTextAndLaunchApp('\n\n```' + intentURL + '\n' + page.content + '\n```\n');
else
setErrorMessage('Could not read any data' + page.error ? ': ' + page.error : '');
})
.catch(error => setErrorMessage(error?.message || error || 'Unknown error'))
+4 -4
View File
@@ -1,5 +1,5 @@
import * as React from 'react';
import { useShallow } from 'zustand/react/shallow';
import { shallow } from 'zustand/shallow';
import { Box, Card, ListDivider, ListItemDecorator, MenuItem, Switch, Typography } from '@mui/joy';
import ArrowBackIcon from '@mui/icons-material/ArrowBack';
@@ -99,7 +99,7 @@ export function Telephone(props: {
// external state
const { chatLLMId, chatLLMDropdown } = useChatLLMDropdown();
const { chatTitle, reMessages } = useChatStore(useShallow(state => {
const { chatTitle, reMessages } = useChatStore(state => {
const conversation = props.callIntent.conversationId
? state.conversations.find(conversation => conversation.id === props.callIntent.conversationId) ?? null
: null;
@@ -107,7 +107,7 @@ export function Telephone(props: {
chatTitle: conversation ? conversationTitle(conversation) : null,
reMessages: conversation ? conversation.messages : null,
};
}));
}, shallow);
const persona = SystemPurposes[props.callIntent.personaId as SystemPurposeId] ?? undefined;
const personaCallStarters = persona?.call?.starters ?? undefined;
const personaVoiceId = overridePersonaVoice ? undefined : (persona?.voices?.elevenLabs?.voiceId ?? undefined);
@@ -225,7 +225,7 @@ export function Telephone(props: {
let finalText = '';
let error: any | null = null;
setPersonaTextInterim('💭...');
llmStreamingChatGenerate(chatLLMId, callPrompt, 'call', callMessages[0].id, null, null, responseAbortController.current.signal, ({ textSoFar }) => {
llmStreamingChatGenerate(chatLLMId, callPrompt, null, null, responseAbortController.current.signal, ({ textSoFar }) => {
const text = textSoFar?.trim();
if (text) {
finalText = text;
+1 -1
View File
@@ -310,7 +310,7 @@ function ChatDrawer(props: {
bottomBarBasis={filteredChatsBarBasis}
onConversationActivate={handleConversationActivate}
onConversationBranch={onConversationBranch}
onConversationDeleteNoConfirmation={handleConversationDeleteNoConfirmation}
onConversationDelete={handleConversationDeleteNoConfirmation}
onConversationExport={onConversationsExportDialog}
onConversationFolderChange={handleConversationFolderChange}
/>
+5 -14
View File
@@ -42,7 +42,7 @@ export const ChatDrawerItemMemo = React.memo(ChatDrawerItem, (prev, next) =>
prev.bottomBarBasis === next.bottomBarBasis &&
prev.onConversationActivate === next.onConversationActivate &&
prev.onConversationBranch === next.onConversationBranch &&
prev.onConversationDeleteNoConfirmation === next.onConversationDeleteNoConfirmation &&
prev.onConversationDelete === next.onConversationDelete &&
prev.onConversationExport === next.onConversationExport &&
prev.onConversationFolderChange === next.onConversationFolderChange,
);
@@ -76,7 +76,7 @@ function ChatDrawerItem(props: {
bottomBarBasis: number,
onConversationActivate: (conversationId: DConversationId, closeMenu: boolean) => void,
onConversationBranch: (conversationId: DConversationId, messageId: string | null) => void,
onConversationDeleteNoConfirmation: (conversationId: DConversationId) => void,
onConversationDelete: (conversationId: DConversationId) => void,
onConversationExport: (conversationId: DConversationId, exportAll: boolean) => void,
onConversationFolderChange: (folderChangeRequest: FolderChangeRequest) => void,
}) {
@@ -155,16 +155,7 @@ function ChatDrawerItem(props: {
// Delete
const { onConversationDeleteNoConfirmation } = props;
const handleDeleteButtonShow = React.useCallback((event: React.MouseEvent) => {
// special case: if 'Shift' is pressed, delete immediately
if (event.shiftKey) {
event.stopPropagation();
onConversationDeleteNoConfirmation(conversationId);
return;
}
setDeleteArmed(true);
}, [conversationId, onConversationDeleteNoConfirmation]);
const handleDeleteButtonShow = React.useCallback(() => setDeleteArmed(true), []);
const handleDeleteButtonHide = React.useCallback(() => setDeleteArmed(false), []);
@@ -172,9 +163,9 @@ function ChatDrawerItem(props: {
if (deleteArmed) {
setDeleteArmed(false);
event.stopPropagation();
onConversationDeleteNoConfirmation(conversationId);
props.onConversationDelete(conversationId);
}
}, [conversationId, deleteArmed, onConversationDeleteNoConfirmation]);
}, [conversationId, deleteArmed, props]);
const textSymbol = SystemPurposes[systemPurposeId]?.symbol || '❓';
@@ -58,12 +58,16 @@ export async function attachmentLoadInputAsync(source: Readonly<AttachmentSource
edit({ label: source.refUrl, ref: source.refUrl });
try {
const page = await callBrowseFetchPage(source.url);
edit(
page.content.markdown ? { input: { mimeType: 'text/markdown', data: page.content.markdown, dataSize: page.content.markdown.length } }
: page.content.text ? { input: { mimeType: 'text/plain', data: page.content.text, dataSize: page.content.text.length } }
: page.content.html ? { input: { mimeType: 'text/html', data: page.content.html, dataSize: page.content.html.length } }
: { inputError: 'No content found at this link' },
);
if (page.content) {
edit({
input: {
mimeType: 'text/plain',
data: page.content,
dataSize: page.content.length,
},
});
} else
edit({ inputError: 'No content found at this link' });
} catch (error: any) {
edit({ inputError: `Issue downloading page: ${error?.message || (typeof error === 'string' ? error : JSON.stringify(error))}` });
}
+1 -2
View File
@@ -15,8 +15,7 @@ export const runBrowseGetPageUpdatingState = async (cHandler: ConversationHandle
try {
const page = await callBrowseFetchPage(url);
const pageContent = page.content.markdown || page.content.text || page.content.html || 'Issue: page load did not produce an answer: no text found';
cHandler.messageEdit(assistantMessageId, { text: pageContent, typing: false }, true);
cHandler.messageEdit(assistantMessageId, { text: page.content || 'Issue: page load did not produce an answer: no text found', typing: false }, true);
return true;
} catch (error: any) {
console.error(error);
+2 -6
View File
@@ -2,7 +2,7 @@ import type { DLLMId } from '~/modules/llms/store-llms';
import type { StreamingClientUpdate } from '~/modules/llms/vendors/unifiedStreamingClient';
import { autoSuggestions } from '~/modules/aifn/autosuggestions/autoSuggestions';
import { conversationAutoTitle } from '~/modules/aifn/autotitle/autoTitle';
import { llmStreamingChatGenerate, VChatContextRef, VChatContextName, VChatMessageIn } from '~/modules/llms/llm.client';
import { llmStreamingChatGenerate, VChatMessageIn } from '~/modules/llms/llm.client';
import { speakText } from '~/modules/elevenlabs/elevenlabs.client';
import type { DMessage } from '~/common/state/store-chats';
@@ -34,8 +34,6 @@ export async function runAssistantUpdatingState(conversationId: string, history:
const messageStatus = await streamAssistantMessage(
assistantLlmId,
history.map((m): VChatMessageIn => ({ role: m.role, content: m.text })),
'conversation',
conversationId,
parallelViewCount,
autoSpeak,
(update) => cHandler.messageEdit(assistantMessageId, update, false),
@@ -63,8 +61,6 @@ type StreamMessageStatus = { outcome: StreamMessageOutcome, errorMessage?: strin
export async function streamAssistantMessage(
llmId: DLLMId,
messagesHistory: VChatMessageIn[],
contextName: VChatContextName,
contextRef: VChatContextRef,
throttleUnits: number, // 0: disable, 1: default throttle (12Hz), 2+ reduce the message frequency with the square root
autoSpeak: ChatAutoSpeakType,
editMessage: (update: Partial<DMessage>) => void,
@@ -96,7 +92,7 @@ export async function streamAssistantMessage(
const incrementalAnswer: Partial<DMessage> = { text: '' };
try {
await llmStreamingChatGenerate(llmId, messagesHistory, contextName, contextRef, null, null, abortSignal, (update: StreamingClientUpdate) => {
await llmStreamingChatGenerate(llmId, messagesHistory, null, null, abortSignal, (update: StreamingClientUpdate) => {
const textSoFar = update.textSoFar;
// grow the incremental message
+4 -7
View File
@@ -52,19 +52,17 @@ interface NewsItem {
// news and feature surfaces
export const NewsItems: NewsItem[] = [
/*{
versionCode: '1.17.0',
versionCode: '1.16.0',
items: [
Screen Capture (when removed from labs)
Auto-Merge
Draw
...
Screen Capture (when removed from labs)
]
}*/
{
versionCode: '1.16.2',
versionCode: '1.16.1',
versionName: 'Crystal Clear',
versionDate: new Date('2024-06-07T05:00:00Z'),
// versionDate: new Date('2024-05-13T19:00:00Z'),
versionDate: new Date('2024-05-13T19:00:00Z'),
// versionDate: new Date('2024-05-09T00:00:00Z'),
versionCoverImage: coverV116,
items: [
@@ -78,7 +76,6 @@ export const NewsItems: NewsItem[] = [
{ text: <>Updated <B>Anthropic</B>*, <B>Groq</B>, <B>Ollama</B>, <B>OpenAI</B>*, <B>OpenRouter</B>*, and <B>Perplexity</B></> },
{ text: <>Developers: update LLMs data structures</>, dev: true },
{ text: <>1.16.1: Support for <B>OpenAI</B> <B href='https://openai.com/index/hello-gpt-4o/'>GPT-4o</B> (refresh your OpenAI models)</> },
{ text: <>1.16.2: Proper <B>Gemini</B> support, <B>HTML/Markdown</B> downloads, and latest <B>Mistral</B></> },
],
},
{
+1 -1
View File
@@ -7,7 +7,7 @@ import { useAppStateStore } from '~/common/state/store-appstate';
// update this variable every time you want to broadcast a new version to clients
export const incrementalNewsVersion: number = 16.1; // not notifying for 16.2
export const incrementalNewsVersion: number = 16.1;
interface NewsState {
+3 -7
View File
@@ -1,5 +1,4 @@
import * as React from 'react';
import { v4 as uuidv4 } from 'uuid';
import { Alert, Box, Button, Card, CardContent, CircularProgress, Divider, FormLabel, Grid, IconButton, LinearProgress, Tab, tabClasses, TabList, TabPanel, Tabs, Typography } from '@mui/joy';
import AddIcon from '@mui/icons-material/Add';
@@ -103,11 +102,8 @@ export function Creator(props: { display: boolean }) {
strings: editedInstructions, stringEditors: instructionEditors,
} = useFormEditTextArray(Prompts, PromptTitles);
const { steps: creationChainSteps, id: chainId } = React.useMemo(() => {
return {
steps: createChain(editedInstructions, PromptTitles),
id: uuidv4(),
};
const creationChainSteps = React.useMemo(() => {
return createChain(editedInstructions, PromptTitles);
}, [editedInstructions]);
const llmLabel = personaLlm?.label || undefined;
@@ -126,7 +122,7 @@ export function Creator(props: { display: boolean }) {
chainError,
userCancelChain,
restartChain,
} = useLLMChain(creationChainSteps, personaLlm?.id, chainInputText ?? undefined, savePersona, 'persona-extract', chainId);
} = useLLMChain(creationChainSteps, personaLlm?.id, chainInputText ?? undefined, savePersona);
// Reset the relevant state when the selected tab changes
+1 -1
View File
@@ -200,7 +200,7 @@ export function SettingsModal(props: {
<TabPanel value={PreferencesTab.Tools} variant='outlined' sx={{ p: 'var(--Tabs-gap)', borderRadius: 'md' }}>
<Topics>
<Topic icon={<SearchIcon />} title='Browsing'>
<Topic icon={<SearchIcon />} title='Browsing' startCollapsed>
<BrowseSettings />
</Topic>
<Topic icon={<SearchIcon />} title='Google Search API' startCollapsed>
-15
View File
@@ -1,15 +0,0 @@
import * as React from 'react';
import { Typography } from '@mui/joy';
import CheckRoundedIcon from '@mui/icons-material/CheckRounded';
// Small status decorator for settings inputs: shows a green check with
// 'Already set on server' when the value is server-configured, or the plain
// word 'required' when the user still has to provide it.
export function AlreadySet(props: { required?: boolean }) {
return (
// no check icon while the value is still required from the user
<Typography level='body-sm' startDecorator={props.required ? undefined : <CheckRoundedIcon color='success' />}>
{/*Installed Already*/}
{props.required ? 'required' : 'Already set on server'}
</Typography>
);
}
+2
View File
@@ -9,6 +9,7 @@ import MoreHorizIcon from '@mui/icons-material/MoreHoriz';
import { useModelsStore } from '~/modules/llms/store-llms';
import { AgiSquircleIcon } from '~/common/components/icons/AgiSquircleIcon';
import { authUserButton } from '~/common/providers/ProviderAuth';
import { checkDivider, checkVisibileIcon, NavItemApp, navItems } from '~/common/app.nav';
import { themeZIndexDesktopNav } from '~/common/app.theme';
@@ -184,6 +185,7 @@ export function DesktopNav(props: { component: React.ElementType, currentApp?: N
<DesktopNavGroupBox sx={{ mb: 'calc(2 * var(--GroupMarginY))' }}>
{navExtLinkItems}
{navModalItems}
{authUserButton}
</DesktopNavGroupBox>
</InvertedBar>
+36
View File
@@ -0,0 +1,36 @@
import * as React from 'react';
import dynamic from 'next/dynamic';
import { RedirectToSignIn, SignedIn, SignedOut, UserButton } from '@clerk/nextjs';
// Clerk auth is active only when the publishable key is present; NEXT_PUBLIC_*
// vars are inlined at build time, so this is effectively a deploy-time switch
const enableClerk = !!process.env.NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY;
// Dynamically import ClerkProvider only when enableClerk is true, so the
// @clerk/nextjs provider code is not pulled in for deployments without auth
const ClerkProviderDynamic = !enableClerk ? null : dynamic(() =>
import('@clerk/nextjs')
.then(({ ClerkProvider }) => ClerkProvider)
.catch(err => {
// chunk load failure: render a minimal inline error instead of crashing the app
console.error('Failed to load ClerkProvider:', err);
const AuthLoadIssue = () => <>Issue Loading Auth</>;
// displayName lets React devtools identify this fallback component
AuthLoadIssue.displayName = 'AuthLoadIssue';
return AuthLoadIssue;
}),
);
/**
 * App-level auth gate. With Clerk enabled, children render only for signed-in
 * users and anonymous visitors are redirected to sign-in; with Clerk disabled,
 * children render unmodified (auth becomes a no-op pass-through).
 */
export const ProviderAuth = (props: { children: React.ReactNode }) => (enableClerk && ClerkProviderDynamic)
? (
<ClerkProviderDynamic>
<SignedIn>
{props.children}
</SignedIn>
<SignedOut>
{/* anonymous users: redirect to Clerk's sign-in flow */}
<RedirectToSignIn />
</SignedOut>
</ClerkProviderDynamic>
) : props.children;
// Pre-built Clerk user avatar/menu element for the nav bar; null when auth is
// disabled, so consumers (e.g. DesktopNav) can interpolate it unconditionally
export const authUserButton = (enableClerk && ClerkProviderDynamic)
? <>
<UserButton />
</>
: null;
+31 -2
View File
@@ -5,7 +5,7 @@ import { v4 as uuidv4 } from 'uuid';
import { DLLMId, getChatLLMId } from '~/modules/llms/store-llms';
import { idbStateStorage } from '../util/idbUtils';
import { IDB_MIGRATION_INITIAL, idbStateStorage } from '../util/idbUtils';
import { countModelTokens } from '../util/token-counter';
import { defaultSystemPurposeId, SystemPurposeId } from '../../data';
@@ -407,7 +407,10 @@ export const useChatStore = create<ConversationsStore>()(devtools(
storage: createJSONStorage(() => idbStateStorage),
// Migrations
migrate: (persistedState: unknown, _fromVersion: number): ConversationsStore => {
migrate: (persistedState: unknown, fromVersion: number): ConversationsStore => {
// -1 -> 3: migration loading from localStorage to IndexedDB
if (fromVersion === IDB_MIGRATION_INITIAL)
return _migrateLocalStorageData() as any;
// other: just proceed
return persistedState as any;
@@ -462,6 +465,32 @@ function getNextBranchTitle(currentTitle: string): string {
return `(1) ${currentTitle}`;
}
/**
 * One-time migration from the legacy localStorage persistence to IndexedDB.
 * Reads the chats stored under the 'app-chats' localStorage key, renames the
 * key (to 'app-chats-v2') as a backup for data-loss prevention, and returns a
 * partial state containing just the conversations for the new store.
 * Returns {} (nothing to restore) when no legacy data exists or parsing fails.
 */
function _migrateLocalStorageData(): ChatState | {} {
const key = 'app-chats';
const value = localStorage.getItem(key);
// no legacy key present: nothing to migrate
if (!value) return {};
try {
// parse the localStorage state (zustand persist wraps data under .state)
const localStorageState = JSON.parse(value)?.state;
// backup and delete the localStorage key, so this migration runs only once
const backupKey = `${key}-v2`;
localStorage.setItem(backupKey, value);
localStorage.removeItem(key);
// match the state from localstorage
return {
conversations: localStorageState?.conversations ?? [],
};
} catch (error) {
// on failure the backup key still holds the raw data; start with empty state
console.error('LocalStorage migration error', error);
return {};
}
}
/**
* Convenience function to count the tokens in a DMessage object
+15
View File
@@ -1,6 +1,10 @@
import type { StateStorage } from 'zustand/middleware';
import { del as idbDel, get as idbGet, set as idbSet } from 'idb-keyval';
// used by the state storage middleware to detect data migration from the old state storage (localStorage)
// NOTE: remove past 2024-03-19 (6 months past release of this utility conversion)
export const IDB_MIGRATION_INITIAL = -1;
// set to true to enable debugging
const DEBUG_SCHEDULER = false;
@@ -126,6 +130,17 @@ export const idbStateStorage: StateStorage = {
if (DEBUG_SCHEDULER)
console.warn(' (read bytes:', value?.length?.toLocaleString(), ')');
/* IMPORTANT!
* We modify the default behavior of `getItem` to return a {version: -1} object if a key is not found.
* This is to trigger the migration across state storage implementations, as Zustand would not call the
* 'migrate' function otherwise.
* See 'https://github.com/enricoros/big-agi/pull/158' for more details
*/
if (value === undefined) {
return JSON.stringify({
version: IDB_MIGRATION_INITIAL,
});
}
return value || null;
},
setItem: (name: string, value: string): void => {
+13 -26
View File
@@ -6,20 +6,14 @@ import { DLLMId, findLLMOrThrow } from '~/modules/llms/store-llms';
// Do not set this to true in production, it's very verbose
const DEBUG_TOKEN_COUNT = false;
// Globals
// const tokenEncodings: string[] = ['gpt2', 'r50k_base', 'p50k_base', 'p50k_edit', 'cl100k_base', 'o200k_base'] satisfies TiktokenEncoding[];
// Global symbols to dynamically load the Tiktoken library
// global symbols to dynamically load the Tiktoken library
let get_encoding: ((encoding: TiktokenEncoding) => Tiktoken) | null = null;
let encoding_for_model: ((model: TiktokenModel) => Tiktoken) | null = null;
let preloadPromise: Promise<void> | null = null;
let informTheUser = false;
/**
* Preloads the Tiktoken library if not already loaded.
* @returns {Promise<void>} A promise that resolves when the library is loaded.
*/
export function preloadTiktokenLibrary(): Promise<void> {
export function preloadTiktokenLibrary() {
if (!preloadPromise) {
preloadPromise = import('tiktoken')
.then(tiktoken => {
@@ -39,21 +33,16 @@ export function preloadTiktokenLibrary(): Promise<void> {
/**
* Wrapper around the Tiktoken library to keep tokenizers for all models in a cache.
* Also, preloads the tokenizer for the default model to avoid initial stall.
* Wrapper around the Tiktoken library, to keep tokenizers for all models in a cache
*
* We also preload the tokenizer for the default model, so that the first time a user types
* a message, it doesn't stall loading the tokenizer.
*/
export const countModelTokens: (text: string, llmId: DLLMId, debugFrom: string) => number | null = (() => {
// return () => 0;
const tokenEncoders: { [modelId: string]: Tiktoken } = {};
let encodingDefault: Tiktoken | null = null;
let encodingCL100K: Tiktoken | null = null;
/**
* Counts the tokens in the given text for the specified model.
* @param {string} text - The text to tokenize.
* @param {DLLMId} llmId - The ID of the LLM.
* @param {string} debugFrom - Debug information.
* @returns {number | null} The token count or null if not ready.
*/
function _tokenCount(text: string, llmId: DLLMId, debugFrom: string): number | null {
// The library shall have been preloaded - if not, attempt to start its loading and return null to indicate we're not ready to count
@@ -66,23 +55,21 @@ export const countModelTokens: (text: string, llmId: DLLMId, debugFrom: string)
return null;
}
const openaiModel = findLLMOrThrow(llmId)?.options?.llmRef;
const { options: { llmRef: openaiModel } } = findLLMOrThrow(llmId);
if (!openaiModel) throw new Error(`LLM ${llmId} has no LLM reference id`);
if (!(openaiModel in tokenEncoders)) {
try {
tokenEncoders[openaiModel] = encoding_for_model(openaiModel as TiktokenModel);
} catch (e) {
// fallback to the default encoding across all models (not just OpenAI - this will be used everywhere..)
if (!encodingDefault)
encodingDefault = get_encoding('cl100k_base');
tokenEncoders[openaiModel] = encodingDefault;
// make sure we recycle the default encoding across all models
if (!encodingCL100K)
encodingCL100K = get_encoding('cl100k_base');
tokenEncoders[openaiModel] = encodingCL100K;
}
}
let count: number = 0;
// Note: the try/catch shouldn't be necessary, but there could be corner cases where the tiktoken library throws
// https://github.com/enricoros/big-agi/issues/182
let count = 0;
try {
count = tokenEncoders[openaiModel]?.encode(text, 'all', [])?.length || 0;
} catch (e) {
@@ -83,7 +83,7 @@ export function autoSuggestions(conversationId: string, assistantMessageId: stri
// Follow-up: Auto-Diagrams
if (suggestDiagrams) {
llmChatGenerateOrThrow(funcLLMId, [
void llmChatGenerateOrThrow(funcLLMId, [
{ role: 'system', content: systemMessage.text },
{ role: 'user', content: userMessage.text },
{ role: 'assistant', content: assistantMessageText },
@@ -110,8 +110,7 @@ export function autoSuggestions(conversationId: string, assistantMessageId: stri
}
}
}).catch(err => {
// Likely the model did not support function calling
// console.log('autoSuggestions: diagram error:', err);
console.error('autoSuggestions::diagram:', err);
});
}
+3 -3
View File
@@ -68,7 +68,7 @@ export function DiagramsModal(props: { config: DiagramConfig, onClose: () => voi
const [diagramLlm, llmComponent] = useFormRadioLlmType('Generator', 'chat');
// derived state
const { conversationId, messageId, text: subject } = props.config;
const { conversationId, text: subject } = props.config;
const diagramLlmId = diagramLlm?.id;
@@ -98,7 +98,7 @@ export function DiagramsModal(props: { config: DiagramConfig, onClose: () => voi
const diagramPrompt = bigDiagramPrompt(diagramType, diagramLanguage, systemMessage.text, subject, customInstruction);
try {
await llmStreamingChatGenerate(diagramLlm.id, diagramPrompt, 'ai-diagram', messageId, null, null, stepAbortController.signal,
await llmStreamingChatGenerate(diagramLlm.id, diagramPrompt, null, null, stepAbortController.signal,
({ textSoFar }) => textSoFar && setDiagramCode(diagramCode = textSoFar),
);
} catch (error: any) {
@@ -109,7 +109,7 @@ export function DiagramsModal(props: { config: DiagramConfig, onClose: () => voi
setAbortController(null);
}
}, [abortController, conversationId, customInstruction, diagramLanguage, diagramLlm, diagramType, messageId, subject]);
}, [abortController, conversationId, diagramLanguage, diagramLlm, diagramType, subject, customInstruction]);
// [Effect] Auto-abort on unmount
+1 -1
View File
@@ -117,7 +117,7 @@ export function FlattenerModal(props: {
await startStreaming(llm.id, [
{ role: 'system', content: flattenProfile.systemPrompt },
{ role: 'user', content: encodeConversationAsUserMessage(flattenProfile.userPrompt, messages) },
], 'ai-flattener', messages[0].id);
]);
}, [llm, props.conversationId, startStreaming]);
+1 -2
View File
@@ -194,8 +194,7 @@ async function search(query: string): Promise<string> {
async function browse(url: string): Promise<string> {
try {
const page = await callBrowseFetchPage(url);
const pageContent = page.content.markdown || page.content.text || page.content.html || '';
return JSON.stringify(pageContent ? { text: pageContent } : { error: 'Issue reading the page' });
return JSON.stringify(page.content ? { text: page.content } : { error: 'Issue reading the page' });
} catch (error) {
console.error('Error browsing:', (error as Error).message);
return 'An error occurred while browsing to the URL. Missing WSS Key?';
+4 -4
View File
@@ -1,7 +1,7 @@
import * as React from 'react';
import { DLLMId, findLLMOrThrow } from '~/modules/llms/store-llms';
import { llmStreamingChatGenerate, VChatContextName, VChatContextRef, VChatMessageIn } from '~/modules/llms/llm.client';
import { llmStreamingChatGenerate, VChatMessageIn } from '~/modules/llms/llm.client';
// set to true to log to the console
@@ -20,7 +20,7 @@ export interface LLMChainStep {
/**
* React hook to manage a chain of LLM transformations.
*/
export function useLLMChain(steps: LLMChainStep[], llmId: DLLMId | undefined, chainInput: string | undefined, onSuccess: (output: string, input: string) => void, contextName: VChatContextName, contextRef: VChatContextRef) {
export function useLLMChain(steps: LLMChainStep[], llmId: DLLMId | undefined, chainInput: string | undefined, onSuccess?: (output: string, input: string) => void) {
// state
const [chain, setChain] = React.useState<ChainState | null>(null);
@@ -114,7 +114,7 @@ export function useLLMChain(steps: LLMChainStep[], llmId: DLLMId | undefined, ch
setChainStepInterimText(null);
// LLM call (streaming, cancelable)
llmStreamingChatGenerate(llmId, llmChatInput, contextName, contextRef, null, null, stepAbortController.signal,
llmStreamingChatGenerate(llmId, llmChatInput, null, null, stepAbortController.signal,
({ textSoFar }) => {
textSoFar && setChainStepInterimText(interimText = textSoFar);
})
@@ -141,7 +141,7 @@ export function useLLMChain(steps: LLMChainStep[], llmId: DLLMId | undefined, ch
stepAbortController.abort('step aborted');
_chainAbortController.signal.removeEventListener('abort', globalToStepListener);
};
}, [chain, contextRef, contextName, llmId, onSuccess]);
}, [chain, llmId, onSuccess]);
return {
+3 -3
View File
@@ -1,7 +1,7 @@
import * as React from 'react';
import type { DLLMId } from '~/modules/llms/store-llms';
import { llmStreamingChatGenerate, VChatContextName, VChatContextRef, VChatMessageIn } from '~/modules/llms/llm.client';
import { llmStreamingChatGenerate, VChatMessageIn } from '~/modules/llms/llm.client';
export function useStreamChatText() {
@@ -13,7 +13,7 @@ export function useStreamChatText() {
const abortControllerRef = React.useRef<AbortController | null>(null);
const startStreaming = React.useCallback(async (llmId: DLLMId, prompt: VChatMessageIn[], contextName: VChatContextName, contextRef: VChatContextRef) => {
const startStreaming = React.useCallback(async (llmId: DLLMId, prompt: VChatMessageIn[]) => {
setStreamError(null);
setPartialText(null);
setText(null);
@@ -24,7 +24,7 @@ export function useStreamChatText() {
try {
let lastText = '';
await llmStreamingChatGenerate(llmId, prompt, contextName, contextRef, null, null, abortControllerRef.current.signal, ({ textSoFar }) => {
await llmStreamingChatGenerate(llmId, prompt, null, null, abortControllerRef.current.signal, ({ textSoFar }) => {
if (textSoFar) {
lastText = textSoFar;
setPartialText(lastText);
+8 -7
View File
@@ -30,30 +30,32 @@ export function BeamView(props: {
// external state
const { novel: explainerUnseen, touch: explainerCompleted, forget: explainerShow } = useUICounter('beam-wizard');
const gatherAutoStartAfterScatter = useModuleBeamStore(state => state.gatherAutoStartAfterScatter);
const {
/* root */ editInputHistoryMessage,
/* scatter */ setRayCount, startScatteringAll, stopScatteringAll,
} = props.beamStore.getState();
const {
/* root */ inputHistory, inputIssues, inputReady,
/* scatter */ hadImportedRays, isScattering, raysReady,
/* scatter */ isScattering, raysReady,
/* gather (composite) */ canGather,
/* IDs */ rayIds, fusionIds,
} = useBeamStore(props.beamStore, useShallow(state => ({
// input
inputHistory: state.inputHistory,
inputIssues: state.inputIssues,
inputReady: state.inputReady,
// scatter
hadImportedRays: state.hadImportedRays,
isScattering: state.isScattering,
raysReady: state.raysReady,
// gather (composite)
canGather: state.raysReady >= 2 && state.currentFactoryId !== null && state.currentGatherLlmId !== null,
// IDs
rayIds: state.rays.map(ray => ray.rayId),
fusionIds: state.fusions.map(fusion => fusion.fusionId),
})));
const { gatherAutoStartAfterScatter } = useModuleBeamStore(useShallow(state => ({
gatherAutoStartAfterScatter: state.gatherAutoStartAfterScatter,
})));
// these two subscriptions are kept separate: merging them into the useShallow selector above would defeat its shallow-equality comparison
const rayIds = useBeamStore(props.beamStore, useShallow(state => state.rays.map(ray => ray.rayId)));
const fusionIds = useBeamStore(props.beamStore, useShallow(state => state.fusions.map(fusion => fusion.fusionId)));
// derived state
const raysCount = rayIds.length;
@@ -171,7 +173,6 @@ export function BeamView(props: {
beamStore={props.beamStore}
isMobile={props.isMobile}
rayIds={rayIds}
hadImportedRays={hadImportedRays}
onIncreaseRayCount={handleRayIncreaseCount}
// linkedLlmId={currentGatherLlmId}
/>
+1 -3
View File
@@ -13,7 +13,6 @@ import { BeamStoreApi, useBeamStore } from '../store-beam.hooks';
import { FFactoryId, FUSION_FACTORIES } from './instructions/beam.gather.factories';
import { GATHER_COLOR } from '../beam.config';
import { beamPaneSx } from '../BeamCard';
import { useModuleBeamStore } from '../store-module-beam';
const gatherPaneClasses = {
@@ -80,9 +79,8 @@ export function BeamGatherPane(props: {
setCurrentFactoryId: state.setCurrentFactoryId,
setCurrentGatherLlmId: state.setCurrentGatherLlmId,
})));
const gatherAutoStartAfterScatter = useModuleBeamStore(state => state.gatherAutoStartAfterScatter);
const [_, gatherLlmComponent/*, gatherLlmIcon*/] = useLLMSelect(
currentGatherLlmId, setCurrentGatherLlmId, props.isMobile ? '' : 'Merge Model', true, !props.canGather && !gatherAutoStartAfterScatter,
currentGatherLlmId, setCurrentGatherLlmId, props.isMobile ? '' : 'Merge Model', true, !props.canGather,
);
// derived state
+4 -5
View File
@@ -170,24 +170,23 @@ export function Fusion(props: {
<GoodTooltip title='Use this message'>
<IconButton
size='sm'
// variant='plain'
// variant='solid'
color={GATHER_COLOR}
disabled={isFusing}
onClick={handleFusionUse}
// endDecorator={<TelegramIcon />}
sx={{
// ...BEAM_BTN_SX,
fontSize: 'xs',
// '--Icon-fontSize': 'var(--joy-fontSize-xl)',
// fontSize: 'xs',
// backgroundColor: 'background.popup',
// border: '1px solid',
// borderColor: `${GATHER_COLOR}.outlinedBorder`,
// boxShadow: `0 4px 16px -4px rgb(var(--joy-palette-${GATHER_COLOR}-mainChannel) / 20%)`,
animation: `${animationEnterBelow} 0.1s ease-out`,
whiteSpace: 'nowrap',
// whiteSpace: 'nowrap',
}}
>
{/*Use*/}
{/*Ok*/}
<TelegramIcon />
</IconButton>
</GoodTooltip>
@@ -96,7 +96,7 @@ export async function executeChatGenerate(_i: ChatGenerateInstruction, inputs: E
};
// LLM Streaming generation
return streamAssistantMessage(inputs.llmId, history, 'beam-gather', inputs.contextRef, getUXLabsHighPerformance() ? 0 : 1, 'off', onMessageUpdate, inputs.chainAbortController.signal)
return streamAssistantMessage(inputs.llmId, history, getUXLabsHighPerformance() ? 0 : 1, 'off', onMessageUpdate, inputs.chainAbortController.signal)
.then((status) => {
// re-throw errors, as streamAssistantMessage catches internally
if (status.outcome === 'aborted') {
@@ -23,7 +23,6 @@ export interface ExecutionInputState {
readonly chatMessages: DMessage[];
readonly rayMessages: DMessage[];
readonly llmId: DLLMId;
readonly contextRef: string; // not useful
// interaction
readonly chainAbortController: AbortController;
readonly updateProgressComponent: (component: React.ReactNode) => void;
@@ -68,7 +67,6 @@ export function gatherStartFusion(
chatMessages: chatMessages,
rayMessages: rayMessages,
llmId: initialFusion.llmId,
contextRef: initialFusion.fusionId,
// interaction
chainAbortController: new AbortController(),
updateProgressComponent: (component: React.ReactNode) => onUpdateBFusion({ fusingProgressComponent: component }),
+2 -8
View File
@@ -16,7 +16,6 @@ import type { DLLMId } from '~/modules/llms/store-llms';
import { GoodTooltip } from '~/common/components/GoodTooltip';
import { InlineError } from '~/common/components/InlineError';
import { animationEnterBelow } from '~/common/util/animUtils';
import { copyToClipboard } from '~/common/util/clipboardUtils';
import { useLLMSelect } from '~/common/components/forms/useLLMSelect';
@@ -110,8 +109,7 @@ function RayControls(props: {
export function BeamRay(props: {
beamStore: BeamStoreApi,
hadImportedRays: boolean
isRemovable: boolean,
isRemovable: boolean
rayId: string,
rayIndexWeak: number,
// linkedLlmId: DLLMId | null,
@@ -242,20 +240,16 @@ export function BeamRay(props: {
<GoodTooltip title='Choose this message'>
<IconButton
size='sm'
// variant='plain'
color={GATHER_COLOR}
disabled={isImported || isScattering}
onClick={handleRayUse}
// endDecorator={!isImported ? <TelegramIcon /> : null}
sx={{
fontSize: 'xs',
// '--Icon-fontSize': 'var(--joy-fontSize-xl)',
px: isImported ? 1 : undefined,
animation: `${animationEnterBelow} 0.1s ease-out`,
whiteSpace: 'nowrap',
}}
>
{isImported ? 'From Chat' : /*props.hadImportedRays ? 'Replace' : 'Use'*/ <TelegramIcon />}
{isImported ? 'From Chat' : /*'Use'*/ <TelegramIcon />}
</IconButton>
</GoodTooltip>
-2
View File
@@ -27,7 +27,6 @@ const rayGridMobileSx: SxProps = {
export function BeamRayGrid(props: {
beamStore: BeamStoreApi,
hadImportedRays: boolean
isMobile: boolean,
onIncreaseRayCount: () => void,
rayIds: string[],
@@ -45,7 +44,6 @@ export function BeamRayGrid(props: {
key={'ray-' + rayId}
rayIndexWeak={index}
beamStore={props.beamStore}
hadImportedRays={props.hadImportedRays}
isRemovable={raysCount > SCATTER_RAY_MIN}
rayId={rayId}
// linkedLlmId={props.linkedLlmId}
+1 -4
View File
@@ -67,7 +67,7 @@ function rayScatterStart(ray: BRay, llmId: DLLMId | null, inputHistory: DMessage
// stream the assistant's messages
const messagesHistory: VChatMessageIn[] = inputHistory.map(({ role, text }) => ({ role, content: text }));
streamAssistantMessage(llmId, messagesHistory, 'beam-scatter', ray.rayId, getUXLabsHighPerformance() ? 0 : rays.length, 'off', updateMessage, abortController.signal)
streamAssistantMessage(llmId, messagesHistory, getUXLabsHighPerformance() ? 0 : rays.length, 'off', updateMessage, abortController.signal)
.then((status) => {
_rayUpdate(ray.rayId, {
status: (status.outcome === 'success') ? 'success'
@@ -134,7 +134,6 @@ export function rayIsImported(ray: BRay | null): boolean {
interface ScatterStateSlice {
rays: BRay[];
hadImportedRays: boolean;
// derived state
isScattering: boolean; // true if any ray is scattering at the moment
@@ -149,7 +148,6 @@ export const reInitScatterStateSlice = (prevRays: BRay[]): ScatterStateSlice =>
return {
// (remember) keep the same quantity of rays and same llms
rays: prevRays.map(prevRay => createBRay(prevRay.rayLlmId)),
hadImportedRays: false,
isScattering: false,
raysReady: 0,
@@ -240,7 +238,6 @@ export const createScatterSlice: StateCreator<RootStoreSlice & ScatterStoreSlice
// append the other rays (excluding the ones to remove)
...rays.filter((ray) => !raysToRemove.includes(ray)),
],
hadImportedRays: messages.length > 0,
});
_storeLastScatterConfig();
_syncRaysStateToScatter();
+1 -2
View File
@@ -70,7 +70,7 @@ const createRootSlice: StateCreator<BeamStore, [], [], RootStoreSlice> = (_set,
open: (chatHistory: Readonly<DMessage[]>, initialChatLlmId: DLLMId | null, callback: BeamSuccessCallback) => {
const { isOpen: wasAlreadyOpen, terminateKeepingSettings, loadBeamConfig, hadImportedRays, setRayLlmIds, setCurrentGatherLlmId } = _get();
const { isOpen: wasAlreadyOpen, terminateKeepingSettings, loadBeamConfig, setRayLlmIds, setCurrentGatherLlmId } = _get();
// reset pending operations
terminateKeepingSettings();
@@ -89,7 +89,6 @@ const createRootSlice: StateCreator<BeamStore, [], [], RootStoreSlice> = (_set,
onSuccessCallback: callback,
// rays already reset
hadImportedRays,
// update the model only if the dialog was not already open
...(!wasAlreadyOpen && initialChatLlmId && {
+21 -55
View File
@@ -1,12 +1,10 @@
import * as React from 'react';
import { useShallow } from 'zustand/react/shallow';
import { shallow } from 'zustand/shallow';
import { Checkbox, FormControl, FormHelperText, Option, Select, Typography } from '@mui/joy';
import { Checkbox, FormControl, FormHelperText } from '@mui/joy';
import { AlreadySet } from '~/common/components/AlreadySet';
import { ExternalLink } from '~/common/components/ExternalLink';
import { FormInputKey } from '~/common/components/forms/FormInputKey';
import { FormLabelStart } from '~/common/components/forms/FormLabelStart';
import { Link } from '~/common/components/Link';
import { platformAwareKeystrokes } from '~/common/components/KeyStroke';
import { useBrowseCapability, useBrowseStore } from './store-module-browsing';
@@ -15,82 +13,50 @@ import { useBrowseCapability, useBrowseStore } from './store-module-browsing';
export function BrowseSettings() {
// external state
const { mayWork, isServerConfig, isClientValid, inCommand, inComposer, inReact, inPersonas } = useBrowseCapability();
const {
wssEndpoint, setWssEndpoint,
pageTransform, setPageTransform,
setEnableCommandBrowse, setEnableComposerAttach, setEnableReactTool, setEnablePersonaTool,
} = useBrowseStore(useShallow(state => ({
const { mayWork, isServerConfig, isClientValid, inCommand, inComposer, inReact } = useBrowseCapability();
const { wssEndpoint, setWssEndpoint, setEnableCommandBrowse, setEnableComposerAttach, setEnableReactTool } = useBrowseStore(state => ({
wssEndpoint: state.wssEndpoint,
pageTransform: state.pageTransform,
setPageTransform: state.setPageTransform,
setWssEndpoint: state.setWssEndpoint,
setEnableCommandBrowse: state.setEnableCommandBrowse,
setEnableComposerAttach: state.setEnableComposerAttach,
setEnableReactTool: state.setEnableReactTool,
setEnablePersonaTool: state.setEnablePersonaTool,
})));
const handlePageTransformChange = (_event: any, value: typeof pageTransform | null) => value && setPageTransform(value);
}), shallow);
return <>
<Typography level='body-sm'>
Configure Browsing to enable loading links and web pages. <ExternalLink
href='https://github.com/enricoros/big-agi/blob/main/docs/config-feature-browse.md'>
Learn more</ExternalLink>.
</Typography>
<FormHelperText sx={{ display: 'block' }}>
Configure a browsing service to enable loading links and pages. See the <Link
href='https://github.com/enricoros/big-agi/blob/main/docs/config-feature-browse.md' target='_blank' noLinkStyle>
browse configuration guide</Link> for more information.
</FormHelperText>
<FormInputKey
autoCompleteId='browse-wss' label='Puppeteer Wss' noKey
autoCompleteId='browse-wss' label='Puppeteer Endpoint' noKey
value={wssEndpoint} onChange={setWssEndpoint}
rightLabel={<AlreadySet required={!isServerConfig} />}
rightLabel={!isServerConfig ? 'required' : '✔️ already set in server'}
required={!isServerConfig} isError={!isClientValid && !isServerConfig}
placeholder='wss://...'
/>
<FormControl orientation='horizontal' sx={{ justifyContent: 'space-between', alignItems: 'center' }}>
<FormLabelStart title='Load pages as:' />
<Select
variant='outlined'
value={pageTransform} onChange={handlePageTransformChange}
slotProps={{
root: { sx: { minWidth: '140px' } },
indicator: { sx: { opacity: 0.5 } },
button: { sx: { whiteSpace: 'inherit' } },
}}
>
<Option value='text'>Text (default)</Option>
<Option value='markdown'>Markdown</Option>
<Option value='html'>HTML</Option>
</Select>
</FormControl>
<Typography level='body-sm' sx={{ mt: 2 }}>Browsing enablement:</Typography>
<FormControl disabled={!mayWork}>
<Checkbox size='sm' label='Paste URLs' checked={inComposer} onChange={(event) => setEnableComposerAttach(event.target.checked)} />
<FormHelperText>{platformAwareKeystrokes('Load and attach when pasting a URL')}</FormHelperText>
<Checkbox variant='outlined' label='Attach URLs' checked={inComposer} onChange={(event) => setEnableComposerAttach(event.target.checked)} />
<FormHelperText>{platformAwareKeystrokes('Load and attach a page when pasting a URL')}</FormHelperText>
</FormControl>
<FormControl disabled={!mayWork}>
<Checkbox size='sm' label='/browse' checked={inCommand} onChange={(event) => setEnableCommandBrowse(event.target.checked)} />
<Checkbox variant='outlined' label='/browse' checked={inCommand} onChange={(event) => setEnableCommandBrowse(event.target.checked)} />
<FormHelperText>{platformAwareKeystrokes('Use /browse to load a web page')}</FormHelperText>
</FormControl>
<FormControl disabled={!mayWork}>
<Checkbox size='sm' label='ReAct' checked={inReact} onChange={(event) => setEnableReactTool(event.target.checked)} />
<Checkbox variant='outlined' label='ReAct' checked={inReact} onChange={(event) => setEnableReactTool(event.target.checked)} />
<FormHelperText>Enables loadURL() in ReAct</FormHelperText>
</FormControl>
<FormControl disabled>
<Checkbox size='sm' label='Chat with Personas' checked={false} onChange={(event) => setEnablePersonaTool(event.target.checked)} />
<FormHelperText>Not yet available</FormHelperText>
{/*<FormHelperText>Enable loading URLs by Personas</FormHelperText>*/}
</FormControl>
{/*<FormControl disabled>*/}
{/* <Checkbox variant='outlined' label='Personas' checked={inPersonas} onChange={(event) => setEnablePersonaTool(event.target.checked)} />*/}
{/* <FormHelperText>Enable loading URLs by Personas</FormHelperText>*/}
{/*</FormControl>*/}
</>;
}
+14 -22
View File
@@ -7,39 +7,31 @@ import { apiAsyncNode } from '~/common/util/trpc.client';
const DEBUG_SHOW_SCREENSHOT = false;
// export function
export async function callBrowseFetchPage(url: string) {
export async function callBrowseFetchPage(
url: string,
// transforms?: BrowsePageTransform[],
// screenshotOptions?: { width: number, height: number, quality?: number },
) {
// validate url
// throw if no URL is provided
url = url?.trim() || '';
if (!url)
throw new Error('Browsing error: Invalid URL');
// noinspection HttpUrlsUsage: assume https if no protocol is provided
// assume https if no protocol is provided
// noinspection HttpUrlsUsage
if (!url.startsWith('http://') && !url.startsWith('https://'))
url = 'https://' + url;
const { wssEndpoint, pageTransform } = useBrowseStore.getState();
const clientWssEndpoint = useBrowseStore.getState().wssEndpoint;
const { pages } = await apiAsyncNode.browse.fetchPages.mutate({
access: {
dialect: 'browse-wss',
...(!!wssEndpoint && { wssEndpoint }),
...(!!clientWssEndpoint && { wssEndpoint: clientWssEndpoint }),
},
requests: [{
url,
transforms: /*transforms ? transforms :*/ [pageTransform],
screenshot: /*screenshotOptions ? screenshotOptions :*/ !DEBUG_SHOW_SCREENSHOT ? undefined : {
width: 512,
height: 512,
// quality: 100,
},
}],
subjects: [{ url }],
screenshot: DEBUG_SHOW_SCREENSHOT ? {
width: 512,
height: 512,
// quality: 100,
} : undefined,
});
if (pages.length !== 1)
@@ -50,7 +42,7 @@ export async function callBrowseFetchPage(
// DEBUG: if there's a screenshot, append it to the dom
if (DEBUG_SHOW_SCREENSHOT && page.screenshot) {
const img = document.createElement('img');
img.src = page.screenshot.webpDataUrl;
img.src = page.screenshot.imageDataUrl;
img.style.width = `${page.screenshot.width}px`;
img.style.height = `${page.screenshot.height}px`;
document.body.appendChild(img);
@@ -59,7 +51,7 @@ export async function callBrowseFetchPage(
// throw if there's an error
if (page.error) {
console.warn('Browsing service error:', page.error);
if (!Object.keys(page.content).length)
if (!page.content)
throw new Error(page.error);
}
+45 -104
View File
@@ -1,9 +1,6 @@
import { z } from 'zod';
import { TRPCError } from '@trpc/server';
import { BrowserContext, connect, ScreenshotOptions } from '@cloudflare/puppeteer';
import { default as TurndownService } from 'turndown';
import { load as cheerioLoad } from 'cheerio';
import { BrowserContext, connect, ScreenshotOptions, TimeoutError } from '@cloudflare/puppeteer';
import { createTRPCRouter, publicProcedure } from '~/server/api/trpc.server';
import { env } from '~/server/env.mjs';
@@ -19,22 +16,17 @@ const browseAccessSchema = z.object({
dialect: z.enum(['browse-wss']),
wssEndpoint: z.string().trim().optional(),
});
type BrowseAccessSchema = z.infer<typeof browseAccessSchema>;
const pageTransformSchema = z.enum(['html', 'text', 'markdown']);
type PageTransformSchema = z.infer<typeof pageTransformSchema>;
const fetchPageInputSchema = z.object({
access: browseAccessSchema,
requests: z.array(z.object({
subjects: z.array(z.object({
url: z.string().url(),
transforms: z.array(pageTransformSchema),
screenshot: z.object({
width: z.number(),
height: z.number(),
quality: z.number().optional(),
}).optional(),
})),
screenshot: z.object({
width: z.number(),
height: z.number(),
quality: z.number().optional(),
}).optional(),
});
@@ -42,18 +34,16 @@ const fetchPageInputSchema = z.object({
const fetchPageWorkerOutputSchema = z.object({
url: z.string(),
content: z.record(pageTransformSchema, z.string()),
content: z.string(),
error: z.string().optional(),
stopReason: z.enum(['end', 'timeout', 'error']),
screenshot: z.object({
webpDataUrl: z.string().startsWith('data:image/webp'),
imageDataUrl: z.string().startsWith('data:image/'),
mimeType: z.string().startsWith('image/'),
width: z.number(),
height: z.number(),
}).optional(),
});
type FetchPageWorkerOutputSchema = z.infer<typeof fetchPageWorkerOutputSchema>;
const fetchPagesOutputSchema = z.object({
pages: z.array(fetchPageWorkerOutputSchema),
@@ -65,23 +55,21 @@ export const browseRouter = createTRPCRouter({
fetchPages: publicProcedure
.input(fetchPageInputSchema)
.output(fetchPagesOutputSchema)
.mutation(async ({ input: { access, requests } }) => {
.mutation(async ({ input: { access, subjects, screenshot } }) => {
const pages: FetchPageWorkerOutputSchema[] = [];
const pagePromises = requests.map(request =>
workerPuppeteer(access, request.url, request.transforms, request.screenshot));
const results = await Promise.allSettled(pagePromises);
const pages: FetchPageWorkerOutputSchema[] = results.map((result, index) =>
result.status === 'fulfilled'
? result.value
: {
url: requests[index].url,
content: {},
error: result.reason?.message || 'Unknown fetch error',
for (const subject of subjects) {
try {
pages.push(await workerPuppeteer(access, subject.url, screenshot?.width, screenshot?.height, screenshot?.quality));
} catch (error: any) {
pages.push({
url: subject.url,
content: '',
error: error?.message || JSON.stringify(error) || 'Unknown fetch error',
stopReason: 'error',
},
);
});
}
}
return { pages };
}),
@@ -89,13 +77,12 @@ export const browseRouter = createTRPCRouter({
});
async function workerPuppeteer(
access: BrowseAccessSchema,
targetUrl: string,
transforms: PageTransformSchema[],
screenshotOptions?: { width: number, height: number, quality?: number },
): Promise<FetchPageWorkerOutputSchema> {
type BrowseAccessSchema = z.infer<typeof browseAccessSchema>;
type FetchPageWorkerOutputSchema = z.infer<typeof fetchPageWorkerOutputSchema>;
async function workerPuppeteer(access: BrowseAccessSchema, targetUrl: string, ssWidth: number | undefined, ssHeight: number | undefined, ssQuality: number | undefined): Promise<FetchPageWorkerOutputSchema> {
// access
const browserWSEndpoint = (access.wssEndpoint || env.PUPPETEER_WSS_ENDPOINT || '').trim();
const isLocalBrowser = browserWSEndpoint.startsWith('ws://');
if (!browserWSEndpoint || (!browserWSEndpoint.startsWith('wss://') && !isLocalBrowser))
@@ -106,7 +93,7 @@ async function workerPuppeteer(
const result: FetchPageWorkerOutputSchema = {
url: targetUrl,
content: {},
content: '',
error: undefined,
stopReason: 'error',
screenshot: undefined,
@@ -130,49 +117,35 @@ async function workerPuppeteer(
if (!isWebPage) {
// noinspection ExceptionCaughtLocallyJS
throw new Error(`Invalid content-type: ${contentType}`);
} else {
} else
result.stopReason = 'end';
}
} catch (error: any) {
// This was "error instanceof TimeoutError;" but threw some type error - trying the below instead
const isTimeout = error?.message?.includes('Navigation timeout') || false;
const isTimeout: boolean = error instanceof TimeoutError;
result.stopReason = isTimeout ? 'timeout' : 'error';
if (!isTimeout) {
result.error = '[Puppeteer] ' + (error?.message || error?.toString() || 'Unknown goto error');
}
if (!isTimeout)
result.error = '[Puppeteer] ' + error?.message || error?.toString() || 'Unknown goto error';
}
// transform the content of the page as text
try {
if (result.stopReason !== 'error') {
for (const transform of transforms) {
switch (transform) {
case 'html':
result.content.html = cleanHtml(await page.content());
break;
case 'text':
result.content.text = await page.evaluate(() => document.body.innerText || document.textContent || '');
break;
case 'markdown':
const html = await page.content();
const cleanedHtml = cleanHtml(html);
const turndownService = new TurndownService({ headingStyle: 'atx' });
result.content.markdown = turndownService.turndown(cleanedHtml);
break;
}
}
if (!Object.keys(result.content).length)
result.error = '[Puppeteer] Empty content';
result.content = await page.evaluate(() => {
const content = document.body.innerText || document.textContent;
if (!content)
throw new Error('No content');
return content;
});
}
} catch (error: any) {
result.error = '[Puppeteer] ' + (error?.message || error?.toString() || 'Unknown content error');
result.error = '[Puppeteer] ' + error?.message || error?.toString() || 'Unknown evaluate error';
}
// get a screenshot of the page
try {
if (screenshotOptions?.width && screenshotOptions?.height) {
const { width, height, quality } = screenshotOptions;
const scale = Math.round(100 * width / 1024) / 100;
if (ssWidth && ssHeight) {
const width = ssWidth;
const height = ssHeight;
const scale = Math.round(100 * ssWidth / 1024) / 100;
await page.setViewport({ width: width / scale, height: height / scale, deviceScaleFactor: scale });
@@ -183,10 +156,10 @@ async function workerPuppeteer(
type: imageType,
encoding: 'base64',
clip: { x: 0, y: 0, width: width / scale, height: height / scale },
...(quality && { quality }),
...(ssQuality && { quality: ssQuality }),
}) as string;
result.screenshot = { webpDataUrl: `data:${mimeType};base64,${dataString}`, mimeType, width, height };
result.screenshot = { imageDataUrl: `data:${mimeType};base64,${dataString}`, mimeType, width, height };
}
} catch (error: any) {
console.error('workerPuppeteer: page.screenshot', error);
@@ -219,35 +192,3 @@ async function workerPuppeteer(
return result;
}
function cleanHtml(html: string) {
const $ = cheerioLoad(html);
// Remove standard unwanted elements
$('script, style, nav, aside, noscript, iframe, svg, canvas, .ads, .comments, link[rel="stylesheet"]').remove();
// Remove elements that might be specific to proxy services or injected by them
$('[id^="brightdata-"], [class^="brightdata-"]').remove();
// Remove comments
$('*').contents().filter(function() {
return this.type === 'comment';
}).remove();
// Remove empty elements
$('p, div, span').each(function() {
if ($(this).text().trim() === '' && $(this).children().length === 0) {
$(this).remove();
}
});
// Merge consecutive paragraphs
$('p + p').each(function() {
$(this).prev().append(' ' + $(this).text());
$(this).remove();
});
// Return the cleaned HTML
return $.html();
}
@@ -5,16 +5,11 @@ import { CapabilityBrowsing } from '~/common/components/useCapabilities';
import { getBackendCapabilities } from '~/modules/backend/store-backend-capabilities';
export type BrowsePageTransform = 'html' | 'text' | 'markdown';
interface BrowseState {
wssEndpoint: string;
setWssEndpoint: (url: string) => void;
pageTransform: BrowsePageTransform;
setPageTransform: (transform: BrowsePageTransform) => void;
enableCommandBrowse: boolean;
setEnableCommandBrowse: (value: boolean) => void;
@@ -36,9 +31,6 @@ export const useBrowseStore = create<BrowseState>()(
wssEndpoint: '', // default WSS endpoint
setWssEndpoint: (wssEndpoint: string) => set(() => ({ wssEndpoint })),
pageTransform: 'text',
setPageTransform: (pageTransform: BrowsePageTransform) => set(() => ({ pageTransform })),
enableCommandBrowse: true,
setEnableCommandBrowse: (enableCommandBrowse: boolean) => set(() => ({ enableCommandBrowse })),
@@ -2,7 +2,6 @@ import * as React from 'react';
import { FormControl } from '@mui/joy';
import { AlreadySet } from '~/common/components/AlreadySet';
import { FormInputKey } from '~/common/components/forms/FormInputKey';
import { FormLabelStart } from '~/common/components/forms/FormLabelStart';
import { useCapabilityElevenLabs } from '~/common/components/useCapabilities';
@@ -32,7 +31,7 @@ export function ElevenlabsSettings() {
{!isConfiguredServerSide && <FormInputKey
autoCompleteId='elevenlabs-key' label='ElevenLabs API Key'
rightLabel={<AlreadySet required={!isConfiguredServerSide} />}
rightLabel={isConfiguredServerSide ? '✔️ already set in server' : 'required'}
value={apiKey} onChange={setApiKey}
required={!isConfiguredServerSide} isError={!isValidKey}
/>}
+3 -19
View File
@@ -5,7 +5,7 @@ import { hasGoogleAnalytics } from '~/common/components/GoogleAnalytics';
import type { ModelDescriptionSchema } from './server/llm.server.types';
import type { OpenAIWire } from './server/openai/openai.wiretypes';
import type { StreamingClientUpdate } from './vendors/unifiedStreamingClient';
import { DLLM, DLLMId, DModelSource, DModelSourceId, LLM_IF_OAI_Chat, LLM_IF_OAI_Fn, useModelsStore } from './store-llms';
import { DLLM, DLLMId, DModelSource, DModelSourceId, LLM_IF_OAI_Chat, useModelsStore } from './store-llms';
import { FALLBACK_LLM_TEMPERATURE } from './vendors/openai/openai.vendor';
import { findAccessForSourceOrThrow, findVendorForLlmOrThrow } from './vendors/vendors.registry';
@@ -21,16 +21,6 @@ export interface VChatMessageIn {
export type VChatFunctionIn = OpenAIWire.ChatCompletion.RequestFunctionDef;
export type VChatContextName =
| 'conversation'
| 'ai-diagram'
| 'ai-flattener'
| 'beam-scatter'
| 'beam-gather'
| 'call'
| 'persona-extract';
export type VChatContextRef = string;
export interface VChatMessageOut {
role: 'assistant' | 'system' | 'user';
content: string;
@@ -79,7 +69,7 @@ function modelDescriptionToDLLMOpenAIOptions<TSourceSetup, TLLMOptions>(model: M
  // null means unknown context/output tokens
const contextTokens = model.contextWindow || null;
const maxOutputTokens = model.maxCompletionTokens || (contextTokens ? Math.round(contextTokens / 2) : null);
const llmResponseTokensRatio = model.maxCompletionTokens ? 1 : 1 / 4;
const llmResponseTokensRatio = model.maxCompletionTokens ? 1 / 2 : 1 / 4;
const llmResponseTokens = maxOutputTokens ? Math.round(maxOutputTokens * llmResponseTokensRatio) : null;
return {
@@ -129,10 +119,6 @@ export async function llmChatGenerateOrThrow<TSourceSetup = unknown, TAccess = u
// id to DLLM and vendor
const { llm, vendor } = findVendorForLlmOrThrow<TSourceSetup, TAccess, TLLMOptions>(llmId);
// if the model does not support function calling and we're trying to force a function, throw
if (forceFunctionName && !llm.interfaces.includes(LLM_IF_OAI_Fn))
throw new Error(`Model ${llmId} does not support function calling`);
// FIXME: relax the forced cast
const options = llm.options as TLLMOptions;
@@ -153,8 +139,6 @@ export async function llmChatGenerateOrThrow<TSourceSetup = unknown, TAccess = u
export async function llmStreamingChatGenerate<TSourceSetup = unknown, TAccess = unknown, TLLMOptions = unknown>(
llmId: DLLMId,
messages: VChatMessageIn[],
contextName: VChatContextName,
contextRef: VChatContextRef,
functions: VChatFunctionIn[] | null,
forceFunctionName: string | null,
abortSignal: AbortSignal,
@@ -177,5 +161,5 @@ export async function llmStreamingChatGenerate<TSourceSetup = unknown, TAccess =
await new Promise(resolve => setTimeout(resolve, delay));
// execute via the vendor
return await vendor.streamingChatGenerateOrThrow(access, llmId, llmOptions, messages, contextName, contextRef, functions, forceFunctionName, abortSignal, onUpdate);
return await vendor.streamingChatGenerateOrThrow(access, llmId, llmOptions, messages, functions, forceFunctionName, abortSignal, onUpdate);
}
+17 -167
View File
@@ -1,162 +1,16 @@
import type { GeminiModelSchema } from './gemini.wiretypes';
import type { ModelDescriptionSchema } from '../llm.server.types';
import { LLM_IF_OAI_Chat, LLM_IF_OAI_Json, LLM_IF_OAI_Vision } from '../../store-llms';
import { LLM_IF_OAI_Chat, LLM_IF_OAI_Vision } from '../../store-llms';
// supported interfaces
const geminiChatInterfaces: GeminiModelSchema['supportedGenerationMethods'] = ['generateContent'];
// unsupported interfaces
const filterUnallowedNames = ['Legacy'];
const filterUnallowedInterfaces: GeminiModelSchema['supportedGenerationMethods'] = ['generateAnswer', 'embedContent', 'embedText'];
const geminiLinkModels = ['models/gemini-pro', 'models/gemini-pro-vision'];
/* Manual models details
Gemini Name Mapping example:
- Latest version gemini-1.0-pro-latest <model>-<generation>-<variation>-latest
- Latest stable version gemini-1.0-pro <model>-<generation>-<variation>
- Stable versions gemini-1.0-pro-001 <model>-<generation>-<variation>-<version>
*/
const _knownGeminiModels: ({
id: string,
isNewest?: boolean,
isPreview?: boolean
symLink?: string
} & Pick<ModelDescriptionSchema, 'interfaces' | 'pricing' | 'trainingDataCutoff' | 'hidden'>)[] = [
// Generation 1.5
{
id: 'models/gemini-1.5-flash-latest', // updated regularly and might be a preview version
isNewest: true,
isPreview: true,
pricing: {
chatIn: 0.70, // 0.35 up to 128k tokens, 0.70 prompts > 128k tokens
chatOut: 2.10, // 1.05 up to 128k tokens, 2.10 prompts > 128k tokens
},
trainingDataCutoff: 'May 2024',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json], // input: audio, images and text
},
{
id: 'models/gemini-1.5-flash',
// copied from above
pricing: {
chatIn: 0.70, // 0.35 up to 128k tokens, 0.70 prompts > 128k tokens
chatOut: 2.10, // 1.05 up to 128k tokens, 2.10 prompts > 128k tokens
},
trainingDataCutoff: 'Apr 2024',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json], // input: audio, images and text
hidden: true,
},
{
id: 'models/gemini-1.5-flash-001',
// copied from above
pricing: {
chatIn: 0.70, // 0.35 up to 128k tokens, 0.70 prompts > 128k tokens
chatOut: 2.10, // 1.05 up to 128k tokens, 2.10 prompts > 128k tokens
},
trainingDataCutoff: 'Apr 2024',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json], // input: audio, images and text
hidden: true,
},
{
id: 'models/gemini-1.5-pro-latest', // updated regularly and might be a preview version
isNewest: true,
isPreview: true,
pricing: {
chatIn: 7.00, // $3.50 / 1 million tokens (for prompts up to 128K tokens), $7.00 / 1 million tokens (for prompts longer than 128K)
chatOut: 21.00, // $10.50 / 1 million tokens (128K or less), $21.00 / 1 million tokens (128K+)
},
trainingDataCutoff: 'May 2024',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json], // input: audio, images and text
},
{
id: 'models/gemini-1.5-pro', // latest stable -> 001
// copied from above
pricing: {
chatIn: 7.00, // $3.50 / 1 million tokens (for prompts up to 128K tokens), $7.00 / 1 million tokens (for prompts longer than 128K)
chatOut: 21.00, // $10.50 / 1 million tokens (128K or less), $21.00 / 1 million tokens (128K+)
},
trainingDataCutoff: 'Apr 2024',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json],
hidden: true,
},
{
id: 'models/gemini-1.5-pro-001', // stable snapshot
// copied from above
pricing: {
chatIn: 7.00, // $3.50 / 1 million tokens (for prompts up to 128K tokens), $7.00 / 1 million tokens (for prompts longer than 128K)
chatOut: 21.00, // $10.50 / 1 million tokens (128K or less), $21.00 / 1 million tokens (128K+)
},
trainingDataCutoff: 'Apr 2024',
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision, LLM_IF_OAI_Json],
hidden: true,
},
// Generation 1.0
{
id: 'models/gemini-1.0-pro-latest',
pricing: {
chatIn: 0.50,
chatOut: 1.50,
},
interfaces: [LLM_IF_OAI_Chat],
},
{
id: 'models/gemini-1.0-pro',
pricing: {
chatIn: 0.50,
chatOut: 1.50,
},
interfaces: [LLM_IF_OAI_Chat],
hidden: true,
},
{
id: 'models/gemini-1.0-pro-001',
pricing: {
chatIn: 0.50,
chatOut: 1.50,
},
interfaces: [LLM_IF_OAI_Chat],
hidden: true,
},
// Generation 1.0 + Vision
{
id: 'models/gemini-1.0-pro-vision-latest',
pricing: {
chatIn: 0.50,
chatOut: 1.50,
},
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision], // Text and Images
hidden: true,
},
// Older symlinks
{
id: 'models/gemini-pro',
symLink: 'models/gemini-1.0-pro',
// copied from symlinked
pricing: {
chatIn: 0.50,
chatOut: 1.50,
},
interfaces: [LLM_IF_OAI_Chat],
hidden: true,
},
{
id: 'models/gemini-pro-vision',
// copied from symlinked
symLink: 'models/gemini-1.0-pro-vision',
pricing: {
chatIn: 0.50,
chatOut: 1.50,
},
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Vision], // Text and Images
hidden: true,
},
];
// interfaces mapping
const geminiChatInterfaces: GeminiModelSchema['supportedGenerationMethods'] = ['generateContent'];
const geminiVisionNames = ['-vision'];
export function geminiFilterModels(geminiModel: GeminiModelSchema): boolean {
@@ -172,20 +26,17 @@ export function geminiSortModels(a: ModelDescriptionSchema, b: ModelDescriptionS
return b.label.localeCompare(a.label);
}
export function geminiModelToModelDescription(geminiModel: GeminiModelSchema): ModelDescriptionSchema {
export function geminiModelToModelDescription(geminiModel: GeminiModelSchema, allModels: GeminiModelSchema[]): ModelDescriptionSchema {
const { description, displayName, name: modelId, supportedGenerationMethods } = geminiModel;
// find known manual mapping
const knownModel = _knownGeminiModels.find(m => m.id === modelId);
// handle symlinks
const label = knownModel?.symLink
? `🔗 ${displayName.replace('1.0', '')}${knownModel.symLink}`
: displayName;
const isSymlink = geminiLinkModels.includes(modelId);
const symlinked = isSymlink ? allModels.find(m => m.displayName === displayName && m.name !== modelId) : null;
const label = isSymlink ? `🔗 ${displayName.replace('1.0', '')}${symlinked ? symlinked.name : '?'}` : displayName;
// handle hidden models
const hasChatInterfaces = supportedGenerationMethods.some(iface => geminiChatInterfaces.includes(iface));
const hidden = knownModel?.hidden || !!knownModel?.symLink || !hasChatInterfaces;
const hidden = isSymlink || !hasChatInterfaces;
// context window
const { inputTokenLimit, outputTokenLimit } = geminiModel;
@@ -195,27 +46,26 @@ export function geminiModelToModelDescription(geminiModel: GeminiModelSchema): M
const { version, topK, topP, temperature } = geminiModel;
const descriptionLong = description + ` (Version: ${version}, Defaults: temperature=${temperature}, topP=${topP}, topK=${topK}, interfaces=[${supportedGenerationMethods.join(',')}])`;
// use known interfaces, or add chat if this is a generateContent model
const interfaces: ModelDescriptionSchema['interfaces'] = knownModel?.interfaces || [];
if (!interfaces.length && hasChatInterfaces) {
const interfaces: ModelDescriptionSchema['interfaces'] = [];
if (hasChatInterfaces) {
interfaces.push(LLM_IF_OAI_Chat);
// if (geminiVisionNames.some(name => modelId.includes(name)))
// interfaces.push(LLM_IF_OAI_Vision);
if (geminiVisionNames.some(name => modelId.includes(name)))
interfaces.push(LLM_IF_OAI_Vision);
}
return {
id: modelId,
label: label, // + (knownModel?.isNewest ? ' 🌟' : ''),
label,
// created: ...
// updated: ...
description: descriptionLong,
contextWindow: contextWindow,
maxCompletionTokens: outputTokenLimit,
trainingDataCutoff: knownModel?.trainingDataCutoff,
// trainingDataCutoff: '...',
interfaces,
// rateLimits: isGeminiPro ? { reqPerMinute: 60 } : undefined,
// benchmarks: ...
pricing: knownModel?.pricing, // TODO: needs <>128k, and per-character and per-image pricing
// pricing: isGeminiPro ? { needs per-character and per-image pricing } : undefined,
hidden,
};
}
@@ -147,7 +147,7 @@ export const llmGeminiRouter = createTRPCRouter({
// map to our output schema
const models = detailedModels
.filter(geminiFilterModels)
.map(geminiModel => geminiModelToModelDescription(geminiModel))
.map(geminiModel => geminiModelToModelDescription(geminiModel, detailedModels))
.sort(geminiSortModels);
return {
@@ -19,7 +19,7 @@ import { OLLAMA_PATH_CHAT, ollamaAccess, ollamaAccessSchema, ollamaChatCompletio
// OpenAI server imports
import type { OpenAIWire } from './openai/openai.wiretypes';
import { openAIAccess, openAIAccessSchema, openAIChatCompletionPayload, openAIHistorySchema, openAIModelSchema } from './openai/openai.router';
import { openAIAccess, openAIAccessSchema, openAIChatCompletionPayload, OpenAIHistorySchema, openAIHistorySchema, OpenAIModelSchema, openAIModelSchema } from './openai/openai.router';
// configuration
@@ -46,17 +46,11 @@ type MuxingFormat = 'sse' | 'json-nl';
*/
type AIStreamParser = (data: string, eventType?: string) => { text: string, close: boolean };
const streamingContextSchema = z.object({
method: z.literal('chat-stream'),
name: z.enum(['conversation', 'ai-diagram', 'ai-flattener', 'call', 'beam-scatter', 'beam-gather', 'persona-extract']),
ref: z.string(),
});
const chatStreamingInputSchema = z.object({
access: z.union([anthropicAccessSchema, geminiAccessSchema, ollamaAccessSchema, openAIAccessSchema]),
model: openAIModelSchema,
history: openAIHistorySchema,
context: streamingContextSchema,
});
export type ChatStreamingInputSchema = z.infer<typeof chatStreamingInputSchema>;
@@ -78,15 +72,14 @@ export async function llmStreamingRelayHandler(req: NextRequest): Promise<Respon
// Parse the request
const body = await req.json();
const _chatStreamingInput: ChatStreamingInputSchema = chatStreamingInputSchema.parse(body);
const { dialect: accessDialect } = _chatStreamingInput.access;
const prettyDialect = serverCapitalizeFirstLetter(accessDialect);
const { access, model, history } = chatStreamingInputSchema.parse(body);
const prettyDialect = serverCapitalizeFirstLetter(access.dialect);
// Prepare the upstream API request and demuxer/parser
let requestData: ReturnType<typeof _prepareRequestData>;
try {
requestData = _prepareRequestData(_chatStreamingInput);
requestData = _prepareRequestData(access, model, history);
} catch (error: any) {
console.error(`[POST] /api/llms/stream: ${prettyDialect}: prepareRequestData issue:`, safeErrorString(error));
return new NextResponse(`**[Service Issue] ${prettyDialect}**: ${safeErrorString(error) || 'Unknown streaming error'}`, {
@@ -110,7 +103,7 @@ export async function llmStreamingRelayHandler(req: NextRequest): Promise<Respon
} catch (error: any) {
// server-side admins message
const capDialect = serverCapitalizeFirstLetter(accessDialect);
const capDialect = serverCapitalizeFirstLetter(access.dialect);
const fetchOrVendorError = safeErrorString(error) + (error?.cause ? ' · ' + JSON.stringify(error.cause) : '');
console.error(`[POST] /api/llms/stream: ${capDialect}: fetch issue:`, fetchOrVendorError, requestData?.url);
@@ -132,7 +125,7 @@ export async function llmStreamingRelayHandler(req: NextRequest): Promise<Respon
* a 'healthy' level of inventory (i.e., pre-buffering) on the pipe to the client.
*/
const transformUpstreamToBigAgiClient = createUpstreamTransformer(
requestData.vendorMuxingFormat, requestData.vendorStreamParser, accessDialect,
requestData.vendorMuxingFormat, requestData.vendorStreamParser, access.dialect,
);
const chatResponseStream =
@@ -493,7 +486,7 @@ function createStreamParserOpenAI(): AIStreamParser {
}
function _prepareRequestData({ access, model, history, context: _context }: ChatStreamingInputSchema): {
function _prepareRequestData(access: ChatStreamingInputSchema['access'], model: OpenAIModelSchema, history: OpenAIHistorySchema): {
headers: HeadersInit;
url: string;
body: object;
+59 -111
View File
@@ -376,31 +376,8 @@ export function localAIModelToModelDescription(modelId: string): ModelDescriptio
// [Mistral]
// updated from the models on: https://docs.mistral.ai/getting-started/models/
// and the pricing available on: https://mistral.ai/technology/#pricing
const _knownMistralChatModels: ManualMappings = [
// Codestral
{
idPrefix: 'codestral-2405',
label: 'Codestral (2405)',
description: 'Designed and optimized for code generation tasks.',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
pricing: { chatIn: 1, chatOut: 3 },
},
{
idPrefix: 'codestral-latest',
label: 'Mistral Large (latest)',
symLink: 'mistral-codestral-2405',
hidden: true,
// copied
description: 'Designed and optimized for code generation tasks.',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
pricing: { chatIn: 1, chatOut: 3 },
},
// Large
{
idPrefix: 'mistral-large-2402',
@@ -408,7 +385,7 @@ const _knownMistralChatModels: ManualMappings = [
description: 'Top-tier reasoning for high-complexity tasks.',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Fn],
pricing: { chatIn: 4, chatOut: 12 },
pricing: { chatIn: 8, chatOut: 24 },
benchmark: { cbaElo: 1159 },
},
{
@@ -420,135 +397,103 @@ const _knownMistralChatModels: ManualMappings = [
description: 'Top-tier reasoning for high-complexity tasks.',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Fn],
pricing: { chatIn: 4, chatOut: 12 },
pricing: { chatIn: 8, chatOut: 24 },
benchmark: { cbaElo: 1159 },
},
// Open Mixtral (8x22B)
{
idPrefix: 'open-mixtral-8x22b-2404',
label: 'Open Mixtral 8x22B (2404)',
description: 'Mixtral 8x22B model',
contextWindow: 65536,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Fn],
pricing: { chatIn: 2, chatOut: 6 },
},
{
idPrefix: 'open-mixtral-8x22b',
label: 'Open Mixtral 8x22B',
symLink: 'open-mixtral-8x22b-2404',
idPrefix: 'mistral-large',
label: 'Mistral Large (?)',
description: 'Flagship model, with top-tier reasoning capabilities and language support (English, French, German, Italian, Spanish, and Code)',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
hidden: true,
// copied
description: 'Mixtral 8x22B model',
contextWindow: 65536,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Fn],
pricing: { chatIn: 2, chatOut: 6 },
},
// Medium (Deprecated)
// Medium - not updated on 2024-02-26
{
idPrefix: 'mistral-medium-2312',
label: 'Mistral Medium (2312)',
description: 'Ideal for intermediate tasks that require moderate reasoning (Data extraction, Summarizing a Document, Writing emails, Writing a Job Description, or Writing Product Descriptions)',
description: 'Mistral internal prototype model.',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
pricing: { chatIn: 2.7, chatOut: 8.1 },
benchmark: { cbaElo: 1148 },
isLegacy: true,
hidden: true,
},
{
idPrefix: 'mistral-medium-latest',
label: 'Mistral Medium (latest)',
symLink: 'mistral-medium-2312',
hidden: true,
// copied
description: 'Ideal for intermediate tasks that require moderate reasoning (Data extraction, Summarizing a Document, Writing emails, Writing a Job Description, or Writing Product Descriptions)',
description: 'Mistral internal prototype model.',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
pricing: { chatIn: 2.7, chatOut: 8.1 },
benchmark: { cbaElo: 1148 },
isLegacy: true,
hidden: true,
},
{
idPrefix: 'mistral-medium',
label: 'Mistral Medium',
symLink: 'mistral-medium-2312',
// copied
description: 'Ideal for intermediate tasks that require moderate reasoning (Data extraction, Summarizing a Document, Writing emails, Writing a Job Description, or Writing Product Descriptions)',
description: 'Mistral internal prototype model.',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
pricing: { chatIn: 2.7, chatOut: 8.1 },
benchmark: { cbaElo: 1148 },
isLegacy: true,
hidden: true,
},
// Open Mixtral (8x7B) -> currently points to `mistral-small-2312` (as per the docs)
{
idPrefix: 'open-mixtral-8x7b',
label: 'Open Mixtral (8x7B)',
description: 'A sparse mixture of experts model. As such, it leverages up to 45B parameters but only uses about 12B during inference, leading to better inference throughput at the cost of more vRAM.',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
pricing: { chatIn: 0.7, chatOut: 0.7 },
},
// Small (deprecated)
// Small (8x7B)
{
idPrefix: 'mistral-small-2402',
label: 'Mistral Small (2402)',
description: 'Suitable for simple tasks that one can do in bulk (Classification, Customer Support, or Text Generation)',
description: 'Optimized endpoint. Cost-efficient reasoning for low-latency workloads. Mistral Small outperforms Mixtral 8x7B and has lower latency',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Fn],
pricing: { chatIn: 1, chatOut: 3 },
pricing: { chatIn: 2, chatOut: 6 },
},
{
idPrefix: 'mistral-small-2312',
label: 'Mistral Small (2312)',
description: 'Aka open-mixtral-8x7b. Cost-efficient reasoning for low-latency workloads. Mistral Small outperforms Mixtral 8x7B and has lower latency',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
pricing: { chatIn: 2, chatOut: 6 },
hidden: true,
isLegacy: true,
},
{
idPrefix: 'mistral-small-latest',
label: 'Mistral Small (latest)',
symLink: 'mistral-small-2402',
hidden: true,
// copied
description: 'Suitable for simple tasks that one can do in bulk (Classification, Customer Support, or Text Generation)',
description: 'Cost-efficient reasoning for low-latency workloads. Mistral Small outperforms Mixtral 8x7B and has lower latency',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Fn],
pricing: { chatIn: 1, chatOut: 3 },
hidden: true,
isLegacy: true,
},
{
idPrefix: 'mistral-small-2312',
label: 'Mistral Small (2312)',
description: 'Aka open-mixtral-8x7b. Suitable for simple tasks that one can do in bulk (Classification, Customer Support, or Text Generation)',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
pricing: { chatIn: 1, chatOut: 3 },
hidden: true,
isLegacy: true,
pricing: { chatIn: 2, chatOut: 6 },
},
{
idPrefix: 'mistral-small',
label: 'Mistral Small',
symLink: 'mistral-small-2312',
// copied
description: 'Aka open-mixtral-8x7b. Suitable for simple tasks that one can do in bulk (Classification, Customer Support, or Text Generation)',
description: 'Cost-efficient reasoning for low-latency workloads.',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
pricing: { chatIn: 1, chatOut: 3 },
pricing: { chatIn: 2, chatOut: 6 },
hidden: true,
isLegacy: true,
},
// Open Mistral (7B) -> currently points to mistral-tiny-2312 (as per the docs)
// Open Mixtral (8x7B)
{
idPrefix: 'open-mistral-7b',
label: 'Open Mistral (7B)',
description: 'The first dense model released by Mistral AI, perfect for experimentation, customization, and quick iteration. At the time of the release, it matched the capabilities of models up to 30B parameters.',
idPrefix: 'open-mixtral-8x7b',
label: 'Open Mixtral (8x7B)',
description: 'Mixtral 8x7B model, aka mistral-small-2312',
// symLink: 'mistral-small-2312',
// copied
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
pricing: { chatIn: 0.25, chatOut: 0.25 },
pricing: { chatIn: 0.7, chatOut: 0.7 },
},
// Tiny (deprecated)
// Tiny (7B)
{
idPrefix: 'mistral-tiny-2312',
label: 'Mistral Tiny (2312)',
@@ -556,34 +501,43 @@ const _knownMistralChatModels: ManualMappings = [
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
hidden: true,
isLegacy: true,
},
{
idPrefix: 'mistral-tiny',
label: 'Mistral Tiny',
symLink: 'mistral-tiny-2312',
// copied
description: 'Aka open-mistral-7b. Used for large batch processing tasks where cost is a significant factor but reasoning capabilities are not crucial',
description: 'Used for large batch processing tasks where cost is a significant factor but reasoning capabilities are not crucial',
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
hidden: true,
isLegacy: true,
},
// Open Mistral (7B)
{
idPrefix: 'open-mistral-7b',
label: 'Open Mistral (7B)',
description: 'Mistral 7B model, aka mistral-tiny-2312',
// symLink: 'mistral-tiny-2312',
// copied
contextWindow: 32768,
interfaces: [LLM_IF_OAI_Chat],
pricing: { chatIn: 0.25, chatOut: 0.25 },
},
{
idPrefix: 'mistral-embed',
label: 'Mistral Embed',
description: 'A model that converts text into numerical vectors of embeddings in 1024 dimensions. Embedding models enable retrieval and retrieval-augmented generation applications.',
description: 'State-of-the-art semantic for extracting representation of text extracts.',
// output: 1024 dimensions
maxCompletionTokens: 1024, // HACK - it's 1024 dimensions, but those are not 'completion tokens'
contextWindow: 8192, // Updated context window
contextWindow: 32768, // actually unknown, assumed from the other models
interfaces: [],
pricing: { chatIn: 0.1, chatOut: 0.1 },
hidden: true,
},
];
const mistralModelFamilyOrder = [
'codestral', 'mistral-large', 'open-mixtral-8x22b', 'mistral-medium', 'open-mixtral-8x7b', 'mistral-small', 'open-mistral-7b', 'mistral-tiny', 'mistral-embed', '🔗',
'mistral-large', 'mistral-medium', 'mistral-small', 'open-mixtral-8x7b', 'mistral-tiny', 'open-mistral-7b', 'mistral-embed', '🔗',
];
export function mistralModelToModelDescription(_model: unknown): ModelDescriptionSchema {
@@ -599,13 +553,13 @@ export function mistralModelToModelDescription(_model: unknown): ModelDescriptio
}
export function mistralModelsSort(a: ModelDescriptionSchema, b: ModelDescriptionSchema): number {
if (a.label.startsWith('🔗') && !b.label.startsWith('🔗')) return 1;
if (!a.label.startsWith('🔗') && b.label.startsWith('🔗')) return -1;
const aPrefixIndex = mistralModelFamilyOrder.findIndex(prefix => a.id.startsWith(prefix));
const bPrefixIndex = mistralModelFamilyOrder.findIndex(prefix => b.id.startsWith(prefix));
if (aPrefixIndex !== -1 && bPrefixIndex !== -1) {
if (aPrefixIndex !== bPrefixIndex)
return aPrefixIndex - bPrefixIndex;
if (a.label.startsWith('🔗') && !b.label.startsWith('🔗')) return 1;
if (!a.label.startsWith('🔗') && b.label.startsWith('🔗')) return -1;
return b.label.localeCompare(a.label);
}
return aPrefixIndex !== -1 ? 1 : -1;
@@ -921,13 +875,7 @@ export function groqModelSortFn(a: ModelDescriptionSchema, b: ModelDescriptionSc
// Helpers
type ManualMapping = ({
idPrefix: string,
isLatest?: boolean,
isPreview?: boolean,
isLegacy?: boolean,
symLink?: string
} & Omit<ModelDescriptionSchema, 'id' | 'created' | 'updated'>);
type ManualMapping = ({ idPrefix: string, isLatest?: boolean, isPreview?: boolean, isLegacy?: boolean, symLink?: string } & Omit<ModelDescriptionSchema, 'id' | 'created' | 'updated'>);
type ManualMappings = ManualMapping[];
function fromManualMapping(mappings: ManualMappings, id: string, created?: number, updated?: number, fallback?: ManualMapping): ModelDescriptionSchema {
+1 -2
View File
@@ -8,7 +8,7 @@ import type { DLLM, DLLMId, DModelSourceId } from '../store-llms';
import type { ModelDescriptionSchema } from '../server/llm.server.types';
import type { ModelVendorId } from './vendors.registry';
import type { StreamingClientUpdate } from './unifiedStreamingClient';
import type { VChatContextName, VChatContextRef, VChatFunctionIn, VChatMessageIn, VChatMessageOrFunctionCallOut, VChatMessageOut } from '../llm.client';
import type { VChatFunctionIn, VChatMessageIn, VChatMessageOrFunctionCallOut, VChatMessageOut } from '../llm.client';
export interface IModelVendor<TSourceSetup = unknown, TAccess = unknown, TLLMOptions = unknown, TDLLM = DLLM<TSourceSetup, TLLMOptions>> {
@@ -53,7 +53,6 @@ export interface IModelVendor<TSourceSetup = unknown, TAccess = unknown, TLLMOpt
llmId: DLLMId,
llmOptions: TLLMOptions,
messages: VChatMessageIn[],
contextName: VChatContextName, contexRef: VChatContextRef,
functions: VChatFunctionIn[] | null, forceFunctionName: string | null,
abortSignal: AbortSignal,
onUpdate: (update: StreamingClientUpdate, done: boolean) => void,
@@ -2,7 +2,6 @@ import * as React from 'react';
import { Alert } from '@mui/joy';
import { AlreadySet } from '~/common/components/AlreadySet';
import { ExternalLink } from '~/common/components/ExternalLink';
import { FormInputKey } from '~/common/components/forms/FormInputKey';
import { FormTextField } from '~/common/components/forms/FormTextField';
@@ -50,7 +49,7 @@ export function AnthropicSourceSetup(props: { sourceId: DModelSourceId }) {
autoCompleteId='anthropic-key' label={!!anthropicHost ? 'API Key' : 'Anthropic API Key'}
rightLabel={<>{needsUserKey
? !anthropicKey && <Link level='body-sm' href='https://www.anthropic.com/earlyaccess' target='_blank'>request Key</Link>
: <AlreadySet />
: '✔️ already set in server'
} {anthropicKey && keyValid && <Link level='body-sm' href='https://console.anthropic.com/settings/usage' target='_blank'>show tokens usage</Link>}
</>}
value={anthropicKey} onChange={value => updateSetup({ anthropicKey: value })}
+1 -2
View File
@@ -1,6 +1,5 @@
import * as React from 'react';
import { AlreadySet } from '~/common/components/AlreadySet';
import { FormInputKey } from '~/common/components/forms/FormInputKey';
import { FormTextField } from '~/common/components/forms/FormTextField';
import { InlineError } from '~/common/components/InlineError';
@@ -50,7 +49,7 @@ export function AzureSourceSetup(props: { sourceId: DModelSourceId }) {
autoCompleteId='azure-key' label='Azure Key'
rightLabel={<>{needsUserKey
? !azureKey && <Link level='body-sm' href='https://azure.microsoft.com/en-us/products/ai-services/openai-service' target='_blank'>request Key</Link>
: <AlreadySet />}
: '✔️ already set in server'}
</>}
value={azureKey} onChange={value => updateSetup({ azureKey: value })}
required={needsUserKey} isError={keyError}
+1 -2
View File
@@ -3,7 +3,6 @@ import * as React from 'react';
import { FormControl, FormHelperText, Option, Select } from '@mui/joy';
import HealthAndSafetyIcon from '@mui/icons-material/HealthAndSafety';
import { AlreadySet } from '~/common/components/AlreadySet';
import { FormInputKey } from '~/common/components/forms/FormInputKey';
import { FormLabelStart } from '~/common/components/forms/FormLabelStart';
import { InlineError } from '~/common/components/InlineError';
@@ -51,7 +50,7 @@ export function GeminiSourceSetup(props: { sourceId: DModelSourceId }) {
autoCompleteId='gemini-key' label='Gemini API Key'
rightLabel={<>{needsUserKey
? !geminiKey && <Link level='body-sm' href={GEMINI_API_KEY_LINK} target='_blank'>request Key</Link>
: <AlreadySet />}
: '✔️ already set in server'}
</>}
value={geminiKey} onChange={value => updateSetup({ geminiKey: value.trim() })}
required={needsUserKey} isError={showKeyError}
+1 -2
View File
@@ -2,7 +2,6 @@ import * as React from 'react';
import { Typography } from '@mui/joy';
import { AlreadySet } from '~/common/components/AlreadySet';
import { FormInputKey } from '~/common/components/forms/FormInputKey';
import { InlineError } from '~/common/components/InlineError';
import { Link } from '~/common/components/Link';
@@ -43,7 +42,7 @@ export function GroqSourceSetup(props: { sourceId: DModelSourceId }) {
autoCompleteId='groq-key' label='Groq API Key'
rightLabel={<>{needsUserKey
? !groqKey && <Link level='body-sm' href={GROQ_REG_LINK} target='_blank'>API keys</Link>
: <AlreadySet />}
: '✔️ already set in server'}
</>}
value={groqKey} onChange={value => updateSetup({ groqKey: value })}
required={needsUserKey} isError={showKeyError}
+1 -2
View File
@@ -6,7 +6,6 @@ import CheckBoxOutlinedIcon from '@mui/icons-material/CheckBoxOutlined';
import { getBackendCapabilities } from '~/modules/backend/store-backend-capabilities';
import { AlreadySet } from '~/common/components/AlreadySet';
import { ExpanderAccordion } from '~/common/components/ExpanderAccordion';
import { FormInputKey } from '~/common/components/forms/FormInputKey';
import { InlineError } from '~/common/components/InlineError';
@@ -82,7 +81,7 @@ export function LocalAISourceSetup(props: { sourceId: DModelSourceId }) {
noKey
required={userHostRequired}
isError={userHostError}
rightLabel={backendHasHost ? <AlreadySet /> : <Link level='body-sm' href='https://localai.io' target='_blank'>Learn more</Link>}
rightLabel={backendHasHost ? '✔️ already set in server' : <Link level='body-sm' href='https://localai.io' target='_blank'>Learn more</Link>}
value={localAIHost} onChange={value => updateSetup({ localAIHost: value })}
/>
+1 -2
View File
@@ -2,7 +2,6 @@ import * as React from 'react';
import { Typography } from '@mui/joy';
import { AlreadySet } from '~/common/components/AlreadySet';
import { FormInputKey } from '~/common/components/forms/FormInputKey';
import { InlineError } from '~/common/components/InlineError';
import { Link } from '~/common/components/Link';
@@ -40,7 +39,7 @@ export function MistralSourceSetup(props: { sourceId: DModelSourceId }) {
autoCompleteId='mistral-key' label='Mistral Key'
rightLabel={<>{needsUserKey
? !mistralKey && <Link level='body-sm' href={MISTRAL_REG_LINK} target='_blank'>request Key</Link>
: <AlreadySet />}
: '✔️ already set in server'}
</>}
value={mistralKey} onChange={value => updateSetup({ oaiKey: value })}
required={needsUserKey} isError={showKeyError}
+1 -2
View File
@@ -2,7 +2,6 @@ import * as React from 'react';
import { Alert } from '@mui/joy';
import { AlreadySet } from '~/common/components/AlreadySet';
import { Brand } from '~/common/app.config';
import { FormInputKey } from '~/common/components/forms/FormInputKey';
import { FormSwitchControl } from '~/common/components/forms/FormSwitchControl';
@@ -49,7 +48,7 @@ export function OpenAISourceSetup(props: { sourceId: DModelSourceId }) {
autoCompleteId='openai-key' label='API Key'
rightLabel={<>{needsUserKey
? !oaiKey && <><Link level='body-sm' href='https://platform.openai.com/account/api-keys' target='_blank'>create key</Link> and <Link level='body-sm' href='https://openai.com/waitlist/gpt-4-api' target='_blank'>request access to GPT-4</Link></>
: <AlreadySet />
: '✔️ already set in server'
} {oaiKey && keyValid && <Link level='body-sm' href='https://platform.openai.com/account/usage' target='_blank'>check usage</Link>}
</>}
value={oaiKey} onChange={value => updateSetup({ oaiKey: value })}
@@ -2,7 +2,6 @@ import * as React from 'react';
import { Button, Typography } from '@mui/joy';
import { AlreadySet } from '~/common/components/AlreadySet';
import { FormInputKey } from '~/common/components/forms/FormInputKey';
import { InlineError } from '~/common/components/InlineError';
import { Link } from '~/common/components/Link';
@@ -57,7 +56,7 @@ export function OpenRouterSourceSetup(props: { sourceId: DModelSourceId }) {
autoCompleteId='openrouter-key' label='OpenRouter API Key'
rightLabel={<>{needsUserKey
? !oaiKey && <Link level='body-sm' href='https://openrouter.ai/keys' target='_blank'>your keys</Link>
: <AlreadySet />
: '✔️ already set in server'
} {oaiKey && keyValid && <Link level='body-sm' href='https://openrouter.ai/activity' target='_blank'>check usage</Link>}
</>}
value={oaiKey} onChange={value => updateSetup({ oaiKey: value })}
@@ -2,7 +2,6 @@ import * as React from 'react';
import { Typography } from '@mui/joy';
import { AlreadySet } from '~/common/components/AlreadySet';
import { FormInputKey } from '~/common/components/forms/FormInputKey';
import { InlineError } from '~/common/components/InlineError';
import { Link } from '~/common/components/Link';
@@ -43,7 +42,7 @@ export function PerplexitySourceSetup(props: { sourceId: DModelSourceId }) {
autoCompleteId='perplexity-key' label='Perplexity API Key'
rightLabel={<>{needsUserKey
? !perplexityKey && <Link level='body-sm' href={PERPLEXITY_REG_LINK} target='_blank'>API keys</Link>
: <AlreadySet />}
: '✔️ already set in server'}
</>}
value={perplexityKey} onChange={value => updateSetup({ perplexityKey: value })}
required={needsUserKey} isError={showKeyError}
@@ -2,7 +2,6 @@ import * as React from 'react';
import { Alert, Typography } from '@mui/joy';
import { AlreadySet } from '~/common/components/AlreadySet';
import { FormInputKey } from '~/common/components/forms/FormInputKey';
import { FormSwitchControl } from '~/common/components/forms/FormSwitchControl';
import { InlineError } from '~/common/components/InlineError';
@@ -49,7 +48,7 @@ export function TogetherAISourceSetup(props: { sourceId: DModelSourceId }) {
autoCompleteId='togetherai-key' label='Together AI Key'
rightLabel={<>{needsUserKey
? !togetherKey && <Link level='body-sm' href={TOGETHERAI_REG_LINK} target='_blank'>request Key</Link>
: <AlreadySet />}
: '✔️ already set in server'}
</>}
value={togetherKey} onChange={value => updateSetup({ togetherKey: value })}
required={needsUserKey} isError={showKeyError}
+1 -7
View File
@@ -3,7 +3,7 @@ import { frontendSideFetch } from '~/common/util/clientFetchers';
import type { ChatStreamingInputSchema, ChatStreamingPreambleModelSchema, ChatStreamingPreambleStartSchema } from '../server/llm.server.streaming';
import type { DLLMId } from '../store-llms';
import type { VChatContextName, VChatContextRef, VChatFunctionIn, VChatMessageIn } from '../llm.client';
import type { VChatFunctionIn, VChatMessageIn } from '../llm.client';
import type { OpenAIAccessSchema } from '../server/openai/openai.router';
import type { OpenAIWire } from '../server/openai/openai.wiretypes';
@@ -29,7 +29,6 @@ export async function unifiedStreamingClient<TSourceSetup = unknown, TLLMOptions
llmId: DLLMId,
llmOptions: TLLMOptions,
messages: VChatMessageIn[],
contextName: VChatContextName, contextRef: VChatContextRef,
functions: VChatFunctionIn[] | null, forceFunctionName: string | null,
abortSignal: AbortSignal,
onUpdate: (update: StreamingClientUpdate, done: boolean) => void,
@@ -56,11 +55,6 @@ export async function unifiedStreamingClient<TSourceSetup = unknown, TLLMOptions
...(llmResponseTokens ? { maxTokens: llmResponseTokens } : {}),
},
history: messages,
context: {
method: 'chat-stream',
name: contextName, // this errors if the client VChatContextName mismatches the server z.enum
ref: contextRef,
},
};
// connect to the server-side streaming endpoint
+1 -2
View File
@@ -10,7 +10,6 @@ import StayPrimaryPortraitIcon from '@mui/icons-material/StayPrimaryPortrait';
import { getBackendCapabilities } from '~/modules/backend/store-backend-capabilities';
import { AlreadySet } from '~/common/components/AlreadySet';
import { FormInputKey } from '~/common/components/forms/FormInputKey';
import { FormLabelStart } from '~/common/components/forms/FormLabelStart';
import { FormRadioControl } from '~/common/components/forms/FormRadioControl';
@@ -81,7 +80,7 @@ export function ProdiaSettings(props: { noSkipKey?: boolean }) {
{!backendHasProdia && !!props.noSkipKey && <FormInputKey
autoCompleteId='prodia-key' label='Prodia API Key'
rightLabel={<AlreadySet required={!backendHasProdia} />}
rightLabel={backendHasProdia ? '✔️ already set in server' : 'required'}
value={apiKey} onChange={setApiKey}
required={!backendHasProdia} isError={!isValidKey}
/>}