Compare commits

...

764 Commits

Author SHA1 Message Date
Enrico Ros f754c32d0d Eslint: use aggressive configuration, from create-t3-app 2023-11-11 17:32:32 -08:00
Enrico Ros 5c65e888d7 More Lint fixes 2023-11-11 17:29:58 -08:00
Enrico Ros 69932b17c9 Lint fixes 2023-11-11 16:45:11 -08:00
Enrico Ros 7fbafa14a2 Rationalize tsconfig.json, from create-t3-app 2023-11-11 16:31:54 -08:00
Enrico Ros 9b25d89d80 Fixes: Found lockfile missing swc dependencies, patching...
Lockfile was successfully patched, please run "npm install" to ensure @next/swc dependencies are downloaded
2023-11-11 16:05:02 -08:00
Enrico Ros 7fb65c260e Update next.config.js 2023-11-11 16:04:11 -08:00
Enrico Ros 97f8b03b19 Roll tRPC 2023-11-11 15:20:58 -08:00
Enrico Ros 53a71224e6 docs: Ollama: proxy: add buffering disable 2023-11-11 15:11:29 -08:00
Enrico Ros f0ed480e81 Call: disabled, but show 2023-11-10 19:02:09 -08:00
Enrico Ros 8010ca3a6e Ollama stream encoding: fixing a huge bug 2023-11-10 18:50:53 -08:00
Enrico Ros c844a0c319 cleanup of non-openai transports 2023-11-10 18:45:15 -08:00
Enrico Ros 11f2a22b2e Ollama: debug malformed JSON packets 2023-11-10 18:26:45 -08:00
Enrico Ros 11cdb72370 Media hooks to differentiate devices 2023-11-10 14:10:20 -08:00
Enrico Ros fe09334783 Improve Vendor icons 2023-11-10 13:51:02 -08:00
Enrico Ros 8c7618be49 ollama: svg icon 2023-11-10 13:34:55 -08:00
Enrico Ros 648ab3e188 docs: ollama: added the advanced reverse proxy configuration 2023-11-10 13:21:12 -08:00
Enrico Ros e5f498c310 docs: ollama: move 2023-11-10 13:13:59 -08:00
Enrico Ros 278594b543 docs: ollama: move 2023-11-10 13:13:51 -08:00
Enrico Ros 649bfdc957 docs: ollama: fix refs 2023-11-10 13:10:38 -08:00
Enrico Ros 251bbcfc5b docs: ollama: update 2023-11-10 13:02:35 -08:00
Enrico Ros 70e73b2c81 ollama: stop auto-fetch while typing every char of the url 2023-11-10 12:57:53 -08:00
Enrico Ros 72a93f9ffa docs: optipng 2023-11-10 12:52:17 -08:00
Enrico Ros cc9a6db859 docs: Ollama: integration guide 2023-11-10 12:11:55 -08:00
Enrico Ros 1814e71cbe Ollama: update model display style 2023-11-10 11:48:30 -08:00
Enrico Ros 06e21d6d9a Update Ollama models 2023-11-10 11:37:15 -08:00
Enrico Ros f53053d3f6 YouTube persona selector and Augmented chat are out of the Experimental mode (still not polished) 2023-11-10 11:28:46 -08:00
Enrico Ros 214983ee82 NextJS 14 Support, with App Router, TurboPack 2023-11-09 22:54:38 -08:00
Enrico Ros 19e0d36204 Roll packages 2023-11-09 21:36:46 -08:00
Enrico Ros 64196b29ce Voice Continuation Mode
See also #175. This accomplishes a similar function in an elegant way.
2023-11-09 21:34:48 -08:00
Enrico Ros 5b2e0fbff2 UseSpeechRecognition: adapt to Callback changes 2023-11-09 20:51:36 -08:00
Enrico Ros 8fa735401d Composer: debounce token counting 2023-11-09 01:12:05 -08:00
Enrico Ros e1f8230bc9 Debouncing hook for Frontend. 2023-11-09 00:53:34 -08:00
Enrico Ros 47f1fcd3bf Ollama: full support (stream, gen, list, pull, index). Fixes #179 2023-11-08 17:47:41 -08:00
Enrico Ros 73d0f430fa Improve Shortcuts 2023-11-08 15:14:36 -08:00
Enrico Ros fc812654d1 Shortcuts: require ctrl/shift state 2023-11-08 15:14:36 -08:00
Enrico Ros e84a9e46c0 OpenAI: removed properties 2023-11-08 14:58:44 -08:00
Enrico Ros c354d146ae OpenAI: improve errors display 2023-11-08 14:52:14 -08:00
Enrico Ros ce2441affe Ctrl+Shift+R: regenerate assistant 2023-11-08 14:21:56 -08:00
Enrico Ros c695d4b6d4 Shortcuts: stop propagation, just in case 2023-11-08 14:06:18 -08:00
Enrico Ros be7dc82b75 Cleanup streaming errors 2023-11-08 13:55:59 -08:00
Enrico Ros 4b5519a134 Cleanup streaming errors 2023-11-08 13:53:32 -08:00
Enrico Ros 3dd9e56708 Reuse more tRPC fetchers 2023-11-08 13:35:56 -08:00
Enrico Ros a78658aac7 OpenAI: Vision (Preview) -> Vision 2023-11-08 13:20:17 -08:00
Enrico Ros 65b46cfe79 Improve and disambiguate tRPC errors 2023-11-08 12:22:11 -08:00
Enrico Ros 5d20b63f98 Server-side errors 2023-11-08 11:53:14 -08:00
Enrico Ros 54288bb2e2 Streaming & Fetches: improve error reporting 2023-11-08 11:32:39 -08:00
Enrico Ros b3be1c6e91 Wire cleanups 2023-11-08 01:52:03 -08:00
Enrico Ros bdcc0fb09f Roll packages 2023-11-08 01:07:13 -08:00
Enrico Ros 635e54ae07 Cloudflare deployment docs: mention the compatibility flags
Fixes #174
2023-11-08 00:55:53 -08:00
Enrico Ros 58fa4465ce Cleanup 2023-11-08 00:32:39 -08:00
Enrico Ros 0adc273e0f Fix #182 properly. Allows special tokens. 2023-11-08 00:32:17 -08:00
Enrico Ros f76d5fa8ea Fix #182. Don't crash the UI if the tokenizer throws. 2023-11-08 00:26:49 -08:00
Enrico Ros 9615ff44af OpenAI: support for maxCompletionTokens (in desc) -> maxOutputTokens (in DLLM). Fixes #181
Note: you will have to "Update" the OpenAI models for this to be effective.
2023-11-08 00:10:05 -08:00
Enrico Ros db69516d5f bits 2023-11-07 23:34:45 -08:00
Enrico Ros 6e93b125d5 Azure: improve list clarity 2023-11-07 23:07:06 -08:00
Enrico Ros a187a89444 Model List: highlight latest 2023-11-07 23:06:54 -08:00
Enrico Ros a4c11646af OpenAI: update 128 'k' tokens 2023-11-07 22:44:10 -08:00
Enrico Ros 0a73eb2ca6 Openrouter: update models (new OpenAI, Google 32ks, Phind, Zephyr) 2023-11-07 22:35:39 -08:00
Enrico Ros b25dc4dbea docs: update Oobabooga 2023-11-07 22:14:29 -08:00
Enrico Ros a268f621eb docs: Added LocalAI 2023-11-07 22:14:23 -08:00
Enrico Ros 247b3228f9 Fully server-side Model Description 2023-11-07 22:14:10 -08:00
Enrico Ros 63541b37ec llms: scope files 2023-11-07 18:50:46 -08:00
Enrico Ros 3d507741e4 OpenAI: new models: improve appearance/defaults 2023-11-06 13:19:23 -08:00
Enrico Ros 86a3d86408 OpenAI: speculative support for 1106 models 2023-11-06 06:42:48 -08:00
Enrico Ros 9ce61b6ea3 OpenAI: speculative support for 1106 models 2023-11-06 06:40:19 -08:00
Enrico Ros a9d97b97bb Anthropic: show model refresh button when missing key 2023-11-05 21:21:51 -08:00
Enrico Ros 87f0bf16fa Update OpenAISourceSetup.tsx 2023-11-03 07:40:12 -07:00
Enrico Ros 5dcdff20d4 Shortcuts: fix names 2023-11-02 17:37:44 -07:00
Enrico Ros 151117ed5e Default Fast/Func llms to 'gpt-3.5-turbo-16k-0613' 2023-11-02 16:24:12 -07:00
Enrico Ros b7e40cfb6b Enable Speech Recognition on IPhone 2023-11-02 16:22:31 -07:00
Enrico Ros 16be43edcc Audio: begin cleanup 2023-11-02 16:13:48 -07:00
Enrico Ros 5fe3aa56cc Calls: Feedback menu items 2023-11-02 16:13:48 -07:00
Enrico Ros 9ed75a4d55 Call: show the presence of context 2023-11-02 16:13:48 -07:00
Enrico Ros 7fed742bab Personas: set starters and voice IDs for all 2023-11-02 16:13:48 -07:00
Enrico Ros 16b6c0dd43 Call UI: override voice 2023-11-02 16:13:48 -07:00
Enrico Ros da6555dfc7 Call UI: quickfixes 2023-11-02 16:13:48 -07:00
Enrico Ros 351d25170b Call: take it off the experimental flag 2023-11-02 16:13:48 -07:00
Enrico Ros fce4f043a4 Call: Call Wizard to debug issues before they present themselves 2023-11-02 16:05:33 -07:00
Enrico Ros 90fb3945a6 Call: persona dropdown buttons 2023-11-02 16:05:28 -07:00
Enrico Ros b7d56afb52 Style: final adjustments 2023-11-02 15:55:39 -07:00
Enrico Ros 23c8dc27cf Style: cleanup 2023-11-02 15:55:38 -07:00
Enrico Ros 5660b592de Style: improve message colors 2023-11-02 15:55:38 -07:00
Enrico Ros 9b2f938b49 Style: improve theming 2023-11-02 15:55:37 -07:00
Enrico Ros 3a4f5ffa3d Style: persona selector fixes 2023-11-02 15:55:37 -07:00
Enrico Ros 14b8350bf1 Roll Joy 5.0.0-beta.13 2023-11-02 15:55:21 -07:00
Enrico Ros e9ec1361ac Rationalize AppLayout state, and add Shortcuts
Ctrl + Alt + M: quick model setup
Ctrl + Alt + P: preferences
2023-11-02 15:41:11 -07:00
Enrico Ros a283d034e1 Support for String avatar 2023-11-02 01:56:50 -07:00
Enrico Ros 5e8fd7ea4e Support for String avatar 2023-11-02 01:56:41 -07:00
Enrico Ros 121bbd0d6f Add LeftButton support 2023-11-02 01:53:16 -07:00
Enrico Ros 2db5fd545b tRPC: don't repeat curl debug 2023-11-01 17:31:40 -07:00
Enrico Ros 3dc94c7f23 Fix normal paste. 2023-11-01 17:31:07 -07:00
Enrico Ros dafc5117d2 Reduce visibility 2023-11-01 17:15:25 -07:00
Enrico Ros 2297a20a15 Fix state 2023-11-01 16:29:53 -07:00
Enrico Ros ca37803be3 Cleanup code path for 'draw-imagine-plus' - prompt is still not great 2023-11-01 16:25:08 -07:00
Enrico Ros e3d2327d93 Enter to send: renamed to Enter is Newline 2023-10-30 17:36:07 -07:00
Enrico Ros 53533d0f9d Draw+: simple prompt augmentation - will redo with a preview window 2023-10-29 00:06:38 -07:00
Enrico Ros 6b51a9f69b ChatMode - extract as store, to persist between top-levels
Not sure it belongs here, maybe should be part of a Chat Store instead.
2023-10-28 23:57:01 -07:00
Enrico Ros 33e1f7e21f Debug - hook to understand component lifetimes 2023-10-28 23:23:30 -07:00
Enrico Ros 7e86104ef9 Debug - hook to understand component lifetimes 2023-10-28 23:22:35 -07:00
Enrico Ros a577823b48 Cleanup routing 2023-10-28 22:40:29 -07:00
Enrico Ros e59d6b089f Easier Drawing, mode description, accessible settings 2023-10-27 02:05:32 -07:00
Enrico Ros a8839b71ac Prodia: unified SDXL support, with model list, priority, advanced settings, resolution, default to R.V.5 2023-10-27 01:29:50 -07:00
Enrico Ros 6e7aa71b0d Differentiate network issues 2023-10-27 01:27:34 -07:00
Enrico Ros 1486f61511 Roll packages (Prisma, tRPC, types) 2023-10-26 22:57:50 -07:00
Enrico Ros d68a1c34bf next.js: lock down to 13.4; 13.5 inflates the outputs ("parsed size" increases), and 14 even more. I see more compiled modules and lower speed 2023-10-26 16:40:13 -07:00
Enrico Ros 8c2bbe2eb4 Update tsconfig.json, and remove a bad dep 2023-10-26 14:45:19 -07:00
Enrico Ros 6fff438872 Call: composer buttons (disabled) 2023-10-26 14:32:46 -07:00
Enrico Ros db110a9957 Buildfix 2023-10-25 12:34:44 -07:00
Enrico Ros 0fd14db84c useGlobalShortcut: Ctrl+Shift+V to paste attachment 2023-10-25 12:29:05 -07:00
Enrico Ros ecce20d2bf useGlobalShortcut: register shortcuts for global actions 2023-10-25 12:29:02 -07:00
Enrico Ros 1e8782a177 (old) Sent History: remove 2023-10-25 12:29:00 -07:00
Enrico Ros 17e05cf5af Capabilities framework: begin 2023-10-25 12:17:15 -07:00
Enrico Ros 28989f8828 Linting 2023-10-25 11:53:35 -07:00
Enrico Ros dd774eedfb Roll Prisma 2023-10-25 11:53:32 -07:00
Enrico Ros b828fc0c57 Fix OpenAI/Helicone 2023-10-25 11:53:30 -07:00
Enrico Ros d2e0fecfb7 Easier Drawing Mode 2023-10-25 11:28:48 -07:00
Enrico Ros 1d0e789902 Rename constant 2023-10-24 21:57:05 -07:00
Enrico Ros 796aeb99a4 Improve server-side debugging 2023-10-24 21:55:48 -07:00
Enrico Ros f756ac5fc2 CloudFlare: document how to fix build - closes #174 2023-10-24 13:31:23 -07:00
Enrico Ros 9b779e788f Tryfix #174 2023-10-24 12:51:20 -07:00
Enrico Ros e11ca878b6 Roll Superjson and types 2023-10-24 12:34:47 -07:00
Enrico Ros 8ebcff6483 Roll Prisma and tRPC 2023-10-24 12:31:24 -07:00
Enrico Ros f8e23b4016 Style: Theme components - to keep style consistent 2023-10-24 12:27:03 -07:00
Enrico Ros d2217eb142 Style: format theme file 2023-10-24 12:27:03 -07:00
Enrico Ros 68274d827e Style: Preferences modal fixes 2023-10-24 12:27:03 -07:00
Enrico Ros 76601c1d46 Style: bunch of FormControl adjustments 2023-10-24 12:11:15 -07:00
Enrico Ros b526998c8b Anthropic: add support through AWS/Bedrock 2023-10-24 00:11:21 -07:00
Enrico Ros fcf5316aa1 OpenAI: further improve debugging 2023-10-24 00:06:21 -07:00
Enrico Ros dffef1a6e9 FormFieldText: add disablement 2023-10-23 23:40:21 -07:00
Enrico Ros ec29c63cf3 OpenAI transport: more debuggability 2023-10-23 23:39:50 -07:00
Enrico Ros a35f259986 Remove double click on chat button to set mode 2023-10-23 22:03:30 -07:00
Enrico Ros 206345b451 Package.json: add Node 20 support 2023-10-23 22:01:57 -07:00
Enrico Ros 622bde003e Debug: llm streaming I/O (default: off) 2023-10-23 21:15:54 -07:00
Enrico Ros 9a80b8870e Smaller 2023-10-21 15:35:56 -07:00
Enrico Ros cdaf97226a Documentation: Azure OpenAI (has GPT-4-32k) 2023-10-21 15:24:52 -07:00
Enrico Ros 3a66f50318 Documentation: Azure OpenAI (has GPT-4-32k) 2023-10-21 15:12:41 -07:00
Enrico Ros 7b27f0ed22 Call: cleanups 2023-10-19 17:36:52 -07:00
Enrico Ros ba35840cbd Call - brand new application; baseline support
Notes:
 - Sounds Source: https://mixkit.co/free-sound-effects/phone-ring/
2023-10-19 17:13:13 -07:00
Enrico Ros 7ab347523f Roll packages 2023-10-19 16:25:16 -07:00
Enrico Ros ddccd78269 Token counting: much better counting/presentation - verified: perfect 2023-10-19 16:11:51 -07:00
Enrico Ros 77c781e7b8 Moar Bette Env Vars 2023-10-19 14:51:15 -07:00
Enrico Ros 26030c1efe Update Env Vars docs. 2023-10-19 14:46:15 -07:00
Enrico Ros d8313f4d0a Document Environment Variables 2023-10-19 14:40:50 -07:00
Enrico Ros 5225dc34e1 Update Documentation: Docker 2023-10-19 13:47:33 -07:00
Enrico Ros b6d9393513 Update Documentation 2023-10-19 13:37:33 -07:00
Enrico Ros 54f66da5d8 Minor cleans 2023-10-19 12:59:43 -07:00
Enrico Ros ae3d4750f3 Cleanup code and update OpenRouter settings 2023-10-19 12:49:01 -07:00
Enrico Ros 56cb1c6d24 Cleanup 2023-10-19 12:23:22 -07:00
Enrico Ros 371f02c869 ChatGPT Importer: Working again - but OpenAI may be unreliable. Closes #165 2023-10-19 04:47:34 -07:00
Enrico Ros a450cdaa42 Try a fix for OpenAI import 2023-10-19 04:39:25 -07:00
Enrico Ros 8989bf9a4f Text Tools: Highlight differences 2023-10-19 03:18:16 -07:00
Enrico Ros d41ad780c5 Fix custom personas being lost when switching to other personas. Mark the custom as final. 2023-10-19 00:11:14 -07:00
Enrico Ros ed3a752912 Write down changes 2023-10-18 23:33:24 -07:00
Enrico Ros 358378c7e6 Merge branch 'jontybrook-main' 2023-10-18 23:21:49 -07:00
Enrico Ros 05097af27b Merge branch 'main' of https://github.com/jontybrook/big-agi into jontybrook-main 2023-10-18 23:21:37 -07:00
Enrico Ros 15eb6a235d OpenAI "-instruct" models cannot be used for the chat endpoint. Closes #169 2023-10-18 22:54:08 -07:00
Enrico Ros 138b043f0f Anthropic: full support for Helicone. Closes #173 2023-10-18 22:43:35 -07:00
Enrico Ros 99557b46f5 OpenAI: explain Helicone setup 2023-10-18 22:39:05 -07:00
Enrico Ros 4d42379374 Simplify OpenAI source setup 2023-10-18 21:58:27 -07:00
Enrico Ros fb207d99b9 Improve Sharing Store 2023-10-18 17:47:18 -07:00
Enrico Ros 188a18d6ac Great working Shared Links history 2023-10-18 17:45:27 -07:00
Enrico Ros e81acdf0eb Show outgoing chatlinks (stored locally) 2023-10-18 17:12:58 -07:00
Enrico Ros 99ba47397a Store ChatLink Chat Title too 2023-10-18 16:05:39 -07:00
Enrico Ros 380e07aa9c Less 'share' 2023-10-18 15:59:56 -07:00
Enrico Ros 6aa98da2f4 chatLinkId 2023-10-18 15:56:39 -07:00
Enrico Ros 30d2416ba2 ChatLink: as precaution - append object/keys in localstorage 2023-10-18 15:55:17 -07:00
Enrico Ros 695fde6f8b ChatLink: remember ID 2023-10-18 15:43:34 -07:00
Enrico Ros 989b4461e7 ChatLink: move to /link/chat, update DB, cleanups 2023-10-18 15:25:46 -07:00
Enrico Ros 2d0ec4df8a Move conv title 2023-10-18 15:12:40 -07:00
Enrico Ros 42fe23a4cf Update autoSuggestions.ts 2023-10-17 23:24:10 -07:00
Enrico Ros 66b79054df Cleanups 2023-10-17 21:42:03 -07:00
Enrico Ros 06a2fe3fcc ViewShared: Detect tables and turn on markdown 2023-10-17 21:11:17 -07:00
Enrico Ros 7ffc8df247 Fix a visual bug (overflow-x) 2023-10-17 20:48:38 -07:00
Enrico Ros f934bad2e4 Small bits 2023-10-17 20:20:02 -07:00
Enrico Ros 302c674d70 Remove older file 2023-10-17 19:47:27 -07:00
Enrico Ros c9231684f6 Roll PDFJS 2023-10-17 19:45:32 -07:00
Enrico Ros 3a150c063f Roll Tesseract 2023-10-17 19:41:19 -07:00
Enrico Ros 91e8da3a53 Roll misc packages 2023-10-17 19:37:42 -07:00
Enrico Ros 17a36e1fc3 Roll markdown (and github flavored markdown) 2023-10-17 19:29:29 -07:00
Enrico Ros 7ff9e9f75c Roll tRPC 2023-10-17 19:02:06 -07:00
Enrico Ros 3fadba76ba Roll typescript 2023-10-17 19:00:34 -07:00
Enrico Ros 7ea232f516 Formalize Shared Viewer application 2023-10-17 19:00:11 -07:00
Enrico Ros 65831fa1e9 Extract Logo Progress 2023-10-17 18:59:37 -07:00
Enrico Ros 58ad0ece69 Render Markdown: ON by default (test) 2023-10-17 18:10:02 -07:00
Enrico Ros d2c7261f74 Update README.md 2023-10-17 17:20:19 -07:00
Enrico Ros ac9e415d08 Version 1.4.0 2023-10-17 17:19:22 -07:00
Enrico Ros 42646c1ee2 Docker: try the fix again 2023-10-17 17:05:09 -07:00
Enrico Ros 65f0cf4c8f Docker: try this fix (npm ci won't run postinstall, which is Prisma generate) 2023-10-17 17:00:50 -07:00
Enrico Ros b207d61f78 SSS: improve the viewing page 2023-10-17 15:58:24 -07:00
Enrico Ros dcebd08f55 SSS: return the creation date 2023-10-17 15:58:08 -07:00
Enrico Ros bacc153cc8 SSS: improve creation dialog 2023-10-17 15:57:46 -07:00
Enrico Ros 331ecfeae5 Chat message - disable copy on hover by default 2023-10-17 15:57:24 -07:00
Enrico Ros d3d45c82d4 Improve messaging 2023-10-17 14:08:15 -07:00
Enrico Ros be641a43c3 Improve the appearance of time 2023-10-17 13:47:01 -07:00
Enrico Ros b00479ffbb SSS: simplify variables 2023-10-17 13:32:45 -07:00
Enrico Ros de305dbdb9 Sharing Page 2023-10-17 05:15:15 -07:00
Enrico Ros 7958f87c24 More flexible ChatItems 2023-10-17 05:13:33 -07:00
Enrico Ros e5d6d9fc16 More Imaginable and Speakable 2023-10-17 05:09:33 -07:00
Enrico Ros 052f71b1bd More Flexible Chat Messages 2023-10-17 05:05:53 -07:00
Enrico Ros df08ec2b51 Fix to show the models dialog when not configured 2023-10-17 05:05:08 -07:00
Enrico Ros a5c89a3edd Routes: navigate to chat 2023-10-17 04:45:45 -07:00
Enrico Ros 52d26cb825 Improve layout 2023-10-17 04:45:13 -07:00
Enrico Ros c46741f733 Import Conversation: update signature 2023-10-17 04:44:54 -07:00
Enrico Ros 3f63d03572 Visibility changes 2023-10-17 02:53:50 -07:00
Enrico Ros 9129e9b507 Visibility changes 2023-10-17 02:53:33 -07:00
Enrico Ros 911c2e8b27 Azure: remove router - obsolete since the llms transport unification 2023-10-17 01:10:08 -07:00
Enrico Ros a2de6e358c SSS: add Share to big-AGI
This is the first change to require server-side DB, and required to
pull in Prisma for ORM.

There are 3 env vars needed during build time and run time to activate this feature.
2023-10-17 01:09:43 -07:00
Enrico Ros 3971bcedda PWA: add Web Share helpers 2023-10-17 00:55:33 -07:00
Enrico Ros 725b08e021 Trade: server-side prisma utility function 2023-10-17 00:37:08 -07:00
Enrico Ros 49755abe8b Trade: add server-side-storage 2023-10-17 00:35:27 -07:00
Enrico Ros 45cfb14219 Misc: reuse origin functions 2023-10-17 00:34:04 -07:00
Enrico Ros fa656726ef UI: improve Modals 2023-10-17 00:32:53 -07:00
Enrico Ros 784d6361f8 Ignore more 2023-10-16 21:13:17 -07:00
Enrico Ros 66782faba4 tRPC: add a Node NextJS API route (Edge Function), in addition to the existing Edge Runtime
Move the 'trade' router from the Edge to the Node runtime.
2023-10-16 18:24:39 -07:00
Enrico Ros 8c40fadc2e Sharing: constrain the spec of the stored object 2023-10-16 17:33:57 -07:00
Enrico Ros 13b97e58f5 SSS: Sharing schema 2023-10-16 16:24:26 -07:00
Enrico Ros 50c1b84f94 UI: helpers for showing badges 2023-10-16 15:59:19 -07:00
Enrico Ros df76ec7d6f tRCP: move client code to src/common/utils 2023-10-16 15:25:56 -07:00
Enrico Ros 47539c8d44 tRCP: move server code to the new src/server 2023-10-16 15:20:14 -07:00
Enrico Ros 6022aeee50 Server-Side-Storage (SSS): use Prisma 2023-10-16 14:58:10 -07:00
Enrico Ros 7aaae21e0c Trade: move router locally 2023-10-16 01:00:03 -07:00
Enrico Ros 69db13e4c4 Privacy policy URL, available to the Client side 2023-10-16 00:45:31 -07:00
Enrico Ros 8661bf6fc8 Trade (import/export): cleanup 2023-10-16 00:44:32 -07:00
Enrico Ros 85562b5888 Mobile share_target: move to /launch 2023-10-15 23:53:00 -07:00
Enrico Ros 9a851e342f Azure: unhide gpt4-32k 2023-10-15 17:10:32 -07:00
Enrico Ros 5278c04051 Share: improve menu items 2023-10-15 16:52:27 -07:00
Enrico Ros 21a04212d5 OpenRouter: show free models 2023-10-15 16:51:56 -07:00
Enrico Ros 005ad5b042 OpenRouter: enable mistral 2023-10-15 16:34:42 -07:00
Enrico Ros e25c0dc006 OpenRouter: update models, and doc the gpt4 update prompt 2023-10-15 16:34:02 -07:00
Enrico Ros 09d38eb57c Merge pull request #171 from enricoros/llms-rework
Llms rework
2023-10-11 19:20:13 -07:00
Enrico Ros 19361ac7cb Update README.md 2023-10-10 19:35:38 -07:00
Enrico Ros 85e97e984b OpenRouter: update available model names 2023-10-10 02:24:25 -07:00
Enrico Ros dcd7f65223 Show when there's server-side key support 2023-10-10 01:45:11 -07:00
Enrico Ros 6ee231d271 OpenRouter: server-side API key support 2023-10-10 01:16:56 -07:00
Enrico Ros acd34f7b8d Rework of the LLM paths in progress 2023-10-06 02:03:15 -07:00
Jonty Brook e339262251 (feat): add support for cloudflare ai gateway openai endpoints 2023-10-05 15:25:16 +01:00
Enrico Ros acb06bcc6d Improve error reporting to debug #165 2023-10-03 22:01:57 -07:00
Enrico Ros 25da2556ac Render HTML within Code blocks 2023-09-29 07:44:39 -07:00
Enrico Ros b2f7c6f204 HTML block: render as HTML, e.g. in case of a full proxy 2023-09-29 07:29:20 -07:00
Enrico Ros 5272fa972a Merge pull request #163 from enricoros/feature-azure-openai
Land restructuring of the LLMs folder and partial Azure support. Full support will come next.
2023-09-22 23:07:00 -07:00
Enrico Ros 91353ced8a Azure: land in main, disable instancing as we finish it 2023-09-22 23:02:47 -07:00
Enrico Ros 3448267344 Llms: downgrade tsx -> ts (not required) 2023-09-22 22:47:40 -07:00
Enrico Ros 34c150924e Llms: bits 2023-09-22 22:29:33 -07:00
Enrico Ros 617f7676ce Llms: moved (client) vendors inside ../vendors 2023-09-22 22:20:22 -07:00
Enrico Ros adaff91225 Llms: removed and spread out llm.routes 2023-09-22 22:03:21 -07:00
Enrico Ros 1597675f4e Llms: separate client transport functions 2023-09-22 21:46:05 -07:00
Enrico Ros 2f92c81bee Llms: small move 2023-09-22 21:15:16 -07:00
Enrico Ros 1e0f11d064 Llms: move the server side proximally closer 2023-09-22 20:57:32 -07:00
Enrico Ros b26ddc422a Llms: remove the 'types' file and extract the vendor description out 2023-09-22 20:22:54 -07:00
Enrico Ros 813d95b898 Llms: move out icons 2023-09-22 19:27:33 -07:00
Enrico Ros 4f3f7963d0 Llms: cleanup routers 2023-09-22 19:15:38 -07:00
Enrico Ros 4d2209ca8d Anthropic: rename wiretypes 2023-09-22 19:14:41 -07:00
Enrico Ros 06e866a3e8 LLms: unify model priors 2023-09-22 18:31:02 -07:00
Enrico Ros aee6c85349 Llms: cleanups 2023-09-22 02:05:19 -07:00
Enrico Ros cd141048f5 Azure: immediate chat calls are working - integration still WIP 2023-09-22 02:28:38 -07:00
Enrico Ros 01ea8c7091 Azure: consistent naming of endpoints 2023-09-22 01:48:31 -07:00
Enrico Ros ce08f6fc50 Extended PlantUML support to mindmaps, improved syntax highlighting and language detection. 2023-09-22 01:42:43 -07:00
Enrico Ros b16fc0b0c1 Improve error messaging on http errors 2023-09-22 01:38:09 -07:00
Enrico Ros f69245adaa Partial Azure OpenAI Service support 2023-09-22 01:26:08 -07:00
Enrico Ros da751c06ca Anthropic: reduce access 2023-09-22 01:25:55 -07:00
Enrico Ros 6f92a2ec2c Anthropic: cleanup the hardcode 2023-09-22 01:23:29 -07:00
Enrico Ros bb42f3cd77 Begin the move of model descriptors to the server side 2023-09-21 23:19:08 -07:00
Enrico Ros 3fd4167335 Reminders 2023-09-20 01:24:15 -07:00
Enrico Ros 89820b94ef Latex Block: improve parsing, to fix https://github.com/enricoros/big-agi/issues/153#issuecomment-1698587371 2023-09-20 00:18:51 -07:00
Enrico Ros 99564e7fa1 OpenRouter: clarify config 2023-09-19 23:46:58 -07:00
Enrico Ros 908da13317 Fix ID clash 2023-09-19 23:42:05 -07:00
Enrico Ros a905a0d6e4 Augmented Chat (was ".. & Follow-Up") - first augmentation: Diagrams
Also fix function calling when with a mandatory function name,
even in case it doesn't hit.
2023-09-19 23:24:56 -07:00
Enrico Ros e86a83a676 Update voice dropdown 2023-09-19 21:17:17 -07:00
Enrico Ros 48dcdaaa57 React hooks for LLM/Persona selects 2023-09-19 21:07:38 -07:00
Enrico Ros c1d0093d48 3.5 Turbo Instruct models - not supported for Chat (only /completions) 2023-09-19 08:39:34 -07:00
Enrico Ros bfa4ab46b1 What happened here 2023-09-19 08:13:04 -07:00
Enrico Ros cb83f2ddb0 Update messages 2023-09-19 07:55:51 -07:00
Enrico Ros 14a5e3a9b8 Merge branch 'main' of https://github.com/DeFiFoFum/big-agi into DeFiFoFum-main 2023-09-19 07:39:07 -07:00
Enrico Ros 7cffa7931a Merge branch 'Ashesh3-IndexedDB-storage' 2023-09-19 07:29:29 -07:00
Enrico Ros c83637118c IndexedDB: strengthen the migration process, including localStorage backup (#158) 2023-09-19 07:26:23 -07:00
Enrico Ros f4cd952b1c OpenRouter: describe how to configure it 2023-09-18 23:13:23 -07:00
Enrico Ros 34d5a32fe5 Re-Enable Speech Recognition on Safari (still untested on iPhones) 2023-09-18 22:43:23 -07:00
Enrico Ros f1e5585337 Merged #158 2023-09-15 07:44:25 -07:00
Enrico Ros b7c7268806 Merge branch 'IndexedDB-storage' of https://github.com/Ashesh3/big-agi into Ashesh3-IndexedDB-storage 2023-09-15 12:03:09 -07:00
Enrico Ros 7f49ddb2cc Export text cleanup 2023-09-15 07:00:32 -07:00
Enrico Ros 0e7c9e3d45 A forked chat's messages are all done (not typing) 2023-09-13 07:26:52 -07:00
Enrico Ros 7e6a7a2e2a Show chat sizes when at the maximum 2023-09-13 07:23:39 -07:00
Enrico Ros 5cc2661375 Update news page 2023-09-12 00:46:14 -07:00
Enrico Ros 1aefd6836c Hovering models in the list adds the context window size overlay 2023-09-12 00:35:45 -07:00
Enrico Ros ae6b9c5eed OpenRouter: enable 2023-09-12 00:35:24 -07:00
Enrico Ros 901db54fe9 OpenRouter: send HTTP headers 2023-09-12 00:35:07 -07:00
Enrico Ros bf0068f015 OpenRouter: improve models list from the official docs page
https://openrouter.ai/docs#models
2023-09-12 00:34:36 -07:00
Enrico Ros d7e974fff4 OpenRouter: improve models list 2023-09-11 23:41:38 -07:00
Ashesh c611066a58 Add migration 2023-09-11 19:26:57 +00:00
Ashesh 67d3b21414 Update package-lock file 2023-09-11 19:26:11 +00:00
Ashesh3 3e38a71893 Use IndexedDB for storing chats 2023-08-30 12:23:11 +05:30
defifofum 9829f99055 Add chat drawer indicator for num conversations 2023-08-29 17:30:21 -05:00
Enrico Ros 17077e4c16 Render Latex via React-Katex (dynamic) 2023-08-25 19:45:57 -07:00
Enrico Ros f7aed8dea6 Improve block parsing, now with inline images, multiple-interleaved blocks support 2023-08-25 19:19:00 -07:00
Enrico Ros 9c1d5d761e Cleanups 2023-08-23 09:24:51 -07:00
Enrico Ros 74f8e66a70 Dynamic Code Highligher/Type Inferrer import. Large performance gains. 2023-08-23 08:58:34 -07:00
Enrico Ros f626d98fcf Remove scrollbar 2023-08-23 08:42:49 -07:00
Enrico Ros c0235e212f Clearer, faster, and more scoped Code Rendering 2023-08-23 08:15:27 -07:00
Enrico Ros 4d21e136bc Disable duplication when it would lose data 2023-08-23 07:05:17 -07:00
Enrico Ros 49f30a8e62 Roll packages 2023-08-23 00:43:41 -07:00
Enrico Ros 131b0c7351 Report error message from Google Search misconfiguration 2023-08-20 17:18:44 -07:00
Enrico Ros 4e3b1706cf ElevenLabs: support Streaming (output) endpoint & extract Voices Dropdown 2023-08-20 16:21:20 -07:00
Enrico Ros 9495a509e6 Update useSpeechRecognition to reflect enablement state
Note: the REF is holding the current state, while the state holds the
delayed state. But it's good enough to
never race-cond from the UI.
2023-08-20 16:17:28 -07:00
Enrico Ros c9b22215aa Vast improvements to the Speech Reco hook
- Fix initial delay, up to ~8s on desktop
- Much improve this state
- More improvements and cleanups
2023-08-20 15:42:02 -07:00
Enrico Ros e5e2b9b8b0 Personas: custom avatars and voices 2023-08-20 15:26:35 -07:00
Enrico Ros 98d791810a MPEG Streaming support in the ElevenLabs API
With this patch, the edge function begins streaming the content right away.
This leads to some minor optimization for the non-streaming use case, as there
is no large audio file kept on the server before transferring.
But this mainly creates a large optimization for the "streaming" use case,
as the data trickles in, it is sent to the client in pass-through fashion.
2023-08-17 23:54:57 -07:00
Enrico Ros 7b107df84e fix Chat Message errors displayed as objects 2023-08-17 08:35:32 -07:00
Enrico Ros bbf6e289d3 Cleanups 2023-08-17 14:53:13 -07:00
Enrico Ros 81bbbdf652 Extract audioUtils 2023-08-16 23:32:22 -07:00
Enrico Ros 0fe49cf5a9 Pre-flush 2023-08-16 21:47:28 -07:00
Enrico Ros ca87e4b118 Typical spacings if options are appended to the dropdown (consistency) 2023-08-16 21:36:44 -07:00
Enrico Ros 2ad13fb1b4 Fix Error 2023-08-16 21:36:08 -07:00
Enrico Ros cd245a9ef6 Improve predictability of Speech Reco availability 2023-08-16 21:20:48 -07:00
Enrico Ros 4a258a32eb More resilient component (for usage in unintended uses) 2023-08-16 21:07:05 -07:00
Enrico Ros 5b8c9281f1 Improve Speech Recognition (less delay, better casing of the temp vs. final) 2023-08-16 21:06:14 -07:00
Enrico Ros 60c8952863 Lints 2023-08-16 07:10:26 -07:00
Enrico Ros 9a320d451c Consistency improvements 2023-08-16 07:05:19 -07:00
Enrico Ros a3f16cae1e Update news 2023-08-15 01:07:53 -07:00
Enrico Ros 76966ebd6c Explain the Goofy Labs mode 2023-08-15 01:06:53 -07:00
Enrico Ros fee11748df Show progress when Goofying around 2023-08-15 00:43:04 -07:00
Enrico Ros 871079c23d Labs Mode 2023-08-15 00:41:01 -07:00
Enrico Ros 15d39cea06 Roll packages 2023-08-15 00:20:16 -07:00
Enrico Ros 02d0948982 Auto-Close Drawer after selecting a chat, esp. useful on Mobile 2023-08-13 03:09:58 -07:00
Enrico Ros b96623d558 Reduce to 10 lines of Composer input (max) - as mobile is overflowing 2023-08-13 02:57:55 -07:00
Enrico Ros 3875d2d1d2 Text 2023-08-12 01:43:28 -07:00
Enrico Ros 65f5e0cb0b Roll packages 2023-08-11 23:12:09 -07:00
Enrico Ros 82455eb7c5 Support Anthropic Claude-Instant 1.2 2023-08-11 07:19:46 -07:00
Enrico Ros b44deb7404 Improve YT Persona, with Instructions 2023-08-11 04:50:30 -07:00
Enrico Ros e1939e1a50 Large YT Creation improvement 2023-08-11 04:36:20 -07:00
Enrico Ros 0e546ddbbb Extract URL and Title from video as well 2023-08-11 04:29:24 -07:00
Enrico Ros c070af524f Fix Global to Local chain aborts 2023-08-11 04:29:09 -07:00
Enrico Ros a27178845b Improve 'Models' Settings Button 2023-08-11 03:19:42 -07:00
Enrico Ros 6a8463c82e Fix TabFlex 2023-08-11 03:14:30 -07:00
Enrico Ros b5ba5b524c Stop LLM Chain correctly 2023-08-11 03:12:55 -07:00
Enrico Ros 4cc953edd4 Merge branch 'feature-youtrip'
# Conflicts:
#	src/apps/chat/components/persona-selector/PersonaSelector.tsx
#	src/modules/trpc/trpc.router.ts
2023-08-11 01:31:43 -07:00
Enrico Ros 854ff927b9 Roll packages 2023-08-11 01:29:13 -07:00
Enrico Ros 60ed8b3298 Update news 2023-08-11 01:28:01 -07:00
Enrico Ros 3bb05f2bed Export / Import all, and use the File Picker for both load and save (where available) 2023-08-11 01:08:12 -07:00
Enrico Ros 38793b51a3 Export: backup all 2023-08-10 07:57:19 -07:00
Enrico Ros 7d81a84c85 Accept plugins as 'recipients' in the ChatGPT import 2023-08-10 07:13:15 -07:00
Enrico Ros e63f6156fe Overhaul 'Trade' (Import and Export) 2023-08-10 03:04:38 -07:00
Enrico Ros f63105ef14 Sort 2023-08-10 00:41:33 -07:00
Enrico Ros ea392d2189 Cleanups 2023-08-09 23:06:03 -07:00
Enrico Ros 0da8930e26 Fix menu length in the worst case 2023-08-09 22:25:45 -07:00
Enrico Ros 19229b1bbc Fix oblong buttons 2023-08-09 22:04:57 -07:00
Enrico Ros 65bbca20ff Cleanup ChatGPT import - fixes #150 2023-08-09 21:50:30 -07:00
Enrico Ros 852fba0d35 Import ChatGPT Shared conversations 2023-08-09 20:31:04 -07:00
Enrico Ros e58643908d Improve Import/Export, moving code out of the AppChat 2023-08-09 18:08:32 -07:00
Enrico Ros 38fe0b84ff Rename 2023-08-09 08:12:39 -07:00
Enrico Ros 29194f7003 Rename Chat Application files 2023-08-09 08:09:51 -07:00
Enrico Ros 78fe408dcd PasteGG -> Sharing 2023-08-09 07:39:29 -07:00
Enrico Ros e024e73286 Shared server-side TRPC-throwing safe fetch functions 2023-08-09 07:22:57 -07:00
Enrico Ros 69dce7af16 Cleanups 2023-08-09 03:25:41 -07:00
Enrico Ros de0bd30eda As GoofyLabs feature 2023-08-09 03:19:38 -07:00
Enrico Ros 86bdcea181 Improve YTPersonaCreator 2023-08-09 03:12:05 -07:00
Enrico Ros faa6d12430 Add YouTube persona creation (part), incl. LLM Chain Hook
The features can fetch and analyze the captions/transcripts of YouTube videos.
This analysis is then used to create detailed character sheets which will be used
to generate the system prompt of a persona.
2023-08-09 02:02:45 -07:00
Enrico Ros 67636c6b92 Improve spacing for the Models menu 2023-08-09 02:01:06 -07:00
Enrico Ros 19788fcf05 Update Theme & Manifest 2023-08-09 01:49:52 -07:00
Enrico Ros e019344d09 UI Fix 2023-08-08 22:22:08 -07:00
Enrico Ros bce6669874 Black Switcher bar 2023-08-08 22:03:59 -07:00
Enrico Ros bde43b751a Blue Switcher bar 2023-08-08 22:02:06 -07:00
Enrico Ros 10ce70f444 Cleanup 2023-08-08 21:36:57 -07:00
Enrico Ros f52b514ea7 Dumb rename 2023-08-08 21:31:21 -07:00
Enrico Ros 87e9cca57b Merge branch 'feature-joy5' 2023-08-08 21:09:38 -07:00
Enrico Ros 2ce1492394 OpenRouter models - initial support 2023-08-08 21:09:05 -07:00
Enrico Ros 0a5d4f670c App Item Switcher 2023-08-08 20:40:49 -07:00
Enrico Ros 732a66d740 Fix the 'Arial' bug on Tabs, and restore the former Tabs look
See https://github.com/mui/material-ui/issues/38309 for tracking the
font issue.
2023-08-08 19:45:15 -07:00
Enrico Ros 8eae1f229f Largely improve pluggable layout 2023-08-08 19:07:56 -07:00
Enrico Ros 8526380448 Unalpha 2023-08-08 18:03:02 -07:00
Enrico Ros e80af8fc6c Unrole this 2023-08-08 18:00:14 -07:00
Enrico Ros dc58062e37 Improve Model Configurations styles 2023-08-08 18:00:06 -07:00
Enrico Ros 3d9ed088cd Improve code blocks 2023-08-08 08:24:59 -07:00
Enrico Ros 5900d2bd10 Roll packages 2023-08-08 08:15:06 -07:00
Enrico Ros 0cb09ffc44 Mic Fixes 2023-08-08 08:14:46 -07:00
Enrico Ros a13ddf490a Improve Select(s) and visibility of the conversation items 2023-08-08 07:54:17 -07:00
Enrico Ros 5cc02d261a Fix Menus post Joy Beta 2023-08-08 07:25:21 -07:00
Enrico Ros a4c83d3a00 Show fetch error descriptions 2023-08-06 22:26:02 -07:00
Enrico Ros c171c8494b Port to Joy 5-beta 2023-08-05 18:07:53 -07:00
Enrico Ros a6777d7c88 Improve CSS of the Code blocks 2023-08-05 18:05:38 -07:00
Enrico Ros 2e4316d71a Update Oobabooga instructions 2023-08-05 17:51:50 -07:00
Enrico Ros 36a215ce73 Explain any JSON decode issue - fixes #141 2023-08-05 17:40:57 -07:00
Enrico Ros 8e2d8b16c0 Explain 403 2023-08-05 17:27:15 -07:00
Enrico Ros c94a7efc8c Add Code 2023-08-05 17:23:34 -07:00
Enrico Ros d94c184b68 Add error checkers on the GET methods for OpenAI 2023-08-05 17:21:30 -07:00
Enrico Ros f948664e01 Fix build - will revert later 2023-08-05 16:56:21 -07:00
Enrico Ros be5b650cdd Improve scrolling of Code blocks 2023-08-04 19:56:24 -07:00
Enrico Ros e83c5ce5e3 Prism CSS local import 2023-08-04 19:33:02 -07:00
Enrico Ros 0a1e712e1b Roll back Zustand - see https://github.com/pmndrs/zustand/discussions/1937 2023-08-04 16:11:58 -07:00
Enrico Ros fea23844d3 Update react-query 2023-08-04 16:10:44 -07:00
Enrico Ros e6ee2d4f44 Update PDFJS 2023-08-04 15:57:13 -07:00
Enrico Ros abd3acca76 Update tRPC (mostly) 2023-08-04 15:55:42 -07:00
Enrico Ros 7e076cd3c1 Merge pull request #148 from borgmon/patch-1
Update stream content-type
2023-08-04 12:33:29 -07:00
Enrico Ros bf633e17ca Cleanups 2023-08-02 01:32:29 -07:00
Enrico Ros fcecdd3d3f Cleanups 2023-08-02 01:32:01 -07:00
Kevin 783f5499b5 Update stream content-type 2023-08-01 17:50:48 -07:00
Enrico Ros 5fea852e1f Support multiple /imagine generations (.. xN, .. [N]) 2023-08-01 07:10:44 -07:00
Enrico Ros 47d5f47900 Small fix 2023-07-29 18:47:19 -07:00
Enrico Ros 39b26340c5 Merge pull request #145 from enricoros/feature-cam-ocr
Camera OCR - real-world text understanding
2023-07-29 18:32:02 -07:00
Enrico Ros b381700d94 Camera OCR drop 2023-07-29 18:31:03 -07:00
Enrico Ros 3551c66995 Disable Cam on Desktop 2023-07-29 18:29:46 -07:00
Enrico Ros cdd78f2477 Cleanup Camera support 2023-07-29 18:20:38 -07:00
Enrico Ros c2fd2ca716 Extract Camera Hook 2023-07-28 08:13:21 -07:00
Enrico Ros d71437b9f6 Dynamic Tesseract import 2023-07-28 07:51:40 -07:00
Enrico Ros 75ececb483 Roll packages 2023-07-28 07:41:36 -07:00
Enrico Ros c832a7bf11 Camera with OCR support 2023-07-28 14:28:36 -07:00
Enrico Ros bad8e684ee Remove badges from 'advanced' icons 2023-07-26 23:17:25 -07:00
Enrico Ros 4a2b9a3e40 Add personas sorting (but don't enable it yet) 2023-07-26 23:05:05 -07:00
Enrico Ros 0dc9a80c7a Improve chats scrolling 2023-07-26 06:01:25 -07:00
Enrico Ros 4cde3fe399 Remove Badge on App Menu 2023-07-26 05:51:58 -07:00
Enrico Ros f580044503 Improve menus 2023-07-25 07:43:49 -07:00
Enrico Ros 8ec54c5d16 Roll packages 2023-07-25 07:43:30 -07:00
Enrico Ros fc5cbc2e1f Prodia: support Aspect Ratios 2023-07-25 01:24:02 -07:00
Enrico Ros 6bdb666f4f Fix 2023-07-25 01:22:36 -07:00
Enrico Ros 33886f988f Dynamic Prodia models list, and error reporting 2023-07-25 01:02:40 -07:00
Enrico Ros 77979e14cd Support for Prodia upscale 2023-07-25 00:17:35 -07:00
Enrico Ros a942d5ff6f Merge pull request #144
Update prodia.models.ts
2023-07-24 23:59:43 -07:00
Ross Cohen f645e5641e Update prodia.models.ts 2023-07-25 02:42:14 -04:00
Enrico Ros 7c568c5ce6 Four model providers all working well 2023-07-24 21:49:10 -07:00
Enrico Ros 502706792e Nothing 2023-07-24 21:49:00 -07:00
Enrico Ros bed02cc1ca Improve OpenAI auto-hide heuristics 2023-07-24 21:16:10 -07:00
Enrico Ros d2aa7e5710 Default to showing a single vendor 2023-07-24 21:06:57 -07:00
Enrico Ros 7079eb6b72 Dropdowns: group models by vendor 2023-07-24 20:37:07 -07:00
Enrico Ros 366538f6c3 Optionally seed Model Source IPs 2023-07-24 20:14:18 -07:00
Enrico Ros 2a50347fc3 Fold this 2023-07-24 19:56:51 -07:00
Enrico Ros 546938d152 Move token-counter.ts 2023-07-24 19:44:52 -07:00
Enrico Ros 9a21cd4c8f Move the SettingsModal 2023-07-24 19:27:08 -07:00
Enrico Ros 95a5d1cef3 Half-convert the ModelsModal to an APP (overlay) 2023-07-24 19:26:32 -07:00
Enrico Ros cd2a7f43cb Update README.md 2023-07-23 23:43:11 -07:00
Enrico Ros c40b2e3c8a Server-side Anthropic key 2023-07-23 23:40:30 -07:00
Enrico Ros 1287330241 1.3.5 2023-07-23 03:28:57 -07:00
Enrico Ros 9666e58399 Anthropic: full support (stream and immediate calls) 2023-07-23 03:19:59 -07:00
Enrico Ros ac29d925a3 Roll packages 2023-07-22 18:20:06 -07:00
Enrico Ros 47d812ae12 Spelllling 2023-07-22 18:06:38 -07:00
Enrico Ros 346e4fba38 Cleanups 2023-07-22 18:02:48 -07:00
Enrico Ros 89184978b2 .openAI -> .llmOpenAI 2023-07-22 17:48:19 -07:00
Enrico Ros 1f67008765 Single instance LocalAI, to avoid confusions 2023-07-21 02:13:06 -07:00
Enrico Ros 09d58e26ed LocalAI support - works in Stream and Function mode 2023-07-21 02:12:18 -07:00
Enrico Ros 8c3d95cef8 Shrink msg 2023-07-21 00:59:43 -07:00
Enrico Ros 70ba39ce68 1.3.1 2023-07-21 00:32:34 -07:00
Enrico Ros ed80d8e468 Smarts: Flatten conversations 2023-07-21 00:14:35 -07:00
Enrico Ros dbd1d5a2a8 Resolve some lints 2023-07-21 00:00:36 -07:00
Enrico Ros bfdcb740cd Roll deps 2023-07-20 23:56:30 -07:00
Enrico Ros 0eecb410da Sticky Cleanup bar 2023-07-20 07:48:13 -07:00
Enrico Ros fa5c68c24d Update token count on edited messages 2023-07-20 07:28:35 -07:00
Enrico Ros a09c3fd728 Do not auto-timeout the microphone in the absence of interim results 2023-07-20 07:15:23 -07:00
Enrico Ros e803721e4b Merge branch 'JustMrPhoenix-feat-system-message' 2023-07-17 21:05:20 -07:00
Enrico Ros 3d715cc18d Complete merge. use /s to add system messages, and /a to add assistant's 2023-07-17 21:04:29 -07:00
Enrico Ros 1eb355a221 Merge branch 'feat-system-message' of https://github.com/JustMrPhoenix/big-agi into JustMrPhoenix-feat-system-message 2023-07-17 20:27:05 -07:00
Enrico Ros ec84b3ac91 Merge branch 'PtrckAraujo-patch-3' 2023-07-17 19:30:09 -07:00
Enrico Ros f8c34847ae Complete merge 2023-07-17 19:29:45 -07:00
Enrico Ros 366453d9f7 Smarts: Fork conversation 2023-07-16 13:01:11 -07:00
Enrico Ros 31b621ab3b Roll packages 2023-07-10 22:50:05 -07:00
Enrico Ros fcdc3266af Improve showing errors 2023-07-10 21:58:03 -07:00
Enrico Ros 6f251269cf Success. 2023-07-10 09:32:23 -07:00
Enrico Ros fb0b1d0549 Fix warn about missing OpenAI API key on local deployments 2023-07-10 09:18:56 -07:00
Enrico Ros c3dcc74dc2 Improve looks 2023-07-09 00:16:29 -07:00
Enrico Ros acfbb10907 Improve the Chat Mode menu 2023-07-09 00:14:44 -07:00
Enrico Ros ce580611bd Add a 'Write-only' mode, where the user can keep stacking messages without an Assistant reply 2023-07-09 00:05:58 -07:00
Enrico Ros c01b1dabb5 Improve short model naming 2023-07-08 23:32:35 -07:00
Enrico Ros 1c7f70022a Cleanups 2023-07-08 23:24:07 -07:00
Enrico Ros 7e658b5efd Hide another virtual model 2023-07-08 23:11:23 -07:00
Enrico Ros eee1a9a506 Update instructions 2023-07-08 23:10:27 -07:00
Enrico Ros da647c0e7d Initial Support for Oobabooga/text-generation-webui 2023-07-08 23:03:47 -07:00
Enrico Ros 9f0d6bb17e Rename and update 2023-07-08 23:02:18 -07:00
Enrico Ros 2d59095ab1 Improve Vendors list looks 2023-07-08 23:02:06 -07:00
Enrico Ros 3a0e4226e9 Improve OpenAI vendor 2023-07-08 22:52:27 -07:00
Enrico Ros 9c4f337267 Wrap assistant model name 2023-07-08 22:51:04 -07:00
Enrico Ros 844dbd8c2f Improve Local models List functionality 2023-07-07 23:20:15 -07:00
justmrphoenix b483f3f322 feature: A command to send messages as a system 2023-07-08 07:13:39 +04:00
PtrckAraujo e00956a1a7 Update Local LLM Integration.md
Tried to make step 9 from the setup guide a little more understandable for people who may not have much networking knowledge.
2023-07-06 18:17:56 +02:00
Enrico Ros 8b3201b74c Broader HTML block support 2023-07-06 08:51:24 -07:00
Enrico Ros dfa3d42162 Merge pull request #138 from PtrckAraujo/patch-1
Create Local LLM Integration.md
2023-07-06 08:33:48 -07:00
PtrckAraujo 29722b6b32 Create Local LLM Integration.md 2023-07-06 14:37:32 +02:00
Enrico Ros 9af365d116 Chat -> AppChat 2023-07-06 00:56:03 -07:00
Enrico Ros ee70bea6ae Move functions 2023-07-06 00:53:26 -07:00
Enrico Ros a08b13ce3b Roll PDFJS and the worker 2023-07-05 23:30:45 -07:00
Enrico Ros bc871bd2ac Roll packages 2023-07-05 23:29:19 -07:00
Enrico Ros 367dc9c662 More defensive coding around the flakey OpenAI API 2023-07-05 01:35:34 -07:00
Enrico Ros 1f3dca1ff7 Do not waste tokens on 'STOP' (roll Next) - Fixes #22
Thanks to the Vercel team (@jridgewell), an interruption of the stream on the client
side will lead to the cancellation of the TransformStream on the server side, which
in turn cancels the open fetch() to the upstream. This was a long needed change and
we are happy to report it works well.

Related: #114
 - https://github.com/vercel-labs/ai/issues/90
 - https://github.com/vercel/edge-runtime/pull/428
 - https://github.com/trpc/trpc/issues/4586 (enormous thanks to the tRPC team to issue
   a quick release as well)
2023-07-05 00:59:49 -07:00
Enrico Ros 25aea07acd Correctly configure the tRPC query client 2023-06-30 02:30:16 -07:00
Enrico Ros 77306840a8 News to App 2023-06-30 00:05:51 -07:00
Enrico Ros 79a61e1f2e Roll Packages 2023-06-29 23:06:11 -07:00
Enrico Ros 263f33eb65 Merge branch 'DeFiFoFum-main' 2023-06-29 18:26:35 -07:00
Enrico Ros dab170b317 Merge branch 'main' of https://github.com/DeFiFoFum/big-agi into DeFiFoFum-main 2023-06-29 18:26:25 -07:00
Enrico Ros e47a0b7fcd Roll News 2023-06-29 17:46:54 -07:00
Enrico Ros aeb4538bff Goofy labs 2023-06-29 17:43:03 -07:00
defifofum e5b9c259d9 chore: Mark todo 2023-06-29 16:32:38 -05:00
defifofum bdfed023c7 Double Click to Edit: Add settings button to toggle setting 2023-06-29 16:32:16 -05:00
Enrico Ros 1c5ea78b44 Cleanup multi-page support 2023-06-29 00:59:01 -07:00
Enrico Ros d7f689c0d5 Improve Chat Button - (@HarlanLewis) 2023-06-29 00:35:29 -07:00
Enrico Ros 00a341ab4b Bits 2023-06-29 00:06:05 -07:00
Enrico Ros fca848d82f Small update 2023-06-29 00:02:06 -07:00
Enrico Ros 43fbf90c51 Shrink the Dark Mode icon to a smaller button - saves menu space 2023-06-28 23:58:03 -07:00
Enrico Ros 2e1b6ae346 Roll tRPC and TS 2023-06-28 22:49:51 -07:00
Enrico Ros 490f8bdac3 Move file 2023-06-28 20:24:53 -07:00
Enrico Ros 675474127c Rework stream-chat into a backpressure-driven TransformStream - allows for real cancellations
This implementation has been largely inspired by the Vercel AI (stream) SDK,
available at https://github.com/vercel-labs/ai/, and in particular by the work
of @jridgewell on https://github.com/vercel-labs/ai/issues/90 and related
issues.

As soon as some pending changes land in edge-runtime and nextjs, we'll have
full stream cancellation and tokens saving #57
2023-06-28 20:18:19 -07:00
Enrico Ros 503e3f8aa6 Minor rename 2023-06-28 20:06:24 -07:00
Enrico Ros e56bfcb600 Edge runtimes 2023-06-28 19:51:58 -07:00
Enrico Ros 47553cb1e8 Update prettier configuration 2023-06-28 17:30:20 -07:00
Enrico Ros 2d4c0e9c64 CallChatWithFunctions - functions support, incl. OpenAI Implementation
May be rough on the edges, but should not create issues.
The implementation is defensive, excessively validates the
return types as the OpenAI API is brittle and can easily misbehave
2023-06-28 03:00:25 -07:00
Enrico Ros 87d9309a8e Mention Google 2023-06-28 01:03:27 -07:00
Enrico Ros f35545a1b1 Bits 2023-06-28 00:40:23 -07:00
Enrico Ros 9e7a7b0d9b Cleaner 2023-06-28 00:38:51 -07:00
Enrico Ros 2931be7493 Chat+ button 2023-06-28 00:37:27 -07:00
Enrico Ros dcaf30161a CallChat -> CallChatGenerate 2023-06-28 00:36:57 -07:00
Enrico Ros cb21970040 Added 'FuncLLM' to store and configurator 2023-06-27 23:56:50 -07:00
Enrico Ros 4bc97c18dd Renamed 'SendMode' to 'ChatMode' to better reflect its functionality 2023-06-27 23:40:28 -07:00
Enrico Ros e86269cf53 Update Menu 2023-06-27 16:37:10 -07:00
Enrico Ros ef94c709e3 Some Meta Improvements 2023-06-27 00:35:45 -07:00
Enrico Ros e092790ea0 Disable the 'New chat' button when full (force manual deletion, rather than overwriting) 2023-06-26 23:38:00 -07:00
Enrico Ros 5364bbe6a6 Roll packages 2023-06-26 23:00:20 -07:00
Enrico Ros 7bce4dd234 Update Home, now that we have one 2023-06-27 04:06:01 -07:00
Enrico Ros 02b4f444db Roll Mui 2023-06-24 17:33:51 -07:00
Enrico Ros 2ebd629e4f Fix interims on Mobile 2023-06-23 13:16:25 -07:00
Enrico Ros 2f61a4bb61 Significantly improve the microphone: multi-sentence, 3s soft timeout, show partials
See #131
2023-06-23 07:57:36 -07:00
Enrico Ros b87acc5954 Update Mobile gaps 2023-06-23 07:31:32 -07:00
Enrico Ros 1a4628455a Cleanup the App State Counter 2023-06-23 00:45:00 -07:00
Enrico Ros e928186669 Closeable Menus, again 2023-06-23 00:11:40 -07:00
Enrico Ros 146391f142 Lints 2023-06-22 23:14:59 -07:00
Enrico Ros 5d265364e3 Roll packages 2023-06-22 21:53:26 -07:00
Enrico Ros e3ea589b13 Close Sent Messages menu when pasting a message 2023-06-22 18:06:42 -07:00
Enrico Ros 26cf66be20 Improve Sent messages menu 2023-06-22 17:57:20 -07:00
Enrico Ros 84b0e03551 Share target: receive text/urls when sharing from other Apps on Mobile (webapp needs to be installed) 2023-06-22 17:30:51 -07:00
Enrico Ros 66882b527a Composer: allow queuing up startup text 2023-06-21 22:10:42 -07:00
Enrico Ros 30519e4405 Improve pasting behavior 2023-06-21 21:46:49 -07:00
Enrico Ros 6942b7a226 Don't chop the top Selects (centerItems) on Mobile 2023-06-21 19:38:33 -07:00
Enrico Ros 9c19a3da25 PWA fullscreen by default 2023-06-19 17:58:09 -07:00
Enrico Ros 7136dd2a8a Roll Packages 2023-06-19 11:23:53 -07:00
Enrico Ros eaca40c238 Suspense Fix for React 18 2023-06-19 11:10:09 -07:00
Enrico Ros 88011d4705 Dynamically Import 'react-markdown' and 'remark-gfm' 2023-06-19 11:02:58 -07:00
Enrico Ros 049976aa81 Dynamically Import PlantUML 2023-06-19 10:46:41 -07:00
Enrico Ros 52bcf0eff8 Optional NextJS Bundle Analyzer 2023-06-19 10:40:38 -07:00
Enrico Ros 100c949d40 Default Fast LLM to gpt-3.5-turbo-0613 (faster as of now) 2023-06-19 10:30:46 -07:00
Enrico Ros aeef988e7c Selection of Fast/Chat Models 2023-06-19 10:27:38 -07:00
Enrico Ros 9fcfffb1c6 Further improve Error reporting 2023-06-19 09:21:43 -07:00
Enrico Ros a1e0c015bb Add App State, for Tutorial/Walkthroughs 2023-06-19 08:54:47 -07:00
Enrico Ros e0b0f8f764 Improve error reporting 2023-06-19 08:48:09 -07:00
Enrico Ros 4916ffd2d0 Support OpenAI upstream errors in the streaming API 2023-06-19 08:44:22 -07:00
Enrico Ros a47422f975 Improve OpenAI Error Handling 2023-06-19 08:05:36 -07:00
Enrico Ros a97cfb87cc Buildfix 2023-06-18 10:47:35 -07:00
Enrico Ros 95e478d1d5 Merge branch 'danbalarin-feat/moderation-api-call' 2023-06-18 10:38:06 -07:00
Enrico Ros 838751e93a Moved Moderation to tRPC
Also default to off, and slightly change the text.
2023-06-18 10:37:14 -07:00
Enrico Ros e82f5816b0 Merge branch 'feat/moderation-api-call' of https://github.com/danbalarin/big-agi into danbalarin-feat/moderation-api-call 2023-06-16 11:30:12 -07:00
Enrico Ros 3c9e80b43c In the future we'll need Model flags 2023-06-16 11:29:28 -07:00
Dan Balarin b6d6c0d136 feat: add moderation API endpoint, add settings field, add moderation call on chat message 2023-06-16 02:05:53 +02:00
Enrico Ros 16458af245 Remove older Todo 2023-06-14 20:02:54 -07:00
Enrico Ros a8478b12f4 Convert to mutation, for size 2023-06-14 19:57:28 -07:00
Enrico Ros d9073dd6e6 complete the move 2023-06-14 19:51:30 -07:00
Enrico Ros 1f4e6dfd34 Move Publish to tRPC 2023-06-14 19:48:59 -07:00
Enrico Ros e8cc60dc62 Layout change 2023-06-14 07:57:08 -07:00
Enrico Ros 6406c8577b Settle, pre-functions 2023-06-13 22:02:36 -07:00
Enrico Ros 4b170a09dc OpenAI: Improve namespacing, ahead of functions support 2023-06-13 21:44:01 -07:00
Enrico Ros b00bc2e1e2 LLM Vendors rename 2023-06-13 19:59:29 -07:00
Enrico Ros 87f6bfd52e Crashfix 2023-06-13 19:42:11 -07:00
Enrico Ros 51d732f5bc Remove debug 2023-06-13 19:34:47 -07:00
Enrico Ros 8d545b8cf7 Misc 2023-06-13 19:32:24 -07:00
Enrico Ros 54e689c054 Default to the latest models 2023-06-13 19:22:21 -07:00
Enrico Ros 03a26300a9 LLM Details on demand 2023-06-13 19:10:59 -07:00
Enrico Ros 983b9e09a6 Improve LLM Options 2023-06-13 19:04:45 -07:00
Enrico Ros f9ebd6473f Disable DLLM.tags (unused for now) 2023-06-13 19:04:34 -07:00
Enrico Ros 4b3279f062 Improve naming 2023-06-13 18:54:42 -07:00
Enrico Ros e90b318b74 Autosizing 2023-06-13 18:35:26 -07:00
Enrico Ros 2c7ee6676f MoreMoji 2023-06-13 18:12:38 -07:00
Enrico Ros ff690493c9 Reword 2023-06-13 18:10:17 -07:00
Enrico Ros f5b526fc4b Update Readme 2023-06-13 18:09:42 -07:00
Enrico Ros 05b05bed67 Wider models 2023-06-13 17:28:54 -07:00
Enrico Ros b2faa48f52 Lints 2023-06-13 16:24:02 -07:00
Enrico Ros 8d6e7a8cfd Support today's OpenAI models 2023-06-13 10:44:33 -07:00
Enrico Ros ec983adb0b big-AGI 1.2 2023-06-13 09:57:56 -07:00
Enrico Ros 4c437e6204 Merge pull request #125 from enricoros/next
Merge the 'next' branch
2023-06-13 09:22:23 -07:00
Enrico Ros 1eb99b1936 Cleanup menus 2023-06-13 09:09:58 -07:00
Enrico Ros 0a10a983d4 Improve placeholder 2023-06-13 08:43:04 -07:00
Enrico Ros 6879976163 Re-add placements, for wide layouts 2023-06-13 08:39:46 -07:00
Enrico Ros 3066a4fff5 Tone it down 2023-06-13 08:34:35 -07:00
Enrico Ros bcec8babb4 Proper key missing indicator 2023-06-13 08:33:54 -07:00
Enrico Ros 5ccee6c6be Remove LocalAI for now 2023-06-13 08:23:33 -07:00
Enrico Ros 7b72925a1f Improve Models Select: use ListItems instead of options 2023-06-13 08:10:16 -07:00
Enrico Ros 993dbb0e12 Improve Icon 2023-06-13 07:31:05 -07:00
Enrico Ros 56c030994a Update Icon colors and menu sizing 2023-06-13 07:24:13 -07:00
Enrico Ros 9b91735684 Cleanup packages 2023-06-13 07:04:49 -07:00
Enrico Ros da50c44276 Roll Joy 2023-06-13 07:02:14 -07:00
Enrico Ros b77bae8ff2 Roll PDFJS 2023-06-13 07:01:06 -07:00
Enrico Ros 6a67c9485d Roll Next/Trpc 2023-06-13 06:58:58 -07:00
Enrico Ros 19850c3531 Roll Typescript 2023-06-13 06:56:38 -07:00
Enrico Ros 34363f81e9 Extract Prodia models to a separate file 2023-06-13 06:47:51 -07:00
Enrico Ros 6d0ef76a66 Merge branch 'main' into next 2023-06-13 06:39:19 -07:00
Enrico Ros f66c88446a Merge pull request #121 from rossman22590/patch-1
Update models.ts
2023-05-31 14:47:13 -07:00
Ross Cohen 3df16c36d3 Update models.ts 2023-05-29 18:40:49 -04:00
Enrico Ros f9069d46c3 O_O 2023-05-24 00:06:15 -07:00
Enrico Ros 8ad3a034a6 Unlock pkgr - 2.4.1 doesn't seem dangerous 2023-05-24 00:02:21 -07:00
Enrico Ros bd7393978a Tool Settings update 2023-05-23 23:50:50 -07:00
Enrico Ros 7d66b55253 Comments 2023-05-23 23:50:35 -07:00
Enrico Ros 6c9d0c88d6 Settings: Improve 2023-05-22 02:44:18 -07:00
Enrico Ros b6871accac Settings: Migrate to Tabs 2023-05-22 02:11:19 -07:00
Enrico Ros 7c74821ffb Vendors: Icons as Components 2023-05-22 00:20:40 -07:00
Enrico Ros 84f39ff871 Vendors: Use Components instead of instances 2023-05-22 00:08:35 -07:00
Enrico Ros 160937e674 Merge branch 'main' into next 2023-05-21 23:12:57 -07:00
Enrico Ros 1f4dea6f3c More descriptive 2023-05-21 23:12:00 -07:00
Enrico Ros da93630544 tRPC: PARTIAL port of ElevenLabs 2023-05-21 23:11:52 -07:00
Enrico Ros df3d77dcee Don't allow editing while typing 2023-05-21 22:37:46 -07:00
Enrico Ros 7ae45c3e2b Merge pull request #116 from typpo/main
Prevent menus from hiding on small window height
2023-05-21 22:15:54 -07:00
Ian Webster fd0e205ec1 Prevent menus from hiding on small window height 2023-05-21 08:52:37 -07:00
Enrico Ros 9cf981d6d4 Prodia: types to router 2023-05-20 02:23:08 -07:00
Enrico Ros 65a55af804 Prodia -> tRPC 2023-05-20 02:16:52 -07:00
Enrico Ros ef296e6336 Google Search -> tRPC 2023-05-20 01:27:18 -07:00
Enrico Ros 2c9d509311 Convert to .75 syntax 2023-05-20 00:37:05 -07:00
Enrico Ros e9bf19e9da Revert to Joy alpha.75, as the bugs on Select make it very broken for users: https://github.com/mui/material-ui/issues/37235 2023-05-20 00:30:45 -07:00
Enrico Ros 017fc5947d Disable delete, even on dev 2023-05-20 00:12:05 -07:00
Enrico Ros 88d396666c Single OpenAI source instance 2023-05-19 23:43:18 -07:00
Enrico Ros 84b5a8105b Small color fixes 2023-05-19 23:39:37 -07:00
Enrico Ros bf9c0610a3 AutoSpeak: off (instead of firstline) 2023-05-19 22:46:05 -07:00
Enrico Ros a4d9312225 Nothing 2023-05-19 22:42:15 -07:00
Enrico Ros d94a33179c Nothing 2023-05-19 22:39:06 -07:00
Enrico Ros 0586243d88 Roll MUI again, although the insidious https://github.com/mui/material-ui/issues/37235 hasn't been fixed 2023-05-19 22:30:04 -07:00
Enrico Ros 8349704ec6 Remove the OpenAI module 2023-05-19 22:28:02 -07:00
Enrico Ros 9c51f71d7d Move the Streaming function to the new model 2023-05-19 22:25:11 -07:00
Enrico Ros ddd5ecf8ad Roll packages 2023-05-19 21:20:42 -07:00
Enrico Ros 77db6242d3 Disable 'Chat' if no model is selected 2023-05-18 22:29:51 -07:00
Enrico Ros b0b9fa21c5 Remove Settings! 2023-05-18 21:11:02 -07:00
Enrico Ros cb66af6438 Rename to GoogleSearchStore 2023-05-18 20:27:41 -07:00
Enrico Ros 4a4e516f37 Renames 2023-05-18 19:59:35 -07:00
Enrico Ros acbb4dac04 Rename to UIPreferencesStore 2023-05-18 19:57:04 -07:00
Enrico Ros 940f002d43 Rename to UIStateStore 2023-05-18 19:35:43 -07:00
Enrico Ros 8d5dcaac11 Extract ElevenLabs module config 2023-05-18 19:29:33 -07:00
Enrico Ros 73765990d2 Extract Prodia module config 2023-05-18 19:18:47 -07:00
Enrico Ros 62836d9468 Renames 2023-05-18 19:03:24 -07:00
Enrico Ros ba2f4115ce Cleanup SendMode 2023-05-18 19:00:19 -07:00
Enrico Ros 306a4aa2b2 Rename the LLMs store 2023-05-18 18:32:06 -07:00
Enrico Ros 4a0955b300 Rename the AppBar store 2023-05-18 18:30:58 -07:00
Enrico Ros faaf1e9848 Fix option (not option) 2023-05-18 18:23:49 -07:00
Enrico Ros be850c7d6e Roll packages 2023-05-18 18:20:35 -07:00
Enrico Ros 65b5251738 Merge pull request #115 from ixonos/pdf-feature
fix a bug pdf is not working in docker
2023-05-19 09:01:39 -07:00
Akafreakingbox 70620412c9 fix a bug pdf is not working in docker 2023-05-19 14:06:45 +03:00
Enrico Ros 6ed226c489 Begin virtualizing chats 2023-05-18 03:36:40 -07:00
Enrico Ros 4e2b8964bf Move vendor registry 2023-05-18 03:07:28 -07:00
Enrico Ros 49871db568 Lint 2023-05-18 03:07:00 -07:00
Enrico Ros f4e3bcf55c Fixme-- 2023-05-18 02:56:34 -07:00
Enrico Ros b58eecdbec Move Content reducer 2023-05-18 02:50:11 -07:00
Enrico Ros 2f736d097f Relocate some files 2023-05-18 02:47:16 -07:00
Enrico Ros c9ff703592 Cleanup 2023-05-18 02:39:03 -07:00
Enrico Ros 40220a25de Default to Enter to Send 2023-05-18 02:26:41 -07:00
Enrico Ros 0e4010c1ee Remove 2 newlines 2023-05-18 02:21:59 -07:00
Enrico Ros dbbc2ead60 ContentReducer: port 2023-05-18 02:16:26 -07:00
Enrico Ros 56e0bbeeca Removed per-conversation model support 2023-05-18 02:08:19 -07:00
Enrico Ros 98c25d7b78 support chatLLM in Chat 2023-05-18 01:24:35 -07:00
Enrico Ros c309aa21d6 useChatLLM on Composer and Message list 2023-05-18 01:15:01 -07:00
Enrico Ros 8e6bf2b7a7 Composer ported to the global model 2023-05-18 00:45:37 -07:00
Enrico Ros c0474b72cf Dropdown & Chips to show the 2 active models 2023-05-18 00:36:49 -07:00
Enrico Ros 26bc0e98d7 Auto-select Chat/Fast LLM IDs 2023-05-18 00:16:44 -07:00
Enrico Ros daa17dc72c "Fix" build 2023-05-17 23:07:40 -07:00
Enrico Ros 2a6a7da245 Improve appearance 2023-05-17 22:54:12 -07:00
Enrico Ros 399e7ad6c4 Migrate ChatModelId to LLMId (needs bugfixes) 2023-05-17 22:53:47 -07:00
Enrico Ros 64b82059f2 Nothing 2023-05-16 03:25:51 -07:00
Enrico Ros 4995e9b7fc Small fixes 2023-05-15 19:23:49 -07:00
Enrico Ros 8612138861 Refactor-ception continues 2023-05-15 18:56:51 -07:00
Enrico Ros 46dce05e10 Minutia 2023-05-15 03:46:00 -07:00
Enrico Ros 3cfffa9440 Seems to be working 2023-05-15 03:38:48 -07:00
Enrico Ros 690bd6f5c1 Still in a refactoring inception 2023-05-15 03:16:38 -07:00
Enrico Ros 64c5ac444d Buildfix 2023-05-15 03:14:43 -07:00
Enrico Ros da833bbb8a Begin LLM configuration - with a common and per-source part 2023-05-15 02:26:44 -07:00
Enrico Ros 7d785ed3dd Improve OpenAI models metadata heuristics 2023-05-15 02:24:46 -07:00
Enrico Ros 674354fc79 Show that there's a server-side key 2023-05-15 00:43:18 -07:00
Enrico Ros 3b0ca67441 trpc: migrate to Edge functions(!) - improve query/async API distinction 2023-05-15 00:42:04 -07:00
Enrico Ros 706f5feb9c trpc: use for openai/chat (non-streaming) 2023-05-14 03:58:38 -07:00
Enrico Ros 5628fc3951 Summarize: prevent endless looping 2023-05-14 03:53:16 -07:00
Enrico Ros 5f8a8609f6 Show LocalAI models 2023-05-14 01:59:01 -07:00
Enrico Ros 3c486f659b Merge remote-tracking branch 'origin/main' into feaure-model-sources 2023-05-13 23:31:45 -07:00
Enrico Ros ab91bc64a8 Support disabling 'enter to send' when editing messages as well 2023-05-13 23:31:33 -07:00
Enrico Ros 623d78c092 Merge branch 'main' into feaure-model-sources 2023-05-13 23:17:58 -07:00
Enrico Ros 28b1ac7328 Fix casing 2023-05-13 23:17:33 -07:00
Enrico Ros ba1d027f24 Merge remote-tracking branch 'origin/main' into feaure-model-sources 2023-05-13 23:09:48 -07:00
Enrico Ros 7174029227 Update more enter Hints 2023-05-13 23:09:23 -07:00
Enrico Ros e7e7464a14 Hint the correct Enter behavior on mobile 2023-05-13 23:04:24 -07:00
Enrico Ros 4f502b6b8f prefix drag/drop files 2023-05-13 22:52:09 -07:00
Enrico Ros abbdddf329 The refactoring inception continues 2023-05-13 22:12:40 -07:00
Enrico Ros a54de2626d Hint at server-side config 2023-05-13 18:55:09 -07:00
Enrico Ros f52df1c1b0 Del all sources in dev. 2023-05-13 18:44:56 -07:00
Enrico Ros 22e22440c9 Accessible Sources Configuration 2023-05-13 18:41:48 -07:00
Enrico Ros 7f869d2175 Merge pull request #113 from smileynet/docker-compose
add docker-compose for pre-built image
2023-05-13 18:14:31 -07:00
Enrico Ros 053e485c2d Cleanups 2023-05-13 17:51:46 -07:00
Enrico Ros 2ef75ec94b Move class 2023-05-13 17:29:45 -07:00
Enrico Ros 941168e14a _Config -> Setup 2023-05-13 17:26:55 -07:00
Enrico Ros 50f3f0919a Default Source Creation 2023-05-13 13:30:48 -07:00
Enrico Ros b412236ad2 Simplified yet enhanced the UI 2023-05-13 13:11:33 -07:00
Sam Biggins 02cd5db630 build: add docker-compose as pre-built image option, add usage to readme
For .gitignore, change ignored env file to .env (a common use case). .env.local is automatically ignored by git and more related application.
2023-05-13 11:49:40 -07:00
Enrico Ros 9a7069caf7 fix react issue & improve list 2023-05-13 01:28:55 -07:00
Enrico Ros 7a61341d15 rename modules/models to modules/llms 2023-05-13 01:08:10 -07:00
Enrico Ros 0376ab918a trpc: OAI models enumeration is working 2023-05-12 23:56:42 -07:00
Enrico Ros 2a209ffb1a trpc: wiring & example 2023-05-12 20:56:22 -07:00
Enrico Ros 7b67116e2f trpc: packages 2023-05-12 20:37:43 -07:00
Enrico Ros f75896f0e4 Fix Packages 2023-05-12 20:28:17 -07:00
Enrico Ros 1bff4dd0bd Fix Lints 2023-05-12 20:01:04 -07:00
Enrico Ros 54b2d289e2 ~/modules 2023-05-12 19:44:07 -07:00
Enrico Ros 37ae23a553 Improve source configuration UI 2023-05-12 18:33:50 -07:00
Enrico Ros 335457f0d7 Dynamic Configuration of sources 2023-05-12 18:17:35 -07:00
Enrico Ros 511320982d Roll packages 2023-05-12 04:15:35 -07:00
Enrico Ros 98cc620ea4 Cleanup 2023-05-12 04:37:41 -07:00
Enrico Ros 736b59bdc7 Rename to ModelsModal 2023-05-12 04:19:00 -07:00
Enrico Ros f968290d26 Delete Modelconfigurator 2023-05-12 04:16:25 -07:00
Enrico Ros 978fbcd428 ConfigureSources: use Store 2023-05-12 04:12:00 -07:00
Enrico Ros a3fe4bd818 AddVendor: use Store 2023-05-12 03:49:55 -07:00
Enrico Ros 4eacceac4e Model Store, to share and persist state 2023-05-12 03:25:08 -07:00
Enrico Ros 3f592ab28a Test Localsource @smileynet 2023-05-12 03:24:53 -07:00
Enrico Ros bb6a2d66a5 Add Vendors to Sources (ok). Configure Sources (bad) 2023-05-12 03:24:41 -07:00
Enrico Ros 846360f8c1 Basic Model Configuration dialog 2023-05-12 03:24:12 -07:00
Enrico Ros 17cb8451b0 Move OpenAISettings to the source configuration 2023-05-12 03:23:03 -07:00
Enrico Ros 1fd64cbcac Smaller env change 2023-05-12 03:21:02 -07:00
Enrico Ros 3032891038 Align modal button to the bottom 2023-05-12 03:19:53 -07:00
Enrico Ros 4f7dfe54ca Flatten Section when used with an outside gap:. 2023-05-12 03:19:33 -07:00
Enrico Ros d24e9751db Add shared Key Input component 2023-05-12 03:18:57 -07:00
Enrico Ros f8d222ffa4 UI: change the 'enter to send' behavior, to default to longer text 2023-05-11 21:19:36 -07:00
Enrico Ros 74cea9af7f View HTML content - e.g. for MiM or Errors 2023-05-11 03:41:13 -07:00
Enrico Ros e39271b5cf Skel for Model Setup 2023-05-11 03:34:45 -07:00
Enrico Ros 8d222fd4d9 Improve Modal in Settings 2023-05-11 03:32:56 -07:00
Enrico Ros 7aff555b61 Share Col Width 2023-05-11 03:31:57 -07:00
Enrico Ros 50c6ecae66 Adapt to the new location 2023-05-11 03:31:32 -07:00
Enrico Ros 4c8590dcdd Move AppBar files 2023-05-11 03:30:33 -07:00
Enrico Ros 03f9917518 Improve shade 2023-05-11 03:29:00 -07:00
Enrico Ros dc04b73e2d ApplicationBar fully pluggable 2023-05-11 02:44:10 -07:00
Enrico Ros fb926225d9 Improve Message Blocks 2023-05-11 01:44:15 -07:00
Enrico Ros 6f3b670662 Fix Package-Lock due to Joy being stuck to Alpha.75 2023-05-11 01:03:53 -07:00
Enrico Ros 768db4835a BlockAction example 2023-05-11 00:37:04 -07:00
Enrico Ros a6b7f452b2 PlantUML: directly use/modify the SVG 2023-05-11 00:35:52 -07:00
Enrico Ros f4f5224d6b PlantUML rendering 2023-05-11 00:04:59 -07:00
Enrico Ros f4a0d4fbc0 Downgrade Joy UI to alpha.75, as the Select does not repaint updated Options text 2023-05-10 23:44:39 -07:00
Enrico Ros 161b93171f Roll packages 2023-05-10 18:41:53 -07:00
Enrico Ros 5249878e85 Remove debug code 2023-05-10 02:48:22 -07:00
Enrico Ros 1a872646b6 ApplicationBar is now pluggable 2023-05-10 02:27:49 -07:00
Enrico Ros b17a582c57 Extract the SupportItem from The ApplicationBar 2023-05-10 00:05:41 -07:00
Enrico Ros 6df2cf82ad Improve Token Progress Bar 2023-05-09 02:37:57 -07:00
Enrico Ros 3ea839903d Prepare for router 2023-05-09 01:30:57 -07:00
Enrico Ros a132cab3bd Update worker 2023-05-09 00:44:33 -07:00
Enrico Ros 986754b0c4 Update settings for LocalAI 2023-05-09 00:08:47 -07:00
Enrico Ros a6113bc407 Support stream ending for LocalAI 2023-05-09 00:06:02 -07:00
Enrico Ros b3a176db6c Improve compatibility of the streaming mode 2023-05-08 23:55:45 -07:00
Enrico Ros b50d14590f Improve API_HOST support: can use HTTP links & better TS support 2023-05-08 23:33:29 -07:00
Enrico Ros 375bfbce47 Raised Max Conversations to 20 2023-05-08 22:42:27 -07:00
Enrico Ros e7dab34926 Workaround Joy UI Menu (doesn't close) bug for the SentMessagesMenu
https://github.com/mui/material-ui/issues/36821
2023-05-08 22:09:20 -07:00
Enrico Ros 5776e773c0 Workaround Joy UI Menu (doesn't close) bug
https://github.com/mui/material-ui/issues/36821
2023-05-08 21:52:27 -07:00
Enrico Ros 704b27ea6d Roll packages 2023-05-08 21:05:04 -07:00
Enrico Ros 8afc5227b5 Finish Elevenlabs modularization 2023-05-08 20:44:22 -07:00
Enrico Ros 6d64347cae Clarify server-side error messages 2023-05-07 02:15:31 -07:00
Enrico Ros 8663ff52dc Update error message 2023-05-07 01:45:07 -07:00
Enrico Ros 9e6730f968 Predicted token counting for the cleanup mode 2023-05-06 17:45:27 -07:00
Enrico Ros 5e317a2c29 Highlight the cleanup title 2023-05-06 17:29:56 -07:00
Enrico Ros 0209975a48 Highlight the cleanup mode 2023-05-06 17:28:03 -07:00
Enrico Ros 97a693f9eb Clean token math, Tooltip on progress bar 2023-05-06 17:21:52 -07:00
Enrico Ros c90d75ab63 Roll packages 2023-05-06 16:47:43 -07:00
Enrico Ros 645b03ff60 Better usage of Space 2023-05-05 17:03:29 -07:00
Enrico Ros c55d43a726 Merge pull request #108 from PeterDaveHello/patch-1
Fix README.md markdown format & rendering
2023-05-04 12:01:52 -07:00
Peter Dave Hello d72e30eea1 Fix README.md markdown format & rendering
"Code Execution: Sandpack" under "#### March: first release" seems to be broken.
2023-05-04 19:45:58 +08:00
Enrico Ros a311d456b2 Improve height 2023-05-03 11:30:01 -07:00
Enrico Ros 4d19840db8 Support Helicone Keys 2023-05-03 11:22:09 -07:00
Enrico Ros ba6d368226 Support Helicone Keys 2023-05-03 11:14:15 -07:00
Enrico Ros d3d526423f Silence 2023-05-03 10:34:43 -07:00
Enrico Ros 11ba2d6dec Silence 429s from the OpenAI API 2023-05-03 10:07:18 -07:00
Enrico Ros 2c41453395 Silence this warning 2023-05-03 10:04:26 -07:00
Enrico Ros 834f34c0ff Roll packages 2023-05-03 10:02:21 -07:00
Enrico Ros 1dcdb3ba3e Use ElevenLabs only if configured 2023-05-03 09:49:38 -07:00
Enrico Ros 6d514108dd Cleanup 2023-05-02 23:28:19 -07:00
Enrico Ros d9ef77f12d Fix custom SVG 2023-05-02 22:44:31 -07:00
Enrico Ros 22465c1a4b Typo 2023-05-02 20:38:13 -07:00
Enrico Ros 2232d0cdc6 Fix command detection 2023-05-02 20:37:48 -07:00
Enrico Ros b5f3dbf7c9 Roll next 2023-05-01 23:55:20 -07:00
301 changed files with 19384 additions and 6687 deletions
-21
View File
@@ -1,21 +0,0 @@
# [Recommended for local deployments] Backend API key for OpenAI, so that users don't need one (UI > this > '')
OPENAI_API_KEY=
# [Not needed] Set the backend host for the OpenAI API, to enable platforms such as Helicone (UI > this > api.openai.com)
OPENAI_API_HOST=
# [Not needed] Sets the "OpenAI-Organization" header field to support organization users (UI > this > '')
OPENAI_API_ORG_ID=
# [Optional] Enables ElevenLabs credentials on the server side - for optional text-to-speech
ELEVENLABS_API_KEY=
ELEVENLABS_API_HOST=
ELEVENLABS_VOICE_ID=
# [Optional] Prodia credentials on the server side - for optional image generation
PRODIA_API_KEY=
# [Optional, Search] Google Cloud API Key
# https://console.cloud.google.com/apis/credentials -
GOOGLE_CLOUD_API_KEY=
# [Optional, Search] Google Custom/Programmable Search Engine ID
# https://programmablesearchengine.google.com/
GOOGLE_CSE_ID=
+57 -1
View File
@@ -1,3 +1,59 @@
{
"extends": "next/core-web-vitals"
"parser": "@typescript-eslint/parser",
"parserOptions": {
"project": true
},
"plugins": [
"@typescript-eslint"
],
"extends": [
"next/core-web-vitals",
"plugin:@typescript-eslint/recommended-type-checked",
"plugin:@typescript-eslint/stylistic-type-checked"
],
"ignorePatterns": [
"next.config.js",
"node_modules/**/*",
"out/**/*",
".next/**/*",
".vercel/**/*"
],
"rules": {
"@typescript-eslint/no-explicit-any": "off",
"@typescript-eslint/no-inferrable-types": "off",
"@typescript-eslint/no-namespace": "off",
"@typescript-eslint/no-redundant-type-constituents": "off",
"@typescript-eslint/no-unsafe-argument": "off",
"@typescript-eslint/no-unsafe-assignment": "off",
"@typescript-eslint/no-unsafe-call": "off",
"@typescript-eslint/no-unsafe-member-access": "off",
"@typescript-eslint/no-unsafe-return": "off",
"@typescript-eslint/prefer-nullish-coalescing": "off",
"@typescript-eslint/unbound-method": "off",
"@typescript-eslint/array-type": "off",
"@typescript-eslint/consistent-type-definitions": "off",
"@typescript-eslint/consistent-type-imports": "off",
/*"@typescript-eslint/consistent-type-imports": [
"warn",
{
"prefer": "type-imports",
"fixStyle": "separate-type-imports"
}
],*/
"@typescript-eslint/no-unused-vars": [
"warn",
{
"argsIgnorePattern": "^_"
}
],
"@typescript-eslint/no-misused-promises": [
2,
{
"checksVoidReturn": {
"attributes": false
}
}
]
}
}
+2 -1
View File
@@ -26,7 +26,8 @@ yarn-error.log*
.pnpm-debug.log*
# local env files
.env*.local
.env
.env.*
# vercel
.vercel
+2 -1
View File
@@ -1,6 +1,7 @@
{
"singleAttributePerLine": false,
"singleQuote": true,
"trailingComma": "all",
"endOfLine": "lf",
"printWidth": 160
}
}
+2 -1
View File
@@ -5,7 +5,7 @@ ENV PATH $PATH:/usr/src/app/node_modules/.bin
WORKDIR /usr/src/app
COPY package*.json ./
COPY package*.json prisma/ ./
# CI and release builds should use npm ci to fully respect the lockfile.
# Local development may use npm install for opportunistic package updates.
@@ -34,6 +34,7 @@ WORKDIR /usr/src/app
# Include only the release build and production packages.
COPY --from=build-target /usr/src/app/node_modules node_modules
COPY --from=build-target /usr/src/app/.next .next
COPY --from=build-target /usr/src/app/public public
# Expose port 3000 for the application to listen on
EXPOSE 3000
+121 -47
View File
@@ -1,9 +1,9 @@
# `BIG-AGI` 🤖💬
Welcome to `big-AGI`, FKA `nextjs-chatgpt-app`. 👋🎉
Personal AGI App, powered by `OpenAI GPT-4` and beyond. Designed for smart humans and super-heroes,
Welcome to `big-AGI` 👋 your personal AGI application
powered by OpenAI GPT-4 and beyond. Designed for smart humans and super-heroes,
this responsive web app comes with Personas, Drawing, Code Execution, PDF imports, Voice support,
data Rendering, AGI functions, chats and more. Show your friends some `#big-AGI-energy` 🚀
data Rendering, AGI functions, chats and much more. Comes with plenty of `#big-AGI-energy` 🚀
[![Official Website](https://img.shields.io/badge/BIG--AGI.com-%23096bde?style=for-the-badge&logo=vercel&label=demo)](https://big-agi.com)
@@ -11,27 +11,26 @@ Or fork & run on Vercel
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-agi&env=OPENAI_API_KEY,OPENAI_API_HOST&envDescription=OpenAI%20KEY%20for%20your%20deployment.%20Set%20HOST%20only%20if%20non-default.)
## Useful 👊
## ✨ Key Features 👊
![Ask away, paste a ton, copy the gems](docs/pixels/big-AGI-compo1.png)
- Engaging AI Personas
- Clean UX, w/ tokens counters
- Privacy: user-owned API keys and localStorage
- Human I/O: Advanced voice support (TTS, STT)
- Machine I/O: PDF import & Summarization, code execution
- Many more updates & integrations: ElevenLabs, Helicone, Paste.gg, Prodia
- Coming up: automatic-AGI reasoning
- **AI Personas**
- **Polished UI**: installable web app, mobile-friendly, token counters, etc.
- **Fast UX**: Microphone, Camera OCR, Drag files, Voice Synthesis
- **Models**: [OpenAI](https://platform.openai.com/overview), [Anthropic](https://www.anthropic.com/product), [Azure](https://oai.azure.com/), [OpenRouter](https://openrouter.ai/), [Local models](https://github.com/oobabooga/text-generation-webui), and more
- **Private**: use your own API keys and self-host if you like
- **Advanced**: PDF import & Summarization, code execution
- **Integrations**: ElevenLabs, Helicone, Paste.gg, Prodia and more
## Support 🙌
## 💖 Support
[//]: # ([![Official Discord](https://img.shields.io/discord/1098796266906980422?label=discord&logo=discord&logoColor=%23fff&style=for-the-badge)](https://discord.gg/MkH4qj2Jp9))
[![Official Discord](https://discordapp.com/api/guilds/1098796266906980422/widget.png?style=banner2)](https://discord.gg/MkH4qj2Jp9)
* Enjoy the hosted open-source app on [big-AGI.com](https://get.big-agi.com)
* [Chat with us](https://discord.gg/MkH4qj2Jp9). We just started!
* Deploy your [fork](https://github.com/enricoros/big-agi/fork) and surprise your friends with big-GPT
energy!
* Enjoy the hosted open-source app on [big-AGI.com](https://big-agi.com)
* [Chat with us](https://discord.gg/MkH4qj2Jp9)
* Deploy your [fork](https://github.com/enricoros/big-agi/fork) for your friends and family
* send PRs! ...
🎭[Editing Personas](https://github.com/enricoros/big-agi/issues/35),
🧩[Reasoning Systems](https://github.com/enricoros/big-agi/issues/36),
@@ -40,41 +39,80 @@ Or fork & run on Vercel
<br/>
## Latest Drops 🚀
## 🧠 Latest Drops
#### 🚨 April: more #big-agi-energy
#### Next
- 🎉 **[Google Search](docs/pixels/feature_react_google.png)** active in ReAct - add your keys to Settings > Google Search
- 🎉 **[Reason+Act](docs/pixels/feature_react_turn_on.png)** preview feature - activate with 2-taps on the 'Chat' button
- 🎉 **[Image Generation](docs/pixels/feature_imagine_command.png)** using Prodia (BYO Keys) - /imagine - or menu option
- 🎉 **[Voice Synthesis](docs/pixels/feature_voice_1.png)** 📣 with ElevenLabs, including selection of custom voices
- 🎉 **[Precise Token Counter](docs/pixels/feature_token_counter.png)** 📈 extra-useful to pack the context window
- 🎉 **[Install Mobile APP](docs/pixels/feature_pwa.png)** 📲 looks like native (@harlanlewis)
- 🎉 **[UI language](docs/pixels/feature_language.png)** with auto-detect, and future app language! (@tbodyston)
- 🎉 **PDF Summarization** 🧩🤯 - ask questions to a PDF! (@fredliubojin)
- 🎉 **Code Execution: [Codepen](https://codepen.io/)/[Replit](https://replit.com/)** 💻 (@harlanlewis)
- 🎉 **[SVG Drawing](docs/pixels/feature_svg_drawing.png)** - draw with AI 🎨
- 🎉 Chats: multiple chats, AI titles, Import/Export, Selection mode
- 🎉 Rendering: Markdown, SVG, improved Code blocks
- 🎉 Integrations: OpenAI organization ID
- 🎉 [Cloudflare deployment instructions](docs/deploy-cloudflare.md),
- **Cloudflare API Gateway** support
- **Helicone for Anthropic** support
- **Text Tools** - incl. highlight differences
#### 1.4.0: Sept/Oct: scale OUT
- **Expanded Model Support**: Azure and [OpenRouter](https://openrouter.ai/docs#models) models, including gpt-4-32k
- **Share and clone** conversations with public links
- Removed the 20 chats hard limit ([Ashesh3](https://github.com/enricoros/big-agi/pull/158))
- Latex Rendering
- Augmented Chat modes (Labs)
#### July/Aug: More Better Faster
- **Camera OCR** - real-world AI - take a picture of a text, and chat with it
- **Anthropic models** support, e.g. Claude
- **Backup/Restore** - save chats, and restore them later
- **[Local model support with Oobabooga server](docs/config-local-oobabooga)** - run your own LLMs!
- **Flatten conversations** - conversations summarizer with 4 modes
- **Fork conversations** - create a new chat, to experiment with different endings
- New commands: /s to add a System message, and /a for an Assistant message
- New Chat modes: Write-only - just appends the message, without assistant response
- Fix STOP generation - in sync with the Vercel team to fix a long-standing NextJS issue
- Fixes on the HTML block - particularly useful to see error pages
#### June: scale UP
- **[New OpenAI Models](https://openai.com/blog/function-calling-and-other-api-updates) support** - 0613 models, including 16k and 32k
- **Cleaner UI** - with rationalized Settings, Modals, and Configurators
- **Dynamic Models Configurator** - easy connection with different model vendors
- **Multiple Model Vendors Support** framework to support many LLM vendors
- **Per-model Options** (temperature, tokens, etc.) for fine-tuning AI behavior to your needs
- Support for GPT-4-32k
- Improved Dialogs and Messages
- Much Enhanced DX: TRPC integration, modularization, pluggable UI, etc
#### April / May: more #big-agi-energy
- **[Google Search](docs/pixels/feature_react_google.png)** active in ReAct - add your keys to Settings > Google
Search
- **[Reason+Act](docs/pixels/feature_react_turn_on.png)** preview feature - activate with 2-taps on the 'Chat' button
- **[Image Generation](docs/pixels/feature_imagine_command.png)** using Prodia (BYO Keys) - /imagine - or menu option
- **[Voice Synthesis](docs/pixels/feature_voice_1.png)** 📣 with ElevenLabs, including selection of custom voices
- **[Precise Token Counter](docs/pixels/feature_token_counter.png)** 📈 extra-useful to pack the context window
- **[Install Mobile APP](docs/pixels/feature_pwa.png)** 📲 looks like native (@harlanlewis)
- **[UI language](docs/pixels/feature_language.png)** with auto-detect, and future app language! (@tbodyston)
- **PDF Summarization** 🧩🤯 - ask questions to a PDF! (@fredliubojin)
- **Code Execution: [Codepen](https://codepen.io/)/[Replit](https://replit.com/)** 💻 (@harlanlewis)
- **[SVG Drawing](docs/pixels/feature_svg_drawing.png)** - draw with AI 🎨
- Chats: multiple chats, AI titles, Import/Export, Selection mode
- Rendering: Markdown, SVG, improved Code blocks
- Integrations: OpenAI organization ID
- [Cloudflare deployment instructions](docs/deploy-cloudflare.md),
[awesome-agi](https://github.com/enricoros/awesome-agi)
- 🎉 [Typing Avatars](docs/pixels/gif_typing_040123.gif) ⌨️
- [Typing Avatars](docs/pixels/gif_typing_040123.gif) ⌨️
<!-- p><a href="docs/pixels/gif_typing_040123.gif"><img src="docs/pixels/gif_typing_040123.gif" width='700' alt="New Typing Avatars"/></a></p -->
#### March: first release
- 🎉 **[AI Personas](docs/pixels/feature_purpose_two.png)** - including Code, Science, Corporate, and Chat 🎭
- 🎉 **Privacy**: user-owned API keys 🔑 and localStorage 🛡️
- 🎉 **Context** - Attach or [Drag & Drop files](docs/pixels/feature_drop_target.png) to add them to the prompt 📁
- 🎉 **Syntax highlighting** - for multiple languages 🌈
- 🎉 **Code Execution: Sandpack
** - [now on branch](https://github.com/enricoros/big-agi/commit/f678a0d463d5e9cf0733f577e11bd612b7902d89) `variant-code-execution`
- 🎉 Chat with GPT-4 and 3.5 Turbo 🧠💨
- 🎉 Real-time streaming of AI responses ⚡
- 🎉 **Voice Input** 🎙️ - works great on Chrome / Windows
- 🎉 Integration: **[Paste.gg](docs/pixels/feature_paste_gg.png)** integration for chat sharing 📥
- 🎉 Integration: **[Helicone](https://www.helicone.ai/)** integration for API observability 📊
- **[AI Personas](docs/pixels/feature_purpose_two.png)** - including Code, Science, Corporate, and Chat 🎭
- **Privacy**: user-owned API keys 🔑 and localStorage 🛡️
- **Context** - Attach or [Drag & Drop files](docs/pixels/feature_drop_target.png) to add them to the prompt 📁
- **Syntax highlighting** - for multiple languages 🌈
- **Code Execution: Sandpack** -
[now on branch](https://github.com/enricoros/big-agi/commit/f678a0d463d5e9cf0733f577e11bd612b7902d89) `variant-code-execution`
- Chat with GPT-4 and 3.5 Turbo 🧠💨
- Real-time streaming of AI responses ⚡
- **Voice Input** 🎙️ - works great on Chrome / Windows
- Integration: **[Paste.gg](docs/pixels/feature_paste_gg.png)** integration for chat sharing 📥
- Integration: **[Helicone](https://www.helicone.ai/)** integration for API observability 📊
- 🌙 Dark model - Wide mode ⛶
<br/>
@@ -89,7 +127,7 @@ with features that matter to them.
![Much features, so fun](docs/pixels/big-AGI-compo2b.png)
## Code 🧩
## Develop 🧩
![TypeScript](https://img.shields.io/badge/TypeScript-007ACC?style=&logo=typescript&logoColor=white)
![React](https://img.shields.io/badge/React-61DAFB?style=&logo=react&logoColor=black)
@@ -109,10 +147,46 @@ Now the app should be running on `http://localhost:3000`
### Integrations:
* [ElevenLabs](https://elevenlabs.io/) Voice Synthesis (bring your own voice too) - Settings > Text To Speech
* [Helicone](https://www.helicone.ai/) LLM Observability Platform - Settings > Advanced > API Host: 'oai.hconeai.com'
* [Helicone](https://www.helicone.ai/) LLM Observability Platform - Models > OpenAI > Advanced > API Host: 'oai.hconeai.com'
* [Paste.gg](https://paste.gg/) Paste Sharing - Chat Menu > Share via paste.gg
* [Prodia](https://prodia.com/) Image Generation - Settings > Image Generation > Api Key & Model
## Deploy with Docker 🐳
For more detailed information on deploying with Docker, please refer to the [docker deployment documentation](docs/deploy-docker.md).
### 🔧 Locally built image
> Firstly, write all your API keys and env vars to an `.env` file, and make sure the env file is used during *both build and run*.
> See [docs/environment-variables.md](docs/environment-variables.md) for a list of all environment variables.
```bash
```bash
docker build -t big-agi .
docker run --detach 'big-agi'
```
### Pre-built image
> Warning: the UI will still be asking for keys, as the image was built without the API keys
```bash
docker-compose up
```
## Deploy with Cloudflare Pages ☁️
Please refer to the [Cloudflare deployment documentation](docs/deploy-cloudflare.md).
## Deploy with Vercel 🚀
Create your GitHub fork, create a Vercel project over that fork, and deploy it. Or press the button below for convenience.
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-agi&env=OPENAI_API_KEY,OPENAI_API_HOST&envDescription=OpenAI%20KEY%20for%20your%20deployment.%20Set%20HOST%20only%20if%20non-default.)
<br/>
This project is licensed under the MIT License.
@@ -124,4 +198,4 @@ This project is licensed under the MIT License.
[//]: # ([![GitHub issues]&#40;https://img.shields.io/github/issues/enricoros/big-agi&#41;]&#40;https://github.com/enricoros/big-agi/issues&#41;)
Made with 💙
Made with 💙
+52
View File
@@ -0,0 +1,52 @@
import { createEmptyReadableStream, safeErrorString, serverFetchOrThrow } from '~/server/wire';
import { elevenlabsAccess, elevenlabsVoiceId, ElevenlabsWire, speechInputSchema } from '~/modules/elevenlabs/elevenlabs.router';
/* NOTE: Why does this file even exist?
This file is a workaround for a limitation in tRPC; it does not support ArrayBuffer responses,
and that would force us to use base64 encoding for the audio data, which would be a waste of
bandwidth. So instead, we use this file to make the request to ElevenLabs, and then return the
response as an ArrayBuffer. Unfortunately this means duplicating the code in the server-side
and client-side vs. the tRPC implementation. So at least we recycle the input structures.
*/
const handler = async (req: Request) => {
try {
// construct the upstream request
const {
elevenKey, text, voiceId, nonEnglish,
streaming, streamOptimization,
} = speechInputSchema.parse(await req.json());
const path = `/v1/text-to-speech/${elevenlabsVoiceId(voiceId)}` + (streaming ? `/stream?optimize_streaming_latency=${streamOptimization || 1}` : '');
const { headers, url } = elevenlabsAccess(elevenKey, path);
const body: ElevenlabsWire.TTSRequest = {
text: text,
...(nonEnglish && { model_id: 'eleven_multilingual_v1' }),
};
// elevenlabs POST
const upstreamResponse: Response = await serverFetchOrThrow(url, 'POST', headers, body);
// NOTE: this is disabled, as we pass-through what we get upstream for speed, as it is not worthy
// to wait for the entire audio to be downloaded before we send it to the client
// if (!streaming) {
// const audioArrayBuffer = await upstreamResponse.arrayBuffer();
// return new NextResponse(audioArrayBuffer, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
// }
// stream the data to the client
const audioReadableStream = upstreamResponse.body || createEmptyReadableStream();
return new Response(audioReadableStream, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
} catch (error: any) {
const fetchOrVendorError = safeErrorString(error) + (error?.cause ? ' · ' + error.cause : '');
console.log(`api/elevenlabs/speech: fetch issue: ${fetchOrVendorError}`);
return new Response(`[Issue] elevenlabs: ${fetchOrVendorError}`, { status: 500 });
}
};
export const runtime = 'edge';
export { handler as POST };
+2
View File
@@ -0,0 +1,2 @@
export const runtime = 'edge';
export { openaiStreamingRelayHandler as POST } from '~/modules/llms/transports/server/openai/openai.streaming';
+19
View File
@@ -0,0 +1,19 @@
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
import { appRouterEdge } from '~/server/api/trpc.router';
import { createTRPCFetchContext } from '~/server/api/trpc.server';
const handlerEdgeRoutes = (req: Request) =>
fetchRequestHandler({
router: appRouterEdge,
endpoint: '/api/trpc-edge',
req,
createContext: createTRPCFetchContext,
onError:
process.env.NODE_ENV === 'development'
? ({ path, error }) => console.error(`❌ tRPC-edge failed on ${path ?? '<no-path>'}:`, error)
: undefined,
});
export const runtime = 'edge';
export { handlerEdgeRoutes as GET, handlerEdgeRoutes as POST };
+19
View File
@@ -0,0 +1,19 @@
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
import { appRouterNode } from '~/server/api/trpc.router';
import { createTRPCFetchContext } from '~/server/api/trpc.server';
const handlerNodeRoutes = (req: Request) =>
fetchRequestHandler({
router: appRouterNode,
endpoint: '/api/trpc-node',
req,
createContext: createTRPCFetchContext,
onError:
process.env.NODE_ENV === 'development'
? ({ path, error }) => console.error(`❌ tRPC-node failed on ${path ?? '<no-path>'}:`, error)
: undefined,
});
export const runtime = 'nodejs';
export { handlerNodeRoutes as GET, handlerNodeRoutes as POST };
+10
View File
@@ -0,0 +1,10 @@
version: '3.9'
services:
big-agi:
image: ghcr.io/enricoros/big-agi:main
ports:
- "3000:3000"
env_file:
- .env
command: [ "next", "start", "-p", "3000" ]
+87
View File
@@ -0,0 +1,87 @@
# Configuring Azure OpenAI Service with `big-AGI`
The entire procedure takes about 5 minutes and involves creating an Azure account,
setting up the Azure OpenAI service, deploying models, and configuring `big-AGI`
to access these models.
Please note that Azure operates on a 'pay-as-you-go' pricing model and requires
credit card information tied to a 'subscription' to the Azure service.
## Configuring `big-AGI`
If you have an `API Endpoint` and `API Key`, you can configure big-AGI as follows:
1. Launch the `big-AGI` application
2. Go to the **Models** settings
3. Add a Vendor and select **Azure OpenAI**
- Enter the Endpoint (e.g., 'https://your-openai-api-1234.openai.azure.com/')
- Enter the API Key (e.g., 'fd5...........................ba')
The deployed models are now available in the application. If you don't have a configured
Azure OpenAI service instance, continue with the next section.
## Setting Up Azure
### Step 1: Azure Account & Subscription
1. Create an account on [azure.microsoft.com](https://azure.microsoft.com/en-us/)
2. Go to the [Azure Portal](https://portal.azure.com/)
3. Click on **Create a resource** in the top left corner
4. Search for **Subscription** and select **[Create Subscription](https://portal.azure.com/#create/Microsoft.Subscription)**
- Fill in the required fields and click on **Create**
- Note down the **Subscription ID** (e.g., `12345678-1234-1234-1234-123456789012`)
### Step 2: Apply for Azure OpenAI Service
We'll now be creating "OpenAI"-specific resources on Azure. This requires you to 'apply',
and acceptance should be quick (even as low as minutes).
1. Visit [Azure OpenAI Service](https://aka.ms/azure-openai)
2. Click on **Apply for access**
- Fill in the required fields (including the subscription ID) and click on **Apply**
Once your application is accepted, you can create OpenAI resources on Azure.
### Step 3: Create Azure OpenAI Resource
For more information, see [Azure: Create and deploy OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal)
1. Click on **Create a resource** in the top left corner
2. Search for **OpenAI** and select **[Create OpenAI](https://portal.azure.com/#create/Microsoft.CognitiveServicesOpenAI)**
3. Fill in the necessary fields on the **Create OpenAI** page
![Creating an OpenAI service](pixels/config-azure-openai-create.png)
- Select the subscription
- Select a resource group or create a new one
- Select the region. Note that the region determines the available models.
> For instance, **Canada East** offers GPT-4-32k models. For the full list, see [GPT-4 models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models)
- Name the service (e.g., `your-openai-api-1234`)
- Select a pricing tier (e.g., `S0` for standard)
- Select: "All networks, including the internet, can access this resource."
- Click on **Review + create** and then **Create**
After creating the resource, you can access the API Keys and Endpoints. At any point, you can go to
the OpenAI Service instance page to get this information.
- Click on **Go to resource**
- Click on **Develop**
- Copy the `Endpoint`, called "Language API", e.g. 'https://your-openai-api-1234.openai.azure.com/'
- Copy `KEY 1`
### Step 4: Deploy Models
By default, Azure OpenAI resource instances don't have models available. You need to deploy the models you want to use.
1. Click on **Model Deployments > Manage Deployments**
2. Click on **+Create New Deployment**
![Deploying a model](pixels/config-azure-openai-deploy.png)
- Select the model you want to deploy
- Optionally select a version
- name the model, e.g., `gpt4-32k-0613`
Repeat as necessary for each model you want to deploy.
## Resources
- [Azure OpenAI Service Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/)
- [Guide: Create an Azure OpenAI Resource](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal)
- [Azure OpenAI Models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models)
+34
View File
@@ -0,0 +1,34 @@
# Local LLM integration with `localai`
Integrate local Large Language Models (LLMs) with [LocalAI](https://localai.io).
_Last updated Nov 7, 2023_
## Instructions
### LocalAI installation and configuration
Follow the guide at: https://localai.io/basics/getting_started/
For instance with [Use luna-ai-llama2 with docker compose](https://localai.io/basics/getting_started/#example-use-luna-ai-llama2-model-with-docker-compose):
- clone LocalAI
- get the model
- copy the prompt template
- start docker
- -> the server will be listening on `localhost:8080`
- verify it works by going to [http://localhost:8080/v1/models](http://localhost:8080/v1/models) on
your browser and seeing listed the model you downloaded
### Integrating LocalAI with big-AGI
- Go to Models > Add a model source of type: **LocalAI**
- Enter the address: `http://localhost:8080` (default)
- If running remotely, replace localhost with the IP of the machine. Make sure to use the **IP:Port** format
- Load the models
- Select model & Chat
> NOTE: LocalAI does not list details about the models. Every model is assumed to be
> capable of chatting, and with a context window of 4096 tokens.
> Please update the [src/modules/llms/transports/server/openai/models.data.ts](../src/modules/llms/transports/server/openai/models.data.ts)
> file with the mapping information between LocalAI model IDs and names/descriptions/tokens, etc.
+54
View File
@@ -0,0 +1,54 @@
# Local LLM Integration with `text-web-ui` :llama:
Integrate local Large Language Models (LLMs) with
[oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui),
a specialized interface that includes a custom variant of the OpenAI API for a smooth integration process.
_Last updated on Nov 7, 2023_
### Components
The implementation of local LLMs involves the following components:
* **text-generation-webui**: A Python application with a Gradio web UI for operating Large Language Models.
* **Local Large Language Models "LLMs"**: Use large language models on your personal computer with consumer-grade GPUs or CPUs.
* **big-AGI**: An LLM UI that offers features such as Personas, OCR, Voice Support, Code Execution, AGI functions, and more.
## Instructions
This guide assumes that **big-AGI** is already installed on your system. Note that the text-generation-webui IP address must be accessible from the server running **big-AGI**.
### Text-web-ui Installation & Configuration:
1. Install [text-generation-webui](https://github.com/oobabooga/text-generation-webui#Installation).
- Download the one-click installer, extract it, and double-click on "start" - ~10 minutes
- Close it afterwards as we need to modify the startup flags
2. Enable the **openai extension**
- Edit `CMD_FLAGS.txt`
- Make sure that `--listen --extensions openai` is present and uncommented
3. Restart text-generation-webui
- Double-click on "start"
- You should see something like:
```
2023-11-07 21:24:26 INFO:Loading the extension "openai"...
2023-11-07 21:24:27 INFO:OpenAI compatible API URL:
http://0.0.0.0:5000/v1
```
- The OpenAI API is now running on port 5000, on both localhost (127.0.0.1) and your network IP address
4. Load your first model
- Open the text-generation-webui at [127.0.0.1:7860](http://127.0.0.1:7860/)
- Switch to the **Model** tab
- Download, for instance, `TheBloke/Llama-2-7b-Chat-GPTQ:gptq-4bit-32g-actorder_True` - 4.3 GB
- Select the model once it's loaded
### Integrating text-web-ui with big-AGI:
1. Integrating Text-Generation-WebUI with big-AGI:
- Go to Models > Add a model source of type: **Oobabooga**
- Enter the address: `http://127.0.0.1:5000`
- If running remotely, replace 127.0.0.1 with the IP of the machine. Make sure to use the **IP:Port** format
- Load the models
- The active model must be selected and LOADED on the text-generation-webui as it doesn't support model switching or parallel requests.
- Select model & Chat
Enjoy the privacy and flexibility of local LLMs with `big-AGI` and `text-generation-webui`!
+81
View File
@@ -0,0 +1,81 @@
# `Ollama` x `big-AGI` :llama:
This guide helps you connect [Ollama](https://ollama.ai) [models](https://ollama.ai/library) to
[big-AGI](https://big-agi.com) for a professional AI/AGI operation and a good UI/Conversational
experience. The integration brings the popular big-AGI features to Ollama, including: voice chats,
editing tools, models switching, personas, and more.
![config-local-ollama-0-example.png](pixels/config-ollama-0-example.png)
## Quick Integration Guide
1. **Ensure Ollama API Server is Running**: Before starting, make sure your Ollama API server is up and running.
2. **Add Ollama as a Model Source**: In `big-AGI`, navigate to the **Models** section, select **Add a model source**, and choose **Ollama**.
3. **Enter Ollama Host URL**: Provide the Ollama Host URL where the API server is accessible (e.g., `http://localhost:11434`).
4. **Refresh Model List**: Once connected, refresh the list of available models to include the Ollama models.
5. **Start Using AI Personas**: Select an Ollama model and begin interacting with AI personas tailored to your needs.
### Ollama: installation and Setup
For detailed instructions on setting up the Ollama API server, please refer to the
[Ollama download page](https://ollama.ai/download) and [instructions for linux](https://github.com/jmorganca/ollama/blob/main/docs/linux.md).
### Visual Guide
* After adding the `Ollama` model vendor, entering the IP address of an Ollama server, and refreshing models:
<img src="pixels/config-ollama-1-models.png" alt="config-local-ollama-1-models.png" style="max-width: 320px;">
* The `Ollama` admin panel, with the `Pull` button highlighted, after pulling the "Yi" model:
<img src="pixels/config-ollama-2-admin-pull.png" alt="config-local-ollama-2-admin-pull.png" style="max-width: 320px;">
* You can now switch model/persona dynamically and text/voice chat with the models:
<img src="pixels/config-ollama-3-chat.png" alt="config-local-ollama-3-chat.png" style="max-width: 320px;">
### Advanced: Model parameters
For users who wish to delve deeper into advanced settings, `big-AGI` offers additional configuration options, such
as the model temperature, maximum tokens, etc.
### Advanced: Ollama under a reverse proxy
You can elegantly expose your Ollama server to the internet (and thus make it easier to use from your server-side
big-AGI deployments) by exposing it on an http/https URL, such as: `https://yourdomain.com/ollama`
On Ubuntu Servers, you will need to install `nginx` and configure it to proxy requests to Ollama.
```bash
sudo apt update
sudo apt install nginx
sudo apt install certbot python3-certbot-nginx
sudo certbot --nginx -d yourdomain.com
```
Then, edit the nginx configuration file `/etc/nginx/sites-enabled/default` and add the following block:
```nginx
location /ollama/ {
proxy_pass http://localhost:11434;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
# Disable buffering for the streaming responses
proxy_buffering off;
}
```
Reach out to our community if you need help with this.
### Community and Support
Join our community to share your experiences, get help, and discuss best practices:
[![Official Discord](https://discordapp.com/api/guilds/1098796266906980422/widget.png?style=banner2)](https://discord.gg/MkH4qj2Jp9)
---
`big-AGI` is committed to providing a powerful, intuitive, and privacy-respecting AI experience.
We are excited for you to explore the possibilities with Ollama models. Happy creating!
+31
View File
@@ -0,0 +1,31 @@
# OpenRouter Configuration
[OpenRouter](https://openrouter.ai) is a standalone, premium service
that provides access to <Link href='https://openrouter.ai/docs#models' target='_blank'>exclusive AI models</Link>
such as GPT-4 32k, Claude, and more. These models are typically not available to the public.
This document details the process of integrating OpenRouter with big-AGI.
### 1. OpenRouter Account Setup and API Key Generation
1. Register for an OpenRouter account at [openrouter.ai](https://openrouter.ai) by clicking on Sign In > Continue with Google.
2. Top up your account (minimum $5) by navigating to [openrouter.ai/account](https://openrouter.ai/account) > Add Credits > Pay with Stripe.
3. Generate an API key at [openrouter.ai/keys](https://openrouter.ai/keys) > API Key > Generate API Key.
- **Remember to copy and securely store your API key** - the key will not be displayed again and will be in the format `sk-or-v1-...`.
- Keep the key confidential as it can be used to expend your credits.
### 2. Integrating OpenRouter with big-AGI
1. Launch big-AGI, and navigate to the AI **Models** settings.
2. Add a Vendor, and select **OpenRouter**.
![feature-openrouter-add.png](pixels/feature-openrouter-add.png)
3. Input the API key into the **OpenRouter API Key** field, and load the Models.
![feature-openrouter-configure.png](pixels/feature-openrouter-configure.png)
4. OpenAI GPT4-32k and other models will now be accessible and selectable in the application.
### Pricing
OpenRouter independently manages its service and pricing and is not affiliated with big-AGI.
For more detailed information, please visit [this page](https://openrouter.ai/docs#models).
Please note that running large models such as GPT-4 32k can be costly and may rapidly consume
credits - a single prompt may cost $1 or more, at the time of writing.
+47 -34
View File
@@ -1,55 +1,68 @@
# Deploying Next.js App on Cloudflare Pages
# Deploying a Next.js App on Cloudflare Pages
Follow these steps to deploy your Next.js app on Cloudflare Pages. This guide is based on
the [official Cloudflare developer documentation](https://developers.cloudflare.com/pages/framework-guides/deploy-a-nextjs-site/),
with a few additional steps.
> WARNING: Cloudflare Pages does not support traditional NodeJS runtimes, but only Edge Runtime functions.
>
> In this project we use Prisma connected to serverless Postgres, which at the moment cannot run on
> edge functions, so we cannot deploy this project on Cloudflare Pages.
>
> Workaround: Step 3.4. has been added below, to DELETE the NodeJS traditional runtime - which means that some
> parts of this application will not work.
> - [Side effects](https://github.com/enricoros/big-agi/blob/main/src/apps/chat/trade/server/trade.router.ts#L19):
> Sharing functionality to DB, and import from ChatGPT share, and post to Paste.GG will not work
> - See [Issue 174](https://github.com/enricoros/big-agi/issues/174).
>
> Longer term: follow [prisma/prisma: Support Edge Function deployments](https://github.com/prisma/prisma/issues/21394)
> and convert the Node runtime to Edge runtime once Prisma supports it.
## Step 1: Fork the Repository
This guide provides steps to deploy your Next.js app on Cloudflare Pages.
It is based on the [official Cloudflare developer documentation](https://developers.cloudflare.com/pages/framework-guides/deploy-a-nextjs-site/),
with some additional steps.
Fork the repository to your own GitHub account.
## Step 1: Repository Forking
## Step 2: Connect Cloudflare Pages to Your GitHub Account
Fork the repository to your personal GitHub account.
1. Go to the Cloudflare Pages section and click the `Create a project` button.
2. Click `Connect To Git` and give Cloudflare Pages either All GitHub account Repo access or selected Repo access. We
recommend using selected Repo access and selecting the forked repo from step 1.
## Step 2: Linking Cloudflare Pages to Your GitHub Account
## Step 3: Setup Build and Deployments
1. Navigate to the Cloudflare Pages section and click on the `Create a project` button.
2. Click `Connect To Git` and grant Cloudflare Pages access to either all GitHub account repositories or selected repositories.
We recommend using selected Repo access and selecting the forked repository from step 1.
1. Once you select the forked GitHub repo, click the `Begin Setup` button.
2. On this page, set your `Project name`, `Production branch` (e.g., main), and your Build settings.
3. Select `Next.js` from the `Framework preset` dropdown menu.
4. Leave the preset filled Build command and Build output directory as preset defaults.
5. Set `Environmental variables` (advanced) on this page to configure some variables as follows:
## Step 3: Configuring Build and Deployments
| Variable | Value |
|---------------------------|---------|
| `GO_VERSION` | `1.16` |
| `NEXT_TELEMETRY_DISABLED` | `1` |
| `NODE_VERSION` | `17` |
| `PHP_VERSION` | `7.4` |
| `PYTHON_VERSION` | `3.7` |
| `RUBY_VERSION` | `2.7.1` |
1. After selecting the forked GitHub repository, click the **Begin Setup** button
2. On this page, set your **Project name**, **Production branch** (e.g., main), and your Build settings
3. Choose `Next.js` from the **Framework preset** dropdown menu
4. Set a custom **Build Command**:
- `rm app/api/trpc-node/[trpc]/route.ts && npx @cloudflare/next-on-pages@1`
- see the tradeoffs for this deletion on the notice at the top
5. Keep the **Build output directory** as default
6. Click the **Save and Deploy** button
6. Click the `Save and Deploy` button.
## Step 4: Monitoring the Deployment Process
## Step 4: Monitor the Deployment Process
Observe the process as it initializes your build environment, clones the GitHub repository, builds the application, and deploys it
to the Cloudflare Network. Once complete, proceed to the project you created.
Watch the process run to initialize your build environment, clone the GitHub repo, build the application, and deploy to
the Cloudflare Network. Once that is done, proceed to the project you created.
## Step 5: Required: Set the `nodejs_compat` compatibility flag
## Step 5: Set up a Custom Domain
1. Navigate to the [Settings > Functions](https://dash.cloudflare.com/?to=/:account/pages/view/:pages-project/settings/functions) page of your newly created project
2. Scroll to `Compatibility flags` and enter "`nodejs_compat`" for both **Production** and **Preview** environments.
It should look like this: ![](pixels/config-deploy-cloudflare-compat2.png)
3. Re-deploy your project for the new flags to take effect
## Step 6: (Optional) Custom Domain Configuration
Use the `Custom domains` tab to set up your domain via CNAME.
## Step 6: Configure Access Policy and Web Analytics
## Step 7: (Optional) Access Policy and Web Analytics Configuration
Go to the `Settings` page and enable the following settings:
Navigate to the `Settings` page and enable the following settings:
1. Access Policy: Restrict [preview deployments](https://developers.cloudflare.com/pages/platform/preview-deployments/)
to members of your Cloudflare account via one-time pin and restrict primary `*.YOURPROJECT.pages.dev` domain.
See [Cloudflare Pages known issues](https://developers.cloudflare.com/pages/platform/known-issues/#enabling-access-on-your-pagesdev-domain)
for more information.
Refer to [Cloudflare Pages known issues](https://developers.cloudflare.com/pages/platform/known-issues/#enabling-access-on-your-pagesdev-domain)
for more details.
2. Enable Web Analytics.
Now you have successfully deployed your Next.js app on Cloudflare Pages.
Congratulations! You have successfully deployed your Next.js app on Cloudflare Pages.
+36 -15
View File
@@ -1,26 +1,47 @@
# Deploy `big-AGI` with Docker 🐳
# Deploying `big-AGI` with Docker
Deploy the big-AGI application using Docker containers for a consistent, efficient, and automated deployment process. Enjoy faster development cycles, easier collaboration, and seamless environment management. 🚀
Utilize Docker containers to deploy the big-AGI application for an efficient and automated deployment process.
Docker ensures faster development cycles, easier collaboration, and seamless environment management.
Docker is a platform for developing, packaging, and deploying applications as lightweight containers, ensuring consistent behavior across environments.
## 🔧 Local Build & Deployment
## `big-AGI` Docker Components
1. **Clone big-AGI**
2. **Build the Docker Image**: Build a local docker image from the provided Dockerfile. The command is typically `docker build -t big-agi .`
3. **Run the Docker Container**: Start a Docker container using the built image with the command `docker run -d -p 3000:3000 big-agi`
The big-AGI repository includes a Dockerfile and a GitHub Actions workflow for building and publishing a Docker image of the application.
> Note: If the Docker container is built without setting environment variables,
> the frontend UI will be unaware of them, despite the backend being able to use them at runtime.
> Therefore, ensure all necessary environment variables are set during the build process.
### Dockerfile
## Documentation
The [`Dockerfile`](../Dockerfile) sets up a Node.js environment, installs dependencies, and creates a production-ready version of the application.
The big-AGI repository includes a Dockerfile and a GitHub Actions workflow for building and publishing a
Docker image of the application.
### GitHub Actions Workflow
### Dockerfile: Containers
The [`.github/workflows/docker-image.yml`](../.github/workflows/docker-image.yml) file automates building and publishing the Docker image when changes are pushed to the `main` branch.
> A local build is recommended, as the 'ghcr' container is built without environment variables.
## Deploy Steps
The [`Dockerfile`](../Dockerfile) is used to create a Docker image. It establishes a Node.js environment,
installs dependencies, and creates a production-ready version of the application as a local container.
1. Clone the big-AGI repository
2. Navigate to the project directory
3. Build the Docker image using the provided Dockerfile
4. Run the Docker container with the built image
### GitHub Actions workflow
Embrace the benefits of Docker for a reliable and efficient big-AGI deployment. 🎉
The [`.github/workflows/docker-image.yml`](../.github/workflows/docker-image.yml) file automates the
building and publishing of the Docker images to the GitHub Container Registry (ghcr) when changes are
pushed to the `main` branch.
### Docker Compose
In addition, the repository also includes a `docker-compose.yaml` file, configured to run the pre-built
'ghcr image'. This file is used to define the `big-agi` service, the ports to expose, and the command to run.
If you have Docker Compose installed, you can run the Docker container with `docker-compose up`
to pull the Docker image (if it hasn't been pulled already) and start a Docker container. If you want to
update the image to the latest version, you can run `docker-compose pull` before starting the service.
```bash
docker-compose up -d
```
Leverage Docker's capabilities for a reliable and efficient big-AGI deployment.
+111
View File
@@ -0,0 +1,111 @@
# Environment Variables
This document provides an explanation of the environment variables used in the big-AGI application.
**All variables are optional**; _UI options_ take precedence over _backend environment variables_,
which in turn take precedence over _defaults_. This file is kept in sync with [`../src/common/types/env.d.ts`](../src/common/types/env.d.ts).
### Setting Environment Variables
Environment variables can be set by creating a `.env` file in the root directory of the project.
> For Docker deployment, ensure all necessary environment variables are set **both during build and run**.
> If the Docker container is built without setting environment variables, the frontend UI will be unaware
> of them, despite the backend being able to use them at runtime.
The following is an example `.env` for copy-paste convenience:
```bash
# Database
POSTGRES_PRISMA_URL=
POSTGRES_URL_NON_POOLING=
# LLMs
OPENAI_API_KEY=
OPENAI_API_HOST=
OPENAI_API_ORG_ID=
AZURE_OPENAI_API_ENDPOINT=
AZURE_OPENAI_API_KEY=
ANTHROPIC_API_KEY=
ANTHROPIC_API_HOST=
OLLAMA_API_HOST=
OPENROUTER_API_KEY=
# Model Observability: Helicone
HELICONE_API_KEY=
# Text-To-Speech
ELEVENLABS_API_KEY=
ELEVENLABS_API_HOST=
ELEVENLABS_VOICE_ID=
# Google Custom Search
GOOGLE_CLOUD_API_KEY=
GOOGLE_CSE_ID=
# Text-To-Image
PRODIA_API_KEY=
```
## Variables Documentation
### Database
To enable features such as Chat Link Sharing, you need to connect the backend to a database. We require
serverless Postgres, which is available on Vercel, Neon, and more.
Also make sure that you run `npx prisma db push` to create the initial schema on the database for the
first time (or to update it at a later stage).
| Variable | Description |
|----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `POSTGRES_PRISMA_URL` | The URL of the Postgres database used by Prisma - example: `postgres://USER:PASS@SOMEHOST.postgres.vercel-storage.com/SOMEDB?pgbouncer=true&connect_timeout=15` |
| `POSTGRES_URL_NON_POOLING` | The URL of the Postgres database without pooling |
### LLMs
The following variables when set will enable the corresponding LLMs on the server-side, without
requiring the user to enter an API key
| Variable | Description | Required |
|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------|
| `OPENAI_API_KEY` | API key for OpenAI | Recommended |
| `OPENAI_API_HOST` | Changes the backend host for the OpenAI vendor, to enable platforms such as Helicone and CloudFlare AI Gateway | Optional |
| `OPENAI_API_ORG_ID` | Sets the "OpenAI-Organization" header field to support organization users | Optional |
| `AZURE_OPENAI_API_ENDPOINT` | Azure OpenAI endpoint - host only, without the path | Optional, but if set `AZURE_OPENAI_API_KEY` must also be set |
| `AZURE_OPENAI_API_KEY` | Azure OpenAI API key, see [config-azure-openai.md](config-azure-openai.md) | Optional, but if set `AZURE_OPENAI_API_ENDPOINT` must also be set |
| `ANTHROPIC_API_KEY` | The API key for Anthropic | Optional |
| `ANTHROPIC_API_HOST` | Changes the backend host for the Anthropic vendor, to enable platforms such as [config-aws-bedrock.md](config-aws-bedrock.md) | Optional |
| `OLLAMA_API_HOST` | Changes the backend host for the Ollama vendor. See [config-ollama.md](config-ollama.md) | |
| `OPENROUTER_API_KEY` | The API key for OpenRouter | Optional |
### Model Observability: Helicone
Helicone provides observability to your LLM calls. It is a paid service, with a generous free tier.
It is currently supported for:
- **Anthropic**: by setting the Helicone API key, Helicone is automatically activated
- **OpenAI**: you also need to set `OPENAI_API_HOST` to `oai.hconeai.com`, to enable routing
| Variable | Description |
|--------------------|--------------------------|
| `HELICONE_API_KEY` | The API key for Helicone |
### Specials
Enable the app to Talk, Draw, and Google things up.
| Variable | Description |
|:-------------------------|:------------------------------------------------------------------------------------------------------------------------|
| **Text-To-Speech** | [ElevenLabs](https://elevenlabs.io/) is a high quality speech synthesis service |
| `ELEVENLABS_API_KEY` | ElevenLabs API Key - used for calls, etc. |
| `ELEVENLABS_API_HOST` | Custom host for ElevenLabs |
| `ELEVENLABS_VOICE_ID` | Default voice ID for ElevenLabs |
| **Google Custom Search** | [Google Programmable Search Engine](https://programmablesearchengine.google.com/about/) produces links to pages |
| `GOOGLE_CLOUD_API_KEY` | Google Cloud API Key, used with the '/react' command - [Link to GCP](https://console.cloud.google.com/apis/credentials) |
| `GOOGLE_CSE_ID` | Google Custom/Programmable Search Engine ID - [Link to PSE](https://programmablesearchengine.google.com/) |
| **Text-To-Image** | [Prodia](https://prodia.com/) is a reliable image generation service |
| `PRODIA_API_KEY` | Prodia API Key - used with '/imagine ...' |
---
Binary file not shown.

Before

Width:  |  Height:  |  Size: 283 KiB

After

Width:  |  Height:  |  Size: 279 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 255 KiB

After

Width:  |  Height:  |  Size: 209 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 626 KiB

After

Width:  |  Height:  |  Size: 618 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 29 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 370 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 34 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 37 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 48 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 38 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 60 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 8.2 KiB

After

Width:  |  Height:  |  Size: 5.8 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.8 MiB

After

Width:  |  Height:  |  Size: 1.6 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 20 KiB

After

Width:  |  Height:  |  Size: 13 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 84 KiB

After

Width:  |  Height:  |  Size: 55 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 16 KiB

After

Width:  |  Height:  |  Size: 11 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 13 KiB

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 79 KiB

After

Width:  |  Height:  |  Size: 54 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 80 KiB

After

Width:  |  Height:  |  Size: 54 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.7 KiB

After

Width:  |  Height:  |  Size: 2.6 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 48 KiB

After

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 8.6 KiB

After

Width:  |  Height:  |  Size: 5.6 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 15 KiB

After

Width:  |  Height:  |  Size: 9.7 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 94 KiB

After

Width:  |  Height:  |  Size: 60 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 195 KiB

After

Width:  |  Height:  |  Size: 157 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 192 KiB

After

Width:  |  Height:  |  Size: 156 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 14 KiB

After

Width:  |  Height:  |  Size: 10 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 23 KiB

After

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 62 KiB

After

Width:  |  Height:  |  Size: 49 KiB

+22 -8
View File
@@ -1,14 +1,12 @@
/** @type {import('next').NextConfig} */
const nextConfig = {
let nextConfig = {
reactStrictMode: true,
env: {
// defaults to TRUE, unless API Keys are set at build time; this flag is used by the UI
HAS_SERVER_KEY_OPENAI: !!process.env.OPENAI_API_KEY,
HAS_SERVER_KEY_ELEVENLABS: !!process.env.ELEVENLABS_API_KEY,
HAS_SERVER_KEY_PRODIA: !!process.env.PRODIA_API_KEY,
HAS_SERVER_KEYS_GOOGLE_CSE: !!process.env.GOOGLE_CLOUD_API_KEY && !!process.env.GOOGLE_CSE_ID,
modularizeImports: {
'@mui/icons-material': {
transform: '@mui/icons-material/{{member}}',
},
},
webpack(config, { isServer, dev }) {
webpack: (config, _options) => {
// @mui/joy: anything material gets redirected to Joy
config.resolve.alias['@mui/material'] = '@mui/joy';
@@ -20,6 +18,22 @@ const nextConfig = {
return config;
},
// NOTE: the following shall be replaced by runtime config
env: {
HAS_SERVER_DB_PRISMA: !!process.env.POSTGRES_PRISMA_URL && !!process.env.POSTGRES_URL_NON_POOLING,
HAS_SERVER_KEYS_GOOGLE_CSE: !!process.env.GOOGLE_CLOUD_API_KEY && !!process.env.GOOGLE_CSE_ID,
HAS_SERVER_KEY_ANTHROPIC: !!process.env.ANTHROPIC_API_KEY,
HAS_SERVER_KEY_AZURE_OPENAI: !!process.env.AZURE_OPENAI_API_KEY && !!process.env.AZURE_OPENAI_API_ENDPOINT,
HAS_SERVER_KEY_ELEVENLABS: !!process.env.ELEVENLABS_API_KEY,
HAS_SERVER_HOST_OLLAMA: !!process.env.OLLAMA_API_HOST,
HAS_SERVER_KEY_OPENAI: !!process.env.OPENAI_API_KEY,
HAS_SERVER_KEY_OPENROUTER: !!process.env.OPENROUTER_API_KEY,
HAS_SERVER_KEY_PRODIA: !!process.env.PRODIA_API_KEY,
},
};
// conditionally enable the nextjs bundle analyzer
if (process.env.ANALYZE_BUNDLE)
nextConfig = require('@next/bundle-analyzer')()(nextConfig);
module.exports = nextConfig;
+2212 -1602
View File
File diff suppressed because it is too large Load Diff
+55 -29
View File
@@ -1,45 +1,71 @@
{
"name": "big-agi",
"version": "0.9.1",
"version": "1.4.0",
"private": true,
"engines": {
"node": ">=18.0.0"
},
"scripts": {
"dev": "next dev",
"dev": "next dev --turbo",
"build": "next build",
"start": "next start",
"lint": "next lint"
"lint": "next lint",
"env:pull": "npx vercel env pull .env.development.local",
"postinstall": "prisma generate",
"db:push": "prisma db push",
"db:studio": "prisma studio"
},
"dependencies": {
"@dqbd/tiktoken": "^1.0.7",
"@emotion/react": "^11.10.8",
"@emotion/server": "^11.10.0",
"@emotion/styled": "^11.10.8",
"@mui/icons-material": "^5.11.16",
"@mui/joy": "^5.0.0-alpha.77",
"@tanstack/react-query": "^4.29.5",
"@vercel/analytics": "^1.0.0",
"eventsource-parser": "^1.0.0",
"next": "^13.3.2",
"pdfjs-dist": "^3.5.141",
"@emotion/cache": "^11.11.0",
"@emotion/react": "^11.11.1",
"@emotion/server": "^11.11.0",
"@emotion/styled": "^11.11.0",
"@mui/icons-material": "^5.14.16",
"@mui/joy": "^5.0.0-beta.14",
"@next/bundle-analyzer": "~14.0.2",
"@prisma/client": "^5.5.2",
"@sanity/diff-match-patch": "^3.1.1",
"@tanstack/react-query": "^4.36.1",
"@trpc/client": "^10.43.3",
"@trpc/next": "^10.43.3",
"@trpc/react-query": "^10.43.3",
"@trpc/server": "^10.43.3",
"@vercel/analytics": "^1.1.1",
"browser-fs-access": "^0.35.0",
"eventsource-parser": "^1.1.1",
"idb-keyval": "^6.2.1",
"next": "~14.0.2",
"pdfjs-dist": "3.11.174",
"plantuml-encoder": "^1.4.0",
"prismjs": "^1.29.0",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-markdown": "^8.0.7",
"remark-gfm": "^3.0.1",
"uuid": "^9.0.0",
"zustand": "^4.3.7"
"react-katex": "^3.0.1",
"react-markdown": "^9.0.0",
"react-timeago": "^7.2.0",
"remark-gfm": "^4.0.0",
"superjson": "^2.2.1",
"tesseract.js": "^5.0.3",
"uuid": "^9.0.1",
"zod": "^3.22.4",
"zustand": "~4.3.9"
},
"devDependencies": {
"@types/node": "^18.16.3",
"@types/prismjs": "^1.26.0",
"@types/react": "^18.2.0",
"@types/react-dom": "^18.2.1",
"@types/uuid": "^9.0.1",
"eslint": "^8.39.0",
"eslint-config-next": "^13.3.2",
"prettier": "^2.8.8",
"typescript": "^5.0.4"
"@types/node": "^20.9.0",
"@types/plantuml-encoder": "^1.4.2",
"@types/prismjs": "^1.26.3",
"@types/react": "^18.2.37",
"@types/react-dom": "^18.2.15",
"@types/react-katex": "^3.0.3",
"@types/react-timeago": "^4.1.6",
"@types/uuid": "^9.0.7",
"@typescript-eslint/eslint-plugin": "^6.10.0",
"@typescript-eslint/parser": "^6.10.0",
"eslint": "^8.53.0",
"eslint-config-next": "~14.0.2",
"prettier": "^3.0.3",
"prisma": "^5.5.2",
"typescript": "^5.2.2"
},
"engines": {
"node": "^20.0.0 || ^18.0.0"
}
}
+22 -6
View File
@@ -6,9 +6,13 @@ import { CacheProvider, EmotionCache } from '@emotion/react';
import { CssBaseline, CssVarsProvider } from '@mui/joy';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import '@/common/styles/GithubMarkdown.css';
import { Brand } from '@/common/brand';
import { createEmotionCache, theme } from '@/common/theme';
import { apiQuery } from '~/common/util/trpc.client';
import 'katex/dist/katex.min.css';
import '~/common/styles/CodePrism.css'
import '~/common/styles/GithubMarkdown.css';
import { Brand } from '~/common/brand';
import { createEmotionCache, theme } from '~/common/theme';
// Client-side cache, shared for the whole session of the user in the browser.
@@ -18,8 +22,17 @@ export interface MyAppProps extends AppProps {
emotionCache?: EmotionCache;
}
export default function MyApp({ Component, emotionCache = clientSideEmotionCache, pageProps }: MyAppProps) {
const [queryClient] = React.useState(() => new QueryClient());
function MyApp({ Component, emotionCache = clientSideEmotionCache, pageProps }: MyAppProps) {
const [queryClient] = React.useState(() => new QueryClient({
defaultOptions: {
queries: {
retry: false,
},
mutations: {
retry: false,
},
},
}));
return <>
<CacheProvider value={emotionCache}>
<Head>
@@ -37,4 +50,7 @@ export default function MyApp({ Component, emotionCache = clientSideEmotionCache
</CacheProvider>
<VercelAnalytics debug={false} />
</>;
}
}
// enables the react-query api invocation
export default apiQuery.withTRPC(MyApp);
+6 -6
View File
@@ -4,13 +4,14 @@ import { default as Document, DocumentContext, DocumentProps, Head, Html, Main,
import createEmotionServer from '@emotion/server/create-instance';
import { getInitColorSchemeScript } from '@mui/joy/styles';
import { Brand } from '@/common/brand';
import { Brand } from '~/common/brand';
import { bodyFontClassName, createEmotionCache } from '~/common/theme';
import { MyAppProps } from './_app';
import { bodyFontClassName, createEmotionCache } from '@/common/theme';
interface MyDocumentProps extends DocumentProps {
emotionStyleTags: JSX.Element[];
emotionStyleTags: React.JSX.Element[];
}
export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
@@ -19,7 +20,6 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
<Head>
{/* Meta (missing Title, set by the App or Page) */}
<meta name='description' content={Brand.Meta.Description} />
<meta name='keywords' content={Brand.Meta.Keywords} />
<meta name='theme-color' content={Brand.Meta.ThemeColor} />
{/* Favicons & PWA */}
@@ -32,7 +32,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
<meta name='apple-mobile-web-app-status-bar-style' content='black' />
{/* Opengraph */}
<meta property='og:title' content={Brand.Meta.Title} />
<meta property='og:title' content={Brand.Title.Common} />
<meta property='og:description' content={Brand.Meta.Description} />
{Brand.URIs.CardImage && <meta property='og:image' content={Brand.URIs.CardImage} />}
<meta property='og:url' content={Brand.URIs.Home} />
@@ -42,7 +42,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
{/* Twitter */}
<meta property='twitter:card' content='summary_large_image' />
<meta property='twitter:url' content={Brand.URIs.Home} />
<meta property='twitter:title' content={Brand.Meta.Title} />
<meta property='twitter:title' content={Brand.Title.Common} />
<meta property='twitter:description' content={Brand.Meta.Description} />
{Brand.URIs.CardImage && <meta property='twitter:image' content={Brand.URIs.CardImage} />}
<meta name='twitter:site' content={Brand.Meta.TwitterSite} />
-77
View File
@@ -1,77 +0,0 @@
import { NextRequest, NextResponse } from 'next/server';
import { ElevenLabs } from '@/modules/elevenlabs/elevenlabs.types';
/**
 * Resolves the ElevenLabs connection settings.
 * Host comes from ELEVENLABS_API_HOST (default 'api.elevenlabs.io', any
 * 'https://' prefix removed); the key prefers the caller-supplied value,
 * falling back to ELEVENLABS_API_KEY, then the empty string.
 */
function parseApiParameters(apiKey?: string) {
  const hostSetting = process.env.ELEVENLABS_API_HOST || 'api.elevenlabs.io';
  const resolvedHost = hostSetting.trim().split('https://').join('');
  const resolvedKey = (apiKey || process.env.ELEVENLABS_API_KEY || '').trim();
  return {
    apiHost: resolvedHost,
    apiHeaders: {
      'Content-Type': 'application/json',
      'xi-api-key': resolvedKey,
    },
  };
}
/**
 * Converts a non-OK ElevenLabs HTTP response into a thrown Error.
 * The upstream JSON error payload (or null when the body is not JSON)
 * is logged and embedded in the thrown message. OK responses are a no-op.
 */
async function rethrowElevenLabsError(response: Response) {
  if (response.ok)
    return;
  let errorPayload: object | null = null;
  try {
    errorPayload = await response.json();
  } catch (e) {
    // body was not JSON: keep the payload as null
  }
  console.error('Error in ElevenLabs API:', errorPayload);
  throw new Error('ElevenLabs error: ' + JSON.stringify(errorPayload));
}
/**
 * Authenticated GET against the ElevenLabs API; returns the parsed JSON body.
 * Throws (via rethrowElevenLabsError) on any non-OK upstream status.
 */
export async function getFromElevenLabs<TJson extends object>(apiKey: string, apiPath: string): Promise<TJson> {
  const { apiHost, apiHeaders } = parseApiParameters(apiKey);
  const upstreamResponse = await fetch(`https://${apiHost}${apiPath}`, {
    method: 'GET',
    headers: apiHeaders,
  });
  await rethrowElevenLabsError(upstreamResponse);
  return await upstreamResponse.json();
}
/**
 * Authenticated JSON POST against the ElevenLabs API; returns the raw Response
 * (callers decide how to consume the body, e.g. as an audio stream).
 * Throws (via rethrowElevenLabsError) on any non-OK upstream status.
 * @param signal optional AbortSignal to cancel the upstream request
 */
export async function postToElevenLabs<TBody extends object>(apiKey: string, apiPath: string, body: TBody, signal?: AbortSignal): Promise<Response> {
  const { apiHost, apiHeaders } = parseApiParameters(apiKey);
  const upstreamResponse = await fetch(`https://${apiHost}${apiPath}`, {
    method: 'POST',
    headers: apiHeaders,
    body: JSON.stringify(body),
    signal,
  });
  await rethrowElevenLabsError(upstreamResponse);
  return upstreamResponse;
}
/**
 * Edge API handler: text-to-speech via ElevenLabs.
 * Expects an ElevenLabs.API.TextToSpeech.RequestBody and responds with
 * audio/mpeg bytes on success, or a 500 with a JSON-encoded error string.
 */
export default async function handler(req: NextRequest) {
  try {
    const { apiKey = '', text, voiceId: userVoiceId, nonEnglish } = (await req.json()) as ElevenLabs.API.TextToSpeech.RequestBody;

    // voice resolution order: user choice -> env default -> built-in fallback ID
    const voiceId = userVoiceId || process.env.ELEVENLABS_VOICE_ID || '21m00Tcm4TlvDq8ikWAM';

    const requestPayload: ElevenLabs.Wire.TextToSpeech.Request = {
      text: text,
      // non-English input requires the multilingual model
      ...(nonEnglish ? { model_id: 'eleven_multilingual_v1' } : {}),
    };

    const response = await postToElevenLabs<ElevenLabs.Wire.TextToSpeech.Request>(apiKey, `/v1/text-to-speech/${voiceId}`, requestPayload);
    const audioBuffer: ElevenLabs.API.TextToSpeech.Response = await response.arrayBuffer();
    return new NextResponse(audioBuffer, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
  } catch (error) {
    console.error('Error posting to ElevenLabs', error);
    // FIX: the error label used to read 'speechToText' — this endpoint is text-to-speech
    return new NextResponse(JSON.stringify(`textToSpeech error: ${error?.toString() || 'Network issue'}`), { status: 500 });
  }
}
// noinspection JSUnusedGlobalSymbols
// Next.js API route config: run this handler on the Edge runtime
// rather than the default Node.js serverless runtime.
export const config = {
  runtime: 'edge',
};
-48
View File
@@ -1,48 +0,0 @@
import { NextRequest, NextResponse } from 'next/server';
import { ElevenLabs } from '@/modules/elevenlabs/elevenlabs.types';
import { getFromElevenLabs } from './speech';
/**
 * Edge API handler: lists the available ElevenLabs voices.
 * Custom (non-'premade') voices are ordered first, and the first voice in the
 * resulting list is flagged as the default. Errors yield a 500 JSON payload.
 */
export default async function handler(req: NextRequest) {
  try {
    const { apiKey = '' } = (await req.json()) as ElevenLabs.API.Voices.RequestBody;

    const voicesList = await getFromElevenLabs<ElevenLabs.Wire.Voices.List>(apiKey, '/v1/voices');

    // push the stock 'premade' voices to the bottom, keeping custom voices on top
    voicesList.voices.sort((a, b) =>
      (a.category === 'premade' ? 1 : 0) - (b.category === 'premade' ? 1 : 0));

    // adapt the wire format to the app-level shape; the first entry becomes the default
    const response: ElevenLabs.API.Voices.Response = {
      voices: voicesList.voices.map((voice, voiceIndex) => ({
        id: voice.voice_id,
        name: voice.name,
        description: voice.description,
        previewUrl: voice.preview_url,
        category: voice.category,
        default: voiceIndex === 0,
      })),
    };

    return new NextResponse(JSON.stringify(response), { status: 200, headers: { 'Content-Type': 'application/json' } });
  } catch (error) {
    console.error('Error fetching voices from ElevenLabs:', error);
    return new NextResponse(
      JSON.stringify({
        type: 'error',
        error: error?.toString() || error || 'Network issue',
      }),
      { status: 500 },
    );
  }
}
// noinspection JSUnusedGlobalSymbols
// Next.js API route config: run this handler on the Edge runtime
// rather than the default Node.js serverless runtime.
export const config = {
  runtime: 'edge',
};
-27
View File
@@ -1,27 +0,0 @@
import { NextRequest, NextResponse } from 'next/server';
import { OpenAI } from '@/modules/openai/openai.types';
import { openaiPost, toApiChatRequest, toWireCompletionRequest } from '@/modules/openai/openai.server';
/**
 * Edge API handler: non-streaming OpenAI chat completion.
 * Parses the incoming body into API access info plus chat parameters,
 * forwards to /v1/chat/completions, and returns only the first choice's
 * message. Any failure is reported as a 400 with an '[Issue]' prefix.
 */
export default async function handler(req: NextRequest) {
  try {
    const incomingBody = await req.json();
    const { api, ...chatParams } = await toApiChatRequest(incomingBody);

    // build the upstream (non-streaming) completion request and execute it
    const wireRequest: OpenAI.Wire.Chat.CompletionRequest = toWireCompletionRequest(chatParams, false);
    const wireResponse: OpenAI.Wire.Chat.CompletionResponse = await openaiPost(api, '/v1/chat/completions', wireRequest);

    return new NextResponse(JSON.stringify({
      message: wireResponse.choices[0].message,
    } satisfies OpenAI.API.Chat.Response));
  } catch (error: any) {
    console.error('Fetch request failed:', error);
    return new NextResponse(`[Issue] ${error}`, { status: 400 });
  }
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
-30
View File
@@ -1,30 +0,0 @@
import { NextRequest, NextResponse } from 'next/server';
import { OpenAI } from '@/modules/openai/openai.types';
import { openaiGet, toApiChatRequest } from '@/modules/openai/openai.server';
export default async function handler(req: NextRequest): Promise<NextResponse> {
try {
// FIXME: this is currently broken, the "extractOpenAIChatInputs" is expecting messages/modelId, which we don't have here
// keep working on this
const requestBodyJson = await req.json();
const { api } = await toApiChatRequest(requestBodyJson);
const wireModels = await openaiGet<OpenAI.Wire.Models.Response>(api, '/v1/models');
// flatten IDs (most recent first)
return new NextResponse(JSON.stringify({
models: wireModels.data.map((model) => ({ id: model.id, created: model.created })),
} satisfies OpenAI.API.Models.Response));
} catch (error: any) {
console.error('Fetch request failed:', error);
return new NextResponse(`[Issue] ${error}`, { status: 400 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
-117
View File
@@ -1,117 +0,0 @@
import { NextRequest, NextResponse } from 'next/server';
import { createParser } from 'eventsource-parser';
import { OpenAI } from '@/modules/openai/openai.types';
import { openaiPostResponse, toApiChatRequest, toWireCompletionRequest } from '@/modules/openai/openai.server';
async function chatStreamRepeater(input: OpenAI.API.Chat.Request, signal: AbortSignal): Promise<ReadableStream> {
// Handle the abort event when the connection is closed by the client
signal.addEventListener('abort', () => {
console.log('Client closed the connection.');
});
// begin event streaming from the OpenAI API
const encoder = new TextEncoder();
let upstreamResponse: Response;
try {
const request: OpenAI.Wire.Chat.CompletionRequest = toWireCompletionRequest(input, true);
upstreamResponse = await openaiPostResponse(input.api, '/v1/chat/completions', request, signal);
} catch (error: any) {
console.log(error);
const message = '[OpenAI Issue] ' + (error?.message || typeof error === 'string' ? error : JSON.stringify(error)) + (error?.cause ? ' · ' + error.cause : '');
return new ReadableStream({
start: controller => {
controller.enqueue(encoder.encode(message));
controller.close();
},
});
}
// decoding and re-encoding loop
const onReadableStreamStart = async (controller: ReadableStreamDefaultController) => {
let hasBegun = false;
// stream response (SSE) from OpenAI is split into multiple chunks. this function
// will parse the event into a text stream, and re-emit it to the client
const upstreamParser = createParser(event => {
// ignore reconnect interval
if (event.type !== 'event')
return;
// https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream
if (event.data === '[DONE]') {
controller.close();
return;
}
try {
const json: OpenAI.Wire.Chat.CompletionResponseChunked = JSON.parse(event.data);
// ignore any 'role' delta update
if (json.choices[0].delta?.role)
return;
// stringify and send the first packet as a JSON object
if (!hasBegun) {
hasBegun = true;
const firstPacket: OpenAI.API.Chat.StreamingFirstResponse = {
model: json.model,
};
controller.enqueue(encoder.encode(JSON.stringify(firstPacket)));
}
// transmit the text stream
const text = json.choices[0].delta?.content || '';
controller.enqueue(encoder.encode(text));
} catch (error) {
// maybe parse error
console.error('Error parsing OpenAI response', error);
controller.error(error);
}
});
// https://web.dev/streams/#asynchronous-iteration
const decoder = new TextDecoder();
for await (const upstreamChunk of upstreamResponse.body as any)
upstreamParser.feed(decoder.decode(upstreamChunk, { stream: true }));
};
return new ReadableStream({
start: onReadableStreamStart,
cancel: (reason) => console.log('chatStreamRepeater cancelled', reason),
});
}
export default async function handler(req: NextRequest): Promise<Response> {
try {
const requestBodyJson = await req.json();
const chatRequest: OpenAI.API.Chat.Request = await toApiChatRequest(requestBodyJson);
const chatResponseStream: ReadableStream = await chatStreamRepeater(chatRequest, req.signal);
return new NextResponse(chatResponseStream);
} catch (error: any) {
if (error.name === 'AbortError') {
console.log('Fetch request aborted in handler');
return new Response('Request aborted by the user.', { status: 499 }); // Use 499 status code for client closed request
} else if (error.code === 'ECONNRESET') {
console.log('Connection reset by the client in handler');
return new Response('Connection reset by the client.', { status: 499 }); // Use 499 status code for client closed request
} else {
console.error('Fetch request failed:', error);
return new NextResponse(`[Issue] ${error}`, { status: 400 });
}
}
};
//noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
-88
View File
@@ -1,88 +0,0 @@
// noinspection ExceptionCaughtLocallyJS
import { NextRequest, NextResponse } from 'next/server';
import { Prodia } from '@/modules/prodia/prodia.types';
export const prodiaHeaders = (apiKey: string): Record<string, string> => ({
'X-Prodia-Key': (apiKey || process.env.PRODIA_API_KEY || '').trim(),
});
async function createGenerationJob(apiKey: string, jobRequest: Prodia.Wire.Imagine.JobRequest): Promise<Prodia.Wire.Imagine.JobResponse> {
const response = await fetch('https://api.prodia.com/v1/job', {
method: 'POST',
headers: {
...prodiaHeaders(apiKey),
'Content-Type': 'application/json',
},
body: JSON.stringify(jobRequest),
});
if (response.status !== 200) {
console.log('Bad Prodia Response:', await response.text());
throw new Error(`Bad Prodia Response: ${response.status}`);
}
return await response.json();
}
async function getJobStatus(apiKey: string, jobId: string): Promise<Prodia.Wire.Imagine.JobResponse> {
const response = await fetch(`https://api.prodia.com/v1/job/${jobId}`, {
headers: prodiaHeaders(apiKey),
});
if (response.status !== 200)
throw new Error(`Bad Prodia Response: ${response.status}`);
return await response.json();
}
export default async function handler(req: NextRequest) {
// timeout, in seconds
const timeout = 15;
const tStart = Date.now();
try {
const { apiKey = '', prompt, prodiaModelId, negativePrompt, steps, cfgScale, seed } = (await req.json()) as Prodia.API.Imagine.RequestBody;
// crate the job, getting back a job ID
const jobRequest: Prodia.Wire.Imagine.JobRequest = {
model: prodiaModelId,
prompt,
...(!!cfgScale && { cfg_scale: cfgScale }),
...(!!steps && { steps }),
...(!!negativePrompt && { negative_prompt: negativePrompt }),
...(!!seed && { seed }),
};
let job: Prodia.Wire.Imagine.JobResponse = await createGenerationJob(apiKey, jobRequest);
// poll the job status until it's done
let sleepDelay = 2000;
while (job.status !== 'succeeded' && job.status !== 'failed' && (Date.now() - tStart) < (timeout * 1000)) {
await new Promise(resolve => setTimeout(resolve, sleepDelay));
job = await getJobStatus(apiKey, job.job);
if (sleepDelay > 250)
sleepDelay /= 2;
}
// check for success
const elapsed = Math.round((Date.now() - tStart) / 100) / 10;
if (job.status !== 'succeeded' || !job.imageUrl)
throw new Error(`Prodia image generation failed within ${elapsed}s`);
// respond with the image URL
const altText = `Prodia generated "${jobRequest.prompt}". Options: ${JSON.stringify({ seed: job.params })}.`;
const response: Prodia.API.Imagine.Response = { status: 'success', imageUrl: job.imageUrl, altText, elapsed };
return new NextResponse(JSON.stringify(response));
} catch (error) {
console.error('Handler failed:', error);
const elapsed = Math.round((Date.now() - tStart) / 100) / 10;
const response: Prodia.API.Imagine.Response = { status: 'error', error: error?.toString() || 'Network issue', elapsed };
return new NextResponse(JSON.stringify(response), { status: 500 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
-48
View File
@@ -1,48 +0,0 @@
import { NextRequest, NextResponse } from 'next/server';
import { Prodia } from '@/modules/prodia/prodia.types';
// for lack of an API
const HARDCODED_MODELS: Prodia.API.Models.Response = {
models: [
{ id: 'sdv1_4.ckpt [7460a6fa]', label: 'Stable Diffusion 1.4', priority: 8 },
{ id: 'v1-5-pruned-emaonly.ckpt [81761151]', label: 'Stable Diffusion 1.5', priority: 9 },
{ id: 'anythingv3_0-pruned.ckpt [2700c435]', label: 'Anything V3.0' },
{ id: 'anything-v4.5-pruned.ckpt [65745d25]', label: 'Anything V4.5' },
{ id: 'analog-diffusion-1.0.ckpt [9ca13f02]', label: 'Analog Diffusion' },
{ id: 'theallys-mix-ii-churned.safetensors [5d9225a4]', label: `TheAlly's Mix II` },
{ id: 'elldreths-vivid-mix.safetensors [342d9d26]', label: `Elldreth's Vivid Mix` },
{ id: 'deliberate_v2.safetensors [10ec4b29]', label: 'Deliberate V2', priority: 5 },
{ id: 'openjourney_V4.ckpt [ca2f377f]', label: 'Openjourney v4' },
{ id: 'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]', label: 'Dreamlike Diffusion' },
{ id: 'dreamlike-diffusion-2.0.safetensors [fdcf65e7]', label: 'Dreamlike Diffusion 2' },
{ id: 'portrait+1.0.safetensors [1400e684]', label: 'Portrait' },
{ id: 'riffusion-model-v1.ckpt [3aafa6fe]', label: 'Riffusion' },
{ id: 'timeless-1.0.ckpt [7c4971d4]', label: 'Timeless' },
{ id: 'dreamshaper_5BakedVae.safetensors [a3fbf318]', label: 'Dreamshaper 5' },
{ id: 'revAnimated_v122.safetensors [3f4fefd9]', label: 'ReV Animated V1.2.2' },
{ id: 'meinamix_meinaV9.safetensors [2ec66ab0]', label: 'MeinaMix Meina V9' },
],
};
// sort by priority
HARDCODED_MODELS.models.sort((a, b) => (b.priority ?? 0) - (a.priority ?? 0));
export default async function handler(req: NextRequest): Promise<NextResponse> {
try {
// this is ignored for now, as there's not an API - but still we want to be able to use it in the future
// noinspection JSUnusedLocalSymbols
const { apiKey = '' } = (await req.json()) as Prodia.API.Models.RequestBody;
return new NextResponse(JSON.stringify(HARDCODED_MODELS));
} catch (error: any) {
console.error('Handler failed:', error);
return new NextResponse(`[Issue] ${error}`, { status: 400 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
-50
View File
@@ -1,50 +0,0 @@
// noinspection ExceptionCaughtLocallyJS
import { NextRequest, NextResponse } from 'next/server';
import { PasteGG } from '@/modules/pastegg/pastegg.types';
import { pasteGgPost } from '@/modules/pastegg/pastegg.server';
/**
* 'Proxy' that uploads a file to paste.gg.
* Called by the UI to avoid CORS issues, as the browser cannot post directly to paste.gg.
*/
export default async function handler(req: NextRequest) {
try {
const { to, title, fileContent, fileName, origin }: PasteGG.API.Publish.RequestBody = await req.json();
if (req.method !== 'POST' || to !== 'paste.gg' || !title || !fileContent || !fileName)
throw new Error('Invalid options');
const paste = await pasteGgPost(title, fileName, fileContent, origin);
console.log(`Posted to paste.gg`, paste);
if (paste?.status !== 'success')
throw new Error(`${paste?.error || 'Unknown error'}: ${paste?.message || 'Paste.gg Error'}`);
return new NextResponse(JSON.stringify({
type: 'success',
url: `https://paste.gg/${paste.result.id}`,
expires: paste.result.expires || 'never',
deletionKey: paste.result.deletion_key || 'none',
created: paste.result.created_at,
} satisfies PasteGG.API.Publish.Response));
} catch (error) {
console.error('Error posting to paste.gg', error);
return new NextResponse(JSON.stringify({
type: 'error',
error: error?.toString() || 'Network issue',
} satisfies PasteGG.API.Publish.Response), { status: 500 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
-47
View File
@@ -1,47 +0,0 @@
import { NextRequest, NextResponse } from 'next/server';
import { Search } from '@/modules/search/search.types';
import { objectToQueryString } from '@/modules/search/search.client';
export default async function handler(req: NextRequest): Promise<NextResponse> {
const { searchParams } = new URL(req.url);
const customSearchParams: Search.Wire.RequestParams = {
q: searchParams.get('query') || '',
cx: searchParams.get('cx') || process.env.GOOGLE_CSE_ID,
key: searchParams.get('key') || process.env.GOOGLE_CLOUD_API_KEY,
num: 5,
};
try {
if (!customSearchParams.key || !customSearchParams.cx) {
// noinspection ExceptionCaughtLocallyJS
throw new Error('Missing API Key or Custom Search Engine ID');
}
const wireResponse = await fetch(`https://www.googleapis.com/customsearch/v1?${objectToQueryString(customSearchParams)}`);
const data: Search.Wire.SearchResponse & { error?: { message?: string } } = await wireResponse.json();
if (data.error) {
// noinspection ExceptionCaughtLocallyJS
throw new Error(`Google Custom Search API error: ${data.error?.message}`);
}
const apiResponse: Search.API.Response = data.items?.map((result): Search.API.BriefResult => ({
title: result.title,
link: result.link,
snippet: result.snippet,
})) || [];
return new NextResponse(JSON.stringify(apiResponse));
} catch (error: any) {
console.error('Handler failed:', error);
return new NextResponse(`A search error occurred: ${error}`, { status: 500 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
+14
View File
@@ -0,0 +1,14 @@
import * as React from 'react';
import { AppCall } from '../src/apps/call/AppCall';
import { AppLayout } from '~/common/layout/AppLayout';
export default function CallPage() {
return (
<AppLayout>
<AppCall />
</AppLayout>
);
}
+9 -44
View File
@@ -1,53 +1,18 @@
import * as React from 'react';
import { Container, useTheme } from '@mui/joy';
import { AppChat } from '../src/apps/chat/AppChat';
import { useShowNewsOnUpdate } from '../src/apps/news/news.hooks';
import { NoSSR } from '@/common/components/NoSSR';
import { isValidOpenAIApiKey } from '@/modules/openai/openai.client';
import { useSettingsStore } from '@/common/state/store-settings';
import { Chat } from '../src/apps/chat/Chat';
import { SettingsModal } from '../src/apps/settings/SettingsModal';
import { AppLayout } from '~/common/layout/AppLayout';
export default function Home() {
// state
const [settingsShown, setSettingsShown] = React.useState(false);
// external state
const theme = useTheme();
const apiKey = useSettingsStore(state => state.apiKey);
const centerMode = useSettingsStore(state => state.centerMode);
// show the Settings Dialog at startup if the API key is required but not set
React.useEffect(() => {
if (!process.env.HAS_SERVER_KEY_OPENAI && !isValidOpenAIApiKey(apiKey))
setSettingsShown(true);
}, [apiKey]);
export default function ChatPage() {
// show the News page on updates
useShowNewsOnUpdate();
return (
/**
* Note the global NoSSR wrapper
* - Even the overall container could have hydration issues when using localStorage and non-default maxWidth
*/
<NoSSR>
<Container maxWidth={centerMode === 'full' ? false : centerMode === 'narrow' ? 'md' : 'xl'} disableGutters sx={{
boxShadow: {
xs: 'none',
md: centerMode === 'narrow' ? theme.vars.shadow.md : 'none',
xl: centerMode !== 'full' ? theme.vars.shadow.lg : 'none',
},
}}>
<Chat onShowSettings={() => setSettingsShown(true)} />
<SettingsModal open={settingsShown} onClose={() => setSettingsShown(false)} />
</Container>
</NoSSR>
<AppLayout>
<AppChat />
</AppLayout>
);
}
+14
View File
@@ -0,0 +1,14 @@
import * as React from 'react';
import { AppLabs } from '../src/apps/labs/AppLabs';
import { AppLayout } from '~/common/layout/AppLayout';
export default function LabsPage() {
return (
<AppLayout suspendAutoModelsSetup>
<AppLabs />
</AppLayout>
);
}
+141
View File
@@ -0,0 +1,141 @@
import * as React from 'react';
import { useRouter } from 'next/router';
import { Alert, Box, Button, Typography } from '@mui/joy';
import ArrowBackIcon from '@mui/icons-material/ArrowBack';
import { setComposerStartupText } from '../src/apps/chat/components/composer/store-composer';
import { AppLayout } from '~/common/layout/AppLayout';
import { LogoProgress } from '~/common/components/LogoProgress';
import { asValidURL } from '~/common/util/urlUtils';
/**
* This page will be invoked on mobile when sharing Text/URLs/Files from other APPs
* See the /public/manifest.json for how this is configured. Parameters:
* - text: the text to share
* - url: the URL to share
* - if the URL is a valid URL, it will be downloaded and the content will be shared
* - if the URL is not a valid URL, it will be shared as text
* - title: the title of the shared content
*/
function AppShareTarget() {
// state
const [errorMessage, setErrorMessage] = React.useState<string | null>(null);
const [intentText, setIntentText] = React.useState<string | null>(null);
const [intentURL, setIntentURL] = React.useState<string | null>(null);
const [isDownloading, setIsDownloading] = React.useState(false);
// external state
const { query, push: routerPush, replace: routerReplace } = useRouter();
const queueComposerTextAndLaunchApp = React.useCallback((text: string) => {
setComposerStartupText(text);
void routerReplace('/');
}, [routerReplace]);
// Detect the share Intent from the query
React.useEffect(() => {
// skip when query is not parsed yet
if (!Object.keys(query).length)
return;
// single item from the query
let queryTextItem: string[] | string | null = query.url || query.text || null;
if (Array.isArray(queryTextItem))
queryTextItem = queryTextItem[0];
// check if the item is a URL
const url = asValidURL(queryTextItem);
if (url)
setIntentURL(url);
else if (queryTextItem)
setIntentText(queryTextItem);
else
setErrorMessage('No text or url. Received: ' + JSON.stringify(query));
}, [query.url, query.text, query]);
// Text -> Composer
React.useEffect(() => {
if (intentText)
queueComposerTextAndLaunchApp(intentText);
}, [intentText, queueComposerTextAndLaunchApp]);
// URL -> download -> Composer
React.useEffect(() => {
if (intentURL) {
setIsDownloading(true);
// TEMP: until the Browse module is ready, just use the URL, verbatim
queueComposerTextAndLaunchApp(intentURL);
setIsDownloading(false);
/*callBrowseFetchSinglePage(intentURL)
.then(pageContent => {
if (pageContent)
queueComposerTextAndLaunchApp('\n\n```' + intentURL + '\n' + pageContent + '\n```\n');
else
setErrorMessage('Could not read any data');
})
.catch(error => setErrorMessage(error?.message || error || 'Unknown error'))
.finally(() => setIsDownloading(false));*/
}
}, [intentURL, queueComposerTextAndLaunchApp]);
return (
<Box sx={{
backgroundColor: 'background.level2',
display: 'flex', flexDirection: 'column', alignItems: 'center', justifyContent: 'center',
flexGrow: 1,
}}>
{/* Logo with Circular Progress */}
<LogoProgress showProgress={isDownloading} />
{/* Title */}
<Typography level='title-lg' sx={{ mt: 2, mb: 1 }}>
{isDownloading ? 'Loading...' : errorMessage ? '' : intentURL ? 'Done' : 'Receiving...'}
</Typography>
{/* Possible Error */}
{errorMessage && <>
<Alert variant='soft' color='danger' sx={{ my: 1 }}>
<Typography>{errorMessage}</Typography>
</Alert>
<Button
variant='solid' color='danger'
onClick={() => routerPush('/')}
endDecorator={<ArrowBackIcon />}
sx={{ mt: 2 }}
>
Cancel
</Button>
</>}
{/* URL under analysis */}
<Typography level='body-xs'>
{intentURL}
</Typography>
</Box>
);
}
/**
* This page will be invoked on mobile when sharing Text/URLs/Files from other APPs
* Example URL: https://get.big-agi.com/launch?title=This+Title&text=https%3A%2F%2Fexample.com%2Fapp%2Fpath
*/
export default function LaunchPage() {
return (
<AppLayout>
<AppShareTarget />
</AppLayout>
);
}
+18
View File
@@ -0,0 +1,18 @@
import * as React from 'react';
import { useRouter } from 'next/router';
import { AppChatLink } from '../../../src/apps/link/AppChatLink';
import { AppLayout } from '~/common/layout/AppLayout';
export default function ChatLinkPage() {
const { query } = useRouter();
const chatLinkId = query?.chatLinkId as string ?? '';
return (
<AppLayout suspendAutoModelsSetup>
<AppChatLink linkId={chatLinkId} />
</AppLayout>
);
}
+18
View File
@@ -0,0 +1,18 @@
import * as React from 'react';
import { AppNews } from '../src/apps/news/AppNews';
import { useMarkNewsAsSeen } from '../src/apps/news/news.hooks';
import { AppLayout } from '~/common/layout/AppLayout';
export default function NewsPage() {
// update the last seen news version
useMarkNewsAsSeen();
return (
<AppLayout suspendAutoModelsSetup>
<AppNews />
</AppLayout>
);
}
+14
View File
@@ -0,0 +1,14 @@
import * as React from 'react';
import { AppPersonas } from '../src/apps/personas/AppPersonas';
import { AppLayout } from '~/common/layout/AppLayout';
export default function PersonasPage() {
return (
<AppLayout>
<AppPersonas />
</AppLayout>
);
}
+63
View File
@@ -0,0 +1,63 @@
// Prisma is the ORM for server-side (API) access to the database
//
// This file defines the schema for the database.
// - make sure to run 'prisma generate' after making changes to this file
// - make sure to run 'prisma db push' to sync the remote database with the schema
//
// Database is optional: when the environment variables are not set, the database is not used at all,
// and the storage of data in Big-AGI is limited to client-side (browser) storage.
//
// The database is used for:
// - the 'sharing' function, to let users share the chats with each other
generator client {
provider = "prisma-client-js"
}
datasource db {
provider = "postgresql"
url = env("POSTGRES_PRISMA_URL") // uses connection pooling
directUrl = env("POSTGRES_URL_NON_POOLING") // uses a direct connection
}
//
// Storage of Linked Data
//
model LinkStorage {
id String @id @default(uuid())
ownerId String
visibility LinkStorageVisibility
dataType LinkStorageDataType
dataTitle String?
dataSize Int
data Json
upVotes Int @default(0)
downVotes Int @default(0)
flagsCount Int @default(0)
readCount Int @default(0)
writeCount Int @default(1)
// time-based expiration
expiresAt DateTime?
// manual deletion
deletionKey String
isDeleted Boolean @default(false)
deletedAt DateTime?
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
}
enum LinkStorageVisibility {
PUBLIC
UNLISTED
PRIVATE
}
enum LinkStorageDataType {
CHAT_V1
}
+14 -4
View File
@@ -1,8 +1,8 @@
{
"name": "big-AGI",
"short_name": "AGI",
"theme_color": "#434356",
"background_color": "#B9B9C6",
"short_name": "big-AGI",
"theme_color": "#32383E",
"background_color": "#9FA6AD",
"description": "Personal AGI App",
"display": "standalone",
"start_url": "/",
@@ -23,5 +23,15 @@
"sizes": "1024x1024",
"type": "image/png"
}
]
],
"share_target": {
"action": "/launch",
"method": "GET",
"enctype": "application/x-www-form-urlencoded",
"params": {
"title": "title",
"text": "text",
"url": "url"
}
}
}
Binary file not shown.
Binary file not shown.
Binary file not shown.
+1 -1
View File
File diff suppressed because one or more lines are too long
+51
View File
@@ -0,0 +1,51 @@
import * as React from 'react';
import { useRouter } from 'next/router';
import { Container, Sheet } from '@mui/joy';
import { AppCallQueryParams } from '~/common/routes';
import { InlineError } from '~/common/components/InlineError';
import { CallUI } from './CallUI';
import { CallWizard } from './CallWizard';
export const APP_CALL_ENABLED = false;
export function AppCall() {
// external state
const { query } = useRouter();
// derived state
const { conversationId, personaId } = query as any as AppCallQueryParams;
const validInput = !!conversationId && !!personaId;
return (
<Sheet variant='solid' color='neutral' invertedColors sx={{
display: 'flex', flexDirection: 'column', justifyContent: 'center',
flexGrow: 1,
overflowY: 'auto',
minHeight: 96,
}}>
<Container maxWidth='sm' sx={{
display: 'flex', flexDirection: 'column',
alignItems: 'center',
minHeight: '80dvh', justifyContent: 'space-evenly',
gap: { xs: 2, md: 4 },
}}>
{!validInput && <InlineError error={`Something went wrong. ${JSON.stringify(query)}`} />}
{validInput && (
<CallWizard conversationId={conversationId}>
<CallUI conversationId={conversationId} personaId={personaId} />
</CallWizard>
)}
</Container>
</Sheet>
);
}
+392
View File
@@ -0,0 +1,392 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { useRouter } from 'next/router';
import { Box, Card, ListItemDecorator, MenuItem, Switch, Typography } from '@mui/joy';
import ArrowBackIcon from '@mui/icons-material/ArrowBack';
import CallEndIcon from '@mui/icons-material/CallEnd';
import CallIcon from '@mui/icons-material/Call';
import ChatOutlinedIcon from '@mui/icons-material/ChatOutlined';
import MicIcon from '@mui/icons-material/Mic';
import MicNoneIcon from '@mui/icons-material/MicNone';
import RecordVoiceOverIcon from '@mui/icons-material/RecordVoiceOver';
import { useChatLLMDropdown } from '../chat/components/applayout/useLLMDropdown';
import { EXPERIMENTAL_speakTextStream } from '~/modules/elevenlabs/elevenlabs.client';
import { SystemPurposeId, SystemPurposes } from '../../data';
import { VChatMessageIn } from '~/modules/llms/transports/chatGenerate';
import { streamChat } from '~/modules/llms/transports/streamChat';
import { useVoiceDropdown } from '~/modules/elevenlabs/useVoiceDropdown';
import { Link } from '~/common/components/Link';
import { SpeechResult, useSpeechRecognition } from '~/common/components/useSpeechRecognition';
import { conversationTitle, createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
import { playSoundUrl, usePlaySoundUrl } from '~/common/util/audioUtils';
import { useLayoutPluggable } from '~/common/layout/store-applayout';
import { CallAvatar } from './components/CallAvatar';
import { CallButton } from './components/CallButton';
import { CallMessage } from './components/CallMessage';
import { CallStatus } from './components/CallStatus';
function CallMenuItems(props: {
pushToTalk: boolean,
setPushToTalk: (pushToTalk: boolean) => void,
override: boolean,
setOverride: (overridePersonaVoice: boolean) => void,
}) {
// external state
const { voicesDropdown } = useVoiceDropdown(false, !props.override);
const handlePushToTalkToggle = () => props.setPushToTalk(!props.pushToTalk);
const handleChangeVoiceToggle = () => props.setOverride(!props.override);
return <>
<MenuItem onClick={handlePushToTalkToggle}>
<ListItemDecorator>{props.pushToTalk ? <MicNoneIcon /> : <MicIcon />}</ListItemDecorator>
Push to talk
<Switch checked={props.pushToTalk} onChange={handlePushToTalkToggle} sx={{ ml: 'auto' }} />
</MenuItem>
<MenuItem onClick={handleChangeVoiceToggle}>
<ListItemDecorator><RecordVoiceOverIcon /></ListItemDecorator>
Change Voice
<Switch checked={props.override} onChange={handleChangeVoiceToggle} sx={{ ml: 'auto' }} />
</MenuItem>
<MenuItem>
<ListItemDecorator>{' '}</ListItemDecorator>
{voicesDropdown}
</MenuItem>
<MenuItem component={Link} href='https://github.com/enricoros/big-agi/issues/175' target='_blank'>
<ListItemDecorator><ChatOutlinedIcon /></ListItemDecorator>
Voice Calls Feedback
</MenuItem>
</>;
}
export function CallUI(props: {
conversationId: string,
personaId: string,
}) {
// state
const [avatarClickCount, setAvatarClickCount] = React.useState<number>(0);// const [micMuted, setMicMuted] = React.useState(false);
const [callElapsedTime, setCallElapsedTime] = React.useState<string>('00:00');
const [callMessages, setCallMessages] = React.useState<DMessage[]>([]);
const [overridePersonaVoice, setOverridePersonaVoice] = React.useState<boolean>(false);
const [personaTextInterim, setPersonaTextInterim] = React.useState<string | null>(null);
const [pushToTalk, setPushToTalk] = React.useState(true);
const [stage, setStage] = React.useState<'ring' | 'declined' | 'connected' | 'ended'>('ring');
const responseAbortController = React.useRef<AbortController | null>(null);
// external state
const { push: routerPush } = useRouter();
const { chatLLMId, chatLLMDropdown } = useChatLLMDropdown();
const { chatTitle, messages } = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
return {
chatTitle: conversation ? conversationTitle(conversation) : 'no conversation',
messages: conversation ? conversation.messages : [],
};
}, shallow);
const persona = SystemPurposes[props.personaId as SystemPurposeId] ?? undefined;
const personaCallStarters = persona?.call?.starters ?? undefined;
const personaVoiceId = overridePersonaVoice ? undefined : (persona?.voices?.elevenLabs?.voiceId ?? undefined);
const personaSystemMessage = persona?.systemMessage ?? undefined;
// hooks and speech
const [speechInterim, setSpeechInterim] = React.useState<SpeechResult | null>(null);
const onSpeechResultCallback = React.useCallback((result: SpeechResult) => {
setSpeechInterim(result.done ? null : { ...result });
if (result.done) {
const transcribed = result.transcript.trim();
if (transcribed.length >= 1)
setCallMessages(messages => [...messages, createDMessage('user', transcribed)]);
}
}, []);
const { isSpeechEnabled, isRecording, isRecordingAudio, isRecordingSpeech, startRecording, stopRecording, toggleRecording } = useSpeechRecognition(onSpeechResultCallback, 1000);
// derived state
const isRinging = stage === 'ring';
const isConnected = stage === 'connected';
const isDeclined = stage === 'declined';
const isEnded = stage === 'ended';
/// Sounds
// pickup / hangup
React.useEffect(() => {
!isRinging && playSoundUrl(isConnected ? '/sounds/chat-begin.mp3' : '/sounds/chat-end.mp3');
}, [isRinging, isConnected]);
// ringtone
usePlaySoundUrl(isRinging ? '/sounds/chat-ringtone.mp3' : null, 300, 2800 * 2);
/// CONNECTED
const handleCallStop = () => {
stopRecording();
setStage('ended');
};
// [E] pickup -> seed message and call timer
// FIXME: Overriding the voice will reset the call - not a desired behavior
React.useEffect(() => {
if (!isConnected) return;
// show the call timer
setCallElapsedTime('00:00');
const start = Date.now();
const interval = setInterval(() => {
const elapsedSeconds = Math.floor((Date.now() - start) / 1000);
const minutes = Math.floor(elapsedSeconds / 60);
const seconds = elapsedSeconds % 60;
setCallElapsedTime(`${minutes < 10 ? '0' : ''}${minutes}:${seconds < 10 ? '0' : ''}${seconds}`);
}, 1000);
// seed the first message
const phoneMessages = personaCallStarters || ['Hello?', 'Hey!'];
const firstMessage = phoneMessages[Math.floor(Math.random() * phoneMessages.length)];
setCallMessages([createDMessage('assistant', firstMessage)]);
// fire/forget
void EXPERIMENTAL_speakTextStream(firstMessage, personaVoiceId);
return () => clearInterval(interval);
}, [isConnected, personaCallStarters, personaVoiceId]);
// [E] persona streaming response - upon new user message
React.useEffect(() => {
// only act when we have a new user message
if (!isConnected || callMessages.length < 1 || callMessages[callMessages.length - 1].role !== 'user')
return;
switch (callMessages[callMessages.length - 1].text) {
// do not respond
case 'Stop.':
return;
// command: close the call
case 'Goodbye.':
setStage('ended');
setTimeout(() => {
void routerPush('/');
}, 2000);
return;
// command: regenerate answer
case 'Retry.':
case 'Try again.':
setCallMessages(messages => messages.slice(0, messages.length - 2));
return;
// command: restart chat
case 'Restart.':
setCallMessages([]);
return;
}
// bail if no llm selected
if (!chatLLMId) return;
// temp fix: when the chat has no messages, only assume a single system message
const chatMessages: { role: VChatMessageIn['role'], text: string }[] = messages.length > 0
? messages
: personaSystemMessage
? [{ role: 'system', text: personaSystemMessage }]
: [];
// 'prompt' for a "telephone call"
// FIXME: can easily run ouf of tokens - if this gets traction, we'll fix it
const callPrompt: VChatMessageIn[] = [
{ role: 'system', content: 'You are having a phone call. Your response style is brief and to the point, and according to your personality, defined below.' },
...chatMessages.map(message => ({ role: message.role, content: message.text })),
{ role: 'system', content: 'You are now on the phone call related to the chat above. Respect your personality and answer with short, friendly and accurate thoughtful lines.' },
...callMessages.map(message => ({ role: message.role, content: message.text })),
];
// perform completion
responseAbortController.current = new AbortController();
let finalText = '';
let error: any | null = null;
streamChat(chatLLMId, callPrompt, responseAbortController.current.signal, (updatedMessage: Partial<DMessage>) => {
const text = updatedMessage.text?.trim();
if (text) {
finalText = text;
setPersonaTextInterim(text);
}
}).catch((err: DOMException) => {
if (err?.name !== 'AbortError')
error = err;
}).finally(() => {
setPersonaTextInterim(null);
setCallMessages(messages => [...messages, createDMessage('assistant', finalText + (error ? ` (ERROR: ${error.message || error.toString()})` : ''))]);
// fire/forget
void EXPERIMENTAL_speakTextStream(finalText, personaVoiceId);
});
return () => {
responseAbortController.current?.abort();
responseAbortController.current = null;
};
}, [isConnected, callMessages, chatLLMId, messages, personaVoiceId, personaSystemMessage, routerPush]);
// [E] Message interrupter
const abortTrigger = isConnected && isRecordingSpeech;
React.useEffect(() => {
if (abortTrigger && responseAbortController.current) {
responseAbortController.current.abort();
responseAbortController.current = null;
}
// TODO.. abort current speech
}, [abortTrigger]);
// [E] continuous speech recognition (reload)
const shouldStartRecording = isConnected && !pushToTalk && speechInterim === null && !isRecordingAudio;
React.useEffect(() => {
if (shouldStartRecording)
startRecording();
}, [shouldStartRecording, startRecording]);
// more derived state
const personaName = persona?.title ?? 'Unknown';
const isMicEnabled = isSpeechEnabled;
const isTTSEnabled = true;
const isEnabled = isMicEnabled && isTTSEnabled;
// pluggable UI
const menuItems = React.useMemo(() =>
<CallMenuItems
pushToTalk={pushToTalk} setPushToTalk={setPushToTalk}
override={overridePersonaVoice} setOverride={setOverridePersonaVoice} />
, [overridePersonaVoice, pushToTalk],
);
useLayoutPluggable(chatLLMDropdown, null, menuItems);
return <>
<Typography
level='h1'
sx={{
fontSize: { xs: '2.5rem', md: '3rem' },
textAlign: 'center',
mx: 2,
}}
>
{isConnected ? personaName : 'Hello'}
</Typography>
<CallAvatar
symbol={persona?.symbol || '?'}
imageUrl={persona?.imageUri}
isRinging={isRinging}
onClick={() => setAvatarClickCount(avatarClickCount + 1)}
/>
<CallStatus
callerName={isConnected ? undefined : personaName}
statusText={isRinging ? 'is calling you' : isDeclined ? 'call declined' : isEnded ? 'call ended' : callElapsedTime}
regardingText={chatTitle}
micError={!isMicEnabled} speakError={!isTTSEnabled}
/>
{/* Live Transcript, w/ streaming messages, audio indication, etc. */}
{(isConnected || isEnded) && (
<Card variant='soft' sx={{
flexGrow: 1,
minHeight: '15dvh', maxHeight: '24dvh',
overflow: 'auto',
width: '100%',
borderRadius: 'lg',
flexDirection: 'column-reverse',
}}>
{/* Messages in reverse order, for auto-scroll from the bottom */}
<Box sx={{ display: 'flex', flexDirection: 'column-reverse', gap: 1 }}>
{/* Listening... */}
{isRecording && (
<CallMessage
text={<>{speechInterim?.transcript ? speechInterim.transcript + ' ' : ''}<i>{speechInterim?.interimTranscript}</i></>}
variant={isRecordingSpeech ? 'solid' : 'outlined'}
role='user'
/>
)}
{/* Persona streaming text... */}
{!!personaTextInterim && (
<CallMessage
text={personaTextInterim}
variant='solid' color='neutral'
role='assistant'
/>
)}
{/* Messages (last 6 messages, in reverse order) */}
{callMessages.slice(-6).reverse().map((message) =>
<CallMessage
key={message.id}
text={message.text}
variant={message.role === 'assistant' ? 'solid' : 'soft'} color='neutral'
role={message.role} />,
)}
</Box>
</Card>
)}
{/* Call Buttons */}
<Box sx={{ width: '100%', display: 'flex', justifyContent: 'space-evenly' }}>
{/* [ringing] Decline / Accept */}
{isRinging && <CallButton Icon={CallEndIcon} text='Decline' color='danger' onClick={() => setStage('declined')} />}
{isRinging && isEnabled && <CallButton Icon={CallIcon} text='Accept' color='success' variant='soft' onClick={() => setStage('connected')} />}
{/* [Calling] Hang / PTT (mute not enabled yet) */}
{isConnected && <CallButton Icon={CallEndIcon} text='Hang up' color='danger' onClick={handleCallStop} />}
{isConnected && (pushToTalk
? <CallButton Icon={MicIcon} onClick={toggleRecording}
text={isRecordingSpeech ? 'Listening...' : isRecording ? 'Listening' : 'Push To Talk'}
variant={isRecordingSpeech ? 'solid' : isRecording ? 'soft' : 'outlined'} />
: null
// <CallButton disabled={true} Icon={MicOffIcon} onClick={() => setMicMuted(muted => !muted)}
// text={micMuted ? 'Muted' : 'Mute'}
// color={micMuted ? 'warning' : undefined} variant={micMuted ? 'solid' : 'outlined'} />
)}
{/* [ended] Back / Call Again */}
{(isEnded || isDeclined) && <Link noLinkStyle href='/'><CallButton Icon={ArrowBackIcon} text='Back' variant='soft' /></Link>}
{(isEnded || isDeclined) && <CallButton Icon={CallIcon} text='Call Again' color='success' variant='soft' onClick={() => setStage('connected')} />}
</Box>
{/* DEBUG state */}
{avatarClickCount > 10 && (avatarClickCount % 2 === 0) && (
<Card variant='outlined' sx={{ maxHeight: '25dvh', overflow: 'auto', whiteSpace: 'pre', py: 0, width: '100%' }}>
Special commands: Stop, Retry, Try Again, Restart, Goodbye.
{JSON.stringify({ isSpeechEnabled, isRecordingAudio, speechInterim }, null, 2)}
</Card>
)}
{/*{isEnded && <Card variant='solid' size='lg' color='primary'>*/}
{/* <CardContent>*/}
{/* <Typography>*/}
{/* Please rate the call quality, 1 to 5 - Just a Joke*/}
{/* </Typography>*/}
{/* </CardContent>*/}
{/*</Card>}*/}
</>;
}
+211
View File
@@ -0,0 +1,211 @@
import * as React from 'react';
import { keyframes } from '@emotion/react';
import { Box, Button, Card, CardContent, IconButton, ListItemDecorator, Typography } from '@mui/joy';
import ArrowForwardIcon from '@mui/icons-material/ArrowForward';
import ChatIcon from '@mui/icons-material/Chat';
import CheckIcon from '@mui/icons-material/Check';
import CloseIcon from '@mui/icons-material/Close';
import MicIcon from '@mui/icons-material/Mic';
import RecordVoiceOverIcon from '@mui/icons-material/RecordVoiceOver';
import WarningIcon from '@mui/icons-material/Warning';
import { navigateBack } from '~/common/routes';
import { openLayoutPreferences } from '~/common/layout/store-applayout';
import { useCapabilityBrowserSpeechRecognition, useCapabilityElevenLabs } from '~/common/components/useCapabilities';
import { useChatStore } from '~/common/state/store-chats';
import { useUICounter } from '~/common/state/store-ui';
// Looping 12-stop background-color cycle over muted hues (a "rainbow" sweep);
// applied below as a 15s linear infinite animation behind the "your first call" text.
const cssRainbowBackgroundKeyframes = keyframes`
 100%, 0% {
  background-color: rgb(128, 0, 0);
 }
 8% {
  background-color: rgb(102, 51, 0);
 }
 16% {
  background-color: rgb(64, 64, 0);
 }
 25% {
  background-color: rgb(38, 76, 0);
 }
 33% {
  background-color: rgb(0, 89, 0);
 }
 41% {
  background-color: rgb(0, 76, 41);
 }
 50% {
  background-color: rgb(0, 64, 64);
 }
 58% {
  background-color: rgb(0, 51, 102);
 }
 66% {
  background-color: rgb(0, 0, 128);
 }
 75% {
  background-color: rgb(63, 0, 128);
 }
 83% {
  background-color: rgb(76, 0, 76);
 }
 91% {
  background-color: rgb(102, 0, 51);
 }`;
function StatusCard(props: { icon: React.JSX.Element, hasIssue: boolean, text: string, button?: React.JSX.Element }) {
return (
<Card sx={{ width: '100%' }}>
<CardContent sx={{ flexDirection: 'row' }}>
<ListItemDecorator>
{props.icon}
</ListItemDecorator>
<Typography level='title-md' color={props.hasIssue ? 'warning' : undefined} sx={{ flexGrow: 1 }}>
{props.text}
{props.button}
</Typography>
<ListItemDecorator>
{props.hasIssue ? <WarningIcon color='warning' /> : <CheckIcon color='success' />}
</ListItemDecorator>
</CardContent>
</Card>
);
}
/**
 * One-time onboarding gate shown before the first call.
 *
 * Verifies three prerequisites: the chat has messages (ignorable), browser
 * speech recognition may work (ignorable), and ElevenLabs voice synthesis may
 * work (must be configured). Once the wizard has been seen (`!novel`) and the
 * non-ignorable checks pass, it renders `props.children` (the call UI) directly.
 */
export function CallWizard(props: { strict?: boolean, conversationId: string, children: React.ReactNode }) {

  // state: per-session user acknowledgements ("Ignore") for the two soft checks
  const [chatEmptyOverride, setChatEmptyOverride] = React.useState(false);
  const [recognitionOverride, setRecognitionOverride] = React.useState(false);

  // external state
  const recognition = useCapabilityBrowserSpeechRecognition();
  const synthesis = useCapabilityElevenLabs();
  const chatIsEmpty = useChatStore(state => {
    const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
    return !(conversation?.messages?.length);
  });
  // novel: true until touch() has been called once for the 'call-wizard' counter
  const { novel, touch } = useUICounter('call-wizard');

  // derived state
  const overriddenEmptyChat = chatEmptyOverride || !chatIsEmpty;
  const overriddenRecognition = recognitionOverride || recognition.mayWork;
  const allGood = overriddenEmptyChat && overriddenRecognition && synthesis.mayWork;
  const fatalGood = overriddenRecognition && synthesis.mayWork;

  // returning-user fast path: skip the wizard entirely
  // (placed after all hooks above, so the hook order stays stable across renders)
  if (!novel && fatalGood)
    return props.children;

  const handleOverrideChatEmpty = () => setChatEmptyOverride(true);

  const handleOverrideRecognition = () => setRecognitionOverride(true);

  const handleConfigureElevenLabs = () => {
    // NOTE(review): opens the preferences panel at tab index 3 - presumably the voice/ElevenLabs tab; confirm
    openLayoutPreferences(3);
  };

  // bottom action button: with outstanding issues it backs out of the call;
  // otherwise it marks the wizard as seen, so the next render shows the children
  const handleFinishButton = () => {
    if (!allGood)
      return navigateBack();
    touch();
  };

  return <>

    <Box sx={{ flexGrow: 0.5 }} />

    <Typography level='title-lg' sx={{ fontSize: '3rem', fontWeight: 200, lineHeight: '1.5em', textAlign: 'center' }}>
      Welcome to<br />
      <Typography
        component='span'
        sx={{
          backgroundColor: 'primary.solidActiveBg', mx: -0.5, px: 0.5,
          animation: `${cssRainbowBackgroundKeyframes} 15s linear infinite`,
        }}>
        your first call
      </Typography>
    </Typography>

    <Box sx={{ flexGrow: 0.5 }} />

    <Typography level='body-lg'>
      {/*Before you receive your first call, */}
      Let&apos;s get you all set up.
    </Typography>

    {/* Chat Empty status */}
    <StatusCard
      icon={<ChatIcon />}
      hasIssue={!overriddenEmptyChat}
      text={overriddenEmptyChat ? 'Great! Your chat has messages.' : 'The chat is empty. Calls are effective when the caller has context.'}
      button={overriddenEmptyChat ? undefined : (
        <Button variant='outlined' onClick={handleOverrideChatEmpty} sx={{ mx: 1 }}>
          Ignore
        </Button>
      )}
    />

    {/* Add the speech to text feature status */}
    <StatusCard
      icon={<MicIcon />}
      text={
        ((overriddenRecognition && !recognition.warnings.length) ? 'Speech recognition should be good to go.' : 'There might be a speech recognition issue.')
        + (recognition.isApiAvailable ? '' : ' Your browser does not support the speech recognition API.')
        + (recognition.isDeviceNotSupported ? ' Your device does not provide this feature.' : '')
        + (recognition.warnings.length ? ' ⚠️ ' + recognition.warnings.join(' · ') : '')
      }
      button={overriddenRecognition ? undefined : (
        <Button variant='outlined' onClick={handleOverrideRecognition} sx={{ mx: 1 }}>
          Ignore
        </Button>
      )}
      hasIssue={!overriddenRecognition}
    />

    {/* Text to Speech status */}
    <StatusCard
      icon={<RecordVoiceOverIcon />}
      text={
        (synthesis.mayWork ? 'Voice synthesis should be ready.' : 'There might be an issue with ElevenLabs voice synthesis.')
        + (synthesis.isConfiguredServerSide ? '' : (synthesis.isConfiguredClientSide ? '' : ' Please add your API key in the settings.'))
      }
      button={synthesis.mayWork ? undefined : (
        <Button variant='outlined' onClick={handleConfigureElevenLabs} sx={{ mx: 1 }}>
          Configure
        </Button>
      )}
      hasIssue={!synthesis.mayWork}
    />

    {/*<Typography>*/}
    {/*  1. To start a call, click the "Accept" button when you receive an incoming call.*/}
    {/*  2. If your mic is enabled, you'll see a "Push to Talk" button. Press and hold it to speak, then release it to stop speaking.*/}
    {/*  3. If your mic is disabled, you can still type your messages in the chat and the assistant will respond.*/}
    {/*  4. During the call, you can control the voice synthesis settings from the menu in the top right corner.*/}
    {/*  5. To end the call, click the "Hang up" button.*/}
    {/*</Typography>*/}

    <Box sx={{ flexGrow: 2 }} />

    {/* bottom: text & button */}
    <Box sx={{ display: 'flex', justifyContent: 'space-around', alignItems: 'center', width: '100%', gap: 2, px: 0.5 }}>
      <Typography level='body-lg'>
        {allGood ? 'Ready, Set, Call' : 'Please resolve the issues above before proceeding with the call'}
      </Typography>
      <IconButton
        size='lg' variant={allGood ? 'soft' : 'solid'} color={allGood ? 'success' : 'danger'}
        onClick={handleFinishButton} sx={{ borderRadius: '50px' }}
      >
        {allGood ? <ArrowForwardIcon sx={{ fontSize: '1.5em' }} /> : <CloseIcon sx={{ fontSize: '1.5em' }} />}
      </IconButton>
    </Box>

    <Box sx={{ flexGrow: 0.5 }} />

  </>;
}
+48
View File
@@ -0,0 +1,48 @@
import * as React from 'react';
import { keyframes } from '@emotion/react';
import { Avatar, Box } from '@mui/joy';
// Gentle 1x -> 1.2x -> 1x pulse; applied to the fallback persona symbol while ringing.
const cssScaleKeyframes = keyframes`
 0% {
  transform: scale(1);
 }
 50% {
  transform: scale(1.2);
 }
 100% {
  transform: scale(1);
 }`;
/**
 * Large circular avatar for the call screen: shows the persona image when
 * available, otherwise a big symbol glyph that pulses while the call rings.
 */
export function CallAvatar(props: { symbol: string, imageUrl?: string, isRinging: boolean, onClick: () => void }) {
  const { symbol, imageUrl, isRinging, onClick } = props;

  // pulse the fallback symbol only while ringing
  const symbolSx = isRinging
    ? { animation: `${cssScaleKeyframes} 1.4s ease-in-out infinite` }
    : {};

  return (
    <Avatar
      variant='soft' color='neutral'
      onClick={onClick}
      src={imageUrl}
      sx={{
        '--Avatar-size': { xs: '160px', md: '200px' },
        '--variant-borderWidth': '4px',
        boxShadow: !imageUrl ? 'md' : null,
        fontSize: { xs: '100px', md: '120px' },
      }}
    >
      {/* fallback: render the large Persona Symbol when there is no image */}
      {!imageUrl && (
        <Box sx={symbolSx}>
          {symbol}
        </Box>
      )}
    </Avatar>
  );
}
+43
View File
@@ -0,0 +1,43 @@
import * as React from 'react';
import { Box, ColorPaletteProp, IconButton, Typography, VariantProp } from '@mui/joy';
/**
 * Large round call-control button: an icon stacked above a text label, e.g.
 *   --------
 *   |  🎤  |
 *   | Mute |
 *   --------
 */
export function CallButton(props: {
  Icon: React.FC, text: string,
  variant?: VariantProp, color?: ColorPaletteProp, disabled?: boolean,
  onClick?: () => void,
}) {
  const { Icon, text, variant, color, disabled, onClick } = props;

  // the whole column (icon + label) is the click target; ignore clicks while disabled
  const handleClick = () => {
    if (!disabled)
      onClick?.();
  };

  return (
    <Box
      onClick={handleClick}
      sx={{
        display: 'flex', flexDirection: 'column', alignItems: 'center',
        gap: { xs: 1, md: 2 },
      }}
    >
      <IconButton
        disabled={disabled} variant={variant || 'solid'} color={color}
        sx={{
          '--IconButton-size': { xs: '4.2rem', md: '5rem' },
          borderRadius: '50%',
        }}
      >
        <Icon />
      </IconButton>
      <Typography level='title-md' variant={disabled ? 'soft' : undefined}>
        {text}
      </Typography>
    </Box>
  );
}
+33
View File
@@ -0,0 +1,33 @@
import * as React from 'react';
import { Chip, ColorPaletteProp, VariantProp } from '@mui/joy';
import { SxProps } from '@mui/system';
import { VChatMessageIn } from '~/modules/llms/transports/chatGenerate';
/**
 * One transcript bubble in the call view. User messages align right ('end'),
 * assistant/system messages align left ('start').
 */
export function CallMessage(props: {
  text?: string | React.JSX.Element,
  variant?: VariantProp, color?: ColorPaletteProp,
  role: VChatMessageIn['role'],
  sx?: SxProps,
}) {
  const { text, variant, color, role, sx } = props;
  const fromUser = role === 'user';

  return (
    <Chip
      color={color} variant={variant}
      sx={{
        alignSelf: fromUser ? 'end' : 'start',
        whiteSpace: 'break-spaces',
        borderRadius: 'lg',
        mt: 'auto',
        py: 1,
        ...(sx || {}),
      }}
    >
      {text}
    </Chip>
  );
}
+47
View File
@@ -0,0 +1,47 @@
import * as React from 'react';
import { Box, Typography } from '@mui/joy';
import { InlineError } from '~/common/components/InlineError';
/**
 * Textual header for the call, such as:
 *
 *   $Name
 *   "Connecting..." or "Call ended",
 *   re: $Regarding
 *
 * Also surfaces inline errors when the mic or the voice synthesis are unusable.
 */
export function CallStatus(props: {
  callerName?: string,
  statusText: string,
  regardingText?: string,
  micError: boolean, speakError: boolean,
  // llmComponent?: React.JSX.Element,
}) {
  const { callerName, statusText, regardingText, micError, speakError } = props;

  return (
    <Box sx={{ display: 'flex', flexDirection: 'column' }}>

      {!!callerName && (
        <Typography level='h3' sx={{ textAlign: 'center' }}>
          <b>{callerName}</b>
        </Typography>
      )}

      <Typography level='body-md' sx={{ textAlign: 'center' }}>
        {statusText}
      </Typography>

      {!!regardingText && (
        <Typography level='body-md' sx={{ textAlign: 'center', mt: 0 }}>
          re: {regardingText}
        </Typography>
      )}

      {micError && (
        <InlineError
          severity='danger' error='But this browser does not support speech recognition... 🤦‍♀️ - Try Chrome on Windows?' />
      )}

      {speakError && (
        <InlineError
          severity='danger' error='And text-to-speech is not configured... 🤦‍♀️ - Configure it in Settings?' />
      )}

    </Box>
  );
}
+272
View File
@@ -0,0 +1,272 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { CmdRunProdia } from '~/modules/prodia/prodia.client';
import { CmdRunReact } from '~/modules/aifn/react/react';
import { FlattenerModal } from '~/modules/aifn/flatten/FlattenerModal';
import { imaginePromptFromText } from '~/modules/aifn/imagine/imaginePromptFromText';
import { useModelsStore } from '~/modules/llms/store-llms';
import { ConfirmationModal } from '~/common/components/ConfirmationModal';
import { createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
import { useGlobalShortcut } from '~/common/components/useGlobalShortcut';
import { useLayoutPluggable } from '~/common/layout/store-applayout';
import { ChatDrawerItems } from './components/applayout/ChatDrawerItems';
import { ChatDropdowns } from './components/applayout/ChatDropdowns';
import { ChatMenuItems } from './components/applayout/ChatMenuItems';
import { ChatMessageList } from './components/ChatMessageList';
import { ChatModeId } from './components/composer/store-composer';
import { CmdAddRoleMessage, extractCommands } from './commands';
import { Composer } from './components/composer/Composer';
import { Ephemerals } from './components/Ephemerals';
import { TradeConfig, TradeModal } from './trade/TradeModal';
import { runAssistantUpdatingState } from './editors/chat-stream';
import { runImageGenerationUpdatingState } from './editors/image-generate';
import { runReActUpdatingState } from './editors/react-tangent';
const SPECIAL_ID_ALL_CHATS = 'all-chats';
/**
 * Main chat application: renders the message list, ephemerals, and composer,
 * wires all per-conversation actions (execute, regenerate, clear, delete,
 * import/export, flatten), and plugs the app-bar dropdowns/drawer/menu.
 */
export function AppChat() {

  // state: modal/selection flags; the *Id states double as "modal open" markers
  const [isMessageSelectionMode, setIsMessageSelectionMode] = React.useState(false);
  const [tradeConfig, setTradeConfig] = React.useState<TradeConfig | null>(null);
  const [clearConfirmationId, setClearConfirmationId] = React.useState<string | null>(null);
  const [deleteConfirmationId, setDeleteConfirmationId] = React.useState<string | null>(null);
  const [flattenConversationId, setFlattenConversationId] = React.useState<string | null>(null);

  // external state: shallow-compared selection over the chat store
  const { activeConversationId, isConversationEmpty, hasAnyContent, duplicateConversation, deleteAllConversations, setMessages, systemPurposeId, setAutoTitle } = useChatStore(state => {
    const conversation = state.conversations.find(conversation => conversation.id === state.activeConversationId);
    const isConversationEmpty = conversation ? !conversation.messages.length : true;
    // "any content" = more than one conversation, or the active one has messages
    const hasAnyContent = state.conversations.length > 1 || !isConversationEmpty;
    return {
      activeConversationId: state.activeConversationId,
      isConversationEmpty,
      hasAnyContent,
      duplicateConversation: state.duplicateConversation,
      deleteAllConversations: state.deleteAllConversations,
      setMessages: state.setMessages,
      systemPurposeId: conversation?.systemPurposeId ?? null,
      setAutoTitle: state.setAutoTitle,
    };
  }, shallow);

  /**
   * Core dispatcher: persists the history and runs the requested chat mode.
   * A trailing "/command <prompt>" user message overrides the chat mode
   * (image generation, ReAct, or rewriting the message's role). Falls back
   * to just syncing the history when nothing can be executed.
   */
  const handleExecuteConversation = async (chatModeId: ChatModeId, conversationId: string, history: DMessage[]) => {
    const { chatLLMId } = useModelsStore.getState();
    if (!chatModeId || !conversationId || !chatLLMId) return;

    // "/command ...": overrides the chat mode
    const lastMessage = history.length > 0 ? history[history.length - 1] : null;
    if (lastMessage?.role === 'user') {
      const pieces = extractCommands(lastMessage.text);
      if (pieces.length == 2 && pieces[0].type === 'cmd' && pieces[1].type === 'text') {
        const [command, prompt] = [pieces[0].value, pieces[1].value];
        if (CmdRunProdia.includes(command)) {
          setMessages(conversationId, history);
          return await runImageGenerationUpdatingState(conversationId, prompt);
        }
        if (CmdRunReact.includes(command) && chatLLMId) {
          setMessages(conversationId, history);
          return await runReActUpdatingState(conversationId, prompt, chatLLMId);
        }
        if (CmdAddRoleMessage.includes(command)) {
          // NOTE: mutates the last history message in place before persisting it
          lastMessage.role = command.startsWith('/s') ? 'system' : command.startsWith('/a') ? 'assistant' : 'user';
          lastMessage.sender = 'Bot';
          lastMessage.text = prompt;
          return setMessages(conversationId, history);
        }
      }
    }

    // synchronous long-duration tasks, which update the state as they go
    if (chatLLMId && systemPurposeId) {
      switch (chatModeId) {
        case 'immediate':
        case 'immediate-follow-up':
          return await runAssistantUpdatingState(conversationId, history, chatLLMId, systemPurposeId, true, chatModeId === 'immediate-follow-up');
        case 'write-user':
          // persist the user message without invoking the assistant
          return setMessages(conversationId, history);
        case 'react':
          if (!lastMessage?.text)
            break; // no prompt to react to - fall through to history sync below
          setMessages(conversationId, history);
          return await runReActUpdatingState(conversationId, lastMessage.text, chatLLMId);
        case 'draw-imagine':
        case 'draw-imagine-plus':
          if (!lastMessage?.text)
            break;
          // '-plus' first rewrites the text into an image prompt via the LLM
          const imagePrompt = chatModeId == 'draw-imagine-plus'
            ? await imaginePromptFromText(lastMessage.text) || 'An error sign.'
            : lastMessage.text;
          // rewrite the last message into the "/draw <prompt>" form before generating
          setMessages(conversationId, history.map(message => message.id !== lastMessage.id ? message : {
            ...message,
            text: `${CmdRunProdia[0]} ${imagePrompt}`,
          }));
          return await runImageGenerationUpdatingState(conversationId, imagePrompt);
      }
    }

    // ISSUE: if we're here, it means we couldn't do the job, at least sync the history
    console.log('handleExecuteConversation: issue running', chatModeId, conversationId, lastMessage);
    setMessages(conversationId, history);
  };

  // look up a conversation by id in the store (null when not found or id is falsy)
  const _findConversation = (conversationId: string) =>
    conversationId ? useChatStore.getState().conversations.find(c => c.id === conversationId) ?? null : null;

  const handleExecuteChatHistory = async (conversationId: string, history: DMessage[]) =>
    await handleExecuteConversation('immediate', conversationId, history);

  const handleImagineFromText = async (conversationId: string, messageText: string) => {
    const conversation = _findConversation(conversationId);
    if (conversation)
      return await handleExecuteConversation('draw-imagine-plus', conversationId, [...conversation.messages, createDMessage('user', messageText)]);
  };

  const handleComposerNewMessage = async (chatModeId: ChatModeId, conversationId: string, userText: string) => {
    const conversation = _findConversation(conversationId);
    if (conversation)
      return await handleExecuteConversation(chatModeId, conversationId, [...conversation.messages, createDMessage('user', userText)]);
  };

  // re-run the assistant after dropping the trailing assistant message (no-op otherwise)
  const handleRegenerateAssistant = async () => {
    const conversation = activeConversationId ? _findConversation(activeConversationId) : null;
    if (conversation?.messages?.length) {
      const lastMessage = conversation.messages[conversation.messages.length - 1];
      if (lastMessage.role === 'assistant') {
        const newMessages = [...conversation.messages];
        newMessages.pop();
        return await handleExecuteConversation('immediate', conversation.id, newMessages);
      }
    }
  };

  // 'r' with both modifier flags set (presumably Ctrl+Shift+R - confirm against useGlobalShortcut) regenerates
  useGlobalShortcut('r', true, true, handleRegenerateAssistant);

  const handleClearConversation = (conversationId: string) => setClearConfirmationId(conversationId);

  const handleConfirmedClearConversation = () => {
    if (clearConfirmationId) {
      setMessages(clearConfirmationId, []);
      // also reset the auto-generated conversation title
      setAutoTitle(clearConfirmationId, '');
      setClearConfirmationId(null);
    }
  };

  const handleDeleteAllConversations = () => setDeleteConfirmationId(SPECIAL_ID_ALL_CHATS);

  // only the delete-all path is implemented; single-conversation delete is stubbed out
  const handleConfirmedDeleteConversation = () => {
    if (deleteConfirmationId) {
      if (deleteConfirmationId === SPECIAL_ID_ALL_CHATS) {
        deleteAllConversations();
      }// else
      //  deleteConversation(deleteConfirmationId);
      setDeleteConfirmationId(null);
    }
  };

  const handleImportConversation = () => setTradeConfig({ dir: 'import' });

  const handleExportConversation = (conversationId: string | null) => setTradeConfig({ dir: 'export', conversationId });

  const handleFlattenConversation = (conversationId: string) => setFlattenConversationId(conversationId);

  // Pluggable ApplicationBar components (memoized so the layout store isn't re-fed every render)
  const centerItems = React.useMemo(() =>
      <ChatDropdowns conversationId={activeConversationId} />,
    [activeConversationId],
  );

  const drawerItems = React.useMemo(() =>
      <ChatDrawerItems
        conversationId={activeConversationId}
        onImportConversation={handleImportConversation}
        onDeleteAllConversations={handleDeleteAllConversations}
      />,
    [activeConversationId],
  );

  const menuItems = React.useMemo(() =>
      <ChatMenuItems
        conversationId={activeConversationId} isConversationEmpty={isConversationEmpty} hasConversations={hasAnyContent}
        isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
        onClearConversation={handleClearConversation}
        onDuplicateConversation={duplicateConversation}
        onExportConversation={handleExportConversation}
        onFlattenConversation={handleFlattenConversation}
      />,
    [activeConversationId, duplicateConversation, hasAnyContent, isConversationEmpty, isMessageSelectionMode],
  );

  useLayoutPluggable(centerItems, drawerItems, menuItems);

  return <>

    <ChatMessageList
      conversationId={activeConversationId}
      isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
      onExecuteChatHistory={handleExecuteChatHistory}
      onImagineFromText={handleImagineFromText}
      sx={{
        flexGrow: 1,
        backgroundColor: 'background.level1',
        overflowY: 'auto', // overflowY: 'hidden'
        minHeight: 96,
      }} />

    <Ephemerals
      conversationId={activeConversationId}
      sx={{
        // flexGrow: 0.1,
        flexShrink: 0.5,
        overflowY: 'auto',
        minHeight: 64,
      }} />

    <Composer
      conversationId={activeConversationId} messageId={null}
      isDeveloperMode={systemPurposeId === 'Developer'}
      onNewMessage={handleComposerNewMessage}
      sx={{
        zIndex: 21, // position: 'sticky', bottom: 0,
        backgroundColor: 'background.surface',
        borderTop: `1px solid`,
        borderTopColor: 'divider',
        p: { xs: 1, md: 2 },
      }} />

    {/* Import / Export */}
    {!!tradeConfig && <TradeModal config={tradeConfig} onClose={() => setTradeConfig(null)} />}

    {/* Flatten */}
    {!!flattenConversationId && <FlattenerModal conversationId={flattenConversationId} onClose={() => setFlattenConversationId(null)} />}

    {/* [confirmation] Reset Conversation */}
    {!!clearConfirmationId && <ConfirmationModal
      open onClose={() => setClearConfirmationId(null)} onPositive={handleConfirmedClearConversation}
      confirmationText={'Are you sure you want to discard all the messages?'} positiveActionText={'Clear conversation'}
    />}

    {/* [confirmation] Delete All */}
    {!!deleteConfirmationId && <ConfirmationModal
      open onClose={() => setDeleteConfirmationId(null)} onPositive={handleConfirmedDeleteConversation}
      confirmationText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
        ? 'Are you absolutely sure you want to delete ALL conversations? This action cannot be undone.'
        : 'Are you sure you want to delete this conversation?'}
      positiveActionText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
        ? 'Yes, delete all'
        : 'Delete conversation'}
    />}

  </>;
}
-193
View File
@@ -1,193 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, useTheme } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import { CmdRunProdia } from '@/modules/prodia/prodia.client';
import { CmdRunReact } from '@/modules/search/search.client';
import { PasteGG } from '@/modules/pastegg/pastegg.types';
import { PublishedModal } from '@/modules/pastegg/PublishedModal';
import { callPublish } from '@/modules/pastegg/pastegg.client';
import { ConfirmationModal } from '@/common/components/ConfirmationModal';
import { Link } from '@/common/components/Link';
import { conversationToMarkdown } from '@/common/util/conversationToMarkdown';
import { createDMessage, DMessage, useChatStore } from '@/common/state/store-chats';
import { extractCommands } from '@/common/util/extractCommands';
import { useComposerStore } from '@/common/state/store-composer';
import { useSettingsStore } from '@/common/state/store-settings';
import { ApplicationBar } from './components/appbar/ApplicationBar';
import { ChatMessageList } from './components/ChatMessageList';
import { Composer } from './components/composer/Composer';
import { Ephemerals } from './components/ephemerals/Ephemerals';
import { imaginePromptFromText } from './util/ai-functions';
import { runAssistantUpdatingState } from './util/agi-immediate';
import { runImageGenerationUpdatingState } from './util/imagine';
import { runReActUpdatingState } from './util/agi-react';
export function Chat(props: { onShowSettings: () => void, sx?: SxProps }) {
// state
const [isMessageSelectionMode, setIsMessageSelectionMode] = React.useState(false);
const [publishConversationId, setPublishConversationId] = React.useState<string | null>(null);
const [publishResponse, setPublishResponse] = React.useState<PasteGG.API.Publish.Response | null>(null);
// external state
const theme = useTheme();
const { sendModeId } = useComposerStore(state => ({ sendModeId: state.sendModeId }), shallow);
const { activeConversationId, setMessages, chatModelId, systemPurposeId } = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === state.activeConversationId);
return {
activeConversationId: state.activeConversationId,
setMessages: state.setMessages,
chatModelId: conversation?.chatModelId ?? null,
systemPurposeId: conversation?.systemPurposeId ?? null,
};
}, shallow);
const handleExecuteConversation = async (conversationId: string, history: DMessage[]) => {
if (!conversationId) return;
// Command - last user message is a cmd
const lastMessage = history.length > 0 ? history[history.length - 1] : null;
if (lastMessage?.role === 'user') {
const pieces = extractCommands(lastMessage.text);
if (pieces.length == 2 && pieces[0].type === 'cmd' && pieces[1].type === 'text') {
const command = pieces[0].value;
const prompt = pieces[1].value;
if (CmdRunProdia.includes(command)) {
setMessages(conversationId, history);
return await runImageGenerationUpdatingState(conversationId, prompt);
}
if (CmdRunReact.includes(command) && chatModelId) {
setMessages(conversationId, history);
return await runReActUpdatingState(conversationId, prompt, chatModelId);
}
// if (CmdRunSearch.includes(command))
// return await run...
}
}
// synchronous long-duration tasks, which update the state as they go
if (sendModeId && chatModelId && systemPurposeId) {
switch (sendModeId) {
case 'immediate':
return await runAssistantUpdatingState(conversationId, history, chatModelId, systemPurposeId);
case 'react':
if (lastMessage?.text) {
setMessages(conversationId, history);
return await runReActUpdatingState(conversationId, lastMessage.text, chatModelId);
}
}
}
// ISSUE: if we're here, it means we couldn't do the job, at least sync the history
setMessages(conversationId, history);
};
const _findConversation = (conversationId: string) =>
conversationId ? useChatStore.getState().conversations.find(c => c.id === conversationId) ?? null : null;
const handleSendUserMessage = async (conversationId: string, userText: string) => {
const conversation = _findConversation(conversationId);
if (conversation)
return await handleExecuteConversation(conversationId, [...conversation.messages, createDMessage('user', userText)]);
};
const handleImagineFromText = async (conversationId: string, messageText: string) => {
const conversation = _findConversation(conversationId);
if (conversation && chatModelId) {
const prompt = await imaginePromptFromText(messageText, chatModelId);
if (prompt)
return await handleExecuteConversation(conversationId, [...conversation.messages, createDMessage('user', `${CmdRunProdia[0]} ${prompt}`)]);
}
};
const handlePublishConversation = (conversationId: string) => setPublishConversationId(conversationId);
const handleConfirmedPublishConversation = async () => {
if (publishConversationId) {
const conversation = _findConversation(publishConversationId);
setPublishConversationId(null);
if (conversation) {
const markdownContent = conversationToMarkdown(conversation, !useSettingsStore.getState().showSystemMessages);
const publishResponse = await callPublish('paste.gg', markdownContent);
setPublishResponse(publishResponse);
}
}
};
return (
<Box
sx={{
display: 'flex', flexDirection: 'column', height: '100vh',
...(props.sx || {}),
}}>
<ApplicationBar
conversationId={activeConversationId}
isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
onPublishConversation={handlePublishConversation}
onShowSettings={props.onShowSettings}
sx={{
zIndex: 20, // position: 'sticky', top: 0,
// ...(process.env.NODE_ENV === 'development' ? { background: theme.vars.palette.danger.solidBg } : {}),
}} />
<ChatMessageList
conversationId={activeConversationId}
isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
onExecuteConversation={handleExecuteConversation}
onImagineFromText={handleImagineFromText}
sx={{
flexGrow: 1,
background: theme.vars.palette.background.level2,
overflowY: 'auto', // overflowY: 'hidden'
minHeight: 96,
}} />
<Ephemerals
conversationId={activeConversationId}
sx={{
// flexGrow: 0.1,
flexShrink: 0.5,
overflowY: 'auto',
minHeight: 64,
}} />
<Composer
conversationId={activeConversationId} messageId={null}
isDeveloperMode={systemPurposeId === 'Developer'}
onSendMessage={handleSendUserMessage}
sx={{
zIndex: 21, // position: 'sticky', bottom: 0,
background: theme.vars.palette.background.surface,
borderTop: `1px solid ${theme.vars.palette.divider}`,
p: { xs: 1, md: 2 },
}} />
{/* Confirmation for Publishing */}
<ConfirmationModal
open={!!publishConversationId} onClose={() => setPublishConversationId(null)} onPositive={handleConfirmedPublishConversation}
confirmationText={<>
Share your conversation anonymously on <Link href='https://paste.gg' target='_blank'>paste.gg</Link>?
It will be unlisted and available to share and read for 30 days. Keep in mind, deletion may not be possible.
Are you sure you want to proceed?
</>} positiveActionText={'Understood, upload to paste.gg'}
/>
{/* Show the Published details */}
{!!publishResponse && (
<PublishedModal open onClose={() => setPublishResponse(null)} response={publishResponse} />
)}
</Box>
);
}
@@ -1,7 +1,10 @@
import { CmdRunProdia } from '@/modules/prodia/prodia.client';
import { CmdRunReact, CmdRunSearch } from '@/modules/search/search.client';
import { CmdRunProdia } from '~/modules/prodia/prodia.client';
import { CmdRunReact } from '~/modules/aifn/react/react';
import { CmdRunSearch } from '~/modules/google/search.client';
export const commands = [...CmdRunProdia, ...CmdRunSearch, ...CmdRunReact];
export const CmdAddRoleMessage: string[] = ['/assistant', '/a', '/system', '/s'];
export const commands = [...CmdRunProdia, ...CmdRunReact, ...CmdRunSearch, ...CmdAddRoleMessage];
export interface SentencePiece {
type: 'text' | 'cmd';
@@ -13,7 +16,7 @@ export interface SentencePiece {
* Used by rendering functions, as well as input processing functions.
*/
export function extractCommands(input: string): SentencePiece[] {
const regexFromTags = commands.map(tag => `\\${tag}`).join('\\b|') + '\\b';
const regexFromTags = commands.map(tag => `^\\${tag} `).join('\\b|') + '\\b';
const pattern = new RegExp(regexFromTags, 'g');
const result: SentencePiece[] = [];
let lastIndex = 0;
@@ -22,7 +25,7 @@ export function extractCommands(input: string): SentencePiece[] {
while ((match = pattern.exec(input)) !== null) {
if (match.index !== lastIndex)
result.push({ type: 'text', value: input.substring(lastIndex, match.index) });
result.push({ type: 'cmd', value: match[0] });
result.push({ type: 'cmd', value: match[0].trim() });
lastIndex = pattern.lastIndex;
// Remove the space after the matched tag
+99 -25
View File
@@ -1,32 +1,76 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, List } from '@mui/joy';
import { Box, List, Sheet, Switch, Tooltip, Typography } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import { createDMessage, DMessage, useChatStore } from '@/common/state/store-chats';
import { useSettingsStore } from '@/common/state/store-settings';
import { useChatLLM } from '~/modules/llms/store-llms';
import { createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
import { useUIPreferencesStore } from '~/common/state/store-ui';
import { ChatMessage } from './message/ChatMessage';
import { ChatMessageSelectable, MessagesSelectionHeader } from './message/ChatMessageSelectable';
import { PurposeSelector } from './PurposeSelector';
import { CleanerMessage, MessagesSelectionHeader } from './message/CleanerMessage';
import { PersonaSelector } from './persona-selector/PersonaSelector';
/**
 * [Experimental] A panel with tools for the chat
 */
function ToolsPanel(props: { showDiff: boolean, setShowDiff: (showDiff: boolean) => void }) {
  const { showDiff, setShowDiff } = props;

  // flip the diff-highlighting switch
  const handleToggleDiff = () => setShowDiff(!showDiff);

  return (
    <Sheet
      variant='outlined' invertedColors
      sx={{
        position: 'fixed', top: 64, left: 8, zIndex: 101,
        boxShadow: 'md', borderRadius: '100px',
        p: 2,
        display: 'flex', flexFlow: 'row wrap', alignItems: 'center', justifyContent: 'space-between', gap: 2,
      }}
    >
      <Typography level='title-md'>
        🪛
      </Typography>
      <Tooltip title='Highlight differences'>
        <Switch
          checked={showDiff} onChange={handleToggleDiff}
          startDecorator={<Typography level='title-md'>Diff</Typography>}
        />
      </Tooltip>
    </Sheet>
  );
}
/**
* A list of ChatMessages
*/
export function ChatMessageList(props: { conversationId: string | null, isMessageSelectionMode: boolean, setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void, onExecuteConversation: (conversationId: string, history: DMessage[]) => void, onImagineFromText: (conversationId: string, userText: string) => void, sx?: SxProps }) {
export function ChatMessageList(props: {
conversationId: string | null,
showTools?: boolean,
isMessageSelectionMode: boolean, setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void,
onExecuteChatHistory: (conversationId: string, history: DMessage[]) => void,
onImagineFromText: (conversationId: string, userText: string) => Promise<any>,
sx?: SxProps
}) {
// state
const [diffing, setDiffing] = React.useState<boolean>(false);
const [selectedMessages, setSelectedMessages] = React.useState<Set<string>>(new Set());
// external state
const showSystemMessages = useSettingsStore(state => state.showSystemMessages);
const { editMessage, deleteMessage } = useChatStore(state => ({ editMessage: state.editMessage, deleteMessage: state.deleteMessage }), shallow);
const messages = useChatStore(state => {
const { experimentalLabs, showSystemMessages } = useUIPreferencesStore(state => ({
experimentalLabs: state.experimentalLabs,
showSystemMessages: state.showSystemMessages,
}));
const { messages, editMessage, deleteMessage, historyTokenCount } = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
return conversation ? conversation.messages : [];
return {
messages: conversation ? conversation.messages : [],
editMessage: state.editMessage, deleteMessage: state.deleteMessage,
historyTokenCount: conversation ? conversation.tokenCount : 0,
};
}, shallow);
const { chatLLM } = useChatLLM();
const handleMessageDelete = (messageId: string) =>
props.conversationId && deleteMessage(props.conversationId, messageId);
@@ -34,16 +78,20 @@ export function ChatMessageList(props: { conversationId: string | null, isMessag
const handleMessageEdit = (messageId: string, newText: string) =>
props.conversationId && editMessage(props.conversationId, messageId, { text: newText }, true);
const handleImagineFromText = (messageText: string) =>
props.conversationId && props.onImagineFromText(props.conversationId, messageText);
const handleImagineFromText = (messageText: string): Promise<any> => {
if (props.conversationId)
return props.onImagineFromText(props.conversationId, messageText);
else
return Promise.reject('No conversation');
};
const handleRestartFromMessage = (messageId: string, offset: number) => {
const truncatedHistory = messages.slice(0, messages.findIndex(m => m.id === messageId) + offset + 1);
props.conversationId && props.onExecuteConversation(props.conversationId, truncatedHistory);
props.conversationId && props.onExecuteChatHistory(props.conversationId, truncatedHistory);
};
const handleRunExample = (text: string) =>
props.conversationId && props.onExecuteConversation(props.conversationId, [...messages, createDMessage('user', text)]);
props.conversationId && props.onExecuteChatHistory(props.conversationId, [...messages, createDMessage('user', text)]);
// hide system messages if the user chooses so
@@ -54,7 +102,7 @@ export function ChatMessageList(props: { conversationId: string | null, isMessag
if (!filteredMessages.length)
return props.conversationId ? (
<Box sx={props.sx || {}}>
<PurposeSelector conversationId={props.conversationId} runExample={handleRunExample} />
<PersonaSelector conversationId={props.conversationId} runExample={handleRunExample} />
</Box>
) : null;
@@ -68,14 +116,14 @@ export function ChatMessageList(props: { conversationId: string | null, isMessag
const handleSelectAllMessages = (selected: boolean) => {
const newSelected = new Set<string>();
if (selected)
for (let message of messages)
for (const message of messages)
newSelected.add(message.id);
setSelectedMessages(newSelected);
};
const handleDeleteSelectedMessages = () => {
if (props.conversationId)
for (let selectedMessage of selectedMessages)
for (const selectedMessage of selectedMessages)
deleteMessage(props.conversationId, selectedMessage);
setSelectedMessages(new Set());
};
@@ -86,18 +134,36 @@ export function ChatMessageList(props: { conversationId: string | null, isMessag
// '&::-webkit-scrollbar': {
// md: {
// width: 8,
// background: theme.vars.palette.neutral.plainHoverBg,
// background: theme.palette.neutral.plainHoverBg,
// },
// },
// '&::-webkit-scrollbar-thumb': {
// background: theme.vars.palette.neutral.solidBg,
// background: theme.palette.neutral.solidBg,
// borderRadius: 6,
// },
// '&::-webkit-scrollbar-thumb:hover': {
// background: theme.vars.palette.neutral.solidHoverBg,
// background: theme.palette.neutral.solidHoverBg,
// },
// };
// pass the diff text to most recent assistant message, once done
const showTextTools = !!props.showTools || experimentalLabs;
let diffMessage: DMessage | undefined;
let diffText: string | undefined;
if (diffing && showTextTools) {
const [msgB, msgA] = filteredMessages.filter(m => m.role === 'assistant');
if (!msgB.typing && msgB?.text && msgA?.text) {
const textA = msgA.text, textB = msgB.text;
const lenA = textA.length, lenB = textB.length;
if (lenA > 80 && lenB > 80 && lenA > lenB / 2 && lenB > lenA / 2) {
diffMessage = msgB;
diffText = textA;
}
}
}
return (
<List sx={{
p: 0, ...(props.sx || {}),
@@ -109,27 +175,35 @@ export function ChatMessageList(props: { conversationId: string | null, isMessag
{filteredMessages.map((message, idx) =>
props.isMessageSelectionMode ? (
<ChatMessageSelectable
<CleanerMessage
key={'sel-' + message.id} message={message}
isBottom={idx === 0}
isBottom={idx === 0} remainingTokens={(chatLLM ? chatLLM.contextTokens : 0) - historyTokenCount}
selected={selectedMessages.has(message.id)} onToggleSelected={handleToggleSelected}
/>
) : (
<ChatMessage
key={'msg-' + message.id} message={message}
key={'msg-' + message.id} message={message} diffText={message === diffMessage ? diffText : undefined}
isBottom={idx === 0}
onMessageDelete={() => handleMessageDelete(message.id)}
onMessageEdit={newText => handleMessageEdit(message.id, newText)}
onMessageRunFrom={(offset: number) => handleRestartFromMessage(message.id, offset)}
onImagine={handleImagineFromText} />
onImagine={handleImagineFromText}
/>
),
)}
{showTextTools && <ToolsPanel showDiff={diffing} setShowDiff={setDiffing} />}
{/* Header at the bottom because of 'row-reverse' */}
{props.isMessageSelectionMode && (
<MessagesSelectionHeader
hasSelected={selectedMessages.size > 0}
isBottom={filteredMessages.length === 0}
sumTokens={historyTokenCount}
onClose={() => props.setIsMessageSelectionMode(false)}
onSelectAll={handleSelectAllMessages}
onDeleteMessages={handleDeleteSelectedMessages}
@@ -5,7 +5,7 @@ import { Box, Grid, IconButton, Sheet, Stack, styled, Typography, useTheme } fro
import { SxProps } from '@mui/joy/styles/types';
import CloseIcon from '@mui/icons-material/Close';
import { DEphemeral, useChatStore } from '@/common/state/store-chats';
import { DEphemeral, useChatStore } from '~/common/state/store-chats';
const StateLine = styled(Typography)(({ theme }) => ({
@@ -42,7 +42,7 @@ function ListRenderer({ name, list }: { name: string, list: any[] }) {
return <StateLine><b>{name}</b>[{list.length ? list.length : ''}]: {list.length ? '(not displayed)' : 'empty'}</StateLine>;
}
function ObjectRenderer({ name, value }: { name: string, value: object }) {
function ObjectRenderer({ name }: { name: string }) {
return <StateLine><b>{name}</b>: <i>object not displayed</i></StateLine>;
}
@@ -55,19 +55,19 @@ function StateRenderer(props: { state: object }) {
return (
<Stack>
<Typography level='body2' sx={{ mb: 1 }}>
<Typography level='body-sm' sx={{ mb: 1 }}>
Internal State
</Typography>
<Sheet>
{!entries && <Typography level='body2'>No state variables</Typography>}
{!entries && <Typography level='body-sm'>No state variables</Typography>}
{entries.map(([key, value]) =>
isPrimitive(value)
? <PrimitiveRender key={'state-' + key} name={key} value={value} />
: Array.isArray(value)
? <ListRenderer key={'state-' + key} name={key} list={value} />
: typeof value === 'object'
? <ObjectRenderer key={'state-' + key} name={key} value={value} />
: <Typography key={'state-' + key} level='body2'>{key}: {value}</Typography>,
? <ObjectRenderer key={'state-' + key} name={key} />
: <Typography key={'state-' + key} level='body-sm'>{key}: {value}</Typography>,
)}
</Sheet>
</Stack>
@@ -81,7 +81,7 @@ function EphemeralItem({ conversationId, ephemeral }: { conversationId: string,
sx={{
p: { xs: 1, md: 2 },
position: 'relative',
// border: (i < ephemerals.length - 1) ? `2px solid ${theme.vars.palette.divider}` : undefined,
// border: (i < ephemerals.length - 1) ? `2px solid ${theme.palette.divider}` : undefined,
'&:hover > button': { opacity: 1 },
}}>
@@ -138,10 +138,9 @@ export function Ephemerals(props: { conversationId: string | null, sx?: SxProps
return (
<Sheet
variant='soft' color='info' invertedColors
variant='soft' color='success' invertedColors
sx={{
border: `4px dashed ${theme.vars.palette.divider}`,
boxShadow: `inset 0 0 12px ${theme.vars.palette.background.popup}`,
border: `4px dashed ${theme.palette.divider}`,
...(props.sx || {}),
}}>
@@ -1,47 +0,0 @@
import * as React from 'react';
import { Option, Select } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import KeyboardArrowDownIcon from '@mui/icons-material/KeyboardArrowDown';
/**
 * A Select component that blends-in nicely (cleaner, easier to the eyes)
 */
export const AppBarDropdown = <TValue extends string>(props: { value: TValue, items: Record<string, { title: string }>, onChange: (event: any, value: TValue | null) => void, sx?: SxProps }) => {
  // one Option per record key, preserving the record's insertion order
  const options = Object.entries(props.items).map(([itemKey, item]) => (
    <Option key={itemKey} value={itemKey}>
      {item.title}
    </Option>
  ));
  return (
    <Select
      variant='solid' color='neutral' size='md'
      value={props.value} onChange={props.onChange}
      indicator={<KeyboardArrowDownIcon />}
      slotProps={{
        root: {
          sx: {
            backgroundColor: 'transparent',
          },
        },
        listbox: {
          variant: 'plain', color: 'neutral', size: 'lg',
          disablePortal: false,
          sx: {
            minWidth: 160,
          },
        },
        indicator: {
          sx: {
            opacity: 0.5,
          },
        },
      }}
      sx={{
        mx: 0,
        /*fontFamily: theme.vars.fontFamily.code,*/
        fontWeight: 500,
        ...(props.sx || {}),
      }}
    >
      {options}
    </Select>
  );
};
@@ -1,31 +0,0 @@
import * as React from 'react';
import { AppBarDropdown } from './AppBarDropdown';
import { SxProps } from '@mui/joy/styles/types';
/**
 * Wrapper for AppBarDropdown that adds a symbol in front of the title
 */
type Props<TValue extends string> = {
  value: TValue;
  items: Record<string, { title: string, symbol: string }>;
  onChange: (event: any, value: TValue | null) => void;
  sx?: SxProps;
};

export const AppBarDropdownWithSymbol = <TValue extends string>({ value, items, onChange, sx }: Props<TValue>) => {
  // rebuild the items record with 'symbol title' as the display title (symbol dropped when falsy)
  const decoratedItems = Object.fromEntries(
    Object.entries(items).map(([key, { title, symbol }]) =>
      [key, { title: (symbol ? symbol + ' ' : '') + title }]),
  );
  return (
    <AppBarDropdown
      value={value}
      items={decoratedItems}
      onChange={onChange}
      sx={sx}
    />
  );
};
@@ -1,335 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Badge, Box, Button, IconButton, ListDivider, ListItem, ListItemDecorator, Menu, MenuItem, Sheet, Stack, SvgIcon, Switch, useColorScheme, useTheme } from '@mui/joy';
import { SxProps } from '@mui/joy/styles/types';
import CheckBoxOutlineBlankOutlinedIcon from '@mui/icons-material/CheckBoxOutlineBlankOutlined';
import CheckBoxOutlinedIcon from '@mui/icons-material/CheckBoxOutlined';
import ClearIcon from '@mui/icons-material/Clear';
import DarkModeIcon from '@mui/icons-material/DarkMode';
import ExitToAppIcon from '@mui/icons-material/ExitToApp';
import FileDownloadIcon from '@mui/icons-material/FileDownload';
import GitHubIcon from '@mui/icons-material/GitHub';
import MenuIcon from '@mui/icons-material/Menu';
import MoreVertIcon from '@mui/icons-material/MoreVert';
import SettingsOutlinedIcon from '@mui/icons-material/SettingsOutlined';
import SettingsSuggestIcon from '@mui/icons-material/SettingsSuggest';
import { Brand } from '@/common/brand';
import { ChatModelId, ChatModels, SystemPurposeId, SystemPurposes } from '../../../../data';
import { ConfirmationModal } from '@/common/components/ConfirmationModal';
import { Link } from '@/common/components/Link';
import { cssRainbowColorKeyframes } from '@/common/theme';
import { downloadConversationJson, restoreConversationFromJson, useChatStore } from '@/common/state/store-chats';
import { useSettingsStore } from '@/common/state/store-settings';
import { AppBarDropdown } from './AppBarDropdown';
import { AppBarDropdownWithSymbol } from './AppBarDropdownWithSymbol';
import { ImportedModal, ImportedOutcome } from './ImportedModal';
import { PagesMenu } from './PagesMenu';
// missing from MUI, using Tabler for Discord
function DiscordIcon(props: { sx?: SxProps }) {
  // FIX: SVG presentation attributes must be camelCased in JSX (strokeLinecap, not
  // stroke-linecap) — the hyphenated form triggers React's 'Invalid DOM property'
  // warning; the inner <path> already used the camelCased strokeWidth correctly
  return <SvgIcon viewBox='0 0 24 24' width='24' height='24' stroke='currentColor' fill='none' strokeLinecap='round' strokeLinejoin='round' {...props}>
    <path stroke='none' d='M0 0h24v24H0z' fill='none'></path>
    <path d='M14.983 3l.123 .006c2.014 .214 3.527 .672 4.966 1.673a1 1 0 0 1 .371 .488c1.876 5.315 2.373 9.987 1.451 12.28c-1.003 2.005 -2.606 3.553 -4.394 3.553c-.94 0 -2.257 -1.596 -2.777 -2.969l-.02 .005c.838 -.131 1.69 -.323 2.572 -.574a1 1 0 1 0 -.55 -1.924c-3.32 .95 -6.13 .95 -9.45 0a1 1 0 0 0 -.55 1.924c.725 .207 1.431 .373 2.126 .499l.444 .074c-.477 1.37 -1.695 2.965 -2.627 2.965c-1.743 0 -3.276 -1.555 -4.267 -3.644c-.841 -2.206 -.369 -6.868 1.414 -12.174a1 1 0 0 1 .358 -.49c1.392 -1.016 2.807 -1.475 4.717 -1.685a1 1 0 0 1 .938 .435l.063 .107l.652 1.288l.16 -.019c.877 -.09 1.718 -.09 2.595 0l.158 .019l.65 -1.287a1 1 0 0 1 .754 -.54l.123 -.01zm-5.983 6a2 2 0 0 0 -1.977 1.697l-.018 .154l-.005 .149l.005 .15a2 2 0 1 0 1.995 -2.15zm6 0a2 2 0 0 0 -1.977 1.697l-.018 .154l-.005 .149l.005 .15a2 2 0 1 0 1.995 -2.15z' strokeWidth='0' fill='currentColor'></path>
  </SvgIcon>;
}
function BringTheLove(props: { text: string, link: string, icon: JSX.Element }) {
const [loved, setLoved] = React.useState(false);
const icon = loved ? '❤️' : props.icon; // '❤️' : '🤍';
return <Button
color='neutral'
component={Link} noLinkStyle href={props.link} target='_blank'
onClick={() => setLoved(true)}
endDecorator={icon}
sx={{
background: 'transparent',
// '&:hover': { background: props.theme.palette.neutral.solidBg },
'&:hover': { animation: `${cssRainbowColorKeyframes} 5s linear infinite` },
}}>
{props.text}
</Button>;
}
function SupportItem() {
  const theme = useTheme();
  const fadedColor = theme.palette.neutral.plainDisabledColor;
  const iconColor = '';

  // community channels, rendered after the site name
  const loveLinks = [
    { text: 'Discord', icon: <DiscordIcon sx={{ color: iconColor }} />, link: Brand.URIs.SupportInvite },
    { text: 'GitHub', icon: <GitHubIcon sx={{ color: iconColor }} />, link: Brand.URIs.OpenRepo },
  ];

  return (
    <ListItem
      variant='solid' color='neutral'
      sx={{
        mb: -1, // absorb the bottom margin of the list
        mt: 1,
        // background: theme.palette.neutral.solidActiveBg,
        display: 'flex', flexDirection: 'row', gap: 1,
        justifyContent: 'space-between',
      }}>
      <Box
        sx={{
          mx: { xs: 1, sm: 2 },
          fontWeight: 600,
          color: fadedColor,
        }}>
        {Brand.Meta.SiteName}
      </Box>
      {loveLinks.map(({ text, icon, link }) =>
        <BringTheLove key={text} text={text} icon={icon} link={link} />,
      )}
    </ListItem>
  );
}
/**
 * The top bar of the application, with the model and purpose selection, and menu/settings icons
 */
export function ApplicationBar(props: {
  conversationId: string | null;
  isMessageSelectionMode: boolean; setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void;
  onPublishConversation: (conversationId: string) => void;
  onShowSettings: () => void;
  sx?: SxProps
}) {

  // state: anchors for the two popup menus, plus the ids driving the modal flows
  const [actionsMenuAnchor, setActionsMenuAnchor] = React.useState<HTMLElement | null>(null);
  const [pagesMenuAnchor, setPagesMenuAnchor] = React.useState<HTMLElement | null>(null);
  const [clearConfirmationId, setClearConfirmationId] = React.useState<string | null>(null);
  const [conversationImportOutcome, setConversationImportOutcome] = React.useState<ImportedOutcome | null>(null);
  const conversationFileInputRef = React.useRef<HTMLInputElement>(null);

  // center buttons

  // NOTE: setChatModelId / setSystemPurposeId are bound below by the useChatStore selector;
  // hoisting works because these handlers only run after the first render
  const handleChatModelChange = (event: any, value: ChatModelId | null) =>
    value && props.conversationId && setChatModelId(props.conversationId, value);

  const handleSystemPurposeChange = (event: any, value: SystemPurposeId | null) =>
    value && props.conversationId && setSystemPurposeId(props.conversationId, value);

  // quick actions

  const closeActionsMenu = () => setActionsMenuAnchor(null);

  const { mode: colorMode, setMode: setColorMode } = useColorScheme();

  const { showSystemMessages, setShowSystemMessages, zenMode } = useSettingsStore(state => ({
    showSystemMessages: state.showSystemMessages, setShowSystemMessages: state.setShowSystemMessages,
    zenMode: state.zenMode,
  }), shallow);

  const handleDarkModeToggle = () => setColorMode(colorMode === 'dark' ? 'light' : 'dark');

  const handleSystemMessagesToggle = () => setShowSystemMessages(!showSystemMessages);

  const handleActionShowSettings = (e: React.MouseEvent) => {
    e.stopPropagation();
    props.onShowSettings();
    closeActionsMenu();
  };

  // conversation actions

  // derive the active conversation's model/purpose and grab the store mutators in one
  // shallow-compared selector, so unrelated store changes don't re-render this bar
  const { conversationsCount, isConversationEmpty, chatModelId, systemPurposeId, setMessages, setChatModelId, setSystemPurposeId, setAutoTitle, importConversation } = useChatStore(state => {
    const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
    return {
      conversationsCount: state.conversations.length,
      isConversationEmpty: conversation ? !conversation.messages.length : true,
      chatModelId: conversation ? conversation.chatModelId : null,
      systemPurposeId: conversation ? conversation.systemPurposeId : null,
      setMessages: state.setMessages,
      setChatModelId: state.setChatModelId,
      setSystemPurposeId: state.setSystemPurposeId,
      setAutoTitle: state.setAutoTitle,
      importConversation: state.importConversation,
    };
  }, shallow);

  const handleConversationPublish = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    props.conversationId && props.onPublishConversation(props.conversationId);
  };

  // reads the store directly (getState) to export the latest snapshot of the conversation
  const handleConversationDownload = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    const conversation = useChatStore.getState().conversations.find(conversation => conversation.id === props.conversationId);
    if (conversation)
      downloadConversationJson(conversation);
  };

  const handleToggleMessageSelectionMode = (e: React.MouseEvent) => {
    e.stopPropagation();
    closeActionsMenu();
    props.setIsMessageSelectionMode(!props.isMessageSelectionMode);
  };

  // opens the confirmation modal; the actual clearing happens in handleConfirmedClearConversation
  const handleConversationClear = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    setClearConfirmationId(props.conversationId);
  };

  const handleConfirmedClearConversation = () => {
    if (clearConfirmationId) {
      setMessages(clearConfirmationId, []);
      setAutoTitle(clearConfirmationId, '');
      setClearConfirmationId(null);
    }
  };

  // pages actions

  const closePagesMenu = () => setPagesMenuAnchor(null);

  // delegates file selection to the hidden <input type='file'> rendered below
  const handleConversationUpload = () => conversationFileInputRef.current?.click();

  // imports every selected JSON file, collecting a per-file success/failure outcome
  const handleLoadConversations = async (e: React.ChangeEvent<HTMLInputElement>) => {
    const files = e.target?.files;
    if (!files || files.length < 1)
      return;

    // try to restore conversations from the selected files
    const outcomes: ImportedOutcome = { conversations: [] };
    for (const file of files) {
      const fileName = file.name || 'unknown file';
      try {
        const conversation = restoreConversationFromJson(await file.text());
        if (conversation) {
          importConversation(conversation);
          outcomes.conversations.push({ fileName, success: true, conversationId: conversation.id });
        } else {
          const fileDesc = `(${file.type}) ${file.size.toLocaleString()} bytes`;
          outcomes.conversations.push({ fileName, success: false, error: `Invalid file: ${fileDesc}` });
        }
      } catch (error) {
        console.error(error);
        outcomes.conversations.push({ fileName, success: false, error: (error as any)?.message || error?.toString() || 'unknown error' });
      }
    }

    // show the outcome of the import
    setConversationImportOutcome(outcomes);

    // this is needed to allow the same file to be selected again
    e.target.value = '';
  };

  return <>

    {/* Top Bar with 2 icons and Model/Purpose selectors */}
    <Sheet
      variant='solid' color='neutral' invertedColors
      sx={{
        p: 1,
        display: 'flex', flexDirection: 'row', justifyContent: 'space-between',
        ...(props.sx || {}),
      }}>

      {/* badge shows the chat count; hidden (badgeContent=0) when fewer than 2 chats */}
      <IconButton variant='plain' onClick={event => setPagesMenuAnchor(event.currentTarget)}>
        <Badge variant='solid' size='sm' badgeContent={conversationsCount < 2 ? 0 : conversationsCount}>
          <MenuIcon />
        </Badge>
      </IconButton>

      <Stack direction='row' sx={{ my: 'auto' }}>
        {chatModelId && <AppBarDropdown items={ChatModels} value={chatModelId} onChange={handleChatModelChange} />}
        {/* 'cleaner' zen mode drops the symbol in front of the purpose title */}
        {systemPurposeId && (zenMode === 'cleaner'
            ? <AppBarDropdown items={SystemPurposes} value={systemPurposeId} onChange={handleSystemPurposeChange} />
            : <AppBarDropdownWithSymbol items={SystemPurposes} value={systemPurposeId} onChange={handleSystemPurposeChange} />
        )}
      </Stack>

      <IconButton variant='plain' onClick={event => setActionsMenuAnchor(event.currentTarget)}>
        <MoreVertIcon />
      </IconButton>

    </Sheet>


    {/* Left menu content */}
    <PagesMenu
      conversationId={props.conversationId}
      pagesMenuAnchor={pagesMenuAnchor}
      onClose={closePagesMenu}
      onImportConversation={handleConversationUpload}
    />


    {/* Right menu content */}
    <Menu
      variant='plain' color='neutral' size='lg' placement='bottom-end' sx={{ minWidth: 280 }}
      open={!!actionsMenuAnchor} anchorEl={actionsMenuAnchor} onClose={closeActionsMenu}
      disablePortal={false}>

      <MenuItem onClick={handleDarkModeToggle}>
        <ListItemDecorator><DarkModeIcon /></ListItemDecorator>
        Dark
        <Switch checked={colorMode === 'dark'} onChange={handleDarkModeToggle} sx={{ ml: 'auto' }} />
      </MenuItem>

      <MenuItem onClick={handleSystemMessagesToggle}>
        <ListItemDecorator><SettingsSuggestIcon /></ListItemDecorator>
        System text
        <Switch checked={showSystemMessages} onChange={handleSystemMessagesToggle} sx={{ ml: 'auto' }} />
      </MenuItem>

      <MenuItem onClick={handleActionShowSettings}>
        <ListItemDecorator><SettingsOutlinedIcon /></ListItemDecorator>
        Settings
      </MenuItem>

      <ListDivider />

      {/* conversation-scoped actions are disabled without a non-empty active conversation */}
      <MenuItem disabled={!props.conversationId || isConversationEmpty} onClick={handleConversationPublish}>
        <ListItemDecorator>
          {/*<Badge size='sm' color='primary'>*/}
          <ExitToAppIcon />
          {/*</Badge>*/}
        </ListItemDecorator>
        Share via paste.gg
      </MenuItem>

      <MenuItem disabled={!props.conversationId || isConversationEmpty} onClick={handleConversationDownload}>
        <ListItemDecorator>
          <FileDownloadIcon />
        </ListItemDecorator>
        Export conversation
      </MenuItem>

      <ListDivider />

      <MenuItem disabled={!props.conversationId || isConversationEmpty} onClick={handleToggleMessageSelectionMode}>
        <ListItemDecorator>{props.isMessageSelectionMode ? <CheckBoxOutlinedIcon /> : <CheckBoxOutlineBlankOutlinedIcon />}</ListItemDecorator>
        Cleanup ...
      </MenuItem>

      <MenuItem disabled={!props.conversationId || isConversationEmpty} onClick={handleConversationClear}>
        <ListItemDecorator><ClearIcon /></ListItemDecorator>
        Clear conversation
      </MenuItem>

      <SupportItem />

    </Menu>


    {/* Modals */}

    <ConfirmationModal
      open={!!clearConfirmationId} onClose={() => setClearConfirmationId(null)} onPositive={handleConfirmedClearConversation}
      confirmationText={'Are you sure you want to discard all the messages?'} positiveActionText={'Clear conversation'}
    />

    {!!conversationImportOutcome && (
      <ImportedModal open outcome={conversationImportOutcome} onClose={() => setConversationImportOutcome(null)} />
    )}

    {/* Files */}
    <input type='file' multiple hidden accept='.json' ref={conversationFileInputRef} onChange={handleLoadConversations} />

  </>;
}
@@ -1,68 +0,0 @@
import * as React from 'react';
import { Box, Button, Divider, List, ListItem, Modal, ModalDialog, Typography } from '@mui/joy';
/** Per-file outcome of a conversation-import operation (one entry per selected file). */
export interface ImportedOutcome {
  conversations: {
    // name of the file the conversation was read from (or 'unknown file')
    fileName: string;
    // true when the file was parsed and the conversation imported
    success: boolean;
    // id of the imported conversation, set on success
    conversationId?: string;
    // human-readable failure reason, set when success is false
    error?: string;
  }[];
}
/**
 * Displays the result of an import operation as a modal dialog.
 *
 * Import operations supported:
 *  - JSON Chat
 */
export function ImportedModal(props: { open: boolean, outcome: ImportedOutcome, onClose: () => void, }) {
  const { conversations } = props.outcome;

  // partition the per-file outcomes
  const imported = conversations.filter(c => c.success);
  const errored = conversations.filter(c => !c.success);
  const hasAnyResults = imported.length > 0 || errored.length > 0;
  const hasAnyFailures = errored.length > 0;
  const oneImport = imported.length === 1;

  return (
    <Modal open={props.open} onClose={props.onClose}>
      <ModalDialog variant='outlined' color='neutral' sx={{ maxWidth: '100vw' }}>

        <Typography level='h5'>
          {hasAnyResults ? hasAnyFailures ? 'Import issues' : 'Import successful' : 'Import failed'}
        </Typography>

        <Divider sx={{ my: 2 }} />

        {imported.length >= 1 && <>
          <Typography>
            Imported {imported.length} conversation{oneImport ? '' : 's'}.
          </Typography>
          <Typography>
            {oneImport ? 'It' : 'They'} can be found in the Pages menu. Opening {oneImport ? 'it' : 'the last one'}.
          </Typography>
        </>}

        {errored.length >= 1 && <>
          <Typography variant='soft' color='danger'>
            Issues importing {errored.length} conversation{errored.length === 1 ? '' : 's'}:
          </Typography>
          <List>
            {errored.map((f, idx) =>
              <ListItem color='warning' key={'fail-' + idx}>{f.fileName}: {f.error}</ListItem>,
            )}
          </List>
        </>}

        <Box sx={{ display: 'flex', gap: 1, justifyContent: 'flex-end', mt: 2 }}>
          <Button variant='soft' color='neutral' onClick={props.onClose}>
            Close
          </Button>
        </Box>

      </ModalDialog>
    </Modal>
  );
}
@@ -1,162 +0,0 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, ListDivider, ListItemDecorator, Menu, MenuItem, Tooltip, Typography } from '@mui/joy';
import AddIcon from '@mui/icons-material/Add';
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
import FileUploadIcon from '@mui/icons-material/FileUpload';
import { ConfirmationModal } from '@/common/components/ConfirmationModal';
import { MAX_CONVERSATIONS, useChatStore } from '@/common/state/store-chats';
import { useSettingsStore } from '@/common/state/store-settings';
import { PagesMenuItem } from './PagesMenuItem';
// Sentinel id used by the delete-confirmation flow to mean "delete ALL conversations"
const SPECIAL_ID_ALL_CHATS = 'all-chats';

/**
 * FIXME: use a proper Pages drawer instead of this menu
 */
export function PagesMenu(props: { conversationId: string | null, pagesMenuAnchor: HTMLElement | null, onClose: () => void, onImportConversation: () => void }) {

  // state
  // id of the conversation awaiting delete confirmation, the SPECIAL_ID_ALL_CHATS
  // sentinel for 'delete all', or null when no confirmation dialog is shown
  const [deleteConfirmationId, setDeleteConfirmationId] = React.useState<string | null>(null);

  // external state
  const conversationIDs = useChatStore(state => state.conversations.map(conversation => conversation.id), shallow);
  const { setActiveConversationId, createConversation, deleteConversation, newConversationId } = useChatStore(state => ({
    setActiveConversationId: state.setActiveConversationId,
    createConversation: state.createConversation,
    deleteConversation: state.deleteConversation,
    // id of the top-of-stack conversation only if it is still empty, else null
    newConversationId: state.conversations.length ? state.conversations[0].messages.length === 0 ? state.conversations[0].id : null : null,
  }), shallow);
  const showSymbols = useSettingsStore(state => state.zenMode) !== 'cleaner';

  // derived state
  const hasChats = conversationIDs.length > 0;
  const singleChat = conversationIDs.length === 1;
  const maxReached = conversationIDs.length >= MAX_CONVERSATIONS;

  // 'New': reuse the top empty conversation if there is one, otherwise create one
  const handleNew = () => {
    // if the first in the stack is a new conversation, just activate it
    if (newConversationId)
      setActiveConversationId(newConversationId);
    else
      createConversation();
    props.onClose();
  };

  const handleConversationActivate = (conversationId: string) => setActiveConversationId(conversationId);

  // per-item delete: disabled when only one chat remains; deletes immediately (no confirmation)
  const handleConversationDelete = (e: React.MouseEvent, conversationId: string) => {
    if (!singleChat) {
      // stop the click from also activating the row
      e.stopPropagation();

      // NOTE: the old behavior was good, keeping it for reference - now we'll only ask for confirmation when deleting all chats
      // // if the chat is empty, just delete it
      // if (conversationId === newConversationId)
      //   deleteConversation(conversationId);
      // // otherwise, ask for confirmation
      // else {
      //   setActiveConversationId(conversationId);
      //   setDeleteConfirmationId(conversationId);
      // }

      if (conversationId)
        deleteConversation(conversationId);
    }
  };

  // confirmation dialog 'positive' action: delete one chat, or every chat for the sentinel id
  const handleConfirmedDeleteConversation = () => {
    if (hasChats && deleteConfirmationId) {
      if (deleteConfirmationId === SPECIAL_ID_ALL_CHATS) {
        // create a fresh conversation first, so the store is never left empty
        createConversation();
        conversationIDs.forEach(conversationId => deleteConversation(conversationId));
      } else
        deleteConversation(deleteConfirmationId);
      setDeleteConfirmationId(null);
    }
  };

  // arm the 'delete all' confirmation dialog
  const handleDeleteAll = (e: React.MouseEvent) => {
    e.stopPropagation();
    setDeleteConfirmationId(SPECIAL_ID_ALL_CHATS);
  };

  // when at the conversations cap, prefix 'New' with a tooltip-bearing spacer
  const NewPrefix = maxReached && <Tooltip title={`Maximum limit: ${MAX_CONVERSATIONS} chats. Proceeding will remove the oldest chat.`}><Box sx={{ mr: 2 }}></Box></Tooltip>;

  return <>

    <Menu
      variant='plain' color='neutral' size='lg' placement='bottom-start' sx={{ minWidth: 320 }}
      open={!!props.pagesMenuAnchor} anchorEl={props.pagesMenuAnchor} onClose={props.onClose}
      disablePortal={false}>

      {/*<ListItem>*/}
      {/*  <Typography level='body2'>*/}
      {/*    Active chats*/}
      {/*  </Typography>*/}
      {/*</ListItem>*/}

      <MenuItem onClick={handleNew} disabled={!!newConversationId && newConversationId === props.conversationId}>
        <ListItemDecorator><AddIcon /></ListItemDecorator>
        {NewPrefix}New
      </MenuItem>

      <ListDivider />

      {conversationIDs.map(conversationId =>
        <PagesMenuItem
          key={'c-id-' + conversationId}
          conversationId={conversationId}
          isActive={conversationId === props.conversationId}
          isSingle={singleChat}
          showSymbols={showSymbols}
          conversationActivate={handleConversationActivate}
          conversationDelete={handleConversationDelete}
        />)}

      <ListDivider />

      <MenuItem onClick={props.onImportConversation}>
        <ListItemDecorator>
          <FileUploadIcon />
        </ListItemDecorator>
        Import conversation
      </MenuItem>

      <MenuItem disabled={!hasChats} onClick={handleDeleteAll}>
        <ListItemDecorator><DeleteOutlineIcon /></ListItemDecorator>
        <Typography>
          Delete all
        </Typography>
      </MenuItem>

      {/*<ListItem>*/}
      {/*  <Typography level='body2'>*/}
      {/*    Scratchpad*/}
      {/*  </Typography>*/}
      {/*</ListItem>*/}
      {/*<MenuItem>*/}
      {/*  <ListItemDecorator />*/}
      {/*  <Typography sx={{ opacity: 0.5 }}>*/}
      {/*    Feature <Link href={`${Brand.URIs.OpenRepo}/issues/17`} target='_blank'>#17</Link>*/}
      {/*  </Typography>*/}
      {/*</MenuItem>*/}

    </Menu>

    {/* Confirmations */}
    <ConfirmationModal
      open={!!deleteConfirmationId} onClose={() => setDeleteConfirmationId(null)} onPositive={handleConfirmedDeleteConversation}
      confirmationText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
        ? 'Are you absolutely sure you want to delete ALL conversations? This action cannot be undone.'
        : 'Are you sure you want to delete this conversation?'}
      positiveActionText={deleteConfirmationId === SPECIAL_ID_ALL_CHATS
        ? 'Yes, delete all'
        : 'Delete conversation'}
    />

  </>;
}
@@ -0,0 +1,161 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, ListDivider, ListItemDecorator, MenuItem, Typography } from '@mui/joy';
import AddIcon from '@mui/icons-material/Add';
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
import FileUploadIcon from '@mui/icons-material/FileUpload';
import { OpenAIIcon } from '~/common/components/icons/OpenAIIcon';
import { closeLayoutDrawer } from '~/common/layout/store-applayout';
import { useChatStore } from '~/common/state/store-chats';
import { useUIPreferencesStore } from '~/common/state/store-ui';
import { ConversationItem } from './ConversationItem';
type ListGrouping = 'off' | 'persona';

// Above this many conversations, surface the per-chat message-count progress bar
const SOFT_MAX_CONVERSATIONS = 50;

/**
 * Drawer body for the Chat app: 'New' action, the list of conversations,
 * and the Import / Delete-all actions.
 *
 * @param props.conversationId the currently active conversation (highlighted in the list)
 * @param props.onDeleteAllConversations invoked by the 'Delete …' menu item
 * @param props.onImportConversation invoked by the 'Import chats' menu item
 */
export function ChatDrawerItems(props: {
  conversationId: string | null
  onDeleteAllConversations: () => void,
  onImportConversation: () => void,
}) {

  // local state - grouping is currently fixed to 'off' (the toggle UI is commented out below)
  const [grouping] = React.useState<ListGrouping>('off');

  // external state
  const { conversationIDs, topNewConversationId, maxChatMessages, setActiveConversationId, createConversation, deleteConversation } = useChatStore(state => ({
    conversationIDs: state.conversations.map(conversation => conversation.id),
    // id of the top-of-stack conversation only if it is still empty, else null
    topNewConversationId: state.conversations.length ? state.conversations[0].messages.length === 0 ? state.conversations[0].id : null : null,
    maxChatMessages: state.conversations.reduce((longest, conversation) => Math.max(longest, conversation.messages.length), 0),
    setActiveConversationId: state.setActiveConversationId,
    createConversation: state.createConversation,
    deleteConversation: state.deleteConversation,
  }), shallow);
  const { experimentalLabs, showSymbols } = useUIPreferencesStore(state => ({
    experimentalLabs: state.experimentalLabs,
    showSymbols: state.zenMode !== 'cleaner',
  }), shallow);

  // derived state
  const totalConversations = conversationIDs.length;
  const hasChats = totalConversations > 0;
  const singleChat = totalConversations === 1;
  const softMaxReached = totalConversations >= SOFT_MAX_CONVERSATIONS;

  // 'New': reuse the top empty conversation if there is one, otherwise create one
  const handleNew = () => {
    // if the first in the stack is a new conversation, just activate it
    if (topNewConversationId)
      setActiveConversationId(topNewConversationId);
    else
      createConversation();
    closeLayoutDrawer();
  };

  const handleConversationActivate = React.useCallback((conversationId: string, closeMenu: boolean) => {
    setActiveConversationId(conversationId);
    if (closeMenu)
      closeLayoutDrawer();
  }, [setActiveConversationId]);

  // never delete the last remaining conversation
  const handleConversationDelete = React.useCallback((conversationId: string) => {
    if (!singleChat && conversationId)
      deleteConversation(conversationId);
  }, [deleteConversation, singleChat]);

  // grouping: when 'persona', cluster conversations by systemPurposeId
  let sortedIds = conversationIDs;
  if (grouping === 'persona') {
    const conversations = useChatStore.getState().conversations;

    // group conversations by persona; FIX: conversations without a persona used to be
    // silently dropped from the list - now they are kept, appended after the groups
    const groupedConversations: { [personaId: string]: string[] } = {};
    const ungrouped: string[] = [];
    conversations.forEach(conversation => {
      const persona = conversation.systemPurposeId;
      if (persona) {
        if (!groupedConversations[persona])
          groupedConversations[persona] = [];
        groupedConversations[persona].push(conversation.id);
      } else
        ungrouped.push(conversation.id);
    });

    // flatten grouped conversations, keeping the persona-less ones at the end
    sortedIds = [...Object.values(groupedConversations).flat(), ...ungrouped];
  }

  return <>

    {/*<ListItem>*/}
    {/*  <Typography level='body-sm'>*/}
    {/*    Active chats*/}
    {/*  </Typography>*/}
    {/*</ListItem>*/}

    <MenuItem disabled={!!topNewConversationId && topNewConversationId === props.conversationId} onClick={handleNew}>
      <ListItemDecorator><AddIcon /></ListItemDecorator>
      New
    </MenuItem>

    <ListDivider sx={{ mb: 0 }} />

    <Box sx={{ flex: 1, overflowY: 'auto' }}>
      {/*<ListItem sticky sx={{ justifyContent: 'space-between', boxShadow: 'sm' }}>*/}
      {/*  <Typography level='body-sm'>*/}
      {/*    Conversations*/}
      {/*  </Typography>*/}
      {/*  <ToggleButtonGroup variant='soft' size='sm' value={grouping} onChange={(_event, newValue) => newValue && setGrouping(newValue)}>*/}
      {/*    <IconButton value='off'>*/}
      {/*      <AccessTimeIcon />*/}
      {/*    </IconButton>*/}
      {/*    <IconButton value='persona'>*/}
      {/*      <PersonIcon />*/}
      {/*    </IconButton>*/}
      {/*  </ToggleButtonGroup>*/}
      {/*</ListItem>*/}

      {sortedIds.map(conversationId =>
        <ConversationItem
          key={'c-id-' + conversationId}
          conversationId={conversationId}
          isActive={conversationId === props.conversationId}
          isSingle={singleChat}
          showSymbols={showSymbols}
          maxChatMessages={(experimentalLabs || softMaxReached) ? maxChatMessages : 0}
          conversationActivate={handleConversationActivate}
          conversationDelete={handleConversationDelete}
        />)}
    </Box>

    <ListDivider sx={{ mt: 0 }} />

    <MenuItem onClick={props.onImportConversation}>
      <ListItemDecorator>
        <FileUploadIcon />
      </ListItemDecorator>
      Import chats
      <OpenAIIcon sx={{ fontSize: 'xl', ml: 'auto' }} />
    </MenuItem>

    <MenuItem disabled={!hasChats} onClick={props.onDeleteAllConversations}>
      <ListItemDecorator><DeleteOutlineIcon /></ListItemDecorator>
      <Typography>
        Delete {totalConversations >= 2 ? `all ${totalConversations} chats` : 'chat'}
      </Typography>
    </MenuItem>

    {/*<ListItem>*/}
    {/*  <Typography level='body-sm'>*/}
    {/*    Scratchpad*/}
    {/*  </Typography>*/}
    {/*</ListItem>*/}
    {/*<MenuItem>*/}
    {/*  <ListItemDecorator />*/}
    {/*  <Typography sx={{ opacity: 0.5 }}>*/}
    {/*    Feature <Link href={`${Brand.URIs.OpenRepo}/issues/17`} target='_blank'>#17</Link>*/}
    {/*  </Typography>*/}
    {/*</MenuItem>*/}

  </>;
}
@@ -0,0 +1,24 @@
import * as React from 'react';
import { useChatLLMDropdown } from './useLLMDropdown';
import { usePersonaIdDropdown } from './usePersonaDropdown';
export function ChatDropdowns(props: {
conversationId: string | null
}) {
// state
const { chatLLMDropdown } = useChatLLMDropdown();
const { personaDropdown } = usePersonaIdDropdown(props.conversationId);
return <>
{/* Model selector */}
{chatLLMDropdown}
{/* Persona selector */}
{personaDropdown}
</>;
}
@@ -0,0 +1,125 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Badge, ListDivider, ListItemDecorator, MenuItem, Switch } from '@mui/joy';
import CheckBoxOutlineBlankOutlinedIcon from '@mui/icons-material/CheckBoxOutlineBlankOutlined';
import CheckBoxOutlinedIcon from '@mui/icons-material/CheckBoxOutlined';
import ClearIcon from '@mui/icons-material/Clear';
import CompressIcon from '@mui/icons-material/Compress';
import FileDownloadIcon from '@mui/icons-material/FileDownload';
import ForkRightIcon from '@mui/icons-material/ForkRight';
import SettingsSuggestIcon from '@mui/icons-material/SettingsSuggest';
import { closeLayoutMenu } from '~/common/layout/store-applayout';
import { useUICounter, useUIPreferencesStore } from '~/common/state/store-ui';
/**
 * Contextual menu items for the active conversation: system-message visibility,
 * duplicate, flatten, cleanup (message selection), share/export and reset.
 *
 * All item handlers stop propagation and close the layout menu before invoking
 * the corresponding callback, so the menu never lingers over the action's UI.
 */
export function ChatMenuItems(props: {
  conversationId: string | null, isConversationEmpty: boolean, hasConversations: boolean,
  isMessageSelectionMode: boolean, setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void,
  onClearConversation: (conversationId: string) => void,
  onDuplicateConversation: (conversationId: string) => void,
  onExportConversation: (conversationId: string | null) => void,
  onFlattenConversation: (conversationId: string) => void,
}) {

  // external state
  const { novel: shareBadge, touch: shareTouch } = useUICounter('export-share');
  const { showSystemMessages, setShowSystemMessages } = useUIPreferencesStore(state => ({
    showSystemMessages: state.showSystemMessages, setShowSystemMessages: state.setShowSystemMessages,
  }), shallow);

  // derived state - most actions need a non-empty, selected conversation
  const disabled = !props.conversationId || props.isConversationEmpty;

  const handleSystemMessagesToggle = () => setShowSystemMessages(!showSystemMessages);

  const handleConversationExport = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    closeLayoutMenu();
    props.onExportConversation(!disabled ? props.conversationId : null);
    // marks the 'export-share' novelty counter as seen, hiding the badge
    shareTouch();
  };

  const handleConversationDuplicate = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    closeLayoutMenu();
    props.conversationId && props.onDuplicateConversation(props.conversationId);
  };

  const handleConversationFlatten = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    closeLayoutMenu();
    props.conversationId && props.onFlattenConversation(props.conversationId);
  };

  const handleToggleMessageSelectionMode = (e: React.MouseEvent) => {
    e.stopPropagation();
    closeLayoutMenu();
    props.setIsMessageSelectionMode(!props.isMessageSelectionMode);
  };

  const handleConversationClear = (e: React.MouseEvent<HTMLDivElement>) => {
    e.stopPropagation();
    props.conversationId && props.onClearConversation(props.conversationId);
  };

  return <>

    {/*<ListItem>*/}
    {/*  <Typography level='body-sm'>*/}
    {/*    Conversation*/}
    {/*  </Typography>*/}
    {/*</ListItem>*/}

    <MenuItem onClick={handleSystemMessagesToggle}>
      <ListItemDecorator><SettingsSuggestIcon /></ListItemDecorator>
      System message
      {/* FIX: stop the Switch's click from bubbling to the MenuItem onClick - otherwise
          a click on the Switch fired both handlers, toggling the setting twice (no-op) */}
      <Switch checked={showSystemMessages} onChange={handleSystemMessagesToggle} onClick={event => event.stopPropagation()} sx={{ ml: 'auto' }} />
    </MenuItem>

    <ListDivider inset='startContent' />

    <MenuItem disabled={disabled} onClick={handleConversationDuplicate}>
      <ListItemDecorator>
        {/*<Badge size='sm' color='success'>*/}
        <ForkRightIcon color='success' />
        {/*</Badge>*/}
      </ListItemDecorator>
      Duplicate
    </MenuItem>

    <MenuItem disabled={disabled} onClick={handleConversationFlatten}>
      <ListItemDecorator>
        {/*<Badge size='sm' color='success'>*/}
        <CompressIcon color='success' />
        {/*</Badge>*/}
      </ListItemDecorator>
      Flatten
    </MenuItem>

    <ListDivider inset='startContent' />

    <MenuItem disabled={disabled} onClick={handleToggleMessageSelectionMode}>
      <ListItemDecorator>{props.isMessageSelectionMode ? <CheckBoxOutlinedIcon /> : <CheckBoxOutlineBlankOutlinedIcon />}</ListItemDecorator>
      <span style={props.isMessageSelectionMode ? { fontWeight: 800 } : {}}>
        Cleanup ...
      </span>
    </MenuItem>

    <MenuItem disabled={!props.hasConversations} onClick={handleConversationExport}>
      <ListItemDecorator>
        <Badge color='danger' invisible={!shareBadge || !props.hasConversations}>
          <FileDownloadIcon />
        </Badge>
      </ListItemDecorator>
      Share / Export ...
    </MenuItem>

    <MenuItem disabled={disabled} onClick={handleConversationClear}>
      <ListItemDecorator><ClearIcon /></ListItemDecorator>
      Reset
    </MenuItem>

  </>;
}
@@ -6,45 +6,54 @@ import { SxProps } from '@mui/joy/styles/types';
import CloseIcon from '@mui/icons-material/Close';
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
import { InlineTextarea } from '@/common/components/InlineTextarea';
import { SystemPurposes } from '../../../../data';
import { conversationTitle, useChatStore } from '@/common/state/store-chats';
import { InlineTextarea } from '~/common/components/InlineTextarea';
import { conversationTitle, useChatStore } from '~/common/state/store-chats';
import { useUIPreferencesStore } from '~/common/state/store-ui';
const DEBUG_CONVERSATION_IDs = false;
export function PagesMenuItem(props: {
export function ConversationItem(props: {
conversationId: string,
isActive: boolean, isSingle: boolean, showSymbols: boolean,
conversationActivate: (conversationId: string) => void,
conversationDelete: (e: React.MouseEvent, conversationId: string) => void,
isActive: boolean, isSingle: boolean, showSymbols: boolean, maxChatMessages: number,
conversationActivate: (conversationId: string, closeMenu: boolean) => void,
conversationDelete: (conversationId: string) => void,
}) {
// state
const [isEditingTitle, setIsEditingTitle] = React.useState(false);
const [deleteArmed, setDeleteArmed] = React.useState(false);
const doubleClickToEdit = useUIPreferencesStore(state => state.doubleClickToEdit);
// bind to conversation
const conversation = useChatStore(state => {
const cState = useChatStore(state => {
const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
return conversation && {
isNew: conversation.messages.length === 0,
messageCount: conversation.messages.length,
assistantTyping: !!conversation.abortController,
systemPurposeId: conversation.systemPurposeId,
title: conversationTitle(conversation),
title: conversationTitle(conversation, 'new conversation'),
setUserTitle: state.setUserTitle,
};
}, shallow);
// auto-close the menu when clicking away
// auto-close the arming menu when clicking away
// NOTE: there currently is a bug (race condition) where the menu closes on a new item right after opening
// because the isActive prop is not yet updated
React.useEffect(() => {
if (deleteArmed && !props.isActive)
setDeleteArmed(false);
}, [deleteArmed, props.isActive]);
// sanity check: shouldn't happen, but just in case
if (!conversation) return null;
if (!cState) return null;
const { isNew, messageCount, assistantTyping, setUserTitle, systemPurposeId, title } = cState;
const handleActivate = () => props.conversationActivate(props.conversationId, true);
const handleEditBegin = () => setIsEditingTitle(true);
@@ -53,33 +62,50 @@ export function PagesMenuItem(props: {
setUserTitle(props.conversationId, text);
};
const handleDeleteBegin = () => setDeleteArmed(true);
const handleDeleteBegin = (e: React.MouseEvent) => {
e.stopPropagation();
if (!props.isActive)
props.conversationActivate(props.conversationId, false);
else
setDeleteArmed(true);
};
const handleDeleteConfirm = (e: React.MouseEvent) => {
if (deleteArmed) {
setDeleteArmed(false);
props.conversationDelete(e, props.conversationId);
e.stopPropagation();
props.conversationDelete(props.conversationId);
}
};
const handleDeleteCancel = () => setDeleteArmed(false);
const { assistantTyping, setUserTitle, systemPurposeId, title } = conversation;
const textSymbol = SystemPurposes[systemPurposeId]?.symbol || '❓';
const buttonSx: SxProps = { ml: 1, ...(props.isActive ? { color: 'white' } : {}) };
const progress = props.maxChatMessages ? 100 * messageCount / props.maxChatMessages : 0;
return (
<MenuItem
variant={props.isActive ? 'solid' : 'plain'} color='neutral'
selected={props.isActive}
onClick={() => props.conversationActivate(props.conversationId)}
onClick={handleActivate}
sx={{
// py: 0,
position: 'relative',
border: 'none', // note, there's a default border of 1px and invisible.. hmm
'&:hover > button': { opacity: 1 },
}}
>
{/* Optional prgoress bar */}
{progress > 0 && (
<Box sx={{
backgroundColor: 'neutral.softActiveBg',
position: 'absolute', left: 0, bottom: 0, width: progress + '%', height: 4,
}} />
)}
{/* Icon */}
{props.showSymbols && <ListItemDecorator>
{assistantTyping
@@ -90,12 +116,12 @@ export function PagesMenuItem(props: {
sx={{
width: 24,
height: 24,
borderRadius: 8,
borderRadius: 'var(--joy-radius-sm)',
}}
/>
) : (
<Typography sx={{ fontSize: '18px' }}>
{conversation.isNew ? '' : textSymbol}
{isNew ? '' : textSymbol}
</Typography>
)}
</ListItemDecorator>}
@@ -103,7 +129,7 @@ export function PagesMenuItem(props: {
{/* Text */}
{!isEditingTitle ? (
<Box onDoubleClick={handleEditBegin} sx={{ flexGrow: 1 }}>
<Box onDoubleClick={() => doubleClickToEdit ? handleEditBegin() : null} sx={{ flexGrow: 1 }}>
{DEBUG_CONVERSATION_IDs ? props.conversationId.slice(0, 10) : title}{assistantTyping && '...'}
</Box>
@@ -113,6 +139,7 @@ export function PagesMenuItem(props: {
)}
{/* // TODO: Commented code */}
{/* Edit */}
{/*<IconButton*/}
{/* variant='plain' color='neutral'*/}
@@ -126,7 +153,7 @@ export function PagesMenuItem(props: {
{/* Delete Arming */}
{!props.isSingle && !deleteArmed && (
<IconButton
variant='outlined' color='neutral'
variant={props.isActive ? 'solid' : 'outlined'} color='neutral'
size='sm' sx={{ opacity: { xs: 1, sm: 0 }, transition: 'opacity 0.3s', ...buttonSx }}
onClick={handleDeleteBegin}>
<DeleteOutlineIcon />
@@ -138,10 +165,11 @@ export function PagesMenuItem(props: {
<IconButton size='sm' variant='solid' color='danger' sx={buttonSx} onClick={handleDeleteConfirm}>
<DeleteOutlineIcon />
</IconButton>
<IconButton size='sm' variant='plain' color='neutral' sx={buttonSx} onClick={handleDeleteCancel}>
<IconButton size='sm' variant='solid' color='neutral' sx={buttonSx} onClick={handleDeleteCancel}>
<CloseIcon />
</IconButton>
</>}
</MenuItem>
);
}
@@ -0,0 +1,98 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { Box, ListItemButton, ListItemDecorator } from '@mui/joy';
import BuildCircleIcon from '@mui/icons-material/BuildCircle';
import SettingsIcon from '@mui/icons-material/Settings';
import { DLLM, DLLMId, DModelSourceId, useModelsStore } from '~/modules/llms/store-llms';
import { AppBarDropdown, DropdownItems } from '~/common/layout/AppBarDropdown';
import { KeyStroke } from '~/common/components/KeyStroke';
import { hideOnMobile } from '~/common/theme';
import { openLayoutLLMOptions, openLayoutModelsSetup } from '~/common/layout/store-applayout';
/**
 * Dropdown listing the available chat models (hidden models are filtered out,
 * except the currently selected one), with a separator whenever consecutive
 * models come from different sources, plus trailing 'Options' and 'Models' actions.
 *
 * @param props.llms all known models, in source order
 * @param props.llmId the currently selected model id, or null
 * @param props.setLlmId called with the newly selected model id
 * @param props.placeholder shown when no model is selected (default 'Models …')
 */
function AppBarLLMDropdown(props: {
  llms: DLLM[],
  llmId: DLLMId | null,
  setLlmId: (llmId: DLLMId | null) => void,
  placeholder?: string,
}) {

  // build model menu items, filtering-out hidden models, and add Source separators
  const llmItems: DropdownItems = {};
  let prevSourceId: DModelSourceId | null = null;
  for (const llm of props.llms) {
    // keep visible models, and a hidden model only when it is the active selection
    if (!llm.hidden || llm.id === props.llmId) {
      // crossing into a new source: emit a separator (but not before the first group)
      if (!prevSourceId || llm.sId !== prevSourceId) {
        if (prevSourceId)
          llmItems[`sep-${llm.id}`] = { type: 'separator', title: llm.sId };
        prevSourceId = llm.sId;
      }
      llmItems[llm.id] = { title: llm.label };
    }
  }

  // event is unused; 'unknown' keeps it type-safe without constraining the caller (was 'any')
  const handleChatLLMChange = (_event: unknown, value: DLLMId | null) => value && props.setLlmId(value);

  const handleOpenLLMOptions = () => props.llmId && openLayoutLLMOptions(props.llmId);

  return (
    <AppBarDropdown
      items={llmItems}
      value={props.llmId} onChange={handleChatLLMChange}
      placeholder={props.placeholder || 'Models …'}
      appendOption={<>

        {props.llmId && (
          <ListItemButton key='menu-opt' onClick={handleOpenLLMOptions}>
            <ListItemDecorator><SettingsIcon color='success' /></ListItemDecorator>
            Options
          </ListItemButton>
        )}

        <ListItemButton key='menu-llms' onClick={openLayoutModelsSetup}>
          <ListItemDecorator><BuildCircleIcon color='success' /></ListItemDecorator>
          <Box sx={{ flexGrow: 1, display: 'flex', justifyContent: 'space-between', gap: 1 }}>
            Models
            <KeyStroke light combo='Ctrl + Shift + M' sx={hideOnMobile} />
          </Box>
        </ListItemButton>

      </>}
    />
  );
}
export function useChatLLMDropdown() {
// external state
const { llms, chatLLMId, setChatLLMId } = useModelsStore(state => ({
llms: state.llms,
chatLLMId: state.chatLLMId,
setChatLLMId: state.setChatLLMId,
}), shallow);
const chatLLMDropdown = React.useMemo(
() => <AppBarLLMDropdown llms={llms} llmId={chatLLMId} setLlmId={setChatLLMId} />,
[llms, chatLLMId, setChatLLMId],
);
return { chatLLMId, chatLLMDropdown };
}
/*export function useTempLLMDropdown(props: { initialLlmId: DLLMId | null }) {
// local state
const [llmId, setLlmId] = React.useState<DLLMId | null>(props.initialLlmId);
// external state
const llms = useModelsStore(state => state.llms, shallow);
const chatLLMDropdown = React.useMemo(
() => <AppBarLLMDropdown llms={llms} llmId={llmId} setLlmId={setLlmId} />,
[llms, llmId, setLlmId],
);
return { llmId, chatLLMDropdown };
}*/
@@ -0,0 +1,81 @@
import * as React from 'react';
import { shallow } from 'zustand/shallow';
import { ListItemButton, ListItemDecorator } from '@mui/joy';
import CallIcon from '@mui/icons-material/Call';
import { APP_CALL_ENABLED } from '../../../call/AppCall';
import { SystemPurposeId, SystemPurposes } from '../../../../data';
import { AppBarDropdown } from '~/common/layout/AppBarDropdown';
import { launchAppCall } from '~/common/routes';
import { useChatStore } from '~/common/state/store-chats';
import { useUIPreferencesStore } from '~/common/state/store-ui';
/**
 * Dropdown to pick the persona (SystemPurpose) of the current conversation,
 * optionally appending a 'Call' action when an onCall handler is provided.
 *
 * @param props.systemPurposeId the selected persona id, or null
 * @param props.setSystemPurposeId called with the newly selected persona id
 * @param props.onCall when set, appends a 'Call' option (enabled only with a persona selected)
 */
function AppBarPersonaDropdown(props: {
  systemPurposeId: SystemPurposeId | null,
  setSystemPurposeId: (systemPurposeId: SystemPurposeId | null) => void,
  onCall?: () => void,
}) {

  // external state
  const { zenMode } = useUIPreferencesStore(state => ({
    zenMode: state.zenMode,
  }), shallow);

  // event is unused; 'unknown' keeps it type-safe without constraining the caller (was 'any')
  const handleSystemPurposeChange = (_event: unknown, value: SystemPurposeId | null) => props.setSystemPurposeId(value);

  // options
  let appendOption: React.JSX.Element | undefined = undefined;
  if (props.onCall) {
    // calling requires a persona to be selected
    const enableCallOption = !!props.systemPurposeId;
    appendOption = (
      <ListItemButton color='primary' disabled={!enableCallOption} key='menu-call-persona' onClick={props.onCall} sx={{ minWidth: 160 }}>
        <ListItemDecorator><CallIcon color={enableCallOption ? 'primary' : 'warning'} /></ListItemDecorator>
        Call&nbsp; {!!props.systemPurposeId && SystemPurposes[props.systemPurposeId]?.symbol}
      </ListItemButton>
    );
  }

  return (
    <AppBarDropdown
      items={SystemPurposes} showSymbols={zenMode !== 'cleaner'}
      value={props.systemPurposeId} onChange={handleSystemPurposeChange}
      appendOption={appendOption}
    />
  );
}
/**
 * Hook exposing the persona dropdown element for a conversation, or null when
 * the conversation has no persona (or does not exist).
 *
 * @param conversationId the conversation whose persona is shown/edited
 */
export function usePersonaIdDropdown(conversationId: string | null) {

  // external state - select the scalar directly: the previous object-per-call
  // selector allocated on every store change and needed 'shallow' to compensate
  const systemPurposeId = useChatStore(state =>
    state.conversations.find(conversation => conversation.id === conversationId)?.systemPurposeId ?? null,
  );

  const personaDropdown = React.useMemo(() => systemPurposeId
      ? <AppBarPersonaDropdown
        systemPurposeId={systemPurposeId}
        setSystemPurposeId={(systemPurposeId) => {
          if (conversationId && systemPurposeId)
            useChatStore.getState().setSystemPurposeId(conversationId, systemPurposeId);
        }}
        onCall={APP_CALL_ENABLED ? () => {
          if (conversationId && systemPurposeId)
            launchAppCall(conversationId, systemPurposeId);
        } : undefined}
      /> : null,
    [conversationId, systemPurposeId],
  );

  return { personaDropdown };
}
@@ -0,0 +1,35 @@
import * as React from 'react';
import { Button, IconButton } from '@mui/joy';
import AddAPhotoIcon from '@mui/icons-material/AddAPhoto';
import { hideOnDesktop, hideOnMobile } from '~/common/theme';
import { CameraCaptureModal } from './CameraCaptureModal';
const showOnDesktop = false; // process.env.NODE_ENV === 'development';
export function CameraCaptureButton(props: { onOCR: (ocrText: string) => void }) {
// state
const [open, setOpen] = React.useState(false);
return <>
{/* The Button */}
<IconButton variant='plain' color='neutral' onClick={() => setOpen(true)} sx={hideOnDesktop}>
<AddAPhotoIcon />
</IconButton>
{/* Also show a button on desktop while in development */}
{showOnDesktop && <Button
fullWidth variant='plain' color='neutral' onClick={() => setOpen(true)} startDecorator={<AddAPhotoIcon />}
sx={{ ...hideOnMobile, justifyContent: 'flex-start' }}>
OCR
</Button>}
{/* The actual capture dialog, which will stream the video */}
{open && <CameraCaptureModal onCloseModal={() => setOpen(false)} onOCR={props.onOCR} />}
</>;
}

Some files were not shown because too many files have changed in this diff Show More