mirror of
https://github.com/enricoros/big-AGI.git
synced 2026-05-10 21:50:14 -07:00
Compare commits
2348 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 843bc5364f | |||
| f069c2e5ab | |||
| a1390b152f | |||
| 4e8c7d46f6 | |||
| 02944d2015 | |||
| 58726f0425 | |||
| 85f796fb1d | |||
| 311a9c2bf2 | |||
| 6768917d44 | |||
| 7beb412738 | |||
| cf724625cc | |||
| f60b2410dd | |||
| bbdc16b06a | |||
| 0fa2d06725 | |||
| 36cdc4b55f | |||
| c2b4a50bfa | |||
| 73f88d4715 | |||
| af919be2ac | |||
| facffbc6c8 | |||
| dd5b7cb8c2 | |||
| 3dc61109d7 | |||
| 9ef84260b0 | |||
| cf2df7d7f9 | |||
| 16a883526b | |||
| 7b66b1a2eb | |||
| a4adce5c79 | |||
| 9e4174df53 | |||
| b5975713a3 | |||
| 0cd04266b7 | |||
| 5cbd162454 | |||
| bea1600358 | |||
| 6a2e201cf5 | |||
| 960551933e | |||
| 8b38b6416d | |||
| fac4c39f48 | |||
| 4c930efbf0 | |||
| 5a2a47cb87 | |||
| 4912a03250 | |||
| 3b13580613 | |||
| 95905113ac | |||
| c6b34bb252 | |||
| e5387c2323 | |||
| d3b4447669 | |||
| d5c5eac9ec | |||
| 49b61495d0 | |||
| e8298e9d30 | |||
| b29681e1f7 | |||
| 1e0b9a2f0c | |||
| 442b8e95b1 | |||
| 27090d9e28 | |||
| c37b4fa076 | |||
| 83161bbe98 | |||
| 4b166120e6 | |||
| 04494ac752 | |||
| 979809ddb1 | |||
| 5d797c3339 | |||
| 2ff74f6b80 | |||
| 06b1195f9a | |||
| c337b70a42 | |||
| 5047354892 | |||
| ce4e405fc6 | |||
| 30c8d66cd1 | |||
| fb5c8aad29 | |||
| 08d221d00f | |||
| af918178f6 | |||
| ed19896e3c | |||
| 47ad135e4b | |||
| 0eff7825c8 | |||
| 5c8baee390 | |||
| 3f71facb49 | |||
| eba42cc8f2 | |||
| 53092cee51 | |||
| 4bf621f128 | |||
| 33505dbb8e | |||
| c81e1f144f | |||
| ee788b967b | |||
| 38ac8733f6 | |||
| 737a20ee06 | |||
| 19f48b8001 | |||
| 3471d6b4f5 | |||
| 2dc7ba72b3 | |||
| e12279dab0 | |||
| 2e0c79cb64 | |||
| aa697edb8c | |||
| c72e3c58dd | |||
| 1de30c8bd5 | |||
| 3a8eea6fb7 | |||
| b7fd0bdba7 | |||
| 58457cac50 | |||
| 0fbacee7dc | |||
| a498f28d14 | |||
| 5b9c6a2d0e | |||
| 4c7f50ab98 | |||
| ef03d33bbf | |||
| 22c9fc56c0 | |||
| c952fd734f | |||
| 310e99af23 | |||
| e78446904a | |||
| 760e9d8279 | |||
| 61a60c5b9f | |||
| 3054e1b88d | |||
| 6f4fabf147 | |||
| b0c791a055 | |||
| 748991249a | |||
| 1aea7122cc | |||
| 9a83b428f1 | |||
| 2cd38bc02b | |||
| e586142190 | |||
| a10d0dcf5d | |||
| 6fdff488a9 | |||
| 8af0d78127 | |||
| 177686a7fc | |||
| 09b6e47036 | |||
| 704187ba3e | |||
| 4ea8a06503 | |||
| 80fcc7d3e3 | |||
| a04c62da6f | |||
| fcb518a050 | |||
| a222626933 | |||
| a3ceade738 | |||
| 51d58223b4 | |||
| d37a603db2 | |||
| ea984f3ddf | |||
| a9d3e3dead | |||
| 5499e57205 | |||
| 6f8ee0247f | |||
| 05ee5cc3d1 | |||
| cb6b569330 | |||
| 53073ff109 | |||
| 26d362d7a6 | |||
| 91d99e1a63 | |||
| a20917c971 | |||
| af9bf9e5b3 | |||
| 46b473b8a0 | |||
| e2b4028223 | |||
| bac2a31782 | |||
| 3d20e6bf91 | |||
| 9337216092 | |||
| cd35d0ca55 | |||
| 6d591b98b8 | |||
| 486381ab9d | |||
| c619b4debb | |||
| 383a3085ec | |||
| 5a3bb3d817 | |||
| d1ba758887 | |||
| 6fef149997 | |||
| aad3b16ff2 | |||
| 819ba14523 | |||
| d3c25ca16a | |||
| 99a65f72ac | |||
| be9080d392 | |||
| f32d991413 | |||
| 94b68ebefa | |||
| 0450eaaceb | |||
| 408c5ce088 | |||
| d936629ead | |||
| 9bd1a66208 | |||
| 1a0c029ee8 | |||
| e7be228703 | |||
| 0ab4dc972f | |||
| 5f1ca8954f | |||
| 3ec1b033ce | |||
| 0caf27af9b | |||
| bd67e14fa4 | |||
| 494c3b542c | |||
| 8e0884eb64 | |||
| 73c4dc4ac8 | |||
| d77274058d | |||
| 0c8460419b | |||
| eabb589390 | |||
| 62f860ae93 | |||
| 605aae873c | |||
| 62e9ee5b05 | |||
| d686f5d143 | |||
| 3922f232ae | |||
| 6735b438d3 | |||
| fb1e30ab32 | |||
| 0ec06edb57 | |||
| 2a52673c56 | |||
| cc20d00d8a | |||
| 3d9201f7dc | |||
| 176732a6c0 | |||
| 39815b3af3 | |||
| bcce517089 | |||
| a4b50d0d97 | |||
| 2a124e7588 | |||
| a85556ab5b | |||
| cef93d6084 | |||
| 207e257778 | |||
| 12203daa22 | |||
| 27f8e9248d | |||
| 51384dc984 | |||
| bc76cbb5ad | |||
| 5a1ca83f6d | |||
| c9f585f808 | |||
| 9f559e1dbf | |||
| e458bca1a7 | |||
| 43d2226019 | |||
| 122bc34701 | |||
| e01358e268 | |||
| 847c84c3e6 | |||
| b11cac4328 | |||
| f617b06109 | |||
| 345ccf3369 | |||
| d111b8af62 | |||
| 8f964c5c49 | |||
| b6f3f4538f | |||
| f6dd30d5d8 | |||
| af8b79f849 | |||
| 0cfccc423b | |||
| f9a5d582d4 | |||
| 684e00d594 | |||
| 3cd2df0b50 | |||
| 02197f4ee6 | |||
| f9049a3fea | |||
| 462bddc271 | |||
| f79000cf39 | |||
| 1d95273f4d | |||
| 6c4579f434 | |||
| 4ef56ade21 | |||
| 7c1369d6e9 | |||
| 533d54b106 | |||
| cce0ca6560 | |||
| e87ce2593c | |||
| 431dc8b667 | |||
| 5caf614bf7 | |||
| ecf9703570 | |||
| e7641393a0 | |||
| 2201f6ff5a | |||
| 557e1ce293 | |||
| cbe9a6b9a5 | |||
| 9bbcb038d4 | |||
| 3602204420 | |||
| 6f485e5589 | |||
| 2f46a3dfaf | |||
| 267845bba3 | |||
| 6f33a8eebf | |||
| b0d2b09a2e | |||
| c699b6b16b | |||
| 1789bac28d | |||
| 60c05f615f | |||
| bd84523671 | |||
| eb21b9c770 | |||
| ff3ac11afb | |||
| 1ef8c3d02b | |||
| 2ebaf6279b | |||
| a5ee40e184 | |||
| b17a97eac7 | |||
| 63908bfaf6 | |||
| 3f9a419a19 | |||
| bae691e33e | |||
| 91539346ee | |||
| 4842ca81b3 | |||
| 9c77a1a4ab | |||
| 4af284be42 | |||
| 6aec68bb3c | |||
| d4e2b0834f | |||
| 24c2702f96 | |||
| 4691fc9bad | |||
| 8c6c60b6f1 | |||
| bc482407fe | |||
| ff05593db8 | |||
| 3d304d9374 | |||
| 1734f0c2f1 | |||
| 1b25e5df85 | |||
| ea8eb32b0b | |||
| 614a1f95de | |||
| d36bc28914 | |||
| deec48d7c1 | |||
| b318ec8d39 | |||
| b4b0e2befc | |||
| 51d3fe13da | |||
| 58220216d3 | |||
| cac75cca42 | |||
| 47f247907f | |||
| 81e04b7322 | |||
| 56a964b700 | |||
| 458341d79f | |||
| d1d212b075 | |||
| 59c9996489 | |||
| bf8221a2f1 | |||
| 787a11a040 | |||
| 05d114be2f | |||
| 3c04a7dbac | |||
| 1673e1148d | |||
| de416b035d | |||
| 08aaf2989d | |||
| a50964060c | |||
| 54b6108719 | |||
| 585e5c254a | |||
| 477808c9bb | |||
| 6c58a2b688 | |||
| c9854bf30f | |||
| cfed4bbd41 | |||
| 2dd6485b0e | |||
| bf1dd5b860 | |||
| 765c373f7d | |||
| 32d752e82b | |||
| 4623e438fa | |||
| 8a44ff396f | |||
| 086d7ecae4 | |||
| d6adebb711 | |||
| 8325fe7b3c | |||
| 7cf83f878b | |||
| 597ba26424 | |||
| 7bccea47f5 | |||
| 5770116779 | |||
| 0679144f69 | |||
| c9fd288b52 | |||
| 9ae449fcfd | |||
| 249f67f796 | |||
| e91c0bb554 | |||
| 5e306d9598 | |||
| 42ebc81cbb | |||
| f624c37db5 | |||
| 22b6f42936 | |||
| 760c66cac8 | |||
| 1d91e9da03 | |||
| 7eac409ec6 | |||
| 128558420c | |||
| ca3e664690 | |||
| 7eb37462d7 | |||
| 31e02c2d39 | |||
| 003a68b9b8 | |||
| f418708389 | |||
| d23a564035 | |||
| 7fe586244c | |||
| f1a597cdc6 | |||
| 9b68c8f58c | |||
| be5b57ea71 | |||
| 425c82f26d | |||
| 942421c1fb | |||
| b1184f6928 | |||
| ffeb6d1b98 | |||
| b2718b56b7 | |||
| 455f834957 | |||
| 8a14c80ff8 | |||
| e268e733c7 | |||
| 8933a8dfb3 | |||
| 9796cc525c | |||
| cdbf9a9190 | |||
| c26792292d | |||
| 4698e0ee03 | |||
| 68afcb2f4b | |||
| e8f61e46e3 | |||
| 317bb2b7c8 | |||
| d1b3c6b468 | |||
| b35eccc984 | |||
| a780c92047 | |||
| 5fc65698ba | |||
| c923b5ec4c | |||
| 609b2b9a7b | |||
| a257278004 | |||
| 273daed634 | |||
| a6862d8c58 | |||
| 323e5b4ea7 | |||
| 89217a5308 | |||
| a45e995d2f | |||
| 8700b4c8ca | |||
| 1f7f5fb488 | |||
| afde8ee864 | |||
| 3884c26b15 | |||
| 24dce7eae9 | |||
| 1db4e9b771 | |||
| b2ed7eae00 | |||
| 3169fd67e8 | |||
| 773ceb1396 | |||
| 8c62ee1720 | |||
| 5fa1f52922 | |||
| d2180c010c | |||
| b73df7b2ce | |||
| 971f737846 | |||
| a393353907 | |||
| 751f609554 | |||
| e8cd5c6552 | |||
| 86e387b270 | |||
| 32f15aa621 | |||
| bfc889a9e5 | |||
| bd907625a8 | |||
| 60004926d7 | |||
| ac751dfd1a | |||
| 6828eee17f | |||
| 19c97f397b | |||
| 0167a8bdd8 | |||
| 93e5044603 | |||
| 024d930677 | |||
| 98873446a8 | |||
| 5318b7a406 | |||
| 4a6c3cbcd2 | |||
| ac0a39c202 | |||
| 88d39345a5 | |||
| 7aa9cb07b2 | |||
| ef30c8d28d | |||
| 2727f690b4 | |||
| 5945c24301 | |||
| 7b6aff1f95 | |||
| cb0fe3aadd | |||
| 4f9d69f9c2 | |||
| c18aeabe06 | |||
| 550742323a | |||
| c71f789a08 | |||
| a9b4b195bf | |||
| 52e8177f42 | |||
| b0743efc48 | |||
| 6dfd652dac | |||
| 3f93cb2e6d | |||
| 8f7b9b7f19 | |||
| abff89ab6b | |||
| d4f03f743a | |||
| c3714f6651 | |||
| 9b4d0ddf2f | |||
| 2c9ac2f549 | |||
| c1292de2a0 | |||
| 21d5e4cd29 | |||
| a9495a3e15 | |||
| bff5b3d765 | |||
| a4ff37eecc | |||
| 460209f486 | |||
| 96c68c86a4 | |||
| 8b152fdff8 | |||
| 25c9a52873 | |||
| 44302d903c | |||
| c7b8668609 | |||
| 7d60df6266 | |||
| b7f898a5e5 | |||
| 04c4dbe4b8 | |||
| 8d04c494df | |||
| a6aadf76f3 | |||
| a685ef97bf | |||
| d46c29689f | |||
| 65ce07395b | |||
| cc1542fe95 | |||
| b70d57d878 | |||
| 5aa857362b | |||
| c92fc34051 | |||
| b01e66f12a | |||
| a88d20784a | |||
| 63486ed6cf | |||
| 3ceec773f2 | |||
| 817fa56ec4 | |||
| 088fb21a90 | |||
| 79c755a469 | |||
| a091d3f011 | |||
| c7c01a5d7c | |||
| cdc0f48973 | |||
| e884f6b962 | |||
| 485a9bea71 | |||
| f3c3b667ca | |||
| 3b0c4f31b6 | |||
| 5e54600766 | |||
| c3e54f69b7 | |||
| c4022d1c9b | |||
| 6e13a78a24 | |||
| c7cacd9727 | |||
| a77110f704 | |||
| 83a6069de5 | |||
| e9a1890e54 | |||
| bf928aa06e | |||
| b2dc50590c | |||
| 229e53ac32 | |||
| 51e8a47615 | |||
| e80b58a412 | |||
| 48ced8b079 | |||
| c07e2aea1e | |||
| f3194aa30e | |||
| cb3e4cd951 | |||
| f5d8d029ea | |||
| 7c946c4126 | |||
| ded4ea0d69 | |||
| c180c549fe | |||
| 1f30f1168f | |||
| 9446f15922 | |||
| e13b2c9cd9 | |||
| e9e14e0292 | |||
| added19656 | |||
| 4fa3c4d479 | |||
| 690738de9a | |||
| cb31d27e68 | |||
| e6658df123 | |||
| 0b7154a14c | |||
| 02c1838de5 | |||
| fc455fceb8 | |||
| 8d40cdd234 | |||
| 40145c669a | |||
| 34d2fc233f | |||
| 670ec0381a | |||
| 2128f255fe | |||
| b717bd9a9a | |||
| 8aab9311f5 | |||
| ff3e16ea67 | |||
| 1de039c315 | |||
| d05e1786d7 | |||
| e34b5a7372 | |||
| a1b3d1b508 | |||
| 1ebccdf420 | |||
| e5f674509c | |||
| 197a4ae5c0 | |||
| 64d2dcf39c | |||
| caf54c736b | |||
| 423c2cce28 | |||
| a1af51efcb | |||
| ffc1bf9c58 | |||
| a54bfdb342 | |||
| 03861d2dbd | |||
| 8c080da6bf | |||
| a8c98056b6 | |||
| 78e663f955 | |||
| 70546a5039 | |||
| 30f78b33cb | |||
| 712e8c1f16 | |||
| 933dfdfb53 | |||
| 9ce86b029f | |||
| 13580cc69d | |||
| a7dee0002d | |||
| c84b2df3fa | |||
| d9471a8684 | |||
| ef630c2272 | |||
| e188c71652 | |||
| 910260c2c8 | |||
| 22752abc38 | |||
| 92bc3a5d64 | |||
| 1383752cc1 | |||
| 66af16fb81 | |||
| fc019d7b46 | |||
| ac4f0fcb12 | |||
| a6c2bc663d | |||
| e62ffa02e9 | |||
| a003600839 | |||
| ea73feb06d | |||
| 3bdf69e1b7 | |||
| 590fe78bd1 | |||
| 76187ba0e7 | |||
| 5eba375f4d | |||
| 8fa6a8251f | |||
| 75fa046f30 | |||
| 08a8cd1430 | |||
| 3afbb78a39 | |||
| fca6ccd816 | |||
| 8d351822c1 | |||
| 7d274a31fe | |||
| e36dde0d25 | |||
| 51cc6e5ae5 | |||
| 28d911c617 | |||
| b1e9fe58fb | |||
| 16ba014ade | |||
| e9d5a20c1a | |||
| 6e0036f9c4 | |||
| d7e189aa1c | |||
| ea2b444fb2 | |||
| cd1efaf26e | |||
| e47f0e5d43 | |||
| 5284d37984 | |||
| 1bf6fa0e4d | |||
| fc294c82f1 | |||
| 7b1dc49dda | |||
| d15ddeea24 | |||
| eaac213859 | |||
| 02c1460351 | |||
| 2fff35b7d9 | |||
| c5b9072bde | |||
| 8a570e912a | |||
| 1dcc40afb8 | |||
| c2092f8035 | |||
| 886c4b411e | |||
| 8888fd40cd | |||
| 31cd01bccf | |||
| c59b221004 | |||
| cb3cc3e74c | |||
| 9e90015fcc | |||
| 95e0517056 | |||
| 2b2f47915f | |||
| 9acd178ce1 | |||
| f381f80184 | |||
| c83be61343 | |||
| f6e49d31ec | |||
| cc0429a362 | |||
| b35901d94c | |||
| c0df1a23f4 | |||
| 495619af2c | |||
| 72dfadf106 | |||
| 5825909e45 | |||
| d3f6d87ee0 | |||
| c4f4c5ddad | |||
| 2921d7ca27 | |||
| 2021cbc988 | |||
| e9e29861b2 | |||
| 8e6da36059 | |||
| 5e1469e12e | |||
| bd7465f8b1 | |||
| 570397a616 | |||
| b3b5f1daef | |||
| 25ec3ae47c | |||
| 5ba5e3da58 | |||
| 9296c14ca0 | |||
| 310b5d3422 | |||
| 1c5967112e | |||
| 49a3d8ee71 | |||
| cf8b61e8d9 | |||
| 967ae5723e | |||
| 03421acf2f | |||
| d43896cc5a | |||
| b283124a2f | |||
| 8c39be01f8 | |||
| fb2bd4ccd8 | |||
| 5b826ffc45 | |||
| 0b2ab365d3 | |||
| 93fc54992c | |||
| 60b7326deb | |||
| d6e6139244 | |||
| 0892911ddc | |||
| 30267ac50c | |||
| ffef0ef31d | |||
| fc047087ce | |||
| 81d4966535 | |||
| 004d63fda1 | |||
| 23e2dbb354 | |||
| 28e9899b97 | |||
| 7441d41550 | |||
| 99e2d5597a | |||
| 74321a44ca | |||
| 7b664affb7 | |||
| c411835f3b | |||
| 7b62c946a5 | |||
| 252e2fcd29 | |||
| aa2731bccc | |||
| 282c439963 | |||
| e99459aba0 | |||
| 4c35cbbe34 | |||
| cab3537ae2 | |||
| c3f211389b | |||
| a4de84a842 | |||
| 2bf1eaaa0f | |||
| 7f5ddd1629 | |||
| ed798fec65 | |||
| 90386f5794 | |||
| 8ada8811bf | |||
| b24badabef | |||
| 4e20cb12cd | |||
| 245da9e6cc | |||
| a800b34aa7 | |||
| 50c3941f42 | |||
| 6e5d5ee36c | |||
| 2c8b713ff3 | |||
| 8162a6706d | |||
| 952f6883fa | |||
| 373f3e3698 | |||
| 17791f631f | |||
| 6987c67cc7 | |||
| 65a59e5d2d | |||
| 05b9a6d412 | |||
| 6608f4f164 | |||
| 93378ad6b0 | |||
| bd4a60203e | |||
| c9e6a62641 | |||
| 68d797fa99 | |||
| 08011d8cf2 | |||
| 2f91bf7f52 | |||
| d5182c05c1 | |||
| 8e0947a833 | |||
| 1d88fc37b0 | |||
| 46bd8e6f4d | |||
| b95b427331 | |||
| 9b574c60eb | |||
| a8b39cc0a4 | |||
| cdbc7dd9b8 | |||
| 08dfec4fcf | |||
| 7f4553225b | |||
| f37e65a91e | |||
| c022f8a68c | |||
| daa7a506a5 | |||
| f3dcf39c15 | |||
| 06cbef16d4 | |||
| ab31bcd3e3 | |||
| 563a99864f | |||
| 39b8abc2c6 | |||
| f3dd837076 | |||
| d6b3a5259d | |||
| 9fea1d5c64 | |||
| 0adb5355c7 | |||
| 01d807b61e | |||
| 285bb812d0 | |||
| d897155d6e | |||
| 7154426279 | |||
| 4526084e4d | |||
| 0c5c786ae3 | |||
| 8a2c4aa356 | |||
| 4cba819edd | |||
| 4db42a2b29 | |||
| fc0ee5b698 | |||
| 2c0c3f1c70 | |||
| 3f3976b73c | |||
| 82d5dcced5 | |||
| f4eaed694a | |||
| 05d9869326 | |||
| 2675934ff8 | |||
| fb6e19d3ea | |||
| f1151d54e1 | |||
| 6a0fa4f9fa | |||
| 20d96fffc8 | |||
| ad6c06308a | |||
| 84ee4171a4 | |||
| 6bc4f8a1e4 | |||
| 8876aa0866 | |||
| 691d2e7228 | |||
| 7a12755de9 | |||
| 8573f56d03 | |||
| 8f3e683321 | |||
| 64867b0b67 | |||
| e42d060e57 | |||
| 2ca9ab8a0c | |||
| fdc0c6b371 | |||
| 8f8779c3cd | |||
| 851877ad8b | |||
| 8df74529ad | |||
| 353f51ebf0 | |||
| 6c5cb08118 | |||
| 54fee92b15 | |||
| 776431c801 | |||
| 9f893ce999 | |||
| 820447670c | |||
| b43c49cd64 | |||
| f9c3558975 | |||
| 1b75250824 | |||
| 3fa3bb5d03 | |||
| ef0ff55f1f | |||
| 66aa8ed177 | |||
| 519286bc69 | |||
| 9882f45fd2 | |||
| 634f6216a0 | |||
| 69574a7d1c | |||
| eddd4b9be8 | |||
| 9a9c31ff53 | |||
| 41ee7a1c85 | |||
| 2f9bbf373c | |||
| d662e10ebb | |||
| cd31092333 | |||
| 1eae7ab6f3 | |||
| ba378f852f | |||
| 5cfd1e557d | |||
| df31d79eaf | |||
| 12d7304325 | |||
| 41424cbdfd | |||
| 05dda519a2 | |||
| 120d39282e | |||
| 8e7d0fd13b | |||
| 3d979fdfbb | |||
| 6ab47ae3cb | |||
| a4977b4924 | |||
| bac9c692b8 | |||
| 6ab15356e1 | |||
| 73cc7121c3 | |||
| 1aeef06f49 | |||
| 3b16bcf01d | |||
| f6351fda41 | |||
| 007e91480d | |||
| 163ef9296e | |||
| fa042f7d68 | |||
| 8a11040dde | |||
| a88971d557 | |||
| 5867e5fcc5 | |||
| 20e587d6d3 | |||
| 6bfa8471cd | |||
| 5c10bce2f4 | |||
| f1663f6668 | |||
| 90c27e0e74 | |||
| b5eac0d907 | |||
| 4eabe2cb3a | |||
| a1c0d30a06 | |||
| 63c9f65040 | |||
| f58a066bff | |||
| 952ea6357a | |||
| 6695973035 | |||
| 3dc28635f4 | |||
| 0bde01a85f | |||
| b9840c2074 | |||
| 8228a76875 | |||
| 46b370a2e3 | |||
| 820e9513ba | |||
| bd71d64db3 | |||
| 9d4baf827c | |||
| d6843d7fcf | |||
| babb1dd962 | |||
| aa32e396a7 | |||
| 1068efcb49 | |||
| 576c7f1458 | |||
| 37c857b055 | |||
| 794dfb44d1 | |||
| 929bb6dc66 | |||
| 28337e31eb | |||
| 09a38c0e4b | |||
| 645b8fb9cd | |||
| 541588948c | |||
| bdd6fcfbbc | |||
| 9e50286c66 | |||
| 418e4649dc | |||
| 4a70f20f4a | |||
| d6eabfcb6d | |||
| d88889d760 | |||
| 85146d8af0 | |||
| 9612572f07 | |||
| 4bb1dddf4d | |||
| b066a86962 | |||
| 6086455782 | |||
| 9020b3cbad | |||
| 5822dea270 | |||
| c445f59664 | |||
| 737e4cb4f9 | |||
| dba7368d01 | |||
| 314c4cd8cc | |||
| 3e46f99e14 | |||
| e0cc552b8d | |||
| 6b5be403af | |||
| 269d5989bc | |||
| edfe3d9b65 | |||
| ffb2c42a26 | |||
| b7de19b020 | |||
| 77cd659b39 | |||
| fbba9d8357 | |||
| f464a9efdf | |||
| 7ec4290582 | |||
| 3f887a1d3a | |||
| ffd76dc587 | |||
| d7f3594a73 | |||
| 32fa5f206b | |||
| 70d2c09e81 | |||
| 17f03806d0 | |||
| b6aba0efa4 | |||
| 65a5e06935 | |||
| f459cb9805 | |||
| f5470aca5d | |||
| c26af97fe7 | |||
| 766ec458a2 | |||
| 48ff78580c | |||
| 396f7524d7 | |||
| da19ef42f5 | |||
| 91abe5aa43 | |||
| 682435321b | |||
| 76f0d60224 | |||
| 628b88ef9f | |||
| 6a792814ce | |||
| 05ce15d677 | |||
| 4a9d0d4f8e | |||
| 16f0552682 | |||
| 9e3819b9c7 | |||
| 233a0d4b35 | |||
| bd95b808ae | |||
| 96132c4585 | |||
| 3edacef572 | |||
| 36889c1695 | |||
| cd2c6c1d8f | |||
| d8c78b1a00 | |||
| 74a22c26cf | |||
| f742eba4c1 | |||
| 36c2812157 | |||
| d353fc4c63 | |||
| 98bd3d6da0 | |||
| cd5ec8d295 | |||
| f91c6456bd | |||
| 67af87968e | |||
| 58ea3e1b35 | |||
| a9435c10e8 | |||
| a86860fe76 | |||
| a3d707f78a | |||
| c502426249 | |||
| 2fb5ffcecf | |||
| 6d995c1253 | |||
| a860c1c490 | |||
| 481d9cc745 | |||
| 7e53a7bc2b | |||
| 4df10e3782 | |||
| 396da65178 | |||
| 87e8faf383 | |||
| 9eb3e6d398 | |||
| 332c4fdf82 | |||
| 4d247344d5 | |||
| 4e4738d4f6 | |||
| dbfa7b0932 | |||
| e90231d58d | |||
| 9bc7d40425 | |||
| d2d5c0621b | |||
| e41d57c914 | |||
| 7c5336cba3 | |||
| d041e4e2bf | |||
| 7fba6255ff | |||
| dc226d9ac0 | |||
| c01a937d7d | |||
| ee6646a66f | |||
| b73aa16001 | |||
| 92c875459a | |||
| 011fbbe834 | |||
| a921ea6fe5 | |||
| 82bcc6d5d5 | |||
| f6d52da034 | |||
| cd3159cacf | |||
| 1af4e18cb3 | |||
| 7b6eb94bf7 | |||
| 8cc6d65dd4 | |||
| 54e5f9a1bc | |||
| fa28305141 | |||
| 1e56b36eae | |||
| e2253cde7f | |||
| 6a4bfc1cf2 | |||
| dfc0d5088d | |||
| 8f154305e9 | |||
| 09b96a01bf | |||
| 1ce0c631b4 | |||
| 61a5b6d5eb | |||
| ca62bad217 | |||
| 13f352a901 | |||
| 775af756fd | |||
| 5c4545877d | |||
| 9c820dcaf1 | |||
| 49f0bf4802 | |||
| fbb2f106f0 | |||
| cb46d3d536 | |||
| 84289c4ade | |||
| b35ffd9983 | |||
| 8197fed036 | |||
| f6c40cdce6 | |||
| b8cca72cf1 | |||
| d20cafa22b | |||
| 421a5ae681 | |||
| 49157b9efa | |||
| c11684a9cf | |||
| 12aa812b37 | |||
| 3667425c61 | |||
| fd0ab93744 | |||
| a0b549855f | |||
| c70c89c2e8 | |||
| 32c5c00d55 | |||
| 013d0e0217 | |||
| f0bf866654 | |||
| 2c14cb1113 | |||
| 15abecfbb6 | |||
| 827d64d49a | |||
| 01c45b2286 | |||
| d3e5c196f9 | |||
| 71978b94f2 | |||
| 79da87d823 | |||
| 1c19f36783 | |||
| a4d4e351e5 | |||
| 45ef2afccb | |||
| 9ef5b61722 | |||
| ff008d1034 | |||
| 3cd38f471e | |||
| 1581d46be7 | |||
| 32571e15eb | |||
| d69adaa6af | |||
| 246968098a | |||
| 861c4ef370 | |||
| bfe94e98f2 | |||
| 9152318ef6 | |||
| 302694bdad | |||
| 14602a1411 | |||
| 044baa5fc2 | |||
| 3fa09194a7 | |||
| d3aa10f9d1 | |||
| e2b2d5974f | |||
| d99668aa40 | |||
| 5f8d5678fa | |||
| 14f245df2b | |||
| f104fb64fd | |||
| 3c2d7a636a | |||
| 31b215e58b | |||
| 53ae177396 | |||
| 3e1bb3bb3d | |||
| eac150f590 | |||
| 5466b8a265 | |||
| c3d10c355f | |||
| d96a8c14b9 | |||
| be94f31a85 | |||
| f7ce349125 | |||
| a4516b5fa6 | |||
| 7c1f30c3c7 | |||
| df67be4b03 | |||
| 578bb93d8b | |||
| b4c5a24864 | |||
| c4a38a6cf6 | |||
| e58f6cc48e | |||
| 8a0c4747c7 | |||
| 8bef4b9aae | |||
| 66382ed980 | |||
| 8984b65a51 | |||
| efea6dafbd | |||
| 6d4d05e8f7 | |||
| 560a07b4fe | |||
| fbaff3bde3 | |||
| 2a01f929f1 | |||
| d1d0c32a92 | |||
| 3a513e2a4d | |||
| 9b32c4b8c5 | |||
| 64542af5af | |||
| 1db35feeca | |||
| 7392063e25 | |||
| e6745b16f6 | |||
| be09b452f0 | |||
| 42588444a5 | |||
| dc48bd1222 | |||
| b59eb6cbfb | |||
| a75a31ff04 | |||
| a0f97e9cd8 | |||
| fe6e7245de | |||
| a46a9bf76c | |||
| 925e500dc2 | |||
| 22f0a70272 | |||
| 220cc60f7d | |||
| 3964fca4b2 | |||
| 8fdbb21300 | |||
| c42c9545d2 | |||
| 0de37e337b | |||
| 3ecf7f6016 | |||
| da7a62945c | |||
| c876390e27 | |||
| 9bbc2a2e00 | |||
| 2b18cbc3b9 | |||
| 388391ddae | |||
| 3e4e6b2f4b | |||
| e6a65bdf8e | |||
| 0e09cf3d84 | |||
| 5634aa0cac | |||
| 07916be684 | |||
| 8d20b4675b | |||
| d906669ea4 | |||
| 5d7b00f0dc | |||
| 740d76c15c | |||
| ca4d21d4b8 | |||
| e4defc1baf | |||
| 9ea859081d | |||
| 87d8320b31 | |||
| 84aea90860 | |||
| 95f35cb5cf | |||
| c79ba097c0 | |||
| 8ea1f02c86 | |||
| 674c9c8c25 | |||
| 98a3e7e185 | |||
| ee00c53ada | |||
| 0553f64fe8 | |||
| ff06f6f04c | |||
| 3f45617e06 | |||
| 9d93c8c55a | |||
| 73eaf740db | |||
| 48426d5022 | |||
| c79237b419 | |||
| b0abaf4d9e | |||
| ec92a8d31a | |||
| a4600a4d1d | |||
| ad6a465ce7 | |||
| 0820bb5af6 | |||
| 73f8488d22 | |||
| 2b3c1c38f3 | |||
| 59f379f46b | |||
| 2bc6ecbe4c | |||
| 8274a34841 | |||
| 6e7197caa3 | |||
| 7c78d48b6c | |||
| b149eb7fa2 | |||
| ba79a3c42c | |||
| 4445ac295f | |||
| 09c2a8b072 | |||
| 92e371837d | |||
| 7fad41dc8a | |||
| 0be8ac7e09 | |||
| de6e8a047c | |||
| 92955f92bf | |||
| 5327866836 | |||
| 54b8836faa | |||
| eb39db9974 | |||
| 087e6e2eaf | |||
| 295d91b310 | |||
| f75bcb78d7 | |||
| ffb32d8720 | |||
| 879458d692 | |||
| 96eece3a3e | |||
| dc75136131 | |||
| 57c43b3c4e | |||
| 4c5b7677e6 | |||
| 43890150e5 | |||
| bc86214c5e | |||
| ef1f412019 | |||
| 1249efb53b | |||
| 8bc81e45ce | |||
| 810f316185 | |||
| 5b49e801d1 | |||
| 3269e10da9 | |||
| 53a57fd7ff | |||
| dbbf25c3af | |||
| a2ff00f53b | |||
| 4904383838 | |||
| 8221444308 | |||
| 7cd94b3163 | |||
| 52cdf7da4e | |||
| 6ff010ae0e | |||
| 6d81150975 | |||
| 0fdcc4c64d | |||
| f272c9cb12 | |||
| 5354f83736 | |||
| f4b2f36ac0 | |||
| 5fca834c20 | |||
| fff48335ae | |||
| f39a1825cf | |||
| c1b10405a5 | |||
| 37ba583cf2 | |||
| 4beb7de83f | |||
| cb8202e327 | |||
| 90c90f78b6 | |||
| e700c27256 | |||
| 7372287b5c | |||
| d059948f62 | |||
| 1cb6491d17 | |||
| 3a6e8a5f27 | |||
| c0cd820880 | |||
| 7b5655dd6d | |||
| 0f4c108614 | |||
| 86f4cc66d1 | |||
| ca38e7f160 | |||
| 99bd54ca79 | |||
| 9a3ef83078 | |||
| c1d3c5d350 | |||
| a36e202c80 | |||
| b713b65a35 | |||
| 925445c729 | |||
| ce8140ce22 | |||
| d2f60e51c7 | |||
| c66885d25c | |||
| 8d4ca7b547 | |||
| 280b32b3a9 | |||
| 522bd890c1 | |||
| 88e1f51099 | |||
| 8774b222d9 | |||
| b9ef1d608c | |||
| a0d25a1d48 | |||
| 92cd9e5930 | |||
| 3099b0d0ec | |||
| 4a5ce94d29 | |||
| b47a1fd562 | |||
| 10bef4f75c | |||
| 41c571caf5 | |||
| a21b049437 | |||
| f06fbec8df | |||
| 24b6b4e1a9 | |||
| df8f9b3e3a | |||
| 85a55bcc4c | |||
| facb2e3f2b | |||
| f6e79510c9 | |||
| 528055929a | |||
| 7a1774a2ba | |||
| 66749ded0a | |||
| 6f74dc6c72 | |||
| b8d27346e0 | |||
| e1e73cd260 | |||
| a1bf15c316 | |||
| e69bf34ed6 | |||
| fa1a977870 | |||
| 7ed4ccb66c | |||
| 76a90ede24 | |||
| 89e8c24f46 | |||
| 430c7602d4 | |||
| 51b9fbac0f | |||
| 63eba761c5 | |||
| e80fb7aa73 | |||
| 8b2b98fc10 | |||
| c9712c72a0 | |||
| d0ad4095c0 | |||
| 1c00286a70 | |||
| 8687c6b08b | |||
| 7bdf467833 | |||
| 39736fbd27 | |||
| f5e34e8096 | |||
| b2246ed922 | |||
| a499e8463c | |||
| 708ae291cc | |||
| 0d4db0322b | |||
| 39ae2e47f9 | |||
| 25159669df | |||
| 4e24281e18 | |||
| d9bdeeb6b3 | |||
| b2847e7026 | |||
| 3f6bd90f64 | |||
| 6b5984deac | |||
| 2dfaec9216 | |||
| ddbc5e65e8 | |||
| 5dae51d2a1 | |||
| 75215955be | |||
| 79ee764a9f | |||
| dce27e89a1 | |||
| 448df4baf8 | |||
| dafd09084a | |||
| cae7d06256 | |||
| a27eae46f6 | |||
| 9f067c07f0 | |||
| 1f0be73695 | |||
| ce6d42dcdd | |||
| 439740adba | |||
| cff36c0c31 | |||
| 7c9edaf186 | |||
| bbc736d72a | |||
| 47439b9907 | |||
| c3274d66c9 | |||
| d4836914dd | |||
| 4a44393878 | |||
| 123ebc0f26 | |||
| 0a133a764b | |||
| c1d807a516 | |||
| aad715f7e1 | |||
| f1ec94111a | |||
| 07fcecc5b5 | |||
| c56328009e | |||
| f8cbb6faa2 | |||
| c07eb4014f | |||
| 94c1b35cee | |||
| 2277fd0880 | |||
| a2313186e4 | |||
| 3351d61ca7 | |||
| 905d438075 | |||
| ba3290f4e1 | |||
| a828ea45aa | |||
| 7c484ea5d8 | |||
| 5b68608d5b | |||
| 08ef5396f3 | |||
| 662ef5ae4f | |||
| 23a1e9b335 | |||
| b79f8a1508 | |||
| a793fa041e | |||
| c5ef92f1f7 | |||
| 7ccf22c2f4 | |||
| 67df71ab45 | |||
| 0636ca76ea | |||
| 2f2e4e36be | |||
| 913c821eae | |||
| 43f2bacf58 | |||
| ae0cf1a89e | |||
| 0b2d037385 | |||
| cd5cef1c25 | |||
| 8c5f70a339 | |||
| f5ecfd1d74 | |||
| e0de908741 | |||
| cd2ccff0d7 | |||
| 3df6c62dce | |||
| 463af67d17 | |||
| 80f345b179 | |||
| bf212ca83f | |||
| 0185712cbf | |||
| a5199a23d9 | |||
| 011c382360 | |||
| 5c9ce84249 | |||
| 9e89ba9b10 | |||
| cb8cefb0ea | |||
| 7607b8fec5 | |||
| 05a96c5aca | |||
| 762b0c11ff | |||
| c903f9bc5c | |||
| c190ae89ce | |||
| 1b6b491eee | |||
| 4e9c0ba489 | |||
| 13fcb932d1 | |||
| f9f2c3d2b2 | |||
| bdab75c336 | |||
| 5996934f60 | |||
| 2f8659fc38 | |||
| 1e1206ab7e | |||
| 4682afc985 | |||
| 8722e1be6c | |||
| fbd6fd3e7c | |||
| cfba3ce834 | |||
| a4ad1e8295 | |||
| aa441b0656 | |||
| 39a7e30880 | |||
| 74b69f9ea4 | |||
| 3094540b93 | |||
| 513500b16e | |||
| 51c41473a5 | |||
| e79df4a347 | |||
| 53a4a66e9e | |||
| aaf2de278f | |||
| d2e8bad75f | |||
| 98bcbba7ca | |||
| 61258163e2 | |||
| 80b393ca14 | |||
| b57c292581 | |||
| 044e2f9b57 | |||
| b14e9c91c6 | |||
| 58fe41edc3 | |||
| 73a089e177 | |||
| ada9e07c2f | |||
| 3b9e42948e | |||
| 2e822b1eeb | |||
| 8f67c3e398 | |||
| 82289c0564 | |||
| 16e5e08d21 | |||
| 62671ae04f | |||
| 266a5c6408 | |||
| e9264c782f | |||
| 37eb046c10 | |||
| 6e75f7dbee | |||
| e420fa9661 | |||
| 505649e360 | |||
| 3d93c856ba | |||
| 9fe5697fd4 | |||
| 7fde3473ea | |||
| 56a2d68c71 | |||
| 3d140604f8 | |||
| 0a2167fa6a | |||
| 79e95379ec | |||
| 3f740f3800 | |||
| a4a0ecc0e5 | |||
| 686ad2ed7b | |||
| d8db79b4e5 | |||
| d33449f4af | |||
| 7e6a12bddf | |||
| ec80413be0 | |||
| c2af65facd | |||
| 46266ac825 | |||
| 91dc25e1c6 | |||
| 7f9dafd749 | |||
| 989d0e5741 | |||
| 3277c009fa | |||
| 85f1fe088d | |||
| 3c554c92d6 | |||
| f95d071197 | |||
| da887d58db | |||
| 5273a3c84f | |||
| f51712867f | |||
| ecac1dffec | |||
| 28817bee72 | |||
| 3fd41329ea | |||
| f734f0b5f7 | |||
| 3b34a8b96d | |||
| 74e6ee4b2d | |||
| 61929527a3 | |||
| a3e216c956 | |||
| d4203f728e | |||
| 616376f4ac | |||
| 1a309c9bdf | |||
| 253fc3b213 | |||
| a79fd0a10c | |||
| 04df3dcba8 | |||
| 00fbf77dbd | |||
| 9a34c1e376 | |||
| e248104d4b | |||
| c10558f230 | |||
| 5be41b8199 | |||
| d6b6e30cf5 | |||
| 825ca7ba87 | |||
| 5c2a8a4996 | |||
| 58aef2a97d | |||
| e983f9d8a9 | |||
| 7e95dcc1cb | |||
| 69a21a82ac | |||
| 95d2fee63d | |||
| 6f22a71555 | |||
| a30409fcfc | |||
| 217346f572 | |||
| 4472671470 | |||
| b1a026bdd1 | |||
| 312fae5f6d | |||
| 46235aa28a | |||
| 6fe0e297eb | |||
| 3b3214ef5e | |||
| ebc28ed8a4 | |||
| 54c23a9907 | |||
| 8fce40be80 | |||
| 5c5a213c4c | |||
| 3b730680cb | |||
| d7765ae578 | |||
| ab21d5c308 | |||
| 392319a300 | |||
| bee5f950b9 | |||
| 3fc1e3f643 | |||
| ee10b39866 | |||
| 867265fd31 | |||
| 68109a4a37 | |||
| 874401ef8c | |||
| 303a3f2c7d | |||
| 915f338378 | |||
| fd2e1fe34b | |||
| e2e7453431 | |||
| b07573ec4f | |||
| 66c279e895 | |||
| 06e879b884 | |||
| f205dafe4d | |||
| fec18d7039 | |||
| 5ef09455da | |||
| c799869e3b | |||
| 448f5a85d0 | |||
| 9909a537c2 | |||
| 9772a18bf4 | |||
| 0ac80b26bd | |||
| 1f5e25a57b | |||
| 8e5f7ef977 | |||
| ed21c8affd | |||
| 023228c2c5 | |||
| 68f4118bde | |||
| 0edc839857 | |||
| ee6f560388 | |||
| c100355b7b | |||
| 4f7402c343 | |||
| 5ac73e9599 | |||
| c1e46e00d9 | |||
| 7a05f0f9ab | |||
| afcd511893 | |||
| 8f42900e8e | |||
| bcc12876d7 | |||
| e1c2f85bda | |||
| 6989a807d6 | |||
| d92739c793 | |||
| 2fcb80b932 | |||
| 03b0e88ef7 | |||
| a5a73ddbef | |||
| eb57147ed3 | |||
| 0cf12d2a8f | |||
| 06d332e785 | |||
| a75eaaec69 | |||
| 513ee36027 | |||
| 975f425ae4 | |||
| c310ca9c5c | |||
| 21a6f0aa50 | |||
| c2c3fdf7d4 | |||
| ce0880bf5b | |||
| eed099bfed | |||
| 08b37efb55 | |||
| 8443445ed0 | |||
| d011599060 | |||
| 0dd043cb6a | |||
| 1ebd1d9e15 | |||
| 202aef8916 | |||
| 30acf51410 | |||
| d4b01398c7 | |||
| 4dde3d0fe7 | |||
| 8aa6fd7c8e | |||
| e2e6e6d641 | |||
| 20aa91b9a6 | |||
| 7bfd82ae4f | |||
| c5101ee4cf | |||
| 378f390941 | |||
| 3bc8360959 | |||
| af124e7cd9 | |||
| 71633ff441 | |||
| daf2e58c99 | |||
| 3818af2156 | |||
| dd0fd2edcf | |||
| 07304c6d0e | |||
| 4db1708fae | |||
| 0952926265 | |||
| a695484921 | |||
| 55c3eb4cf0 | |||
| 8e42356956 | |||
| 255ef64b37 | |||
| e3f1307b30 | |||
| 93beda7fff | |||
| 91251985db | |||
| b41cb74f45 | |||
| 303b90d1ee | |||
| 86f80a320d | |||
| d4e158a8b6 | |||
| f58eae623a | |||
| bc5493ed50 | |||
| 4e51f26ef2 | |||
| 04226eb686 | |||
| f9743fd04b | |||
| b9746ef100 | |||
| 92e56c3c84 | |||
| aa134d7f21 | |||
| f2bea1867c | |||
| a55acf5146 | |||
| 869b9b994d | |||
| 93fca32e9a | |||
| 1d7dfd53f4 | |||
| a68f35d909 | |||
| f800639e1a | |||
| ed45a01267 | |||
| b0634e272d | |||
| d90f012140 | |||
| 41363a534f | |||
| 44d53e581b | |||
| 5aeb034945 | |||
| 13a95db7a4 | |||
| 1705461e80 | |||
| 3fa7d61c7e | |||
| 0b8268fea3 | |||
| 22ffc74371 | |||
| 31edb6a881 | |||
| b8245095c9 | |||
| ed26e57352 | |||
| ea8a757b19 | |||
| b5d1e5f6c9 | |||
| 142a4495a6 | |||
| 7a9a21c02e | |||
| a60c84987d | |||
| 3150900e13 | |||
| 85e4946ff5 | |||
| dbf6ad70f5 | |||
| bf7a16559b | |||
| fa4c78c9c2 | |||
| 9d99f46f3c | |||
| 5dc86c5649 | |||
| fa82083670 | |||
| fa3bff3e6d | |||
| 9d68b26868 | |||
| 47a0214105 | |||
| 82ea6fef3d | |||
| eec61adad1 | |||
| ada9fb10e8 | |||
| c2bd9c3310 | |||
| ba93062638 | |||
| 61366b7096 | |||
| e1dd9c0117 | |||
| 407d3d8db4 | |||
| 5a2fa26dad | |||
| fd22faeef8 | |||
| 76c5ef46d0 | |||
| 1e725984cd | |||
| 12c6b6f59b | |||
| 4e1d7f0b82 | |||
| 0635edbfff | |||
| 07e2ab07ab | |||
| 134d82c673 | |||
| 947f9c8355 | |||
| 5e6575a63d | |||
| bef61a8547 | |||
| 7eb8c08e6e | |||
| aed5272b6c | |||
| 13e0779ced | |||
| 702006f6ea | |||
| b4fad03c46 | |||
| 77e43a4a7e | |||
| cfd21e7abb | |||
| db490bf4fb | |||
| bc6f3401f8 | |||
| e5c0079f0e | |||
| a68d80f7aa | |||
| 872c9e9e3b | |||
| 0e51924e5e | |||
| c9460a07ef | |||
| f8d80730fe | |||
| c2e0cd844b | |||
| 5493896392 | |||
| 1ad3cb460e | |||
| 721e23de68 | |||
| 97b9f5a232 | |||
| 1a9f5a4fda | |||
| b2153a14d8 | |||
| 8d6499a91c | |||
| 6d6fbac01f | |||
| d576e2387e | |||
| 4e255a355f | |||
| 94401f95d7 | |||
| 739f613881 | |||
| 5dc24557e6 | |||
| 65842a976e | |||
| c6dfc66a14 | |||
| bc54967720 | |||
| 1112aa292f | |||
| 31bb06293d | |||
| 0139f0421b | |||
| 4f63e98e7f | |||
| c04e147ca7 | |||
| b88feeac2c | |||
| 0902c35e13 | |||
| e02ee99d26 | |||
| 313313db1f | |||
| b7bdae00f8 | |||
| b699a665a1 | |||
| b28a282aba | |||
| a30d2ca025 | |||
| f7f3929342 | |||
| 35abb6e69d | |||
| b759be62ea | |||
| 9a2db4a6e9 | |||
| 5bff478d06 | |||
| 3a7402b03d | |||
| d076e73de6 | |||
| 1d98a994d0 | |||
| 3957fae782 | |||
| 72c07faedf | |||
| be3b6ee394 | |||
| 61910827e6 | |||
| 6582beaf2a | |||
| 840223af6f | |||
| a084b71682 | |||
| 1dbe30af3d | |||
| e57fbb88bf | |||
| a5002b4c12 | |||
| c139884671 | |||
| 2b97b0e0cf | |||
| 2e4176d41c | |||
| 40d62b6f2d | |||
| 43d7e19dfb | |||
| ef06071ab1 | |||
| 18578a63ec | |||
| aab0beba93 | |||
| 7d32de50a6 | |||
| 57d91e330e | |||
| a81da26452 | |||
| 803f6bbdea | |||
| 10a3669551 | |||
| d910fbcae1 | |||
| e2a6ee94b0 | |||
| 055a2134e0 | |||
| 30310a51ff | |||
| be648017f5 | |||
| e737272a39 | |||
| d7a5c50ce3 | |||
| a51d5c315f | |||
| 8c1af95b0e | |||
| c4d61fdd21 | |||
| 6301f1f6b5 | |||
| edbe2e55bc | |||
| 604cf43627 | |||
| e124669545 | |||
| 9ee7c6dddd | |||
| 5136261c8e | |||
| c9ebb44442 | |||
| 95d9976a2c | |||
| 1d177c960f | |||
| 81a34ca96c | |||
| 9749b44dbb | |||
| 6dfe2a92a1 | |||
| 44646001c1 | |||
| 088e67c235 | |||
| 0d41c92c01 | |||
| e966674d39 | |||
| ff74a8ed9c | |||
| 64fd32de9a | |||
| 6584bb4cd1 | |||
| a9065d1a1e | |||
| a22832f741 | |||
| 663a33a895 | |||
| 5f7508633b | |||
| 6a99f65979 | |||
| a983f25fb9 | |||
| 7119d92321 | |||
| 5f1a52d620 | |||
| 42d58ed202 | |||
| 20f0dd5b80 | |||
| d95e8b70b9 | |||
| 69d7f3f195 | |||
| 61b2bedf5e | |||
| ab217596d8 | |||
| c6d3bbd7b9 | |||
| ce7699c06b | |||
| ca3df18d99 | |||
| 0f96c9f825 | |||
| d6e41c1026 | |||
| bc1d0ef6e9 | |||
| 352d1425ca | |||
| f92941f4a2 | |||
| 4b6f6728fa | |||
| d12771d408 | |||
| 7a679dd7d8 | |||
| 72ae27e419 | |||
| b5722ac9f5 | |||
| 60b7a20b71 | |||
| 33ea55ec9d | |||
| 294b1c1ea3 | |||
| 75e19914cc | |||
| e24bd418b5 | |||
| 66c1af8333 | |||
| a0917b4533 | |||
| 74731d512f | |||
| e0e8a94031 | |||
| 67306ec0f7 | |||
| a42cfe26e7 | |||
| 9c63614367 | |||
| ccfc129e44 | |||
| ad3b500781 | |||
| 2894c07049 | |||
| e189d3e174 | |||
| b9ead56ec4 | |||
| 48c4ac18ab | |||
| 48d1bc7635 | |||
| 9112cef5f3 | |||
| ff0183b7e6 | |||
| 14ef63b4d2 | |||
| eac6228dde | |||
| 0d28934f37 | |||
| 57b694a93d | |||
| c2a1fcc942 | |||
| 3fdd2fb04d | |||
| 8d9c8f681e | |||
| bdfd8fb526 | |||
| 0d88217a78 | |||
| b15e27e1d3 | |||
| 7db5d84e4d | |||
| c0a37d618a | |||
| b20db5ff50 | |||
| 43e8d5639c | |||
| 92ce0af012 | |||
| fe76cfdd8b | |||
| 738ff07e6a | |||
| e11d3d7407 | |||
| 70dd92f54d | |||
| 75381a2798 | |||
| 29bddbc6ed | |||
| 2ca9baf6ba | |||
| a796a98cd4 | |||
| 02749c290c | |||
| ec13a9664c | |||
| a6d6f69d4e | |||
| 6d4fb2b444 | |||
| a9e3da8b21 | |||
| 56adb0aa88 | |||
| aa9dc1a06f | |||
| e503335026 | |||
| 350aac79b1 | |||
| 56a36987c6 | |||
| 5ef00eb42a | |||
| 6aa52cf5e6 | |||
| 6118d0f940 | |||
| bab7afdfba | |||
| 865cf0652b | |||
| 7126a952b9 | |||
| 66eb325779 | |||
| a55411c150 | |||
| 77eb6fa97c | |||
| 440b13fa48 | |||
| 02f30524a3 | |||
| 26ad736aa1 | |||
| 040588d708 | |||
| 5a635bb532 | |||
| e8014fccb3 | |||
| 85586fdf58 | |||
| d819e03c79 | |||
| 0f2def82c1 | |||
| 634ae0e213 | |||
| d0d2051edf | |||
| a2e0ddcf81 | |||
| 82be58b54a | |||
| ba18891696 | |||
| f2df042c0a | |||
| 3547119577 | |||
| 66519ac33e | |||
| a8ae3aa124 | |||
| 28a00bfb29 | |||
| 8c46abbac3 | |||
| ae7376a708 | |||
| b2d0844959 | |||
| a88ca25708 | |||
| 61acf9e56b | |||
| 8b6ffca2cb | |||
| 56e3aa3835 | |||
| 11bbe22d80 | |||
| 777a6bb29d | |||
| 16b91ba63a | |||
| 0e0ed3d657 | |||
| 910cbb542e | |||
| 17cd63d445 | |||
| 89a4283868 | |||
| 195e167414 | |||
| d5a4fadebd | |||
| 21178f4974 | |||
| 80b22e6c2d | |||
| 9e02e0aabd | |||
| 3572b94e8f | |||
| c53fbe8c73 | |||
| 16450a347e | |||
| 9a12164082 | |||
| 3ba3ab41d2 | |||
| 1ed31199ae | |||
| fc9caa79f8 | |||
| 0c19d011cb | |||
| b4eddbbc30 | |||
| f522f5bbc6 | |||
| fde08e6793 | |||
| 4e8e7fa6cf | |||
| a79806e86c | |||
| 8c0868418c | |||
| b90919a4df | |||
| 7f2842f9ba | |||
| a7f0771ca9 | |||
| 47315ed4a5 | |||
| d4df2f989b | |||
| 3c369e11ae | |||
| 16ba957f3a | |||
| 88a8b10b95 | |||
| 3063c9950c | |||
| fdc5845d90 | |||
| 6f66e2a2bb | |||
| a57a41e676 | |||
| 5209d2c416 | |||
| 64138cdcd2 | |||
| 80a5db3e91 | |||
| fb7dfdf341 | |||
| b0f0e35170 | |||
| cb0cc8b370 | |||
| 9e8a8cb7db | |||
| c4959776dc | |||
| d50d489de7 | |||
| 9472de0246 | |||
| 5d2fff8e53 | |||
| d68ca9df1e | |||
| 04a437e9a6 | |||
| f7fb8c780b | |||
| a7ebf8a014 | |||
| e950ced1a1 | |||
| 15d5a9cb58 | |||
| 5c9747d8eb | |||
| d308739643 | |||
| 6c5db40bd0 | |||
| f3212291dd | |||
| 140a829291 | |||
| e30d938425 | |||
| 521b6a414f | |||
| a20d0f970e | |||
| 66c1307112 | |||
| 241a25599f | |||
| 3f610bf122 | |||
| 858cc41a89 | |||
| 244917faf9 | |||
| 08964188ea | |||
| 07c96661e7 | |||
| 048940d383 | |||
| 85ad0e1e86 | |||
| f2f06f5d44 | |||
| b8e0ef5340 | |||
| 07608b3fe3 | |||
| e808509331 | |||
| 2d9a4fccfa | |||
| 49cf263408 | |||
| 69e7dc7481 | |||
| a76fe34a64 | |||
| 7a3882fe28 | |||
| 6954b79178 | |||
| 2032d045ca | |||
| e4ee0c3ab6 | |||
| 3218d00850 | |||
| 7f0b4f79ff | |||
| 945ba0a34c | |||
| 676797f0ac | |||
| 8e89d5dbfc | |||
| 150eb4e9e2 | |||
| 14766629a2 | |||
| f475c8ae6c | |||
| 11badbf22c | |||
| aedc051523 | |||
| b5336eb63c | |||
| 0c85ddd82d | |||
| f0386a21c6 | |||
| a7518937f0 | |||
| 87b012f0be | |||
| 6a7a34c0b0 | |||
| 9ce29138d2 | |||
| 95a6e09158 | |||
| 0962b79149 | |||
| 51ba5304a6 | |||
| d7137d1311 | |||
| d8babc91d5 | |||
| 3649a79f07 | |||
| 3992acd9d4 | |||
| b6f130e00b | |||
| 63c475e24f | |||
| d8d4f4e8f3 | |||
| e4a2bf8b71 | |||
| 19a1110bcf | |||
| 1997599b33 | |||
| 467f24022b | |||
| 3147f9b087 | |||
| 79e5931a45 | |||
| 1c9cefb61b | |||
| 318bf80ad6 | |||
| bc3a757764 | |||
| 31459c0121 | |||
| 87bd9ff08b | |||
| 972284ec20 | |||
| 8aaec8e13b | |||
| f668fb85b2 | |||
| a5e4ab8f9e | |||
| a58db6c2bf | |||
| cc8c5a4b7c | |||
| 7027474942 | |||
| 41dd1e4b81 | |||
| dd24b33cce | |||
| a703edab58 | |||
| 57346617a5 | |||
| 3a8bfb0bb1 | |||
| cd4e6f0f5e | |||
| 1a3037b756 | |||
| ef32834e10 | |||
| a684a0fd3b | |||
| 44505d0e44 | |||
| d1589cf665 | |||
| 4a7b4fbabf | |||
| ac1b3d7938 | |||
| 1686e662b4 | |||
| 67c97e7bd2 | |||
| 805c925e0d | |||
| 8ffba9cdb5 | |||
| 5d5290f69d | |||
| 563403a7f8 | |||
| 5cbf013a8e | |||
| 8bee761bb4 | |||
| 8bc482abe9 | |||
| 51fd83cd7f | |||
| a0811c6d25 | |||
| 77e8497100 | |||
| b46aaa388b | |||
| eeeba2febe | |||
| 75921d08d1 | |||
| 7764d18a8b | |||
| 797293ad8d | |||
| 7c7f1bcd5f | |||
| 50a430b353 | |||
| 5b562c6671 | |||
| cb0bf2d2e7 | |||
| 0b042bb2b5 | |||
| b91fbeb978 | |||
| d0b84e7ca3 | |||
| 0edeeb54b4 | |||
| e1b2a28f7d | |||
| 347c7be899 | |||
| c71d88d3bf | |||
| 0d4cbe462f | |||
| a05110cd93 | |||
| 8f6ebe8301 | |||
| 818775a12b | |||
| 80b60cdaa8 | |||
| 69118df912 | |||
| ff65382e06 | |||
| 420b8c49c6 | |||
| 0f9c02e249 | |||
| 4890a90641 | |||
| 653f0991e0 | |||
| a40efb4780 | |||
| feea74268d | |||
| 631582ccbb | |||
| 4f048a9907 | |||
| a8752ccde0 | |||
| feafad0d77 | |||
| 6faa468ed3 | |||
| ab55804039 | |||
| 05d9bb3bab | |||
| 39ae8cd250 | |||
| 5d34e3eb88 | |||
| ee20441307 | |||
| b12920ae67 | |||
| f9ab682559 | |||
| d042f7b396 | |||
| d8e4c8a78c | |||
| 1e2dcce664 | |||
| ab4af50daf | |||
| 26c83764d9 | |||
| 85ac64dea1 | |||
| 7305c9d354 | |||
| b99f8e6b14 | |||
| eb7e2ab92a | |||
| f7edbfb5af | |||
| 7c918e4735 | |||
| 7d4d1e13a0 | |||
| dbe58e30c4 | |||
| d2aa97b889 | |||
| 0eac3e3aca | |||
| 75d61d0604 | |||
| 2f7b053f96 | |||
| 5ab5a85b73 | |||
| 1d7da8fa8c | |||
| 727b2edf74 | |||
| 6caff0ca59 | |||
| b41f930d08 | |||
| 5a70d926cb | |||
| dbfe7b734c | |||
| 8acf5df3aa | |||
| f3b882ca2f | |||
| 94adf3cda6 | |||
| bfacaa6cf8 | |||
| 0033debb90 | |||
| 20f2bda6ed | |||
| bcc278c9cf | |||
| 75ccac221d | |||
| d90dd90a4a | |||
| d9156ce66c | |||
| 61457681e1 | |||
| bf5019108e | |||
| 622edec2fb | |||
| dac02f81c0 | |||
| d8037ebd8d | |||
| fba1bac8d2 | |||
| 510fbd293b | |||
| ab8c974e6f | |||
| 870f5afcfb | |||
| 6192bda94f | |||
| 3f701fcee3 | |||
| 524d049d74 | |||
| 983e964e36 | |||
| 84f989d6da | |||
| 49356fa769 | |||
| 2a6a03da64 | |||
| fd17860dd8 | |||
| 46fea48b6e | |||
| 54ef248df5 | |||
| 2dfb8990d2 | |||
| a50ac8167b | |||
| 86baab6858 | |||
| 67c18bb0af | |||
| c4584c27ef | |||
| 0022439bba | |||
| 5a81ef573c | |||
| 6f7ea5c7df | |||
| 926452bd55 | |||
| b5eeb6945c | |||
| 241ba623cc | |||
| cbd3099fa5 | |||
| 49e12e2a0b | |||
| 4b405af0e4 | |||
| 578ef40106 | |||
| f6e76b0fb9 | |||
| 17549bfe29 | |||
| 7915aed388 | |||
| e26c23e238 | |||
| fb5da15245 | |||
| 0021e4f354 | |||
| afa850231c | |||
| 935dc7ddaf | |||
| ac08eec0e4 | |||
| 5deb062e5f | |||
| 8e33fdbae5 | |||
| 403e6fbe37 | |||
| 071c43997e | |||
| 04f9512c2a | |||
| b9bc4421a3 | |||
| b2efd5af0a | |||
| 264a2f9449 | |||
| 561959e960 | |||
| 41a5f9a775 | |||
| 9a61e04293 | |||
| 3f1e01c6f9 | |||
| 12eabf86cf | |||
| 82d39d3256 | |||
| a1921e6fa4 | |||
| a5463fabe5 | |||
| 26f71ddedd | |||
| bdc2f7e8e1 | |||
| 2083be39da | |||
| 521419a5aa | |||
| 5bf9270d5d | |||
| 2b55921830 | |||
| 707ffa162e | |||
| 19848da7c3 | |||
| 334df849b3 | |||
| 801d34692b | |||
| 0aa70f2b80 | |||
| 5ad11a8b75 | |||
| 3f1bed3b6e | |||
| ca3668dd60 | |||
| b3ae2b1cbc | |||
| f6abca0663 | |||
| 084ff69239 | |||
| 8d31be462a | |||
| 6d010c0ef1 | |||
| dfc37fb2d4 | |||
| 56cd7b0b4f | |||
| 0060739bd2 | |||
| e98f86d878 | |||
| 1683790315 | |||
| 3c32c906de | |||
| d8c9c50743 | |||
| 2fc6febfaf | |||
| f49c679005 | |||
| 67206a3c4d | |||
| ed23f1d243 | |||
| 3b8c6c8c06 | |||
| e0c956e3e7 | |||
| 6efff8b285 | |||
| 4422c6c803 | |||
| 511b9241f5 | |||
| 89549ebeef | |||
| bdb24f6da1 | |||
| d7bc03f0a9 | |||
| 64c18e3f68 | |||
| 7bba7e0c32 | |||
| e48b3f0f8e | |||
| 31da502123 | |||
| 9c64bbdd60 | |||
| f4c1b0c1da | |||
| c761e9fe38 | |||
| e66aaaf98a | |||
| 58b5811d9e | |||
| 3b3429d77a | |||
| 98eb1a6694 | |||
| 91929a3217 | |||
| 5eecbc43be | |||
| 609502c545 | |||
| d0b420f9a1 | |||
| 1222c53a1a | |||
| 7b2d51e6c9 | |||
| 46cb286839 | |||
| 2e6f0c06fb | |||
| 31c138dacb | |||
| e428683ec7 | |||
| b6462225a7 | |||
| dfc110ca05 | |||
| f55bd26f2e | |||
| 603b6b90df | |||
| 2c132ae2cf | |||
| c7f4ad5a31 | |||
| b9d5593895 | |||
| 6a833fc141 | |||
| 4e1ad84831 | |||
| e90bcdf1a3 | |||
| dfbb346180 | |||
| 2d5b97f68f | |||
| 32826f1e4d | |||
| b1ed1d624a | |||
| 06c4040334 | |||
| b71c389f5c | |||
| 5557de6dc3 | |||
| ccdcd24d22 | |||
| c410a655ea | |||
| 2fd84ae57c | |||
| b760b717ef | |||
| acf9bd8663 | |||
| 7327f1440e | |||
| 87d8c10905 | |||
| ee45f3cae9 | |||
| 195255ce9a | |||
| 0e4fda0c5a | |||
| f1babdee60 | |||
| a703d85688 | |||
| 0cd677cb39 | |||
| 9fe11fb6e2 | |||
| 58451b17dc | |||
| cba924a31a | |||
| 74e50d1cb2 | |||
| bd1c01b4e1 | |||
| 541fa4aa28 | |||
| 4dd03c7bd6 | |||
| 3a2de83920 | |||
| 2ef5d339c6 | |||
| 6355098703 | |||
| a10a953097 | |||
| 99293d9841 | |||
| 6d409e4df5 | |||
| 2fceef4f0c | |||
| 7577e64085 | |||
| 4a9750865f | |||
| fba0685266 | |||
| e3fa1c740d | |||
| de190f6d41 | |||
| 7a5bc39376 | |||
| c0b67653de | |||
| c6b1bd2f3a | |||
| ae5c30af6b | |||
| a513378d73 | |||
| 5b63c12958 | |||
| f3fec33085 | |||
| 3a071af42d | |||
| a06a863745 | |||
| 93f2cf4bce | |||
| 0b70728f04 | |||
| b12f422db6 | |||
| 13681deaa1 | |||
| d2d43af0df | |||
| 500f053afd | |||
| 8cf9b06d7b | |||
| 88002fd78b | |||
| c4684d2dab | |||
| e46a244fea | |||
| c940de6cd7 | |||
| c391ecc7a9 | |||
| d65ad7324d | |||
| a68ffd5339 | |||
| 59736d19af | |||
| 9967f09566 | |||
| 3d7e4ebb71 | |||
| c9457f7610 | |||
| 13aef1fd89 | |||
| a9548747cd | |||
| 0da4cd6eb1 | |||
| 083246bea1 | |||
| 9f372ebd72 | |||
| cdf4c96ed6 | |||
| c757b57e07 | |||
| 6629585b32 | |||
| ad96d6ce66 | |||
| 5877dc1e24 | |||
| 908a6b808b | |||
| fbd41fae7f | |||
| f9ff37c820 | |||
| eed91491aa | |||
| 6faf9db2ba | |||
| 713fd7fc22 | |||
| d86ce3ac2f | |||
| 076163ccfd | |||
| 8f74c26f77 | |||
| 1b37ed61e3 | |||
| c6a421e61b | |||
| 550a60f4af | |||
| 01a6901bfe | |||
| e655aa5bbd | |||
| f02409c5a9 | |||
| 8524473488 | |||
| 0b039c6453 | |||
| 62250abe8b | |||
| 5b0fc66cb1 | |||
| ffa15c274b | |||
| 09596000d7 | |||
| 8e7a5e7d60 | |||
| fc6d485fa3 | |||
| 0ed2e7e175 | |||
| cb0a54fe2b | |||
| d9cf91d2f0 | |||
| 3ec820f212 | |||
| 474f743d28 | |||
| 3f1b508752 | |||
| 2c49a1d8b9 | |||
| ab441659b2 | |||
| 84d843b356 | |||
| 9b3af38326 | |||
| 8226a638d9 | |||
| 4cd2c5878c | |||
| 8242198068 | |||
| 59be5dc807 | |||
| de6b6012ba | |||
| 5928c84cf4 | |||
| b393469584 | |||
| 6f5cef3a6c | |||
| 5234d78719 | |||
| aebe64ef3d | |||
| 224a40dcb7 | |||
| 5ddb6bf718 | |||
| 11cb61874d | |||
| 00ed22ad28 | |||
| e263922b43 | |||
| a4172a74d1 | |||
| b1fb2aeeb3 | |||
| 4f3c2b7b8c | |||
| ec493ee91b | |||
| 2200bb9ee8 | |||
| 588129436d | |||
| fed51d9959 | |||
| e6af5e77f8 | |||
| 2eb230d366 | |||
| a66ecd7660 | |||
| 46a9459b7d | |||
| 0a34dae6c0 | |||
| 2209a76f25 | |||
| ba2e27dc7e | |||
| 5f5cedb428 | |||
| a4da127078 | |||
| 109d0ffab6 | |||
| 3af2eb1b59 | |||
| 51d3f37058 | |||
| 3b76018db9 | |||
| 271d42c09f | |||
| ddfb7f0e88 | |||
| 3cb8ce1b3b | |||
| 42b00f4942 | |||
| 749c7ce796 | |||
| 27ff214d04 | |||
| 46ff3c293a | |||
| c034e9f2ee | |||
| b2c5cebc08 | |||
| 0017a6b0f9 | |||
| a2c9df06de | |||
| 4152510452 | |||
| d253f7279a | |||
| b186caa1d0 | |||
| f99ac2f471 | |||
| 409af6e23e | |||
| 36d81e027b | |||
| 2a0cb6125a | |||
| b65ef1289a | |||
| e67f1fb974 | |||
| 292d7c9e05 | |||
| 617cb79299 | |||
| dbad11ad9a | |||
| 04cb6d2538 | |||
| b6ff3852a0 | |||
| 70a68bb676 | |||
| e04fc80b62 | |||
| 35d63e7894 | |||
| 9e71358ae2 | |||
| 0891b103e0 | |||
| 2480904929 | |||
| da903d1879 | |||
| eafc009ff0 | |||
| 3023bcaf95 | |||
| 2d29953318 | |||
| 6b9ec4bc05 | |||
| 540176059a | |||
| 9051354c58 | |||
| 26985aeacb | |||
| c2a84c7f93 | |||
| 51975f6748 | |||
| 6fdc16c33f | |||
| ed4f347563 | |||
| a1cdb3b273 | |||
| 8b8088b74a | |||
| 94e9f2678d | |||
| 05965e749a | |||
| 1a9cea263f | |||
| 966c402ecc | |||
| d5e0a3e4f6 | |||
| 2fafca7dfd | |||
| bfbd1bcfed | |||
| c1d476a991 | |||
| f7b78ca855 | |||
| 0e1429b604 | |||
| 57f2ca6460 | |||
| e1d8dabd3d | |||
| d498287f76 | |||
| 8a3026e43e | |||
| 133f26c691 | |||
| 9b169d1f43 | |||
| 2c331f9a65 | |||
| b9e8559002 | |||
| a8f843fea5 | |||
| a0da3b564f | |||
| bdc5e09ecc | |||
| d88e16dccf | |||
| 77680fcdc9 | |||
| 6afcc42c38 | |||
| 0bf7b86217 | |||
| fa306338aa | |||
| 5921a099d9 | |||
| e6dd1f0c48 | |||
| ae8602a769 | |||
| 8d86636a95 | |||
| 87a9191013 | |||
| e847933c3c | |||
| ad7280c065 | |||
| b124bac190 | |||
| 6f926f4849 | |||
| 48df9d4af6 | |||
| a5d0c183a7 | |||
| 37354484c2 | |||
| eeae13d4ba | |||
| c84b474632 | |||
| a207030899 | |||
| b97e28ad3b | |||
| b307adda99 | |||
| 069421f47a | |||
| 8f1a11757f | |||
| 3fa5f07f51 | |||
| 8b8a200b83 | |||
| 2c87d3e714 | |||
| ddf3b54917 | |||
| 846da8e17d | |||
| 0d0d414fc8 | |||
| 0c01bce460 | |||
| 37c83ce039 | |||
| 9e504d577e | |||
| ab70692c49 | |||
| d48f594147 | |||
| 3e4e634c97 | |||
| 0e17a0bcd0 | |||
| 32e0d32dea | |||
| 1ecf355346 | |||
| 2ff15b54af | |||
| 30ac3f8c0a |
@@ -0,0 +1 @@
|
||||
commands/code/apply-issue-main.md
|
||||
@@ -0,0 +1,20 @@
|
||||
---
|
||||
description: Increment the AIX monotonic version number
|
||||
allowed-tools: Bash(git add:*),Bash(git status:*),Bash(git commit:*),Edit,Write
|
||||
model: haiku
|
||||
disable-model-invocation: true
|
||||
---
|
||||
|
||||
Increment `Monotonics.Aix` in `src/common/app.release.ts` and commit it.
|
||||
|
||||
**Pre-flight checks (MUST pass or abort):**
|
||||
1. Run `git branch --show-current` - MUST be on `main` branch
|
||||
2. Run `git status src/common/app.release.ts` - file MUST be unmodified (no changes on this specific file)
|
||||
|
||||
**Execute:**
|
||||
1. Read current `Monotonics.Aix` value from `src/common/app.release.ts`
|
||||
2. Increment by 1
|
||||
3. Update ONLY that line
|
||||
4. Run: `git add src/common/app.release.ts && git commit -m "Roll AIX"`
|
||||
|
||||
Confirm new version number.
|
||||
@@ -0,0 +1,31 @@
|
||||
---
|
||||
description: Sync Anthropic API implementation with latest upstream documentation
|
||||
argument-hint: specific feature to check
|
||||
---
|
||||
|
||||
Please take a look at my API code for Anthropic: message wire types `src/modules/aix/server/dispatch/wiretypes/anthropic.wiretypes.ts`, assembly of the request messages (adapters) `src/modules/aix/server/dispatch/chatGenerate/adapters/anthropic.messageCreate.ts`, and parsing of the response in streaming or not `src/modules/aix/server/dispatch/chatGenerate/parsers/anthropic.parser.ts`.
|
||||
|
||||
IMPORTANT: we only support the Messages API (message create). We do NOT support other APIs such as the older Completions API.
|
||||
We support Anthropic caching natively, and want to make sure tools and state (crafting the history) are also done well.
|
||||
|
||||
Then take a look at the newest API information available. Try these sources, and be creative if some are blocked:
|
||||
|
||||
**Primary Sources:**
|
||||
- Docs API: https://docs.claude.com/en/api/messages
|
||||
- Release notes: https://docs.claude.com/en/release-notes/api
|
||||
- Tools use: https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview
|
||||
- Handling stop reasons: https://docs.claude.com/en/api/handling-stop-reasons
|
||||
|
||||
**Alternative Sources if primary blocked:**
|
||||
- Anthropic TypeScript SDK: https://github.com/anthropics/anthropic-sdk-typescript
|
||||
- Anthropic Python SDK: https://github.com/anthropics/anthropic-sdk-python
|
||||
- Recent news and announcements: Web Search for "anthropic api changelog" or "new claude api" or "new claude api pricing"
|
||||
|
||||
**If all blocked:** Explain what you attempted and ask user to provide documentation manually.
|
||||
|
||||
$ARGUMENTS
|
||||
Check carefully and look if there are any discrepancies in the protocols, the available API surface, the structure of the messages, functionality, logic, etc.
|
||||
Make sure you look deep in the fields of the requests and responses, especially required fields, streaming event types, and any new response shapes.
|
||||
|
||||
Please point out all of the differences in the API whether it's in the final parsing and reassembly of the streaming message, or the protocol changed, etc.
|
||||
Prioritize breaking changes and new capabilities that would improve the user experience.
|
||||
@@ -0,0 +1,30 @@
|
||||
---
|
||||
description: Sync Google Gemini API implementation with latest upstream documentation
|
||||
argument-hint: specific feature to check
|
||||
---
|
||||
|
||||
Please take a look at my API code for Google Gemini: message wire types `src/modules/aix/server/dispatch/wiretypes/gemini.wiretypes.ts`, assembly of the request messages (adapters) `src/modules/aix/server/dispatch/chatGenerate/adapters/gemini.generateContent.ts`, and parsing of the response in streaming or not `src/modules/aix/server/dispatch/chatGenerate/parsers/gemini.parser.ts`.
|
||||
|
||||
IMPORTANT: we only support the generateContent API, not other Gemini APIs such as embeddings, etc.
|
||||
Caching is only supported when implicit, we do not explicitly manage Gemini Caches. Same for file uploads and other systems.
|
||||
Image generation happens through models, i.e. 'Gemini 2.5 Flash - Nano Banana' generates images using AIX from generateContent (chat input).
|
||||
|
||||
Then take a look at the newest API information available. Try these sources, and be creative if some are blocked:
|
||||
|
||||
**Primary Sources:**
|
||||
- Docs API 1/2: https://ai.google.dev/api/generate-content
|
||||
- Docs API 2/2: https://ai.google.dev/api/caching#Content
|
||||
- Release notes: https://ai.google.dev/gemini-api/docs/changelog
|
||||
|
||||
**Alternative Sources if primary blocked:**
|
||||
- Google AI JavaScript SDK: https://github.com/googleapis/js-genai (check latest commits, README, type definitions)
|
||||
- Recent news and announcements: Web Search for "gemini api changelog" or "new gemini api updates" or "new gemini api pricing"
|
||||
|
||||
**If all blocked:** Explain what you attempted and ask user to provide documentation manually.
|
||||
|
||||
$ARGUMENTS
|
||||
Check carefully and look if there are any discrepancies in the protocols, the available API surface, the structure of the messages, functionality, logic, etc.
|
||||
Make sure you look deep in the fields of the requests and responses, especially required fields, streaming event types, and any new response shapes.
|
||||
|
||||
Please point out all of the differences in the API whether it's in the final parsing and reassembly of the streaming message, or the protocol changed, etc.
|
||||
Prioritize breaking changes and new capabilities that would improve the user experience.
|
||||
@@ -0,0 +1,34 @@
|
||||
---
|
||||
description: Sync OpenAI API implementation with latest upstream documentation
|
||||
argument-hint: specific feature to check
|
||||
---
|
||||
|
||||
Please take a look at my API code for OpenAI: message wire types `src/modules/aix/server/dispatch/wiretypes/openai.wiretypes.ts`, assembly of the request messages (adapters) `src/modules/aix/server/dispatch/chatGenerate/adapters/openai.chatCompletions.ts`, and parsing of the response in streaming or not `src/modules/aix/server/dispatch/chatGenerate/parsers/openai.parser.ts`.
|
||||
|
||||
IMPORTANT: we prioritize the new Responses API, while Chat Completions is still supported but legacy.
|
||||
We do NOT support other APIs such as Realtime (incl. websockets), etc.
|
||||
We also do not support Agentic APIs (Agent SDK, AgentKit, ChatKit, Assistants API etc), as we perform similar functionality in AIX (server or client side).
|
||||
|
||||
Then take a look at the newest API information available. Try these sources, and be creative if some are blocked:
|
||||
|
||||
**Primary Sources:**
|
||||
- Responses API (AIX prioritizes it): https://platform.openai.com/docs/api-reference/responses/create
|
||||
- Chat Completions API: https://platform.openai.com/docs/api-reference/chat/create
|
||||
- Changelog: https://platform.openai.com/docs/changelog
|
||||
- Models: https://platform.openai.com/docs/models
|
||||
- Pricing (use Copy Page button to download markdown): https://platform.openai.com/docs/pricing
|
||||
|
||||
**Alternative Sources if primary blocked:**
|
||||
- OpenAI Node.js SDK: https://github.com/openai/openai-node
|
||||
- OpenAI Python SDK: https://github.com/openai/openai-python
|
||||
- OpenAI OpenAPI spec: https://github.com/openai/openai-openapi
|
||||
- Recent news and announcements: Web Search for "openai api changelog" or "openai new models" or "openai new prices"
|
||||
|
||||
**If all blocked:** Explain what you attempted and ask user to provide documentation manually.
|
||||
|
||||
$ARGUMENTS
|
||||
Check carefully and look if there are any discrepancies in the protocols, the available API surface, the structure of the messages, functionality, logic, etc.
|
||||
Make sure you look deep in the fields of the requests and responses, especially required fields, streaming event types, and any new response shapes.
|
||||
|
||||
Please point out all of the differences in the API whether it's in the final parsing and reassembly of the streaming message, or the protocol changed, etc.
|
||||
Prioritize breaking changes and new capabilities that would improve the user experience.
|
||||
@@ -0,0 +1,49 @@
|
||||
---
|
||||
description: Sync OpenRouter API implementation with latest upstream documentation
|
||||
argument-hint: specific feature to check
|
||||
---
|
||||
|
||||
Review the OpenRouter implementation:
|
||||
- Models list: `src/modules/llms/server/openai/openrouter.wiretypes.ts` (list API response schema)
|
||||
- Chat wire types: `src/modules/aix/server/dispatch/wiretypes/openai.wiretypes.ts` (OpenAI-compatible)
|
||||
- Request adapter: `src/modules/aix/server/dispatch/chatGenerate/adapters/openai.chatCompletions.ts` ('openrouter' dialect)
|
||||
- Response parser: `src/modules/aix/server/dispatch/chatGenerate/parsers/openai.parser.ts` (shared OpenAI parser)
|
||||
- Vendor config: `src/modules/llms/vendors/openrouter/openrouter.vendor.ts`
|
||||
|
||||
GOAL: Ensure complete support for OpenRouter's API including advanced features like reasoning/thinking tokens, tool use, search integration, and multi-modal capabilities. OpenRouter is OpenAI-compatible but has important extensions and differences.
|
||||
|
||||
Use Task tool with subagent_type=Explore and thoroughness="very thorough" to discover:
|
||||
1. Map API structure - all endpoints, parameters, capabilities from https://openrouter.ai/docs
|
||||
2. **Advanced features** - How to use: reasoning/thinking tokens (o1, DeepSeek R1), tool use/function calling, search integration, multi-modal (vision/audio)
|
||||
3. Changelog location - How does OpenRouter communicate API updates and breaking changes?
|
||||
4. Model metadata - What capabilities are exposed in the models list API? How to detect feature support?
|
||||
5. OpenAI deviations - Extensions, special headers (HTTP-Referer, X-Title), response fields, streaming differences
|
||||
|
||||
Then check the latest API information. Try these sources (be creative if blocked):
|
||||
|
||||
**Primary Sources:**
|
||||
- API Reference: https://openrouter.ai/docs/api-reference
|
||||
- Chat Completions: https://openrouter.ai/docs/api-reference#chat-completions
|
||||
- Models List: https://openrouter.ai/docs/api-reference#models-list
|
||||
- Parameters Guide: https://openrouter.ai/docs/parameters
|
||||
- Announcements: https://openrouter.ai/announcements (feature launches, API updates, new models)
|
||||
- Models Directory: https://openrouter.ai/models (check metadata for capabilities)
|
||||
|
||||
**Alternative Sources:**
|
||||
- GitHub: https://github.com/OpenRouterTeam (SDKs, examples, issues for recent changes)
|
||||
- Web Search: "openrouter api changelog" or "openrouter reasoning tokens" or "openrouter tool use"
|
||||
|
||||
**If blocked:** Ask user to provide documentation.
|
||||
|
||||
$ARGUMENTS
|
||||
Focus on discrepancies and gaps:
|
||||
- **Request/Response structure**: New fields, changed requirements, streaming event types
|
||||
- **Feature support**: Thinking tokens format, tool calling protocol, search parameters
|
||||
- **Model capabilities**: How to detect and enable advanced features per model
|
||||
- **OpenRouter extensions**: Headers, routing, fallbacks, rate limiting (free vs paid)
|
||||
- **Breaking changes**: Protocol updates, deprecated fields, new required parameters
|
||||
|
||||
Report differences in wire types, adapter logic, parser handling, or dialect-specific quirks.
|
||||
Prioritize new capabilities that improve user experience (reasoning visibility, better tool use, etc.).
|
||||
|
||||
When making changes, add comments with date: `// [OpenRouter, 2026-MM-DD]: explanation`
|
||||
@@ -0,0 +1,56 @@
|
||||
---
|
||||
description: Sync xAI Responses API implementation with latest upstream documentation
|
||||
argument-hint: specific feature to check
|
||||
---
|
||||
|
||||
Review the xAI Responses API implementation:
|
||||
- xAI wire types: `src/modules/aix/server/dispatch/wiretypes/xai.wiretypes.ts` (xAI-specific request schema, tools)
|
||||
- Request adapter: `src/modules/aix/server/dispatch/chatGenerate/adapters/xai.responsesCreate.ts` (AIX → xAI Responses API)
|
||||
- Response parser: `src/modules/aix/server/dispatch/chatGenerate/parsers/openai.responses.parser.ts` (shared with OpenAI Responses)
|
||||
- Dispatch routing: `src/modules/aix/server/dispatch/chatGenerate/chatGenerate.dispatch.ts` (dialect='xai' routing)
|
||||
- OpenAI shared types: `src/modules/aix/server/dispatch/wiretypes/openai.wiretypes.ts` (InputItem/OutputItem schemas reused by xAI)
|
||||
|
||||
IMPORTANT context:
|
||||
- We use ONLY the xAI Responses API (`POST /v1/responses`). We do NOT use the Chat Completions API (`/v1/chat/completions`) for xAI anymore.
|
||||
- xAI's Responses API is similar to OpenAI's but has key differences - the skill should find what changed since our last sync.
|
||||
- Response streaming/parsing reuses the OpenAI Responses parser since the format is compatible.
|
||||
- We do NOT implement: Files API, Collections Search, Remote MCP tools, Voice Agent API, Image/Video generation, Batch API, or Deferred Completions.
|
||||
|
||||
Then take a look at the newest API information available. Try these sources, and be creative if some are blocked:
|
||||
|
||||
**Primary Sources (guide pages work well with WebFetch despite being JS-rendered):**
|
||||
- Responses API Guide: https://docs.x.ai/docs/guides/chat
|
||||
- Stateful Responses: https://docs.x.ai/docs/guides/responses-api
|
||||
- Tools Overview: https://docs.x.ai/docs/guides/tools/overview
|
||||
- Search Tools (web_search, x_search): https://docs.x.ai/docs/guides/tools/search-tools
|
||||
- Code Execution Tool: https://docs.x.ai/docs/guides/tools/code-execution-tool
|
||||
- Function Calling: https://docs.x.ai/docs/guides/function-calling
|
||||
- Streaming: https://docs.x.ai/docs/guides/streaming-response
|
||||
- Reasoning: https://docs.x.ai/docs/guides/reasoning
|
||||
- Structured Outputs: https://docs.x.ai/docs/guides/structured-outputs
|
||||
- Models & Pricing: https://docs.x.ai/developers/models
|
||||
- Release Notes: https://docs.x.ai/developers/release-notes
|
||||
- API Reference: https://docs.x.ai/developers/api-reference#create-new-response
|
||||
|
||||
**Alternative Sources if primary blocked:**
|
||||
- xAI Python SDK: https://github.com/xai-org/xai-sdk-python
|
||||
- Web Search for "xai grok api changelog 2026" or "xai responses api new features"
|
||||
|
||||
**If all blocked:** Explain what you attempted and ask user to provide documentation manually.
|
||||
|
||||
$ARGUMENTS
|
||||
Check carefully for discrepancies between our implementation and the current API docs:
|
||||
|
||||
1. **Request fields**: Compare `XAIWire_API_Responses.Request_schema` against current docs - any new, changed, or deprecated parameters?
|
||||
2. **Tool definitions**: Compare `XAIWire_Responses_Tools` - any new parameters on web_search/x_search/code_interpreter? Any new hosted tool types?
|
||||
3. **Input/Output item types**: Any xAI-specific output items not handled by the shared OpenAI parser (e.g., x_search_call, web_search_call, code_interpreter_call)?
|
||||
4. **Streaming events**: Any xAI-specific SSE event types beyond what the OpenAI Responses parser handles?
|
||||
5. **Response shape**: Usage reporting differences, new fields in the response object?
|
||||
6. **Adapter logic**: Message role mapping, content type handling, system message approach - still correct?
|
||||
7. **Include options**: Any new values for the `include` array?
|
||||
8. **Reasoning config**: Which models support it and with what values?
|
||||
|
||||
Prioritize breaking changes and new capabilities that would improve the user experience.
|
||||
When making changes, add comments with date: `// [xAI, 2026-MM-DD]: explanation`
|
||||
|
||||
**Self-update this skill**: After completing the sync, if your research reveals that assumptions in THIS skill file (`.claude/commands/aix/sync-xai-api.md`) are wrong or outdated - e.g., new APIs we now implement, new tool types added, URLs moved, file paths changed - update this skill file to stay accurate for next time.
|
||||
@@ -0,0 +1,34 @@
|
||||
---
|
||||
description: Review in-flight changes for coherence, completeness, and quality
|
||||
---
|
||||
|
||||
Review the current in-flight changes in the big-agi-private repository (dev branch, continuously rebased ~1800 commits on top of main).
|
||||
|
||||
**Step 1: Scope and read**
|
||||
|
||||
`git diff --stat` + `git status` for breadth. Then full `git diff` (if empty: `git diff --cached`, then `git diff HEAD~1`).
|
||||
For every file in the diff, read surrounding context in the actual source file - the diff alone hides bugs in adjacent untouched code.
|
||||
|
||||
**Step 2: Reverse-engineer the intent**
|
||||
|
||||
From the diff, determine the **what**, **how**, and **why**. Present this concisely so the author can confirm or correct,
|
||||
but don't stop here, continue to the full review in the same response.
|
||||
|
||||
**Step 3: Validate**
|
||||
|
||||
Run `tsc --noEmit --pretty` and `npm run lint` (in parallel). Report any errors with the review.
|
||||
If the diff removes/renames identifiers, grep the codebase for stale references to the OLD names. This catches broken guards, stale imports, and incomplete migrations.
|
||||
|
||||
**Step 4: Deep review**
|
||||
|
||||
Evaluate every file in the diff.
|
||||
Leave no stone unturned - correctness, coherence, completeness, excess, generalization, maintenance burden,
|
||||
codebase consistency, etc.
|
||||
|
||||
**Step 5: Prioritized next steps**
|
||||
|
||||
Think about what happens when the next developer touches this code.
|
||||
Rank findings by severity (bug > correctness > cleanup > cosmetic). Be specific about what to change and where.
|
||||
|
||||
Remember: design values for this codebase: orthogonal features, features that generalize well, modularized and reusable code,
|
||||
type-discriminated data, optimized code, zero maintenance burden. Minimize future pain, etc.
|
||||
@@ -0,0 +1,63 @@
|
||||
---
|
||||
description: Sync LLM parameter options between full model dialog and chat side panel
|
||||
---
|
||||
|
||||
Audit and sync LLM parameter configurations between the two UI editors. Goal: identical `value` fields in option arrays + equivalent onChange logic. Labels/descriptions can differ for UI space.
|
||||
|
||||
**Files to Compare:**
|
||||
1. **Full Model Dialog**: `src/modules/llms/models-modal/LLMParametersEditor.tsx` (main branch)
|
||||
2. **Chat Side Panel**: `src/apps/chat/components/layout-panel/ChatPanelModelParameters.tsx` (main derived branches only)
|
||||
|
||||
**Reference Documentation:**
|
||||
- Parameter system: `kb/systems/LLM-parameters-system.md`
|
||||
- Parameter registry: `src/common/stores/llms/llms.parameters.ts`
|
||||
|
||||
**Task: Perform a comprehensive audit**
|
||||
|
||||
1. **Read both files** and extract all option arrays (e.g., `_reasoningEffortOptions`, `_antEffortOptions`, `_geminiThinkingLevelOptions`, etc.)
|
||||
|
||||
2. **Check for missing parameters:**
|
||||
- Parameters handled in `LLMParametersEditor.tsx` but NOT in `ChatPanelModelParameters.tsx`
|
||||
- Parameters in `ChatPanelModelParameters.tsx`'s `_interestingParameters` array but missing UI controls
|
||||
- Note: The side panel intentionally shows only "interesting" parameters - focus on those listed in `_interestingParameters`
|
||||
|
||||
3. **Check for value mismatches** between corresponding option arrays:
|
||||
- Different number of options (e.g., 3 vs 4 options)
|
||||
- Same label but different `value` (this causes the bug in issue #926)
|
||||
- Different labels for the same `value`
|
||||
- Missing `_UNSPECIFIED`/Default option in one but not the other
|
||||
|
||||
4. **Check onChange handler consistency:**
|
||||
- Both should remove parameter on `_UNSPECIFIED` selection
|
||||
- Both should set explicit values the same way
|
||||
- Watch for conditions like `value === 'high'` that may differ
|
||||
|
||||
**Output Format:**
|
||||
|
||||
```
|
||||
## Parameter Sync Audit Report
|
||||
|
||||
### Missing Parameters
|
||||
- [ ] `llmVndXyz` - In full dialog, missing from side panel
|
||||
|
||||
### Value Mismatches
|
||||
- [ ] `_xyzOptions`:
|
||||
- Full dialog: [values...]
|
||||
- Side panel: [values...]
|
||||
- Issue: [description]
|
||||
|
||||
### Handler Inconsistencies
|
||||
- [ ] `llmVndXyz` onChange differs: [explanation]
|
||||
|
||||
### Recommended Fixes
|
||||
1. [Specific fix with code snippet if needed]
|
||||
```
|
||||
|
||||
**Fix Direction:** Full dialog is source of truth. Update side panel to match its values when mismatched.
|
||||
|
||||
**Notes:**
|
||||
- Side panel uses shorter descriptions (space-constrained) - that's fine
|
||||
- Variable names may differ (e.g., `_anthropicEffortOptions` vs `_antEffortOptions`) - that's fine, though identical names are preferred
|
||||
- `value` fields must be identical sets
|
||||
- `_UNSPECIFIED` must mean the same thing in both
|
||||
- onChange: remove on `_UNSPECIFIED`, set explicit value otherwise
|
||||
@@ -0,0 +1,20 @@
|
||||
---
|
||||
description: Update Alibaba model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/openai/models/alibaba.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models & Pricing: https://www.alibabacloud.com/help/en/model-studio/models
|
||||
- Billing Guide: https://www.alibabacloud.com/help/en/model-studio/billing-for-model-studio
|
||||
|
||||
**Fallbacks if blocked:**
|
||||
- Search "alibaba model studio latest pricing", "alibaba latest models", "qwen models pricing", or search GitHub for latest model prices and context windows
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -0,0 +1,49 @@
|
||||
---
|
||||
description: Update Anthropic model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/anthropic/anthropic.models.ts` with latest model definitions.
|
||||
|
||||
Reference files (for context only, do not modify):
|
||||
- `src/modules/llms/server/llm.server.types.ts`
|
||||
- `src/modules/llms/server/models.mappings.ts`
|
||||
- `src/common/stores/llms/llms.parameters.ts`
|
||||
|
||||
**Workflow: Start with recent changes, then verify the full model list.**
|
||||
|
||||
**Primary Sources (append `.md` to any path for clean markdown):**
|
||||
1. Recent changes: https://platform.claude.com/docs/en/release-notes/overview.md
|
||||
2. Models & IDs: https://platform.claude.com/docs/en/about-claude/models/overview.md
|
||||
3. Pricing (base, cache, batch, long context): https://platform.claude.com/docs/en/about-claude/pricing.md
|
||||
4. Deprecations & retirement dates: https://platform.claude.com/docs/en/about-claude/model-deprecations.md
|
||||
|
||||
**Discovering feature docs:** The release notes and models overview markdown
|
||||
contain inline links to feature-specific pages (thinking modes, effort,
|
||||
context windows, what's-new pages, etc.). When a new capability is
|
||||
referenced, follow those links — append `.md` to get markdown. Examples of
|
||||
pages you might discover this way:
|
||||
- `about-claude/models/whats-new-claude-*` — per-generation changes
|
||||
- `build-with-claude/extended-thinking` — thinking budget configuration
|
||||
- `build-with-claude/effort` — effort parameter levels
|
||||
- `build-with-claude/adaptive-thinking` — adaptive thinking mode
|
||||
|
||||
**Fallback web pages** (crawl if `.md` paths break or structure changes):
|
||||
- https://platform.claude.com/docs/en/about-claude/models/overview
|
||||
- https://platform.claude.com/docs/en/about-claude/pricing
|
||||
- https://platform.claude.com/docs/en/release-notes/overview
|
||||
- https://claude.com/pricing
|
||||
|
||||
**Fallbacks if blocked:** Check the Anthropic TypeScript SDK at
|
||||
https://github.com/anthropics/anthropic-sdk-typescript, or web-search
|
||||
for "anthropic models latest pricing" / "anthropic latest models".
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- For new models: check which `parameterSpecs` are needed (thinking mode,
|
||||
effort levels, 1M context, skills, web tools) by reading the linked
|
||||
feature docs and comparing with existing model entries
|
||||
- When thinking/effort semantics change between generations
|
||||
(e.g. adaptive vs manual thinking), document in comments
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -0,0 +1,22 @@
|
||||
---
|
||||
description: Update DeepSeek model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/openai/models/deepseek.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Pricing: https://api-docs.deepseek.com/quick_start/pricing
|
||||
- Model List: https://api-docs.deepseek.com/api/list-models
|
||||
- Release Notes: https://api-docs.deepseek.com/updates (check for version updates like V3.2-Exp)
|
||||
|
||||
**Note:** DeepSeek frequently releases new versions with significant pricing changes. Always check release notes first.
|
||||
|
||||
**Fallbacks if blocked:** Search "deepseek api latest pricing", "deepseek latest models", "deepseek models list" or search GitHub for latest model prices and context windows
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -0,0 +1,91 @@
|
||||
---
|
||||
description: Update/validate dynamic vendor model parsers (OpenRouter, TogetherAI, Alibaba, Azure, Novita, ChutesAI, FireworksAI, TLUS, LM Studio, LocalAI, FastAPI)
|
||||
---
|
||||
|
||||
Validate that the dynamic (API-fetched) vendor model parsers are up to date and not silently broken.
|
||||
|
||||
These vendors do NOT have hardcoded model lists - they fetch models from APIs at runtime. But their parsers, filters, heuristic detection, and capability mapping can break if upstream APIs change. This skill covers all dynamic vendors NOT covered by the other `llms:update-models-{vendor}` skills.
|
||||
|
||||
## Vendors to Validate
|
||||
|
||||
### High Risk
|
||||
|
||||
**OpenRouter** - `src/modules/llms/server/openai/models/openrouter.models.ts`
|
||||
- Most complex parser. Vendor-specific parameter inheritance (Anthropic thinking variants, Gemini thinking/image, OpenAI reasoning effort, xAI/DeepSeek reasoning).
|
||||
- Hardcoded family ordering list (lines ~24-37) - check if new leading vendors are missing.
|
||||
- Hardcoded old/deprecated model hiding list (lines ~39-49) - check if stale.
|
||||
- Cache pricing detection (Anthropic-style vs OpenAI-style) - verify format still valid.
|
||||
- Variant injection for Anthropic thinking/non-thinking - verify still correct.
|
||||
- Reference: https://openrouter.ai/docs/models
|
||||
|
||||
### Medium Risk
|
||||
|
||||
**Novita** - `src/modules/llms/server/openai/models/novita.models.ts`
|
||||
- Features array mapping (`function-calling`, `reasoning`, `structured-outputs`) and input modalities parsing.
|
||||
- Pricing unit conversion (hundredths of cent per million → dollars per 1K).
|
||||
- Hostname heuristic: `novita.ai`.
|
||||
|
||||
**ChutesAI** - `src/modules/llms/server/openai/models/chutesai.models.ts`
|
||||
- Custom `max_model_len` field for context window.
|
||||
- Assumes all models support Vision + Functions (aggressive).
|
||||
- Hostname heuristic: `.chutes.ai`.
|
||||
|
||||
**FireworksAI** - `src/modules/llms/server/openai/models/fireworksai.models.ts`
|
||||
- Relies on provider capability flags: `supports_chat`, `supports_image_input`, `supports_tools`.
|
||||
- Hostname heuristic: `fireworks.ai/`.
|
||||
|
||||
**TogetherAI** - `src/modules/llms/server/openai/models/together.models.ts`
|
||||
- Type allow-list (`type: 'chat'`), vision detection by string match.
|
||||
- Custom wire schema with pricing conversion.
|
||||
|
||||
**TLUS** - `src/modules/llms/server/openai/models/tlusapi.models.ts`
|
||||
- Detected by response structure (`total_models`, `free_models`, `pro_models` fields).
|
||||
- Capability enum mapping (`text`, `vision`, `audio`, `tool-calling`, `reasoning`, `websearch`).
|
||||
- Tier-based pricing (`free` vs paid).
|
||||
|
||||
**Alibaba** - `src/modules/llms/server/openai/models/alibaba.models.ts`
|
||||
- Model list was cleared (dynamic-only). Exclusion patterns for non-chat models.
|
||||
- Assumes 128K context and Vision+Functions for all models (overly permissive).
|
||||
- Check if hardcoded data should be restored now that naming has stabilized.
|
||||
|
||||
### Low Risk (local/generic - validate only if issues reported)
|
||||
|
||||
**Azure** - `src/modules/llms/server/openai/models/azure.models.ts`
|
||||
- Custom deployments API, not `/v1/models`. User-specific. Deployment name fallback logic.
|
||||
|
||||
**LM Studio** - `src/modules/llms/server/openai/models/lmstudio.models.ts`
|
||||
- Local service, native API (`/api/v1/models`). GGUF metadata parsing, capability flags.
|
||||
|
||||
**LocalAI** - `src/modules/llms/server/openai/models/localai.models.ts`
|
||||
- Local service. String-based hide list, vision/reasoning detection by name pattern.
|
||||
|
||||
**FastAPI** - `src/modules/llms/server/openai/models/fastapi.models.ts`
|
||||
- Generic passthrough. Detected by `owned_by === 'fastchat'`. Minimal parsing.
|
||||
|
||||
## Validation Checklist
|
||||
|
||||
For each vendor (prioritize High > Medium > Low):
|
||||
|
||||
1. **Read the parser file** and check for:
|
||||
- Deny/allow lists that may be stale (new model families missing)
|
||||
- Capability assumptions that may be wrong (e.g. "all models support vision")
|
||||
- Field names that may have changed upstream
|
||||
- Pricing conversion math that may use wrong units
|
||||
|
||||
2. **Check upstream docs** (where available) for:
|
||||
- API response schema changes
|
||||
- New model types or capability fields
|
||||
- Deprecated fields
|
||||
|
||||
3. **Cross-reference with OpenRouter** (aggregator):
|
||||
- OpenRouter surfaces models from many of these vendors
|
||||
- If OpenRouter shows capabilities that a vendor's parser misses, the parser is stale
|
||||
|
||||
4. **Fix issues found** - update parsers, filters, deny lists as needed.
|
||||
|
||||
5. Run `tsc --noEmit` after changes.
|
||||
|
||||
**Important:**
|
||||
- Do NOT convert dynamic vendors to hardcoded lists - the dynamic approach is intentional
|
||||
- Focus on parser correctness, not model coverage
|
||||
- Flag any vendor whose API response format seems to have changed substantially
|
||||
@@ -0,0 +1,21 @@
|
||||
---
|
||||
description: Update Gemini model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/gemini/gemini.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.types.ts`, `src/modules/llms/server/llm.server.types.ts`, and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models: https://ai.google.dev/gemini-api/docs/models
|
||||
- Pricing: https://ai.google.dev/gemini-api/docs/pricing
|
||||
- Changelog: https://ai.google.dev/gemini-api/docs/changelog
|
||||
|
||||
**Fallbacks if blocked:** Check Google AI JS SDK at https://github.com/googleapis/js-genai, search "gemini models latest pricing", "gemini latest models", or search GitHub for latest model prices and context windows
|
||||
|
||||
**Important:**
|
||||
- Ignore context windows (auto-determined at runtime) and training cutoffs (not supported)
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review, do NOT remove comments
|
||||
- Flag broken links or unexpected content
|
||||
@@ -0,0 +1,19 @@
|
||||
---
|
||||
description: Update Groq model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/openai/models/groq.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Source:**
|
||||
- Fetch https://console.groq.com/docs/models.md directly (markdown format, no search needed)
|
||||
- Pricing: https://groq.com/pricing/
|
||||
|
||||
**Do NOT use web search.** The `.md` endpoint provides structured markdown content directly.
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -0,0 +1,19 @@
|
||||
---
|
||||
description: Update Kimi model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/openai/models/moonshot.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources (fetch directly, no search needed):**
|
||||
- Pricing: https://platform.moonshot.ai/docs/pricing/chat
|
||||
- API Reference: https://platform.moonshot.ai/docs/api/chat
|
||||
|
||||
**Do NOT use web search.** Fetch the URLs directly, or ask the user to provide the data if the pages are inaccessible.
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -0,0 +1,24 @@
|
||||
---
|
||||
description: Update Mistral model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/openai/models/mistral.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models: https://docs.mistral.ai/getting-started/models/models_overview/
|
||||
- Pricing: https://mistral.ai/pricing#api-pricing
|
||||
- Changelog: https://docs.mistral.ai/getting-started/changelog/
|
||||
|
||||
**Fallbacks if blocked:**
|
||||
- Search "mistral [model-name] latest pricing", "mistral api latest pricing", "mistral latest models", or search GitHub for latest model prices and context windows
|
||||
- Cross-reference: pricepertoken.com, helicone.ai, artificialanalysis.ai
|
||||
- Check Mistral API list models response
|
||||
- As last resort: Use Chrome DevTools MCP to render pricing table
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -0,0 +1,44 @@
|
||||
---
|
||||
description: Update Ollama model definitions with latest featured models
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/ollama/ollama.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Automated Workflow:**
|
||||
```bash
|
||||
# 1. Fetch the HTML (sorted by newest for stable ordering)
|
||||
curl -s "https://ollama.com/library?sort=newest" -o /tmp/ollama-newest.html
|
||||
|
||||
# 2. Parse it with the script
|
||||
node .claude/scripts/parse-ollama-models.js > /tmp/ollama-parsed.txt 2>&1
|
||||
|
||||
# 3. Review the parsed output
|
||||
cat /tmp/ollama-parsed.txt
|
||||
```
|
||||
|
||||
The parser outputs: `modelName|pulls|capabilities|sizes`
|
||||
- Example: `deepseek-r1|66200000|tools,thinking|1.5b,7b,8b,14b,32b,70b,671b`
|
||||
|
||||
**Primary Sources:**
|
||||
- Model Library: https://ollama.com/library?sort=newest
|
||||
- Parser script: `.claude/scripts/parse-ollama-models.js`
|
||||
|
||||
**Fallbacks if blocked:** Check https://github.com/ollama/ollama, search "ollama featured models", "ollama latest models", or search GitHub for latest model info
|
||||
|
||||
**Important:**
|
||||
- Parser filtering rules:
|
||||
- Top 30 newest models are always included (regardless of pull count)
|
||||
- After top 30, only models with 50K+ pulls are included
|
||||
- Models with 'cloud' capability are automatically excluded
|
||||
- Models with 'embedding' capability are automatically excluded
|
||||
- Sort them in the EXACT same order as the source (newest first, for stable ordering)
|
||||
- Extract tags: 'tools' → hasTools, 'vision' → hasVision, 'embedding' → isEmbeddings (note the 's'), 'thinking' → tags only
|
||||
- Extract 'b' tags (1.5b, 7b, 32b) to tags field
|
||||
- Set today's date (YYYYMMDD format) for newly added models only
|
||||
- Update OLLAMA_LAST_UPDATE constant to today's date
|
||||
- Do NOT change dates of existing models
|
||||
- Review the full model list for additions, removals, and changes
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments and newlines to make diffs easy to review
|
||||
@@ -0,0 +1,26 @@
|
||||
---
|
||||
description: Update OpenAI model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/openai/models/openai.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Manual hint:** For pricing page, expand all tables before copying content.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models: https://platform.openai.com/docs/models (use Copy Page button)
|
||||
- Pricing: https://platform.openai.com/docs/pricing (expand tables first)
|
||||
|
||||
**Known Issue:** OpenAI docs block automated access (403 Forbidden). Manual browser access required.
|
||||
|
||||
**Fallbacks if blocked:**
|
||||
- Search "openai models latest pricing", "openai latest models" for third-party aggregators, or search GitHub for latest model prices and context windows
|
||||
- OpenAI Node SDK (https://github.com/openai/openai-node) has limited model metadata only
|
||||
- As last resort: Use Chrome DevTools MCP to navigate and extract from official docs
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -0,0 +1,19 @@
|
||||
---
|
||||
description: Update OpenPipe model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/openai/models/openpipe.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Base Models: https://docs.openpipe.ai/base-models
|
||||
- Pricing: https://docs.openpipe.ai/pricing/pricing
|
||||
|
||||
**Fallbacks if blocked:** Search "openpipe models latest pricing", "openpipe latest models", "openpipe base models", or search GitHub for latest model prices and context windows
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -0,0 +1,20 @@
|
||||
---
|
||||
description: Update Perplexity model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/openai/models/perplexity.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models: https://docs.perplexity.ai/getting-started/models
|
||||
- Pricing: https://docs.perplexity.ai/getting-started/pricing
|
||||
- Changelog: https://docs.perplexity.ai/changelog/changelog
|
||||
|
||||
**Fallbacks if blocked:** Search "perplexity api latest pricing", "perplexity latest models", or search GitHub for latest model prices and context windows
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -0,0 +1,23 @@
|
||||
---
|
||||
description: Update xAI model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/openai/models/xai.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models & Pricing: https://docs.x.ai/docs/models?cluster=us-east-1#detailed-pricing-for-all-grok-models
|
||||
|
||||
**Known Issue:** docs.x.ai blocks automated access (403 Forbidden). Use fallbacks below.
|
||||
|
||||
**Fallbacks if blocked:**
|
||||
- Search "xai grok latest pricing", "xai latest models", "xai api models", or search GitHub for latest model prices and context windows
|
||||
- Random sites? https://the-rogue-marketing.github.io/grok-api-latest-llms-pricing-october-2025/ (find a newer version), https://langdb.ai/app/providers/xai/ (browse by model, limited coverage)
|
||||
- As last resort: Use Chrome DevTools MCP to access docs.x.ai
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -0,0 +1,57 @@
|
||||
---
|
||||
description: Verify model parameterSpecs match API-validated sweep data
|
||||
argument-hint: openai | anthropic | gemini | xai (or empty for all)
|
||||
---
|
||||
|
||||
# Verify LLM Parameters
|
||||
|
||||
Compare model `parameterSpecs` in definition files against API-validated sweep data.
|
||||
|
||||
If `$ARGUMENTS` provided, verify only that dialect, which includes reading the pair of sweep results and model definitions. Otherwise verify all four, and read the pairs in sequence.
|
||||
|
||||
## Files
|
||||
|
||||
**Sweep results** (source of truth for select parameters):
|
||||
- `tools/develop/llm-parameter-sweep/llm-{dialect}-parameters-sweep.json`
|
||||
By the time you see these files, the repo owner has already updated them via `tools/develop/llm-parameter-sweep/sweep.sh` (very long running, 15 min per vendor).
|
||||
|
||||
**Model definitions (source of truth for model definitions for the user and application, including constants, interfaces, supported parameters and sometimes allowed parameter values)**:
|
||||
- OpenAI: `src/modules/llms/server/openai/models/openai.models.ts`
|
||||
- Anthropic: `src/modules/llms/server/anthropic/anthropic.models.ts`
|
||||
- Gemini: `src/modules/llms/server/gemini/gemini.models.ts`
|
||||
- xAI: `src/modules/llms/server/openai/models/xai.models.ts`
|
||||
|
||||
## Task
|
||||
|
||||
The sweep data is the source of truth for allowed model parameter values or value ranges.
|
||||
|
||||
For each model in the sweep, verify the model definition exposes exactly those capabilities - no more, no less. This includes:
|
||||
- The parameter is present in parameterSpecs
|
||||
- The paramId variant covers exactly the values from the sweep, if applicable
|
||||
- etc.
|
||||
|
||||
Report models where the definition doesn't match the sweep.
|
||||
|
||||
## Parameter Mapping
|
||||
|
||||
Example parameter mapping. Note that new parameters may have been added to both the definition, and the sweep.
|
||||
The objective of the sweep is to hint at model definition values, but the model definitions are what matters for Big-AGI,
|
||||
and need to be carefully updated, otherwise thousands of clients may break.
|
||||
|
||||
| Dialect | Sweep Key | Model paramId |
|
||||
|---------|-----------|---------------|
|
||||
| OpenAI | `oai-reasoning-effort` | `llmVndOaiReasoningEffort*` (multiple variants) |
|
||||
| OpenAI | `oai-verbosity` | `llmVndOaiVerbosity` |
|
||||
| OpenAI | `oai-image-generation` | `llmVndOaiImageGeneration` |
|
||||
| OpenAI | `oai-web-search` | `llmVndOaiWebSearchContext` |
|
||||
| Anthropic | `ant-effort` | `llmVndAntEffort` |
|
||||
| Anthropic | `ant-thinking-budget` | `llmVndAntThinkingBudget` |
|
||||
| Gemini | `gemini-thinking-level` | `llmVndGeminiThinkingLevel*` |
|
||||
| Gemini | `gemini-thinking-budget` | `llmVndGeminiThinkingBudget` |
|
||||
| xAI | `xai-web-search` | `llmVndXaiWebSearch` |
|
||||
|
||||
## Output
|
||||
|
||||
Report first for every model the expected values from the sweep, then the actual values from the definition, then the mismatches.
|
||||
|
||||
Finally make one table for each dialect listing all models with mismatches and the specific issues.
|
||||
@@ -0,0 +1,56 @@
|
||||
---
|
||||
description: Generate changelog bullets for big-agi.com/changes
|
||||
argument-hint: date like "2026-01-10" or empty for auto-detect
|
||||
---
|
||||
|
||||
Generate changelog bullets for a single entry in https://big-agi.com/changes
|
||||
|
||||
**Step 1: Find the starting date**
|
||||
|
||||
IMPORTANT: This repo rebases frequently, so commits are INTERLEAVED throughout history.
|
||||
New commits can appear at line 10, 500, or 1800. Use AUTHOR DATE (`%ad`) to filter - it's preserved during rebases.
|
||||
|
||||
If `$ARGUMENTS` provided, use it as the cutoff date.
|
||||
|
||||
If NO argument:
|
||||
1. Fetch https://big-agi.com/changes to get the most recent changelog date
|
||||
2. Use that date as the cutoff
|
||||
|
||||
**Step 2: Get commits by author date**
|
||||
|
||||
Filter commits by author date to catch ALL new commits regardless of position in history:
|
||||
|
||||
```bash
|
||||
# For commits after Jan 10, 2026 (adjust date pattern as needed)
|
||||
git log --oneline --no-merges --format="%h %ad %s" --date=short | grep "2026-01-1[1-9]\|2026-01-2\|2026-02"
|
||||
|
||||
# Verify interleaving by checking line numbers
|
||||
git log --oneline --no-merges --format="%h %ad %s" --date=short | grep -n "2026-01-1[1-9]"
|
||||
```
|
||||
|
||||
The line numbers prove commits are scattered (e.g., lines 14, 638, 1156, 1803 = interleaved).
|
||||
|
||||
**Step 3: Write bullets**
|
||||
|
||||
Real examples from big-agi.com/changes:
|
||||
- "Gemini 3 Flash support with 4-level thinking: high, medium, low, minimal"
|
||||
- "Cloud Sync launched! - long awaited and top requested"
|
||||
- "Deepseek V3.2 Speciale comes with almost Gemini 3 Pro performance but 20 times cheaper"
|
||||
- "Anthropic Opus 4.5 with controls for effort (speed tradeoff), thinking budget, search"
|
||||
- "Login with email, via magic link"
|
||||
- "Mobile UX fixes for popups drag/interaction"
|
||||
|
||||
**Rules:**
|
||||
|
||||
1. **Order by importance** - most significant changes first, minor fixes last
|
||||
2. **Feature-first, no verb prefixes** - "Gemini 3 support" not "Add Gemini 3 support"
|
||||
3. **Model names lead** when it's about LLMs
|
||||
4. **Specific details** - "4-level thinking: high, medium, low, minimal" not "multiple thinking levels"
|
||||
5. **One-liners** - short, no fluff
|
||||
6. **Consolidate commits** - 10 persona editor commits = 1 bullet
|
||||
7. **No corporate speak** - no "enhanced", "streamlined", "robust", "leverage"
|
||||
|
||||
**Skip:** WIP, internal refactors, KB docs, automation, review cleanups, trivial fixes, deps bumps, CI changes.
|
||||
|
||||
**Output:** Just bullets, ready to paste. 2-5 bullets but adapt depending on scope, especially
|
||||
in relation to the usual https://big-agi.com/changes entries.
|
||||
@@ -0,0 +1,113 @@
|
||||
---
|
||||
description: Execute the Big-AGI release process
|
||||
argument-hint: version like "2.0.4" or empty to auto-increment patch
|
||||
---
|
||||
|
||||
Execute the release process for Big-AGI. Go step-by-step, waiting for user approval between major steps.
|
||||
|
||||
## Step 1: Determine Version
|
||||
|
||||
If `$ARGUMENTS` provided, use it. Otherwise, read `package.json` and increment patch version.
|
||||
|
||||
## Step 2: Update Files
|
||||
|
||||
1. **package.json** - Update `version` field
|
||||
2. **src/common/app.release.ts** - Increment `Monotonics.NewsVersion` (e.g., 203 → 204)
|
||||
3. **src/apps/news/news.data.tsx** - Add new entry at top of `NewsItems` array
|
||||
|
||||
For the news entry, ask user for release name and key highlights.
|
||||
|
||||
**News entry style** - Draft is a starting point, user will refine:
|
||||
- Models lead when model-heavy, grouped together
|
||||
- Callout features get own bullet with colon explanation
|
||||
- UX items grouped, minimal bold
|
||||
- Fixes last, brief
|
||||
- Release name stays subtle - don't oversell the theme
|
||||
|
||||
Use `<B>`, `<B issue={N}>`, `<B href='url'>`. Re-read file after user edits.
|
||||
|
||||
4. User runs `npm i` to update lockfile
|
||||
|
||||
## Step 3: README
|
||||
|
||||
Update `README.md`:
|
||||
- Line ~46: Update model examples if new flagship models
|
||||
- Line ~147: Add release bullet above previous version
|
||||
|
||||
**Style:** `- Open X.Y.Z: **Name** feature1, feature2, feature3`
|
||||
|
||||
## Step 4: Git Operations
|
||||
|
||||
User commits changes, then:
|
||||
```bash
|
||||
git tag vX.Y.Z
|
||||
git push opensource vX.Y.Z
|
||||
```
|
||||
|
||||
## Step 5: GitHub Release
|
||||
|
||||
Create release with `gh release create`. Structure:
|
||||
|
||||
```
|
||||
# Big-AGI X.Y.Z - Name
|
||||
|
||||
## What's New
|
||||
|
||||
### **Headline Feature**
|
||||
1-2 sentences explaining the main theme. Then bullet points for specifics.
|
||||
|
||||
### **Also New**
|
||||
- Bullet list of other features
|
||||
- Keep it scannable
|
||||
|
||||
**Full Changelog**: https://github.com/enricoros/big-AGI/compare/vPREV...vNEW
|
||||
|
||||
## Get Started
|
||||
Available now at [big-agi.com](https://big-agi.com), via Docker, or self-host from source.
|
||||
```
|
||||
|
||||
## Step 6: Announcements
|
||||
|
||||
Draft for user to post:
|
||||
|
||||
**Twitter** - Thematic, not feature dumps. Talk about what it means, not what it lists:
|
||||
```
|
||||
Big-AGI Open X.Y.Z is out!
|
||||
|
||||
[Theme - e.g., "Lots of love to models: native support, latest protocols, total configuration - puts you in control."]
|
||||
|
||||
[One more angle, natural prose]
|
||||
|
||||
[Optional link]
|
||||
```
|
||||
|
||||
**Discord** - Structured with bold headers:
|
||||
```
|
||||
## :partyblob: Big-AGI **Open** X.Y.Z
|
||||
|
||||
**Category:** Items
|
||||
**Category:** Items
|
||||
**More:** Count of commits/fixes
|
||||
```
|
||||
|
||||
## Tone Guide
|
||||
|
||||
**Good:**
|
||||
- "Lots of love to models: native support, latest protocols, total configuration"
|
||||
- "UX quality of life improvements, from Google Drive to message reorder"
|
||||
- "Gemini 3 Flash support with 4-level thinking: high, medium, low, minimal"
|
||||
|
||||
**Bad:**
|
||||
- "Rolling out the red carpet for top models!" (too salesy)
|
||||
- "Enhanced and streamlined the robust model experience" (corporate speak)
|
||||
- "Added support for Gemini 3 Flash model with multiple thinking levels" (verb prefix, vague)
|
||||
|
||||
## Reference
|
||||
|
||||
Find previous copy at:
|
||||
- **GitHub releases:** https://github.com/enricoros/big-AGI/releases
|
||||
- **News entries:** `src/apps/news/news.data.tsx`
|
||||
- **README:** `README.md` release notes section
|
||||
- **Changelog:** https://big-agi.com/changes
|
||||
|
||||
Match the existing tone - professional but human, specific not generic, features not marketing.
|
||||
Executable
+113
@@ -0,0 +1,113 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Parse Ollama models from HTML (sorted by newest for stable ordering)
|
||||
*
|
||||
* Usage:
|
||||
* 1. Fetch HTML: curl -s "https://ollama.com/library?sort=newest" -o /tmp/ollama-newest.html
|
||||
* 2. Parse: node .claude/scripts/parse-ollama-models.js
|
||||
*
|
||||
* Outputs: pipe-delimited format: modelName|pulls|capabilities|sizes
|
||||
* Example: deepseek-r1|66200000|tools,thinking|1.5b,7b,8b,14b,32b,70b,671b
|
||||
*
|
||||
* Filtering rules:
|
||||
* - Top 30 newest models are always included (regardless of pull count)
|
||||
* - After top 30, only models with 50K+ pulls are included
|
||||
* - Models with 'cloud' capability are always excluded
|
||||
* - Models with 'embedding' capability are always excluded
|
||||
*
|
||||
* Pull counts are rounded to significant figures for stable diffs:
|
||||
* - >=10M: round to 100K (e.g., 109,123,456 -> 109,100,000)
|
||||
* - >=1M: round to 10K (e.g., 5,432,100 -> 5,430,000)
|
||||
* - <1M: round to 1K (e.g., 88,700 -> 89,000)
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
|
||||
const htmlPath = process.argv[2] || '/tmp/ollama-newest.html';
|
||||
const TOP_N_ALWAYS_INCLUDE = 30;
|
||||
const MIN_PULLS_THRESHOLD = 50000;
|
||||
|
||||
if (!fs.existsSync(htmlPath)) {
|
||||
console.error(`Error: HTML file not found at ${htmlPath}`);
|
||||
console.error('Please fetch it first with:');
|
||||
console.error(' curl -s "https://ollama.com/library?sort=newest" -o /tmp/ollama-newest.html');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const html = fs.readFileSync(htmlPath, 'utf8');
|
||||
|
||||
// Split into model sections - each starts with <a href="/library/
|
||||
const modelSections = html.split(/<a href="\/library\//);
|
||||
const allParsedModels = [];
|
||||
|
||||
for (let i = 1; i < modelSections.length; i++) {
|
||||
const section = modelSections[i].substring(0, 5000); // Large enough window to capture all data
|
||||
|
||||
// Extract model name (first quoted string)
|
||||
const nameMatch = section.match(/^([^"]+)"/);
|
||||
if (!nameMatch) continue;
|
||||
const name = nameMatch[1];
|
||||
|
||||
// Extract pulls using x-test-pull-count
|
||||
const pullsMatch = section.match(/x-test-pull-count>([^<]+)</);
|
||||
let pulls = 0;
|
||||
if (pullsMatch) {
|
||||
const pullStr = pullsMatch[1].replace(/,/g, '');
|
||||
if (pullStr.includes('M')) {
|
||||
pulls = Math.floor(parseFloat(pullStr) * 1000000);
|
||||
} else if (pullStr.includes('K')) {
|
||||
pulls = Math.floor(parseFloat(pullStr) * 1000);
|
||||
} else {
|
||||
pulls = parseInt(pullStr);
|
||||
}
|
||||
}
|
||||
|
||||
// Extract capabilities (tools, vision, embedding, thinking, cloud)
|
||||
const capabilities = [];
|
||||
const capabilityRegex = /x-test-capability[^>]*>([^<]+)</g;
|
||||
let capMatch;
|
||||
while ((capMatch = capabilityRegex.exec(section)) !== null) {
|
||||
capabilities.push(capMatch[1].trim());
|
||||
}
|
||||
|
||||
// Extract sizes (1.5b, 7b, etc.)
|
||||
const sizes = [];
|
||||
const sizeRegex = /x-test-size[^>]*>([^<]+)</g;
|
||||
let sizeMatch;
|
||||
while ((sizeMatch = sizeRegex.exec(section)) !== null) {
|
||||
sizes.push(sizeMatch[1].trim());
|
||||
}
|
||||
|
||||
// Skip models with 'cloud' or 'embedding' capability
|
||||
if (capabilities.includes('cloud') || capabilities.includes('embedding')) {
|
||||
continue;
|
||||
}
|
||||
|
||||
allParsedModels.push({ name, pulls: roundPulls(pulls), capabilities, sizes });
|
||||
}
|
||||
|
||||
// Apply filtering: top 30 always included, rest need 50K+ pulls
|
||||
const models = allParsedModels.filter((model, index) => {
|
||||
return index < TOP_N_ALWAYS_INCLUDE || model.pulls >= MIN_PULLS_THRESHOLD;
|
||||
});
|
||||
|
||||
/**
|
||||
* Round pulls to significant figures for stable output.
|
||||
* This reduces churn from daily fluctuations while preserving magnitude.
|
||||
*/
|
||||
function roundPulls(pulls) {
|
||||
if (pulls >= 10000000) return Math.round(pulls / 100000) * 100000; // >=10M: round to 100K
|
||||
if (pulls >= 1000000) return Math.round(pulls / 10000) * 10000; // >=1M: round to 10K
|
||||
return Math.round(pulls / 1000) * 1000; // <1M: round to 1K
|
||||
}
|
||||
|
||||
// Output in pipe-delimited format (in the order they appear on the page)
|
||||
models.forEach(m => {
|
||||
const caps = m.capabilities.join(',');
|
||||
const tags = m.sizes.join(',');
|
||||
console.log(`${m.name}|${m.pulls}|${caps}|${tags}`);
|
||||
});
|
||||
|
||||
const topNCount = Math.min(TOP_N_ALWAYS_INCLUDE, allParsedModels.length);
|
||||
const thresholdCount = models.length - topNCount;
|
||||
console.error(`\nTotal models: ${models.length} (top ${topNCount} newest + ${thresholdCount} with ${MIN_PULLS_THRESHOLD / 1000}K+ pulls)`);
|
||||
@@ -0,0 +1,45 @@
|
||||
{
|
||||
"permissions": {
|
||||
"allow": [
|
||||
"Bash(cat:*)",
|
||||
"Bash(cp:*)",
|
||||
"Bash(curl:*)",
|
||||
"Bash(eslint:*)",
|
||||
"Bash(find:*)",
|
||||
"Bash(gh issue list:*)",
|
||||
"Bash(gh issue view:*)",
|
||||
"Bash(git branch:*)",
|
||||
"Bash(git cherry-pick:*)",
|
||||
"Bash(git describe:*)",
|
||||
"Bash(git grep:*)",
|
||||
"Bash(git log:*)",
|
||||
"Bash(git ls-tree:*)",
|
||||
"Bash(git mv:*)",
|
||||
"Bash(git show:*)",
|
||||
"Bash(grep:*)",
|
||||
"Bash(ls:*)",
|
||||
"Bash(mkdir:*)",
|
||||
"Bash(node:*)",
|
||||
"Bash(npm install)",
|
||||
"Bash(npm install:*)",
|
||||
"Bash(npm run:*)",
|
||||
"Bash(rg:*)",
|
||||
"Bash(rm:*)",
|
||||
"Bash(sed:*)",
|
||||
"Bash(tree:*)",
|
||||
"Bash(tsc:*)",
|
||||
"Read(//tmp/**)",
|
||||
"Skill(llms:update-models*)",
|
||||
"WebFetch",
|
||||
"WebFetch(domain:big-agi.com)",
|
||||
"WebSearch",
|
||||
"mcp__chrome-devtools",
|
||||
"mcp__github",
|
||||
"mcp__ide__getDiagnostics"
|
||||
],
|
||||
"deny": [
|
||||
"Read(node_modules)",
|
||||
"Read(node_modules/**)"
|
||||
]
|
||||
}
|
||||
}
|
||||
+15
-40
@@ -1,43 +1,18 @@
|
||||
# big-AGI non-code files
|
||||
/docs/
|
||||
/dist/
|
||||
README.md
|
||||
*
|
||||
|
||||
# Ignore build and log files
|
||||
Dockerfile
|
||||
/.dockerignore
|
||||
!app/
|
||||
!kb/
|
||||
!pages/
|
||||
!public/
|
||||
!src/
|
||||
!tools/
|
||||
|
||||
# Node build artifacts
|
||||
/node_modules
|
||||
/.pnp
|
||||
.pnp.js
|
||||
!*.mjs
|
||||
!middleware_BASIC_AUTH.ts
|
||||
!middleware.ts
|
||||
!next.config.ts
|
||||
!package*.json
|
||||
!tsconfig.json
|
||||
|
||||
# next.js
|
||||
/.next/
|
||||
/out/
|
||||
|
||||
# production
|
||||
/build
|
||||
|
||||
# versioning
|
||||
.git/
|
||||
.github/
|
||||
|
||||
# IDEs
|
||||
.idea/
|
||||
|
||||
# debug
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
.pnpm-debug.log*
|
||||
|
||||
# local env files
|
||||
.env*.local
|
||||
|
||||
# vercel
|
||||
.vercel
|
||||
|
||||
# typescript
|
||||
*.tsbuildinfo
|
||||
next-env.d.ts
|
||||
!LICENSE
|
||||
!README.md
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
{
|
||||
"extends": "next/core-web-vitals"
|
||||
}
|
||||
@@ -0,0 +1,70 @@
|
||||
name: 🔥 Make AI Fix This
|
||||
description: Bug, question, or feedback - AI analyzes and changes Big-AGI appropriately
|
||||
labels: [ 'claude-triage' ]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for opening an issue! Our AI will analyze it and change Big-AGI appropriately.
|
||||
|
||||
**What happens next:**
|
||||
- AI searches the codebase and documentation
|
||||
- You get a response, typically within 30 minutes
|
||||
- Ticket gets follow-up and community votes
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: What's happening?
|
||||
description: Describe the bug, feature request, or question. Be as detailed as you can.
|
||||
placeholder: |
|
||||
Bug example: "In Beam, Anthropic models seem to have search off..."
|
||||
Model request: "Add Claude Opus 4.5 out today, see https://..."
|
||||
Feature example: "Add the option to to save frequent prompt templates for reuse..."
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Where does this happen?
|
||||
description: If this is a bug or issue, where are you experiencing it?
|
||||
options:
|
||||
- Big-AGI Pro (big-agi.com)
|
||||
- Self-deployed from GitHub
|
||||
- Docker deployment
|
||||
- Local development
|
||||
- Not applicable (question/feedback)
|
||||
- Other
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Impact on your workflow
|
||||
description: How does this affect your use of Big-AGI?
|
||||
options:
|
||||
- Blocking - Can't use Big-AGI
|
||||
- High - Major feature broken
|
||||
- Medium - Workaround exists
|
||||
- Low - Minor inconvenience
|
||||
- None - Just a question/suggestion
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Environment (if applicable)
|
||||
description: Device, OS, browser - only if reporting a bug
|
||||
placeholder: |
|
||||
Device: Macbook Pro M3
|
||||
OS: macOS 15.2
|
||||
Browser: Chrome 131
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Additional context
|
||||
description: Screenshots, error messages, or anything else that helps
|
||||
placeholder: Paste screenshots or error messages here
|
||||
validations:
|
||||
required: false
|
||||
@@ -5,14 +5,29 @@ labels: [ 'type: bug' ]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: Thank you for reporting a bug.
|
||||
value: Thank you for reporting a bug. Please help us by providing accurate environment information.
|
||||
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Environment
|
||||
description: (required) Where are you experiencing this issue?
|
||||
options:
|
||||
- Big-AGI Pro (big-agi.com)
|
||||
- Self-deployed from GitHub
|
||||
- Docker container (specify in description)
|
||||
- Local development
|
||||
- Other
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Description
|
||||
description: (required) Please provide a clear description. Please also provide the steps to reproduce.
|
||||
description: (required) Please provide a clear description and **steps to reproduce**.
|
||||
placeholder: 'Concise description + steps to reproduce.'
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Device and browser
|
||||
@@ -20,10 +35,12 @@ body:
|
||||
placeholder: 'Device: (e.g., iPhone 16, Pixel 9, PC, Macbook...), OS: (e.g., iOS 17, Windows 12), Browser: (e.g., Chrome 119, Safari 18, Firefox..)'
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Screenshots and more
|
||||
placeholder: 'Attach screenshots, or add any additional context here.'
|
||||
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Willingness to Contribute
|
||||
|
||||
@@ -32,7 +32,6 @@ assignees: enricoros
|
||||
- [ ] verify deployment on Vercel
|
||||
- [ ] verify container on GitHub Packages
|
||||
- [ ] update the GitHub release
|
||||
- [ ] push as stable `git push opensource main:main-stable`
|
||||
- Announce:
|
||||
- [ ] Discord announcement
|
||||
- [ ] Twitter announcement
|
||||
@@ -51,7 +50,7 @@ To familiarize yourself with the application, the following are the Website and
|
||||
```
|
||||
|
||||
- paste the URL: https://big-agi.com
|
||||
- drag & drop: [README.md](https://raw.githubusercontent.com/enricoros/big-AGI/v2-dev/README.md)
|
||||
- drag & drop: [README.md](https://raw.githubusercontent.com/enricoros/big-AGI/main/README.md)
|
||||
|
||||
```markdown
|
||||
I am announcing a new version, 1.2.3.
|
||||
|
||||
@@ -0,0 +1,69 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: docker
|
||||
directory: /
|
||||
schedule:
|
||||
interval: weekly
|
||||
commit-message:
|
||||
prefix: "chore(deps)"
|
||||
ignore:
|
||||
- dependency-name: "node"
|
||||
versions: [">=25", "<26"] # Node 25 breaks the build because of a dummy localStorage object
|
||||
|
||||
- package-ecosystem: github-actions
|
||||
directory: /
|
||||
schedule:
|
||||
interval: weekly
|
||||
commit-message:
|
||||
prefix: "chore(deps)"
|
||||
|
||||
# Disabled npm updates for now - will need precise package pinning, as some packages changed behavior upstream
|
||||
# - package-ecosystem: npm
|
||||
# directory: /
|
||||
# schedule:
|
||||
# interval: weekly
|
||||
# commit-message:
|
||||
# prefix: "chore(deps)"
|
||||
# cooldown:
|
||||
# semver-patch: 3
|
||||
# semver-minor: 7
|
||||
# semver-major: 14
|
||||
# # Ignore packages intentionally pinned due to upstream issues
|
||||
# ignore:
|
||||
# # Issue #857: v11.6+ breaks streaming; tried 11.4.4/11.6/11.7, only 11.5.1 works
|
||||
# - dependency-name: "@trpc/*"
|
||||
# versions: [">=11.5.1", "<12"]
|
||||
# # Pinned during tRPC #857 debugging - may be safe to unpin, test first
|
||||
# - dependency-name: "@tanstack/react-query"
|
||||
# versions: [">=5.90.10", "<6"]
|
||||
# # Pinned because 5.0.8 changes signatures so return set({ .. }) != void;
|
||||
# - dependency-name: "zustand"
|
||||
# versions: [">=5.0.7", "<6"]
|
||||
# groups:
|
||||
# next:
|
||||
# patterns:
|
||||
# - "@next/*"
|
||||
# - "eslint-config-next"
|
||||
# - "next"
|
||||
# react:
|
||||
# patterns:
|
||||
# - "react"
|
||||
# - "react-dom"
|
||||
# - "@types/react"
|
||||
# - "@types/react-dom"
|
||||
# emotion:
|
||||
# patterns:
|
||||
# - "@emotion/*"
|
||||
# mui:
|
||||
# patterns:
|
||||
# - "@mui/*"
|
||||
# dnd-kit:
|
||||
# patterns:
|
||||
# - "@dnd-kit/*"
|
||||
# prisma:
|
||||
# patterns:
|
||||
# - "@prisma/*"
|
||||
# - "prisma"
|
||||
# vercel:
|
||||
# patterns:
|
||||
# - "@vercel/*"
|
||||
@@ -0,0 +1,59 @@
|
||||
name: Claude Code DM
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened, assigned]
|
||||
issue_comment:
|
||||
types: [created]
|
||||
pull_request_review:
|
||||
types: [submitted]
|
||||
pull_request_review_comment:
|
||||
types: [created]
|
||||
|
||||
jobs:
|
||||
claude-dm:
|
||||
# Only allow repository owner to trigger DMs with @claude (blocks other users and bots)
|
||||
if: |
|
||||
github.actor == 'enricoros' &&
|
||||
github.triggering_actor == 'enricoros' &&
|
||||
((github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude'))) ||
|
||||
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')))
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
|
||||
permissions:
|
||||
contents: write # Required for code creation and commits
|
||||
issues: write
|
||||
pull-requests: write
|
||||
actions: read # Required for Claude to read CI results on PRs
|
||||
id-token: write # required to use OIDC to authenticate to Claude Code API
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0 # 1 -> 0: full history helps with git blame, etc.
|
||||
|
||||
- name: Run Claude Code DM Response
|
||||
id: claude
|
||||
uses: anthropics/claude-code-action@v1
|
||||
with:
|
||||
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
|
||||
|
||||
# Security: Only users with write access can trigger (DMs allow code execution)
|
||||
# Note: contents:write permission enables code creation and commits
|
||||
|
||||
# This is an optional setting that allows Claude to read CI results on PRs
|
||||
additional_permissions: |
|
||||
actions: read
|
||||
|
||||
# Optional: Add claude_args to customize behavior and configuration
|
||||
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
|
||||
# or https://docs.claude.com/en/docs/claude-code/cli-reference for available options
|
||||
claude_args: |
|
||||
--model claude-opus-4-6
|
||||
--max-turns 100
|
||||
--allowedTools "Edit,Read,Write,WebFetch,WebSearch,Bash(cat:*),Bash(cp:*),Bash(find:*),Bash(git branch:*),Bash(grep:*),Bash(ls:*),Bash(mkdir:*),Bash(npm run:*),Bash(gh issue:*),Bash(gh search:*),Bash(gh label:*),Bash(gh pr:*),SlashCommand"
|
||||
@@ -0,0 +1,83 @@
|
||||
name: Claude Code Auto-Triage Issues
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [ opened ]
|
||||
|
||||
jobs:
|
||||
claude-issue-triage:
|
||||
# Optional: Skip for bot users and direct mentions in the body (handled by claude-dm.yml)
|
||||
if: |
|
||||
github.event.issue.user.type != 'Bot' &&
|
||||
!contains(github.event.issue.body, '@claude')
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
pull-requests: read # was write, but we're not altering PRs here
|
||||
actions: read
|
||||
id-token: write # required to use OIDC to authenticate to Claude Code API
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0 # 1 -> 0: full history helps with git blame, etc.
|
||||
|
||||
- name: Analyze issue and provide help
|
||||
uses: anthropics/claude-code-action@v1
|
||||
with:
|
||||
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
|
||||
# Security: Allow any user to trigger triage (automated issue help is safe)
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
allowed_non_write_users: '*'
|
||||
# track_progress: true # Enables tracking comments
|
||||
show_full_output: ${{ github.event.repository.private }} # security: do not log verbosely in private repo
|
||||
|
||||
# This is an optional setting that allows Claude to read CI results on PRs
|
||||
additional_permissions: |
|
||||
actions: read
|
||||
|
||||
prompt: |
|
||||
REPO: ${{ github.repository }}
|
||||
ISSUE NUMBER: #${{ github.event.issue.number }}
|
||||
|
||||
A user has reported an issue. Please help them by:
|
||||
|
||||
1. Deep think about the issue:
|
||||
**Understand the problem**: Analyze the issue description and any error messages
|
||||
**Search for context**:
|
||||
- Use the repository's CLAUDE.md for high level guidance and especially kb/ documentation
|
||||
- Look in relevant code files, including kb/ documentation
|
||||
**Use web search**: When potentially outside Big-AGI (e.g. user configuration), search the web for similar errors or related issues
|
||||
**Provide a solution**:
|
||||
- Provide multiple solutions if uncertain, and say so
|
||||
- Analyze the code and suggest specific fixes with code examples
|
||||
- If possible also suggest fixes or workarounds for immediate relief
|
||||
- Reference specific files and line numbers
|
||||
- Suggest workarounds for immediate relief if applicable
|
||||
- Use web search to find similar issues and solutions
|
||||
- Test selectively and even npm install and run build if needed to verify the solution
|
||||
2. Always add the 'claude-triage' issue label to indicate this issue was triaged by Claude
|
||||
3. Comment with:
|
||||
- Very brief thank you note, if applicable
|
||||
- Initial assessment
|
||||
- Next steps or clarification needed
|
||||
- Link duplicates if found
|
||||
|
||||
Remember: design values for this codebase: orthogonal features, features that generalize well, modularized and reusable code,
|
||||
type-discriminated data, optimized code, zero maintenance burden. Minimize future pain, etc.
|
||||
|
||||
IMPORTANT: You are in READ-ONLY triage mode. Analyze and suggest solutions in your comment, but do NOT attempt to push code changes.
|
||||
If you're uncertain, say so and suggest next steps.
|
||||
Be welcoming, helpful, professional, solution-focused and no-BS.
|
||||
|
||||
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
|
||||
# or https://docs.claude.com/en/docs/claude-code/cli-reference for available options
|
||||
claude_args: |
|
||||
--model claude-opus-4-6
|
||||
--max-turns 75
|
||||
--allowedTools "Edit,Read,Write,WebFetch,WebSearch,Bash(cat:*),Bash(cp:*),Bash(find:*),Bash(git branch:*),Bash(grep:*),Bash(ls:*),Bash(mkdir:*),Bash(npm run:*),Bash(gh issue:*),Bash(gh search:*),Bash(gh label:*),Bash(gh pr:*),SlashCommand"
|
||||
@@ -12,38 +12,130 @@ name: Create and publish Docker images
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- v2-dev
|
||||
#- v1-dev # Disabled because this is not needed anymore
|
||||
#- v1-stable # Disabled as the v* tag is used for stable releases
|
||||
- main # Primary branch (Big-AGI Open)
|
||||
tags:
|
||||
- 'v*' # Trigger on version tags (e.g., v1.7.0)
|
||||
- 'v2.*' # Stable releases (v2.0.0, v2.1.0, etc.)
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build-and-push-image:
|
||||
runs-on: ubuntu-latest
|
||||
# Build job: runs on native runners for each platform (no QEMU emulation)
|
||||
build:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- platform: linux/amd64
|
||||
runner: ubuntu-latest
|
||||
- platform: linux/arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
|
||||
runs-on: ${{ matrix.runner }}
|
||||
name: Build ${{ matrix.platform }}
|
||||
timeout-minutes: 30
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
security-events: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Prepare
|
||||
run: |
|
||||
platform=${{ matrix.platform }}
|
||||
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
||||
echo "IMAGE_NAME_LC=${IMAGE_NAME,,}" >> $GITHUB_ENV
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
||||
|
||||
- name: Log in to the Container registry
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
labels: |
|
||||
org.opencontainers.image.title=Big-AGI Open
|
||||
org.opencontainers.image.description=Big-AGI Open - Multi-model AI workspace for experts who need to think broader, decide smarter, and build with confidence.
|
||||
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
|
||||
org.opencontainers.image.documentation=https://big-agi.com
|
||||
|
||||
- name: Build and push by digest
|
||||
id: build
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
platforms: ${{ matrix.platform }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LC }}
|
||||
build-args: |
|
||||
NEXT_PUBLIC_GA4_MEASUREMENT_ID=${{ secrets.GA4_MEASUREMENT_ID }}
|
||||
NEXT_PUBLIC_BUILD_HASH=${{ github.sha }}
|
||||
NEXT_PUBLIC_BUILD_REF_NAME=${{ github.ref_name }}
|
||||
outputs: type=image,push-by-digest=true,name-canonical=true,push=true,oci-mediatypes=true
|
||||
provenance: false
|
||||
cache-from: type=gha,scope=${{ github.repository }}-${{ matrix.platform }}
|
||||
cache-to: type=gha,scope=${{ github.repository }}-${{ matrix.platform }},mode=max
|
||||
|
||||
- name: Export digest
|
||||
run: |
|
||||
mkdir -p ${{ runner.temp }}/digests
|
||||
digest="${{ steps.build.outputs.digest }}"
|
||||
touch "${{ runner.temp }}/digests/${digest#sha256:}"
|
||||
|
||||
- name: Upload digest
|
||||
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
|
||||
with:
|
||||
name: digests-${{ env.PLATFORM_PAIR }}
|
||||
path: ${{ runner.temp }}/digests/*
|
||||
if-no-files-found: error
|
||||
retention-days: 1
|
||||
|
||||
# Merge job: combines platform-specific images into a unified multi-arch manifest
|
||||
merge:
|
||||
name: Merge manifests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
needs: build
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Prepare
|
||||
run: echo "IMAGE_NAME_LC=${IMAGE_NAME,,}" >> $GITHUB_ENV
|
||||
|
||||
- name: Download digests
|
||||
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
|
||||
with:
|
||||
path: ${{ runner.temp }}/digests
|
||||
pattern: digests-*
|
||||
merge-multiple: true
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
||||
|
||||
- name: Log in to the Container registry
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
@@ -51,35 +143,34 @@ jobs:
|
||||
|
||||
- name: Extract metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
tags: |
|
||||
type=raw,value=development,enable=${{ github.ref == 'refs/heads/v2-dev' }} # For v2-dev branch
|
||||
type=raw,value=stable,enable=${{ github.ref == 'refs/heads/v1-stable' }}
|
||||
type=ref,event=tag # Use the tag name as a tag for tag builds
|
||||
type=semver,pattern={{version}} # Generate semantic versioning tags for tag builds
|
||||
type=sha,format=short,prefix=sha- # Just in case none of the above applies
|
||||
labels: |
|
||||
org.opencontainers.image.title=Big-AGI
|
||||
org.opencontainers.image.description=Generative AI suite powered by state-of-the-art models
|
||||
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
|
||||
org.opencontainers.image.documentation=https://big-agi.com
|
||||
# Development: main branch
|
||||
type=raw,value=development,enable=${{ github.ref == 'refs/heads/main' }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
build-args: |
|
||||
NEXT_PUBLIC_GA4_MEASUREMENT_ID=${{ secrets.GA4_MEASUREMENT_ID }}
|
||||
# Enable build cache (future)
|
||||
#cache-from: type=gha
|
||||
#cache-to: type=gha,mode=max
|
||||
# Enable provenance and SBOM (future)
|
||||
#provenance: true
|
||||
#sbom: true
|
||||
# Latest: v2.x releases (safe default)
|
||||
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v2.') }}
|
||||
|
||||
# Stable: v2.x releases (alias)
|
||||
type=raw,value=stable,enable=${{ startsWith(github.ref, 'refs/tags/v2.') }}
|
||||
|
||||
# Version tags (v2.0.0, 2.0.0)
|
||||
type=ref,event=tag
|
||||
type=semver,pattern={{version}}
|
||||
|
||||
- name: Create manifest list and push
|
||||
working-directory: ${{ runner.temp }}/digests
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
|
||||
--annotation='index:org.opencontainers.image.title=Big-AGI Open' \
|
||||
--annotation='index:org.opencontainers.image.description=Big-AGI Open - Multi-model AI workspace for experts who need to think broader, decide smarter, and build with confidence.' \
|
||||
--annotation='index:org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}' \
|
||||
--annotation='index:org.opencontainers.image.documentation=https://big-agi.com' \
|
||||
$(printf '${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LC }}@sha256:%s ' *)
|
||||
|
||||
- name: Inspect image
|
||||
run: |
|
||||
docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LC }}:${{ steps.meta.outputs.version }}
|
||||
@@ -53,3 +53,6 @@ next-env.d.ts
|
||||
.env*.local
|
||||
/.run/dev (ENV).run.xml
|
||||
/src/modules/3rdparty/aider/scratch*
|
||||
|
||||
# Ignore temporary CC files
|
||||
/tmpclaude*
|
||||
@@ -1,3 +0,0 @@
|
||||
overrides=@mui/material@^5.0.0:
|
||||
dependencies:
|
||||
@mui/material: replaced-by=@mui/joy
|
||||
@@ -0,0 +1,257 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Development Commands
|
||||
|
||||
```bash
|
||||
# Validate (~5s, safe while dev server runs, do NOT use `next build` ~45s for same checks)
|
||||
tsc --noEmit --pretty && npm run lint # Type check (~3.5s) + ESLint (~2s)
|
||||
eslint src/path/to/file.ts # Lint specific file
|
||||
|
||||
# Full build (~60s+, only when suspecting runtime/bundle issues)
|
||||
npm run build # next build runs compile+lint+types but stops at first type-error file; tsc shows all at once
|
||||
|
||||
# Database & External Services
|
||||
# npm run supabase:local-update-types # Generate TypeScript types
|
||||
# npm run stripe:listen # Listen for Stripe webhooks
|
||||
```
|
||||
|
||||
## Development Environment
|
||||
|
||||
- Dev servers may be running on ports 3000, 3001, 3002, or 3003 (not always this app - other projects may occupy these ports). Never start or stop dev servers, let the user do it.
|
||||
- For runtime debugging, use `mcp__chrome-devtools` if present to launch a controlled Chrome instance against the running dev server - useful for console errors, network inspection, and React devtree.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
Big-AGI is a Next.js 15 application with a modular architecture built for advanced AI interactions. The codebase follows a three-layer structure with distinct separation of concerns.
|
||||
|
||||
### Core Directory Structure
|
||||
|
||||
You are started from the root of the repository (i.e. where the git folder is or scripts should be run from). You won't need to issue 'cd ...' commands.
|
||||
|
||||
```
|
||||
/app/api/ # Next.js App Router (API routes only, mostly -> /src/server/)
|
||||
/pages/ # Next.js Pages Router (file-based, mostly -> /src/apps/)
|
||||
/src/
|
||||
├── apps/ # Feature applications (self-contained modules)
|
||||
├── modules/ # Reusable business logic and integrations
|
||||
├── common/ # Shared infrastructure and utilities
|
||||
└── server/ # Backend API layer with tRPC
|
||||
/kb/ # Knowledge base for modules, architectures
|
||||
```
|
||||
|
||||
### Key Technologies
|
||||
|
||||
- **Frontend**: Next.js 15, React 18, Material-UI Joy, Emotion (CSS-in-JS)
|
||||
- **State Management**: Zustand with localStorage/IndexedDB (single cell) persistence
|
||||
- **API Layer**: tRPC with TanStack React Query for type-safe communication
|
||||
- **Runtime**: Edge Runtime for AI operations, Node.js for data processing
|
||||
|
||||
### Apps Architecture Pattern
|
||||
|
||||
Each app in `/src/apps/` is a self-contained feature module:
|
||||
- Main component (`App*.tsx`)
|
||||
- Local state store (`store-app-*.ts`)
|
||||
- Feature-specific components and layouts
|
||||
- Runtime configurations
|
||||
|
||||
Example apps: `chat/`, `call/`, `beam/`, `draw/`, `personas/`, `settings-modal/`
|
||||
|
||||
### Modules Architecture Pattern
|
||||
|
||||
Modules in `/src/modules/` provide reusable business logic:
|
||||
- **`aix/`** - AI communication framework for real-time streaming
|
||||
- **`beam/`** - Multi-model AI reasoning system (scatter/gather pattern)
|
||||
- **`blocks/`** - Content rendering (markdown, code, images, etc.)
|
||||
- **`llms/`** - Language model abstraction supporting 19 vendors
|
||||
|
||||
### Key Subsystems & Their Patterns
|
||||
|
||||
#### 1. AIX - Real-time AI Communication
|
||||
**Location**: `/src/modules/aix/`
|
||||
**Pattern**: Client-server streaming architecture with provider abstraction
|
||||
|
||||
- **Client** → tRPC → **Server** → **AI Providers**
|
||||
- Handles streaming/non-streaming responses with batching and error recovery
|
||||
- Particle-based streaming: `AixWire_Particles` → `ContentReassembler` → `DMessage`
|
||||
- Provider-agnostic through adapter pattern (OpenAI, Anthropic, Gemini protocols)
|
||||
|
||||
#### 3. Beam - Multi-Model Reasoning
|
||||
**Location**: `/src/modules/beam/`
|
||||
**Pattern**: Scatter/Gather for parallel AI processing
|
||||
|
||||
- **Scatter**: Multiple models (rays) process input in parallel
|
||||
- **Gather**: Fusion algorithms combine outputs
|
||||
- Real-time UI updates via vanilla Zustand stores
|
||||
- BeamStore per conversation via ConversationHandler
|
||||
|
||||
#### 4. Conversation Management
|
||||
**Location**: `/src/common/stores/chat/` and `/src/common/chat-overlay/`
|
||||
**Pattern**: Overlay architecture with handler per conversation
|
||||
|
||||
- `ConversationHandler` orchestrates chat, beam, ephemerals
|
||||
- Per-chat stores: `PerChatOverlayStore` + `BeamStore`
|
||||
- Message structure: `DMessage` → `DMessageFragment[]`
|
||||
- Supports multi-pane with independent conversation states
|
||||
|
||||
### Storage System
|
||||
|
||||
Big-AGI uses a local-first architecture with Zustand + IndexedDB:
|
||||
- **Zustand** stores for in-memory state management
|
||||
- **localStorage** for persistent settings/all storage (via Zustand persist middleware)
|
||||
- **IndexedDB** for persistent chat-only storage (via Zustand persist middleware) on a single key-val cell
|
||||
- **Local-first** architecture with offline capability
|
||||
- **Migration system** for upgrading data structures across versions
|
||||
|
||||
Key storage patterns:
|
||||
- Stores use `createIDBPersistStorage()` for IndexedDB persistence
|
||||
- Version-based migrations handle data structure changes
|
||||
- Partialize/merge functions control what gets persisted
|
||||
- Rehydration logic repairs and upgrades data on load
|
||||
|
||||
Located in `/src/common/stores/` with stores like:
|
||||
- `chat/store-chats.ts`: Conversations and messages
|
||||
- `llms/store-llms.ts`: Model configurations
|
||||
|
||||
### Layout System ("Optima")
|
||||
|
||||
The Optima layout system provides:
|
||||
- **Responsive design** adapting desktop/mobile
|
||||
- **Drawer/Panel/Toolbar** composition
|
||||
- **Split-pane support** for multi-conversation views
|
||||
- **Portal-based rendering** for flexible component placement
|
||||
|
||||
Located in `/src/common/layout/optima/`
|
||||
|
||||
### State Management Patterns
|
||||
|
||||
1. **Global Stores** (Zustand with IndexedDB persistence)
|
||||
- `store-chats`: Conversations and messages
|
||||
- `store-llms`: Model configurations
|
||||
- `store-ux-labs`: UI preferences and labs features
|
||||
- **Zustand pattern**: Always wrap multi-property selectors with `useShallow` from `zustand/react/shallow` to prevent re-renders on reference changes
|
||||
|
||||
2. **Per-Instance Stores** (Vanilla Zustand)
|
||||
- `store-beam_vanilla`: Beam scatter/gather state
|
||||
- `store-perchat_vanilla`: Chat overlay state
|
||||
- `store-attachment-drafts_vanilla`: Attachment drafts
|
||||
- High-performance, no React integration
|
||||
|
||||
3. **Module Stores**
|
||||
- Feature-specific configuration and state
|
||||
- Example: `store-module-beam`, `store-module-t2i`
|
||||
|
||||
### User Flows & Interdependencies
|
||||
|
||||
#### Chat Message Flow
|
||||
1. User input → `Composer` → `DMessage` creation
|
||||
2. `ConversationHandler.messageAppend()` → Store update
|
||||
3. `_handleExecute()` / `ConversationHandler.executeChatMessages()` → AIX client request
|
||||
4. AIX streaming → `ContentReassembler` → UI updates
|
||||
5. Zustand auto-persistence → IndexedDB
|
||||
|
||||
#### Beam Multi-Model Flow
|
||||
1. User triggers Beam → `BeamStore.open()` state update
|
||||
2. Scatter: Parallel `aixChatGenerateContent()` to N models
|
||||
3. Real-time ray updates → UI progress
|
||||
4. Gather: User selects fusion → Combined output
|
||||
5. Result → New message in conversation
|
||||
|
||||
### Development Patterns
|
||||
|
||||
#### Module Integration
|
||||
- Each module exports its functionality through index files
|
||||
- Modules register with central registries (e.g., `vendors.registry.ts`)
|
||||
- Configuration objects define module behavior
|
||||
- Type-safe integration through strict TypeScript interfaces
|
||||
|
||||
#### Component Patterns
|
||||
- **Controlled components** with clear prop interfaces
|
||||
- **Hook-based logic** extraction for reusability
|
||||
- **Portal rendering** for overlays and modals
|
||||
- **Suspense boundaries** for async operations
|
||||
|
||||
#### API Patterns
|
||||
- **tRPC routers** for type-safe API endpoints
|
||||
- **Zod schemas** for runtime validation
|
||||
- **Middleware** for request/response processing
|
||||
- **Edge functions** for performance-critical AI operations
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- API keys stored client-side in localStorage (user-provided)
|
||||
- Server-side API keys in environment variables only
|
||||
- XSS protection through proper content escaping
|
||||
- No credential transmission to third parties
|
||||
|
||||
## Knowledge Base
|
||||
|
||||
Architecture and system documentation is available in the `/kb/` knowledge base:
|
||||
|
||||
@kb/KB.md
|
||||
|
||||
## Common Development Tasks
|
||||
|
||||
### Testing & Quality
|
||||
- Run `npm run lint` before committing
|
||||
- Type-check with `tsc --noEmit`
|
||||
- Test critical user flows manually
|
||||
|
||||
### Adding a New LLM Vendor
|
||||
1. Create vendor in `/src/modules/llms/vendors/[vendor]/`
|
||||
2. Implement `IModelVendor` interface
|
||||
3. Register in `vendors.registry.ts`
|
||||
4. Add environment variables to the vendor's server file and `/src/server/env.server.ts` (if server-side keys needed)
|
||||
|
||||
### Debugging Storage Issues
|
||||
- Check IndexedDB: DevTools → Application → IndexedDB → `app-chats`
|
||||
- Monitor Zustand state: Use Zustand DevTools
|
||||
- Check migration logs in console during rehydration
|
||||
|
||||
## Code Examples
|
||||
|
||||
### AIX Streaming Pattern
|
||||
```typescript
|
||||
// Efficient streaming with decimation
|
||||
aixChatGenerateContent_DMessage_FromConversation(
|
||||
llmId,
|
||||
chatHistory,
|
||||
{ abortSignal, throttleParallelThreads: 1 },
|
||||
async (update, isDone) => {
|
||||
// Real-time UI updates
|
||||
}
|
||||
);
|
||||
```
|
||||
|
||||
### Model Registry Pattern
|
||||
```typescript
|
||||
// Registry pattern for extensibility
|
||||
const MODEL_VENDOR_REGISTRY: Record<ModelVendorId, IModelVendor> = {
|
||||
openai: ModelVendorOpenAI,
|
||||
anthropic: ModelVendorAnthropic,
|
||||
// ... 17 more vendors
|
||||
};
|
||||
```
|
||||
|
||||
## Server Architecture
|
||||
|
||||
The server uses a split architecture with two tRPC routers:
|
||||
|
||||
### Edge Network (`trpc.router-edge`)
|
||||
Distributed edge runtime for low-latency AI operations:
|
||||
- **AIX** - AI streaming and communication
|
||||
- **LLM Routers** - Direct vendor integrations (OpenAI, Anthropic, Gemini, Ollama)
|
||||
- **Speex** - Unified TTS router (ElevenLabs, Inworld, and other TTS vendors)
|
||||
- **External Services** - Google Search, YouTube transcripts
|
||||
|
||||
Located at `/src/server/trpc/trpc.router-edge.ts`
|
||||
|
||||
### Cloud Network (`trpc.router-cloud`)
|
||||
Centralized server for data processing operations:
|
||||
- **Browse** - Web scraping and content extraction
|
||||
- **Trade** - Import/export functionality (ChatGPT, markdown, JSON)
|
||||
|
||||
Located at `/src/server/trpc/trpc.router-cloud.ts`
|
||||
|
||||
**Key Pattern**: Edge runtime for AI (fast, distributed), Cloud runtime for data ops (centralized, Node.js)
|
||||
+36
-11
@@ -1,6 +1,9 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
# check=skip=CopyIgnoredFile
|
||||
|
||||
# Base
|
||||
FROM node:22-alpine AS base
|
||||
ENV NEXT_TELEMETRY_DISABLED 1
|
||||
FROM node:24-alpine AS base
|
||||
ENV NEXT_TELEMETRY_DISABLED=1
|
||||
|
||||
# Dependencies
|
||||
FROM base AS deps
|
||||
@@ -14,7 +17,7 @@ COPY src/server/prisma ./src/server/prisma
|
||||
RUN sh -c '[ ! -e /lib/libssl.so.3 ] && ln -s /usr/lib/libssl.so.3 /lib/libssl.so.3 || echo "Link already exists"'
|
||||
|
||||
# Install dependencies, including dev (release builds should use npm ci)
|
||||
ENV NODE_ENV development
|
||||
ENV NODE_ENV=development
|
||||
RUN npm ci
|
||||
|
||||
|
||||
@@ -22,20 +25,37 @@ RUN npm ci
|
||||
FROM base AS builder
|
||||
WORKDIR /app
|
||||
|
||||
# Deployment type marker
|
||||
ENV NEXT_PUBLIC_DEPLOYMENT_TYPE=docker
|
||||
|
||||
# Optional build version arguments at build time
|
||||
ARG NEXT_PUBLIC_BUILD_HASH
|
||||
ENV NEXT_PUBLIC_BUILD_HASH=${NEXT_PUBLIC_BUILD_HASH}
|
||||
ARG NEXT_PUBLIC_BUILD_REF_NAME
|
||||
ENV NEXT_PUBLIC_BUILD_REF_NAME=${NEXT_PUBLIC_BUILD_REF_NAME}
|
||||
|
||||
# Optional argument to configure GA4 at build time (see: docs/deploy-analytics.md)
|
||||
ARG NEXT_PUBLIC_GA4_MEASUREMENT_ID
|
||||
ENV NEXT_PUBLIC_GA4_MEASUREMENT_ID=${NEXT_PUBLIC_GA4_MEASUREMENT_ID}
|
||||
|
||||
# Optional argument to configure PostHog at build time (see: docs/deploy-analytics.md)
|
||||
ARG NEXT_PUBLIC_POSTHOG_KEY
|
||||
ENV NEXT_PUBLIC_POSTHOG_KEY=${NEXT_PUBLIC_POSTHOG_KEY}
|
||||
|
||||
# Optional argument to configure Google Drive Picker at build time (can reuse AUTH_GOOGLE_ID value)
|
||||
ARG NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID
|
||||
ENV NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID=${NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID}
|
||||
|
||||
# Copy development deps and source
|
||||
COPY --from=deps /app/node_modules ./node_modules
|
||||
COPY . .
|
||||
|
||||
# Build the application
|
||||
ENV NODE_ENV production
|
||||
ENV NODE_ENV=production
|
||||
RUN npm run build
|
||||
|
||||
# Reduce installed packages to production-only
|
||||
RUN npm prune --production
|
||||
RUN npm prune --omit=dev
|
||||
|
||||
|
||||
# Runner
|
||||
@@ -43,18 +63,23 @@ FROM base AS runner
|
||||
WORKDIR /app
|
||||
|
||||
# As user
|
||||
RUN addgroup --system --gid 1001 nodejs
|
||||
RUN adduser --system --uid 1001 nextjs
|
||||
RUN addgroup --system --gid 1001 nodejs \
|
||||
&& adduser --system --uid 1001 nextjs \
|
||||
&& apk add --no-cache openssl
|
||||
|
||||
# Copy Built app
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/public ./public
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next ./.next
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/node_modules ./node_modules
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/src/server/prisma ./src/server/prisma
|
||||
# Instead of `COPY --from=builder --chown=nextjs:nodejs /app/.next ./.next`, we only extract some parts, excluding .next/cache which is build time only:
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/BUILD_ID ./.next/
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/server ./.next/server
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/types ./.next/types
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/*.json ./.next/
|
||||
|
||||
# Minimal ENV for production
|
||||
ENV NODE_ENV production
|
||||
ENV PATH $PATH:/app/node_modules/.bin
|
||||
ENV NODE_ENV=production
|
||||
|
||||
# Run as non-root user
|
||||
USER nextjs
|
||||
@@ -63,4 +88,4 @@ USER nextjs
|
||||
EXPOSE 3000
|
||||
|
||||
# Start the application
|
||||
CMD ["next", "start"]
|
||||
CMD ["/app/node_modules/.bin/next", "start"]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2023-2024 Enrico Ros
|
||||
Copyright (c) 2023-2026 Enrico Ros
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
||||
@@ -1,41 +1,192 @@
|
||||
# BIG-AGI 🧠✨
|
||||
<div align="center">
|
||||
|
||||
Welcome to big-AGI, the AI suite for professionals that need function, form,
|
||||
simplicity, and speed. Powered by the latest models from 12 vendors and
|
||||
open-source servers, `big-AGI` offers best-in-class Chats,
|
||||
[Beams](https://github.com/enricoros/big-AGI/issues/470),
|
||||
and [Calls](https://github.com/enricoros/big-AGI/issues/354) with AI personas,
|
||||
visualizations, coding, drawing, side-by-side chatting, and more -- all wrapped in a polished UX.
|
||||
<img width="256" height="256" alt="Big-AGI Logo" src="https://big-agi.com/assets/logo-bright-github.svg" />
|
||||
|
||||
Stay ahead of the curve with big-AGI. 🚀 Pros & Devs love big-AGI. 🤖
|
||||
<h1><a href="https://big-agi.com">Big-AGI</a></h1>
|
||||
|
||||
[](https://big-agi.com)
|
||||
[](https://big-agi.com)
|
||||
[](https://github.com/enricoros/big-AGI/pkgs/container/big-agi)
|
||||
[](https://vercel.com/new/clone?repository-url=https://github.com/enricoros/big-agi)
|
||||
[](https://discord.gg/MkH4qj2Jp9)
|
||||
<br/>
|
||||
[](https://github.com/enricoros/big-agi/commits)
|
||||
[](https://github.com/enricoros/big-AGI/pkgs/container/big-agi)
|
||||
[](https://github.com/enricoros/big-AGI/graphs/contributors)
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
<br/>
|
||||
|
||||
> 🚀 Big-AGI 2 is launching Q4 2024. Be the first to experience it before the public release.
|
||||
>
|
||||
> 👉 [Apply for Early Access](https://y2rjg0zillz.typeform.com/to/ZSADpr5u?utm_source=gh-2&utm_medium=readme&utm_campaign=ea2)
|
||||
[](https://github.com/enricoros/big-agi/issues/new?template=ai-triage.yml)
|
||||
|
||||
Or fork & run on Vercel
|
||||
[//]: # ([](https://stats.uptimerobot.com/59MXcnmjrM))
|
||||
[//]: # ([](https://github.com/enricoros/big-AGI/releases/latest))
|
||||
[//]: # ()
|
||||
[//]: # ([](#))
|
||||
[//]: # ([](https://x.com/enricoros))
|
||||
|
||||
[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-AGI&env=OPENAI_API_KEY&envDescription=Backend%20API%20keys%2C%20optional%20and%20may%20be%20overridden%20by%20the%20UI.&envLink=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-AGI%2Fblob%2Fmain%2Fdocs%2Fenvironment-variables.md&project-name=big-AGI)
|
||||
</div>
|
||||
|
||||
### New Version
|
||||
<br/>
|
||||
|
||||
This repository contains two main versions:
|
||||
# Big-AGI Open 🧠
|
||||
|
||||
- Big-AGI 2: next-generation, bringing the most advanced AI experience
|
||||
- `v2-dev`: V2 development branch, the exciting one, future default
|
||||
- Big-AGI Stable: as deployed on big-agi.com
|
||||
- `v1-dev`: V1 development branch (this branch)
|
||||
- `v1-stable`: Current stable version
|
||||
This is the open-source foundation of **Big-AGI**, ___the multi-model AI workspace for experts___.
|
||||
|
||||
Note: After the V2 release in Q4, `v2-dev` will become the default branch and `v1-dev` will reach EOL.
|
||||
Big-AGI is the multi-model AI workspace for experts: Engineers architecting systems. Founders making decisions. Researchers validating hypotheses.
|
||||
If you need to think broader, decide faster, and build with confidence, then you need Big-AGI.
|
||||
|
||||
### Quick links: 👉 [roadmap](https://github.com/users/enricoros/projects/4/views/2) 👉 [installation](docs/installation.md) 👉 [documentation](docs/README.md)
|
||||
It comes packed with **world-class features** like Beam, and is praised for its **best-in-class AI chat UX**.
|
||||
**As an independent, non-VC-funded project, Pro subscriptions at $10.99/mo fund development for everyone, including the free and open-source tiers.**
|
||||
|
||||
### What's New in 1.16.1...1.16.8 · Sep 13, 2024 (patch releases)
|
||||

|
||||
[](https://big-agi.com/beam)
|
||||
[](https://big-agi.com/inspector)
|
||||
|
||||
- 1.16.8: OpenAI ChatGPT-4o Latest (o1-preview and o1-mini are supported in Big-AGI 2)
|
||||
### What makes Big-AGI different:
|
||||
|
||||
**Intelligence**: with [Beam & Merge](https://big-agi.com/beam) for multi-model de-hallucination, native search, and bleeding-edge AI models like Opus 4.5, Nano Banana Pro, Kimi K2.5 or GPT 5.2 -
|
||||
**Control**: with personas, data ownership, requests inspection, unlimited usage with API keys, and *no vendor lock-in* -
|
||||
and **Speed**: with a local-first, over-powered, zero-latency, madly optimized web app.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td align="center" width="25%">
|
||||
<b>🧠 Intelligence</b><br/>
|
||||
<img src="https://img.shields.io/badge/Multi--Model-Trust-4285F4?style=for-the-badge" alt="Multi-Model"/>
|
||||
</td>
|
||||
<td align="center" width="25%">
|
||||
<b>✨ Experience</b><br/>
|
||||
<img src="https://img.shields.io/badge/Clean-UX-34A853?style=for-the-badge" alt="Clean UX"/>
|
||||
</td>
|
||||
<td align="center" width="25%">
|
||||
<b>⚡ Performance</b><br/>
|
||||
<img src="https://img.shields.io/badge/Zero-Latency-EA4335?style=for-the-badge" alt="Zero Latency"/>
|
||||
</td>
|
||||
<td align="center" width="25%">
|
||||
<b>🔒 Control</b><br/>
|
||||
<img src="https://img.shields.io/badge/No-Lock--in-FBBC04?style=for-the-badge" alt="No Lock-in"/>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" valign="top">
|
||||
Beam & Merge<br/>
|
||||
No context junk<br/>
|
||||
Purest AI outputs
|
||||
</td>
|
||||
<td align="center" valign="top">
|
||||
Flow-state interface<br/>
|
||||
Highly customizable<br/>
|
||||
Best-in-class UX
|
||||
</td>
|
||||
<td align="center" valign="top">
|
||||
Local-first<br/>
|
||||
Highly parallel<br/>
|
||||
Madly optimized
|
||||
</td>
|
||||
<td align="center" valign="top">
|
||||
No vendor lock-in<br/>
|
||||
Your API keys<br/>
|
||||
AI Inspector
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
### Who uses Big-AGI:
|
||||
Loved by engineers, founders, researchers, self-hosters, and IT departments for its power, reliability, and transparency.
|
||||
|
||||
<img width="830" height="370" alt="image" src="https://github.com/user-attachments/assets/513c4f77-0970-4a56-b23b-1416c8246174" />
|
||||
|
||||
Choose Big-AGI because you don't need another clone or slop - you need an AI tool that scales with you.
|
||||
|
||||
### Show me a screenshot:
|
||||
Sure - here is a real-world screengrab as I'm writing this, while running a Beam to extract SVG from an image with Sonnet 4.5, Opus 4.1, GPT 5.1, Gemini 2.5 Pro, Nano Banana, etc.
|
||||
<img alt="Real-world screen capture as of Nov 15 2025, 2am" src="https://github.com/user-attachments/assets/853f4160-27cb-4ac9-826b-402f1e63d4af" />
|
||||
|
||||
|
||||
## Get Started
|
||||
|
||||
| Tier | Best For | What You Get | Setup |
|
||||
|------------------------------------------------------|-------------------|---------------------------------------------------------------|-------------|
|
||||
| Big-AGI Open (self-host) | **IT** | First to get new models support. Maximum control and privacy. | 5-30 min |
|
||||
| [big-agi.com](https://big-agi.com) Free | **Everyone** | Full core experience, improved Beam, new Personas, best UX. | **2 min**\* |
|
||||
| **[big-agi.com](https://big-agi.com) Pro** $10.99/mo | **Professionals** | Everything + **Sync** across unlimited devices + 1GB storage | **2 min**\* |
|
||||
|
||||
\*: **Configuration requires your API keys**. *Big-AGI does not charge for model usage or limit your access*.
|
||||
**Why Pro?** As an independent project, Pro subscriptions fund all development. Early subscribers shape the roadmap directly.
|
||||
|
||||
[](https://big-agi.com)
|
||||
|
||||
**Self-host and developers** (full control)
|
||||
- Develop locally or self-host with Docker on your own infrastructure – [guide](docs/installation.md)
|
||||
- Or fork & run on Vercel:
|
||||
[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-AGI&env=OPENAI_API_KEY&envDescription=Backend%20API%20keys%2C%20optional%20and%20may%20be%20overridden%20by%20the%20UI.&envLink=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-AGI%2Fblob%2Fmain%2Fdocs%2Fenvironment-variables.md&project-name=big-AGI)
|
||||
|
||||
[//]: # (**For the latest Big-AGI:**)
|
||||
|
||||
[//]: # (- [**Big-AGI Open**](https://github.com/enricoros/big-AGI/tree/main) - Open Source, latest models and features (main branch))
|
||||
|
||||
[//]: # (- [**Big-AGI Pro**](https://big-agi.com) - Hosted with Cloud Sync)
|
||||
|
||||
---
|
||||
|
||||
## Our Philosophy
|
||||
|
||||
We're an independent, non-VC-funded project with a simple belief: **AI should elevate you, not replace you**.
|
||||
|
||||
This is why we built Big-AGI to be **local-first**, madly optimized to 0-latency, launched multi-model first to
|
||||
defeat hallucinations, designed Beam around the **humans in the loop**, re-wrote frameworks and abstractions
|
||||
so you **are not vendor locked-in**, and obsessed over a powerful UI that works, just works.
|
||||
|
||||
NOTE: this is a powerful tool - if you need a toy UI or clone, this ain't it.
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Release Notes
|
||||
|
||||
👉 **[See the Live Release Notes](https://big-agi.com/changes)**
|
||||
- Open 2.0.3: **Red Carpet** **Kimi K2.5**, **Gemini 3 Flash**, **GPT 5.2**, Google Drive, Inworld, Novita.ai, Speech/UX improvements
|
||||
- Open 2.0.2: **Speex** multi-vendor speech synthesis, **Opus 4.5**, **Gemini 3 Pro**, **Nano Banana Pro**, **Grok 4.1**, **GPT-5.1**, **Kimi K2** + 280 fixes
|
||||
|
||||
### What's New in 2.0 · Oct 31, 2025 · Open
|
||||
|
||||
- **Big-AGI Open** is ready and more productive and faster than ever, with:
|
||||
- **Beam 2**: multi-modal, program-based, follow-ups, save presets
|
||||
- Top-notch AI models support including **agentic models** and **reasoning models**
|
||||
- **Image Generation** and editing with Nano Banana and gpt-image-1
|
||||
- **Web Search** with citations for supported models
|
||||
- **UI** & Mobile UI overhaul with peeking and side panels
|
||||
- And all of the [Big-AGI 2 changes](https://github.com/enricoros/big-AGI/issues/567#issuecomment-2262187617) and more
|
||||
- Built for the future, madly optimized
|
||||
|
||||
<img width="830" height="385" alt="image" src="https://github.com/user-attachments/assets/ad52761d-7e3f-44d8-b41e-947ce8b4faa1" />
|
||||
|
||||
#### **Open** links: 👉 [changelog](https://big-agi.com/changes) 👉 [installation](docs/installation.md) 👉 [roadmap](https://github.com/users/enricoros/projects/4/views/2) 👉 [documentation](docs/README.md)
|
||||
|
||||
**For teams and institutions:** Need shared prompts, SSO, or managed deployments? Reach out at enrico@big-agi.com. We're actively collecting requirements from research groups and IT departments.
|
||||
|
||||
<details>
|
||||
<summary>5,000 Commits Milestone</summary>
|
||||
|
||||
Hit 5k commits last week. That's a lot of code.
|
||||
|
||||
Recent work has been intense:
|
||||
- Chain of thought reasoning across multiple LLMs: **OpenAI o3** and o1, **DeepSeek R1**, **Gemini 2.0 Flash Thinking**, and more
|
||||
- Beam is real - ~35% of our users run it daily to compare models
|
||||
- New AIX framework lets us scale features we couldn't before
|
||||
- UI is faster than ever. Like, terminal-fast
|
||||
|
||||
The new architecture is solid and the speed improvements are real.
|
||||
|
||||

|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>What's New in 1.16.1...1.16.10 · 2024-2025 (patch releases)</summary>
|
||||
|
||||
- 1.16.10: OpenRouter models support
|
||||
- 1.16.9: Docker Gemini fix, R1 models support
|
||||
- 1.16.8: OpenAI ChatGPT-4o Latest, o1 models support
|
||||
- 1.16.7: OpenAI support for GPT-4o 2024-08-06
|
||||
- 1.16.6: Groq support for Llama 3.1 models
|
||||
- 1.16.5: GPT-4o Mini support
|
||||
@@ -48,7 +199,10 @@ Note: After the V2 release in Q4, `v2-dev` will become the default branch and `v
|
||||
- 1.16.2: Updates to Beam
|
||||
- 1.16.1: Support for the new OpenAI GPT-4o 2024-05-13 model
|
||||
|
||||
### What's New in 1.16.0 · May 9, 2024 · Crystal Clear
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>What's New in 1.16.0 · May 9, 2024 · Crystal Clear</summary>
|
||||
|
||||
- [Beam](https://big-agi.com/blog/beam-multi-model-ai-reasoning) core and UX improvements based on user feedback
|
||||
- Chat cost estimation 💰 (enable it in Labs / hover the token counter)
|
||||
@@ -59,14 +213,20 @@ Note: After the V2 release in Q4, `v2-dev` will become the default branch and `v
|
||||
- Models update: **Anthropic**, **Groq**, **Ollama**, **OpenAI**, **OpenRouter**, **Perplexity**
|
||||
- Code soft-wrap, chat text selection toolbar, 3x faster on Apple silicon, and more [#517](https://github.com/enricoros/big-AGI/issues/517), [507](https://github.com/enricoros/big-AGI/pull/507)
|
||||
|
||||
#### 3,000 Commits Milestone · April 7, 2024
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>3,000 Commits Milestone · April 7, 2024</summary>
|
||||
|
||||

|
||||
|
||||
- 🥇 Today we <b>celebrate commit 3000</b> in just over one year, and going stronger 🚀
|
||||
- 📢️ Thanks everyone for your support and words of love for Big-AGI, we are committed to creating the best AI experiences for everyone.
|
||||
|
||||
### What's New in 1.15.0 · April 1, 2024 · Beam
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>What's New in 1.15.0 · April 1, 2024 · Beam</summary>
|
||||
|
||||
- ⚠️ [**Beam**: the multi-model AI chat](https://big-agi.com/blog/beam-multi-model-ai-reasoning). find better answers, faster - a game-changer for brainstorming, decision-making, and creativity. [#443](https://github.com/enricoros/big-AGI/issues/443)
|
||||
- Managed Deployments **Auto-Configuration**: simplify the UI models setup with backend-set models. [#436](https://github.com/enricoros/big-AGI/issues/436)
|
||||
@@ -76,6 +236,8 @@ Note: After the V2 release in Q4, `v2-dev` will become the default branch and `v
|
||||
- 1.15.1: Support for Gemini Pro 1.5 and OpenAI Turbo models
|
||||
- Beast release, over 430 commits, 10,000+ lines changed: [release notes](https://github.com/enricoros/big-AGI/releases/tag/v1.15.0), and changes [v1.14.1...v1.15.0](https://github.com/enricoros/big-AGI/compare/v1.14.1...v1.15.0)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>What's New in 1.14.1 · March 7, 2024 · Modelmorphic</summary>
|
||||
|
||||
@@ -146,99 +308,85 @@ https://github.com/enricoros/big-AGI/assets/1590910/a6b8e172-0726-4b03-a5e5-10cf
|
||||
|
||||
</details>
|
||||
|
||||
For full details and former releases, check out the [changelog](docs/changelog.md).
|
||||
For full details and former releases, check out the [archived versions changelog](docs/changelog.md).
|
||||
|
||||
## 👉 Key Features ✨
|
||||
## 👉 Supported Models & Integrations
|
||||
|
||||
|  |  |  |  |  |
|
||||
Delightful UX with latest models exclusive features like Beam for **multi-model AI validation**.
|
||||
> 
|
||||
> [](https://big-agi.com/beam)
|
||||
|
||||
|  |  |  |  |  |
|
||||
|---------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------|
|
||||
| **Chat**<br/>**Call**<br/>**Beam**<br/>**Draw**, ... | Local & Cloud<br/>Open & Closed<br/>Cheap & Heavy<br/>Google, Mistral, ... | Attachments<br/>Diagrams<br/>Multi-Chat<br/>Mobile-first UI | Stored Locally<br/>Easy self-Host<br/>Local actions<br/>Data = Gold | AI Personas<br/>Voice Modes<br/>Screen Capture<br/>Camera + OCR |
|
||||
|
||||

|
||||
|
||||
You can easily configure 100s of AI models in big-AGI:
|
||||
### AI Models & Vendors
|
||||
|
||||
| **AI models** | _supported vendors_ |
|
||||
|:--------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Opensource Servers | [LocalAI](https://localai.io/) (multimodal) · [Ollama](https://ollama.com/) |
|
||||
| Local Servers | [LM Studio](https://lmstudio.ai/) |
|
||||
| Multimodal services | [Azure](https://azure.microsoft.com/en-us/products/ai-services/openai-service) · [Google Gemini](https://ai.google.dev/) · [OpenAI](https://platform.openai.com/docs/overview) |
|
||||
| Language services | [Anthropic](https://anthropic.com) · [Groq](https://wow.groq.com/) · [Mistral](https://mistral.ai/) · [OpenRouter](https://openrouter.ai/) · [Perplexity](https://www.perplexity.ai/) · [Together AI](https://www.together.ai/) |
|
||||
| Image services | [Prodia](https://prodia.com/) (SDXL) |
|
||||
| Speech services | [ElevenLabs](https://elevenlabs.io) (Voice synthesis / cloning) |
|
||||
Configure 100s of AI models from 19+ providers:
|
||||
|
||||
Add extra functionality with these integrations:
|
||||
| **AI models** | _supported vendors_ |
|
||||
|:--------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Opensource Servers | [LocalAI](https://localai.io/) · [Ollama](https://ollama.com/) |
|
||||
| Local Servers | [LM Studio](https://lmstudio.ai/) (non-open) |
|
||||
| Multimodal services | [Azure](https://azure.microsoft.com/en-us/products/ai-services/openai-service) · [Anthropic](https://anthropic.com) · [Google Gemini](https://ai.google.dev/) · [OpenAI](https://platform.openai.com/docs/overview) |
|
||||
| LLM services | [Alibaba](https://www.alibabacloud.com/en/product/modelstudio) · [DeepSeek](https://deepseek.com) · [Groq](https://wow.groq.com/) · [Mistral](https://mistral.ai/) · [Moonshot](https://www.moonshot.cn/) · [OpenPipe](https://openpipe.ai/) · [OpenRouter](https://openrouter.ai/) · [Perplexity](https://www.perplexity.ai/) · [Together AI](https://www.together.ai/) · [xAI](https://x.ai/) · [Z.ai](https://z.ai/) |
|
||||
| Image services | OpenAI · Google Gemini |
|
||||
| Speech services | [ElevenLabs](https://elevenlabs.io) · [Inworld](https://inworld.ai) · [OpenAI TTS](https://platform.openai.com/docs/guides/text-to-speech) · LocalAI · Browser (Web Speech API) |
|
||||
|
||||
| **More** | _integrations_ |
|
||||
|:-------------|:---------------------------------------------------------------------------------------------------------------|
|
||||
| Web Browse | [Browserless](https://www.browserless.io/) · [Puppeteer](https://pptr.dev/)-based |
|
||||
| Web Search | [Google CSE](https://programmablesearchengine.google.com/) |
|
||||
| Code Editors | [CodePen](https://codepen.io/pen/) · [StackBlitz](https://stackblitz.com/) · [JSFiddle](https://jsfiddle.net/) |
|
||||
| Sharing | [Paste.gg](https://paste.gg/) (Paste chats) |
|
||||
| Tracking | [Helicone](https://www.helicone.ai) (LLM Observability) |
|
||||
### Additional Integrations
|
||||
|
||||
[//]: # (- [x] **Flow-state UX** for uncompromised productivity)
|
||||
|
||||
[//]: # (- [x] **AI Personas**: Tailor your AI interactions with customizable personas)
|
||||
|
||||
[//]: # (- [x] **Sleek UI/UX**: A smooth, intuitive, and mobile-responsive interface)
|
||||
|
||||
[//]: # (- [x] **Efficient Interaction**: Voice commands, OCR, and drag-and-drop file uploads)
|
||||
|
||||
[//]: # (- [x] **Privacy First**: Self-host and use your own API keys for full control)
|
||||
|
||||
[//]: # (- [x] **Advanced Tools**: Execute code, import PDFs, and summarize documents)
|
||||
|
||||
[//]: # (- [x] **Seamless Integrations**: Enhance functionality with various third-party services)
|
||||
|
||||
[//]: # (- [x] **Open Roadmap**: Contribute to the progress of big-AGI)
|
||||
|
||||
<br/>
|
||||
|
||||
## 🚀 Installation
|
||||
|
||||
To get started with big-AGI, follow our comprehensive [Installation Guide](docs/installation.md).
|
||||
The guide covers various installation options, whether you're spinning it up on
|
||||
your local computer, deploying on Vercel, on Cloudflare, or rolling it out
|
||||
through Docker.
|
||||
|
||||
Whether you're a developer, system integrator, or enterprise user, you'll find step-by-step instructions
|
||||
to set up big-AGI quickly and easily.
|
||||
|
||||
[](docs/installation.md)
|
||||
|
||||
Or bring your API keys and jump straight into our free instance on [big-AGI.com](https://big-agi.com).
|
||||
|
||||
<br/>
|
||||
|
||||
# 🌟 Get Involved!
|
||||
|
||||
[//]: # ([](https://discord.gg/MkH4qj2Jp9))
|
||||
[](https://discord.gg/MkH4qj2Jp9)
|
||||
|
||||
- [ ] 📢️ [**Chat with us** on Discord](https://discord.gg/MkH4qj2Jp9)
|
||||
- [ ] ⭐ **Give us a star** on GitHub 👆
|
||||
- [ ] 🚀 **Do you like code**? You'll love this gem of a project! [_Pick up a task!_](https://github.com/users/enricoros/projects/4/views/4) - _easy_ to _pro_
|
||||
- [ ] 💡 Got a feature suggestion? [_Add your roadmap ideas_](https://github.com/enricoros/big-agi/issues/new?&template=roadmap-request.md)
|
||||
- [ ] ✨ [Deploy](docs/installation.md) your [fork](docs/customizations.md) for your friends and family, or [customize it for work](docs/customizations.md)
|
||||
|
||||
<br/>
|
||||
|
||||
[//]: # ([](https://github.com/enricoros/big-agi/stargazers))
|
||||
|
||||
[//]: # ([](https://github.com/enricoros/big-agi/network))
|
||||
|
||||
[//]: # ([](https://github.com/enricoros/big-agi/pulls))
|
||||
|
||||
[//]: # ([](https://github.com/enricoros/big-agi/LICENSE))
|
||||
|
||||
## 📜 Licensing
|
||||
|
||||
Big-AGI incorporates third-party software components that are subject
|
||||
to separate license terms. For detailed information about these
|
||||
components and their respective licenses, please refer to
|
||||
the [Third-Party Notices](src/modules/3rdparty/THIRD_PARTY_NOTICES.md).
|
||||
| **More** | _integrations_ |
|
||||
|:--------------|:---------------------------------------------------------------------------------------------------------------|
|
||||
| Web Browse | [Browserless](https://www.browserless.io/) · [Puppeteer](https://pptr.dev/)-based |
|
||||
| Web Search | [Google CSE](https://programmablesearchengine.google.com/) |
|
||||
| Code Editors | [CodePen](https://codepen.io/pen/) · [StackBlitz](https://stackblitz.com/) · [JSFiddle](https://jsfiddle.net/) |
|
||||
| Observability | [Helicone](https://www.helicone.ai) |
|
||||
|
||||
---
|
||||
|
||||
2023-2024 · Enrico Ros x [Big-AGI](https://big-agi.com) · Like this project? Leave a star! 💫⭐
|
||||
## 🚀 Installation
|
||||
|
||||
Self-host with Docker, deploy on Vercel, or develop locally. Full setup guide:
|
||||
|
||||
[](docs/installation.md)
|
||||
|
||||
Or use the hosted version at [big-agi.com](https://big-agi.com) with your API keys.
|
||||
|
||||
---
|
||||
|
||||
## 👋 Community & Contributing
|
||||
|
||||
### Connect
|
||||
|
||||
[](https://discord.gg/MkH4qj2Jp9)
|
||||
|
||||
⭐ [Star the repo](https://github.com/enricoros/big-agi) if Big-AGI is useful to you
|
||||
|
||||
### Contribute
|
||||
|
||||
**🤖 AI-Powered Issue Assistance**
|
||||
|
||||
When you open an issue, our custom AI triage system (powered by [Claude Code](https://github.com/anthropics/claude-code-action) with Big-AGI architecture documentation) analyzes it, searches the codebase, and provides solutions - typically within 30 minutes. We've trained the system on our modules and subsystems so it handles most issues effectively. Your feedback drives development!
|
||||
|
||||
[](https://github.com/enricoros/big-agi/issues/new?template=ai-triage.yml)
|
||||
[](https://github.com/enricoros/big-agi/issues/new?&template=roadmap-request.md)
|
||||
|
||||
[](https://github.com/users/enricoros/projects/4/views/4)
|
||||
[](docs/customizations.md)
|
||||
[](https://github.com/users/enricoros/projects/4/views/2)
|
||||
|
||||
#### Contributors
|
||||
|
||||
<a href="https://github.com/enricoros/big-agi/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=enricoros/big-agi&max=48&columns=12" />
|
||||
</a>
|
||||
|
||||
---
|
||||
|
||||
## License
|
||||
|
||||
MIT License · [Third-Party Notices](src/modules/3rdparty/THIRD_PARTY_NOTICES.md)
|
||||
|
||||
**2023-2026** · Enrico Ros × [Big-AGI](https://big-agi.com)
|
||||
|
||||
@@ -2,23 +2,38 @@ import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
|
||||
|
||||
import { appRouterCloud } from '~/server/trpc/trpc.router-cloud';
|
||||
import { createTRPCFetchContext } from '~/server/trpc/trpc.server';
|
||||
import { posthogServerSendException } from '~/server/posthog/posthog.server';
|
||||
|
||||
const handlerNodeRoutes = (req: Request) => fetchRequestHandler({
|
||||
endpoint: '/api/cloud',
|
||||
router: appRouterCloud,
|
||||
req,
|
||||
createContext: createTRPCFetchContext,
|
||||
onError:
|
||||
process.env.NODE_ENV === 'development'
|
||||
? ({ path, error }) => console.error(`❌ tRPC-cloud failed on ${path ?? 'unk-path'}: ${error.message}`)
|
||||
: undefined,
|
||||
onError: async function({ path, error, type, ctx }) {
|
||||
|
||||
// -> DEV error logging
|
||||
if (process.env.NODE_ENV === 'development')
|
||||
console.error(`❌ tRPC-cloud failed on ${path ?? 'unk-path'}: ${error.message}`);
|
||||
|
||||
// -> Capture node errors
|
||||
await posthogServerSendException(error, undefined, {
|
||||
domain: 'trpc-onerror',
|
||||
runtime: 'nodejs',
|
||||
endpoint: path ?? 'unknown',
|
||||
method: req.method,
|
||||
url: req.url,
|
||||
additionalProperties: {
|
||||
error_code: error.code,
|
||||
error_type: type,
|
||||
},
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
|
||||
// NOTE: the following statement breaks the build on non-pro deployments, and conditionals don't work either
|
||||
// so we resorted to raising the timeout from 10s to 25s in the vercel.json file instead
|
||||
// export const maxDuration = 25;
|
||||
|
||||
// so we resorted to raising the timeout from 10s to 60s in the vercel.json file instead
|
||||
// export const maxDuration = 60;
|
||||
export const runtime = 'nodejs';
|
||||
export const dynamic = 'force-dynamic';
|
||||
export { handlerNodeRoutes as GET, handlerNodeRoutes as POST };
|
||||
@@ -10,9 +10,11 @@ const handlerEdgeRoutes = (req: Request) => fetchRequestHandler({
|
||||
createContext: createTRPCFetchContext,
|
||||
onError:
|
||||
process.env.NODE_ENV === 'development'
|
||||
? ({ path, error }) => console.error(`❌ tRPC-edge failed on ${path ?? 'unk-path'}: ${error.message}`)
|
||||
? ({ path, error }) => console.error(`\n❌ tRPC-edge failed on ${path ?? 'unk-path'}: ${error.message}`)
|
||||
: undefined,
|
||||
});
|
||||
|
||||
// NOTE: we don't set maxDuration explicitly here - however we set it in the Vercel project settings, raising to the limit of 300s
|
||||
// export const maxDuration = 60;
|
||||
export const runtime = 'edge';
|
||||
export { handlerEdgeRoutes as GET, handlerEdgeRoutes as POST };
|
||||
+1
-4
@@ -1,8 +1,6 @@
|
||||
# Very simple docker-compose file to run the app on http://localhost:3000 (or http://127.0.0.1:3000).
|
||||
#
|
||||
# For more examples, such runnin big-AGI alongside a web browsing service, see the `docs/docker` folder.
|
||||
|
||||
version: '3.9'
|
||||
# For more examples, such running big-AGI alongside a web browsing service, see the `docs/docker` folder.
|
||||
|
||||
services:
|
||||
big-agi:
|
||||
@@ -11,4 +9,3 @@ services:
|
||||
- "3000:3000"
|
||||
env_file:
|
||||
- .env
|
||||
command: [ "next", "start", "-p", "3000" ]
|
||||
+33
-21
@@ -2,36 +2,49 @@
|
||||
|
||||
Information you need to get started, configure, and use big-AGI productively.
|
||||
|
||||
👉 **[Changelog](https://big-agi.com/changes)** - See what's new
|
||||
|
||||
## Getting Started
|
||||
|
||||
Guides for basic big-AGI features:
|
||||
Essential guides:
|
||||
|
||||
- **[Enabling Microphone for Speech Recognition](help-feature-microphone.md)**: Instructions to
|
||||
allow speech recognition in browsers and apps.
|
||||
- **[FAQ](help-faq.md)**: Common questions and answers
|
||||
- **[Enabling Microphone](help-feature-microphone.md)**: Configure speech recognition in your browser
|
||||
|
||||
## AI Model Configuration
|
||||
## AI Services
|
||||
|
||||
Detailed guides to configure AI models and advanced features in big-AGI.
|
||||
How to set up AI models and features in big-AGI.
|
||||
|
||||
> 👉 The following applies to users of big-AGI.com, as the public instance is empty and requires user configuration.
|
||||
|
||||
- **Cloud AI Services**:
|
||||
- **[Azure OpenAI](config-azure-openai.md)**
|
||||
- **[OpenRouter](config-openrouter.md)**
|
||||
- Easy API key setup: **Anthropic**, **Deepseek**, **Google AI**, **Groq**, **Mistral**, **OpenAI**, **OpenPipe**, **Perplexity**, **TogetherAI**, **xAI**
|
||||
- Easy API key configuration:
|
||||
[Alibaba](https://bailian.console.alibabacloud.com/?apiKey=1#/api-key),
|
||||
[Anthropic](https://console.anthropic.com/settings/keys),
|
||||
[Deepseek](https://platform.deepseek.com/api_keys),
|
||||
[Google Gemini](https://aistudio.google.com/app/apikey),
|
||||
[Groq](https://console.groq.com/keys),
|
||||
[Mistral](https://console.mistral.ai/api-keys/),
|
||||
[OpenAI](https://platform.openai.com/api-keys),
|
||||
[OpenPipe](https://app.openpipe.ai/settings),
|
||||
[Perplexity](https://www.perplexity.ai/settings/api),
|
||||
[TogetherAI](https://api.together.xyz/settings/api-keys),
|
||||
[xAI](http://x.ai/api),
|
||||
[Z.ai](https://z.ai/)
|
||||
- **[Azure OpenAI](config-azure-openai.md)** guide
|
||||
- **FireworksAI** ([API keys](https://fireworks.ai/account/api-keys), via custom OpenAI endpoint: https://api.fireworks.ai/inference)
|
||||
- **[OpenRouter](config-openrouter.md)** guide
|
||||
|
||||
|
||||
- **Local AI Integrations**:
|
||||
- **[LocalAI](config-local-localai.md)**
|
||||
- **[LM Studio](config-local-lmstudio.md)**
|
||||
- **[Ollama](config-local-ollama.md)**
|
||||
- [LocalAI](config-local-localai.md), [LM Studio](config-local-lmstudio.md), [Ollama](config-local-ollama.md)
|
||||
|
||||
|
||||
- **Enhanced AI Features**:
|
||||
- **[Web Browsing](config-feature-browse.md)**: Enable web page download through third-party services or your own cloud (advanced)
|
||||
- **[Web Browsing](config-feature-browse.md)**: Enable web page download through third-party services or your own cloud
|
||||
- **Web Search**: Google Search API (see '[Environment Variables](environment-variables.md)')
|
||||
- **Image Generation**: DALL·E 3 and 2, or Prodia API for Stable Diffusion XL
|
||||
- **Voice Synthesis**: ElevenLabs API for voice generation
|
||||
- **Image Generation**: GPT Image (gpt-image-1), DALL·E 3 and 2
|
||||
- **Voice Synthesis**: ElevenLabs, Inworld, OpenAI TTS, LocalAI, or browser Web Speech API
|
||||
|
||||
## Deployment & Customization
|
||||
|
||||
@@ -39,13 +52,14 @@ Detailed guides to configure AI models and advanced features in big-AGI.
|
||||
|
||||
For deploying a custom big-AGI instance:
|
||||
|
||||
- **[Installation Guide](installation.md)**: Set up your own big-AGI instance
|
||||
- **[Installation Guide](installation.md)**, including:
|
||||
- Set up your own big-AGI instance
|
||||
- Source build or pre-built options
|
||||
- Local, cloud, or on-premises deployment
|
||||
|
||||
|
||||
- **Advanced Setup**:
|
||||
- **[Source Code Customization Guide](customizations.md)**: Modify the source code
|
||||
- **[Source Code Customization](customizations.md)**: Modify the source code
|
||||
- **[Access Control](deploy-authentication.md)**: Optional, add basic user authentication
|
||||
- **[Database Setup](deploy-database.md)**: Optional, enables "Chat Link Sharing"
|
||||
- **[Reverse Proxy](deploy-reverse-proxy.md)**: Optional, enables custom domains and SSL
|
||||
@@ -53,10 +67,8 @@ For deploying a custom big-AGI instance:
|
||||
|
||||
## Community & Support
|
||||
|
||||
Connect with the growing big-AGI community:
|
||||
|
||||
- Check the [changelog](https://big-agi.com/changes) for the latest updates
|
||||
- Visit our [GitHub repository](https://github.com/enricoros/big-AGI) for source code and issue tracking
|
||||
- Check the latest updates and features on [Changelog](changelog.md) or the in-app [News](https://get.big-agi.com/news)
|
||||
- Connect with us and other users on [Discord](https://discord.gg/MkH4qj2Jp9) for discussions, help, and sharing your experiences with big-AGI
|
||||
- Join our [Discord](https://discord.gg/MkH4qj2Jp9) for discussions and help
|
||||
|
||||
Thank you for choosing big-AGI. We're excited to give you the best tools to amplify yourself.
|
||||
Let's build something great.
|
||||
|
||||
+19
-7
@@ -1,18 +1,30 @@
|
||||
## Changelog
|
||||
## Archived Versions - Changelog
|
||||
|
||||
This is a high-level changelog. Calls out some of the high level features batched
|
||||
by release.
|
||||
|
||||
- For the live changelog, see [big-agi.com/changes](https://big-agi.com/changes)
|
||||
- For the live roadmap, please see [the GitHub project](https://github.com/users/enricoros/projects/4/views/2)
|
||||
|
||||
### 1.17.0 - Jun 2024
|
||||
> NOTE: with the release of 2.0.0 we switching to [big-agi.com/changes](https://big-agi.com/changes) for the
|
||||
> continuously updated changelog.
|
||||
|
||||
- milestone: [1.17.0](https://github.com/enricoros/big-agi/milestone/17)
|
||||
- work in progress: [big-AGI open roadmap](https://github.com/users/enricoros/projects/4/views/2), [help here](https://github.com/users/enricoros/projects/4/views/4)
|
||||
### What's New in 2 · Oct 31, 2025 · Open
|
||||
|
||||
### What's New in 1.16.1...1.16.8 · Sep 13, 2024 (patch releases)
|
||||
- **Big-AGI Open** is ready and more productive and faster than ever, with:
|
||||
- **Beam 2**: multi-modal, program-based, follow-ups, save presets
|
||||
- Top-notch AI models support including **agentic models** and **reasoning models**
|
||||
- **Image Generation** and editing with Nano Banana and gpt-image-1
|
||||
- **Web Search** with citations for supported models
|
||||
- **UI** & Mobile UI overhaul with peeking and side panels
|
||||
- And all of the [Big-AGI 2 changes](https://github.com/enricoros/big-AGI/issues/567#issuecomment-2262187617) and more
|
||||
- Built for the future, madly optimized
|
||||
|
||||
- 1.16.8: OpenAI ChatGPT-4o Latest (o1-preview and o1-mini are supported in Big-AGI 2)
|
||||
### What's New in 1.16.1...1.16.9 · Jan 21, 2025 (patch releases)
|
||||
|
||||
- 1.16.10: OpenRouter models support
|
||||
- 1.16.9: Docker Gemini fix, R1 models support
|
||||
- 1.16.8: OpenAI ChatGPT-4o Latest, o1 models support
|
||||
- 1.16.7: OpenAI support for GPT-4o 2024-08-06
|
||||
- 1.16.6: Groq support for Llama 3.1 models
|
||||
- 1.16.5: GPT-4o Mini support
|
||||
@@ -46,7 +58,7 @@ by release.
|
||||
### What's New in 1.15.0 · April 1, 2024 · Beam
|
||||
|
||||
- ⚠️ [**Beam**: the multi-model AI chat](https://big-agi.com/blog/beam-multi-model-ai-reasoning). find better answers, faster - a game-changer for brainstorming, decision-making, and creativity. [#443](https://github.com/enricoros/big-AGI/issues/443)
|
||||
- Managed Deployments **Auto-Configuration**: simplify the UI mdoels setup with backend-set models. [#436](https://github.com/enricoros/big-AGI/issues/436)
|
||||
- Managed Deployments **Auto-Configuration**: simplify the UI models setup with backend-set models. [#436](https://github.com/enricoros/big-AGI/issues/436)
|
||||
- Message **Starring ⭐**: star important messages within chats, to attach them later. [#476](https://github.com/enricoros/big-AGI/issues/476)
|
||||
- Enhanced the default Persona
|
||||
- Fixes to Gemini models and SVGs, improvements to UI and icons
|
||||
|
||||
+48
-28
@@ -14,7 +14,7 @@ If you have an `API Endpoint` and `API Key`, you can configure big-AGI as follow
|
||||
1. Launch the `big-AGI` application
|
||||
2. Go to the **Models** settings
|
||||
3. Add a Vendor and select **Azure OpenAI**
|
||||
- Enter the Endpoint (e.g., 'https://your-openai-api-1234.openai.azure.com/')
|
||||
- Enter the Endpoint (e.g., 'https://your-resource-name.openai.azure.com')
|
||||
- Enter the API Key (e.g., 'fd5...........................ba')
|
||||
|
||||
The deployed models are now available in the application. If you don't have a configured
|
||||
@@ -23,6 +23,36 @@ Azure OpenAI service instance, continue with the next section.
|
||||
In addition to using the UI, configuration can also be done using
|
||||
[environment variables](environment-variables.md).
|
||||
|
||||
## Server Configuration
|
||||
|
||||
For server deployments, set these environment variables:
|
||||
|
||||
```bash
|
||||
AZURE_OPENAI_API_ENDPOINT=https://your-resource-name.openai.azure.com
|
||||
AZURE_OPENAI_API_KEY=your-api-key
|
||||
```
|
||||
|
||||
This enables Azure OpenAI for all users without requiring individual API keys. For more details, see [environment-variables.md](environment-variables.md).
|
||||
|
||||
## Azure OpenAI API Versions
|
||||
|
||||
Azure OpenAI supports both traditional deployment-based API and the next-generation v1 API:
|
||||
|
||||
### Next-Generation v1 API (Default)
|
||||
- **Enabled by default** for GPT-5-like models (GPT-5, GPT-6, o3, o4, etc.)
|
||||
- Uses direct `/openai/v1/responses` endpoint without deployment IDs
|
||||
- Optimized for advanced reasoning models and new features
|
||||
- Can be disabled by setting `AZURE_OPENAI_DISABLE_V1=true`
|
||||
|
||||
### Traditional Deployment-Based API
|
||||
- Uses `/openai/deployments/{deployment-name}/...` endpoints
|
||||
- Required for older models and when v1 API is disabled
|
||||
- Needs deployment ID for all API calls
|
||||
|
||||
### Known Limitations
|
||||
- **Web Search Tool**: Azure OpenAI does not support the `web_search_preview` tool that's available in OpenAI's API
|
||||
- Models with web search capabilities will have this feature automatically disabled on Azure
|
||||
|
||||
## Setting Up Azure
|
||||
|
||||
### Step 1: Azure Account & Subscription
|
||||
@@ -34,18 +64,7 @@ In addition to using the UI, configuration can also be done using
|
||||
- Fill in the required fields and click on **Create**
|
||||
- Note down the **Subscription ID** (e.g., `12345678-1234-1234-1234-123456789012`)
|
||||
|
||||
### Step 2: Apply for Azure OpenAI Service
|
||||
|
||||
We'll now be creating "OpenAI"-specific resources on Azure. This requires to 'apply',
|
||||
and acceptance should be quick (even as low as minutes).
|
||||
|
||||
1. Visit [Azure OpenAI Service](https://aka.ms/azure-openai)
|
||||
2. Click on **Apply for access**
|
||||
- Fill in the required fields (including the subscription ID) and click on **Apply**
|
||||
|
||||
Once your application is accepted, you can create OpenAI resources on Azure.
|
||||
|
||||
### Step 3: Create Azure OpenAI Resource
|
||||
### Step 2: Create Azure OpenAI Resource
|
||||
|
||||
For more information, see [Azure: Create and deploy OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal)
|
||||
|
||||
@@ -55,31 +74,32 @@ For more information, see [Azure: Create and deploy OpenAI](https://learn.micros
|
||||

|
||||
- Select the subscription
|
||||
- Select a resource group or create a new one
|
||||
- Select the region. Note that the region determines the available models.
|
||||
> For instance, **Canada East** offers GPT-4-32k models, For the full list, see [GPT-4 models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models)
|
||||
- Select the region. **Important**: The region determines which models are available.
|
||||
> Popular regions like **East US**, **West Europe**, and **Australia East** typically have the best model availability. For the latest model availability by region, see [Azure OpenAI Model Availability](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models)
|
||||
- Name the service (e.g., `your-openai-api-1234`)
|
||||
- Select a pricing tier (e.g., `S0` for standard)
|
||||
- Select: "All networks, including the internet, can access this resource."
|
||||
- Click on **Review + create** and then **Create**
|
||||
|
||||
After creating the resource, you can access the API Keys and Endpoints. At any point, you can go to
|
||||
the OpenAI Service instance page to get this information.
|
||||
After creating the resource, you can access the API Keys and Endpoints:
|
||||
|
||||
- Click on **Go to resource**
|
||||
- Click on **Develop**
|
||||
- Copy the `Endpoint`, called "Language API", e.g. 'https://your-openai-api-1234.openai.azure.com/'
|
||||
- Copy `KEY 1`
|
||||
1. Click on **Go to resource** (or navigate to your Azure OpenAI resource)
|
||||
2. In the left sidebar, under **Resource Management**, click on **Keys and Endpoint**
|
||||
3. Copy the required information:
|
||||
- **Endpoint**: e.g., 'https://your-resource-name.openai.azure.com/'
|
||||
- **Key**: Copy either KEY 1 or KEY 2 (both work identically)
|
||||
|
||||
### Step 4: Deploy Models
|
||||
### Step 3: Deploy Models
|
||||
|
||||
By default, Azure OpenAI resource instances don't have models available. You need to deploy the models you want to use.
|
||||
|
||||
1. Click on **Model Deployments > Manage Deployments**
|
||||
2. Click on **+Create New Deployment**
|
||||

|
||||
- Select the model you want to deploy
|
||||
- Optionally select a version
|
||||
- name the model, e.g., `gpt4-32k-0613`
|
||||
1. In your Azure OpenAI resource, click on **Model deployments** in the left sidebar
|
||||
2. Click on **Create new deployment**
|
||||
3. Fill in the deployment details:
|
||||
- **Select a model**: Choose from available models
|
||||
- **Model version**: Select the latest version or a specific one
|
||||
- **Deployment name**: Give it a meaningful name
|
||||
4. Click **Deploy**
|
||||
|
||||
Repeat as necessary for each model you want to deploy.
|
||||
|
||||
|
||||
@@ -0,0 +1,55 @@
|
||||
# Google Drive Integration
|
||||
|
||||
Attach files from Google Drive directly in the chat composer.
|
||||
|
||||
## Setup
|
||||
|
||||
### 1. Enable APIs
|
||||
|
||||
In [Google Cloud Console](https://console.cloud.google.com/):
|
||||
|
||||
1. Go to **APIs & Services > Library**
|
||||
2. Enable **Google Drive API** and **Google Picker API**
|
||||
|
||||
### 2. Configure OAuth
|
||||
|
||||
1. Go to **APIs & Services > OAuth consent screen**
|
||||
2. Create consent screen (External or Internal)
|
||||
3. Add scope: `https://www.googleapis.com/auth/drive.file`
|
||||
4. Add test users if in testing mode
|
||||
|
||||
### 3. Create Credentials
|
||||
|
||||
1. Go to **APIs & Services > Credentials**
|
||||
2. Create **OAuth client ID** (Web application)
|
||||
3. Add JavaScript origins:
|
||||
- `http://localhost:3000` (dev)
|
||||
- `https://your-domain.com` (prod)
|
||||
|
||||
### 4. Set Environment Variable
|
||||
|
||||
```bash
|
||||
NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID=your-client-id.apps.googleusercontent.com
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
- Click **Drive** button in attachment menu
|
||||
|
||||
## Supported Files
|
||||
|
||||
| Type | Export Format |
|
||||
|-----------------|---------------------|
|
||||
| Regular files | Downloaded directly |
|
||||
| Google Docs | Markdown (.md) |
|
||||
| Google Sheets | CSV (.csv) |
|
||||
| Google Slides | PDF (.pdf) |
|
||||
| Google Drawings | SVG (.svg) |
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**Picker won't open**: Check `NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID` is set and APIs are enabled.
|
||||
|
||||
**OAuth errors**: Verify your domain is in authorized JavaScript origins. Add yourself as test user if app is in testing mode.
|
||||
|
||||
**Download fails**: Check file permissions and that Drive API is enabled.
|
||||
@@ -54,7 +54,7 @@ If the running LocalAI instance is configured with a [Model Gallery](https://loc
|
||||
|
||||
At the time of writing, LocalAI does not publish the model `context window size`.
|
||||
Every model is assumed to be capable of chatting, and with a context window of 4096 tokens.
|
||||
Please update the [src/modules/llms/transports/server/openai/models/models.data.ts](../src/modules/llms/server/openai/models/models.data.ts)
|
||||
Please update the [src/modules/llms/server/models.mappings.ts](../src/modules/llms/server/models.mappings.ts)
|
||||
file with the mapping information between LocalAI model IDs and names/descriptions/tokens, etc.
|
||||
|
||||
# 🤝 Support
|
||||
|
||||
+27
-6
@@ -31,17 +31,14 @@ At time of writing, big-AGI has only 2 operations that run on Node.js Functions:
|
||||
browsing (fetching web pages) and sharing. They both can exceed 10 seconds, especially
|
||||
when fetching large pages or waiting for websites to be completed.
|
||||
|
||||
We provide `vercel_PRODUCTION.json` to raise the duration to 25 seconds (from a default of 10), to use it,
|
||||
make sure to rename it to `vercel.json` before build.
|
||||
|
||||
From the Vercel Project > Settings > General > Build & Development Settings,
|
||||
you can for instance set the build command to:
|
||||
|
||||
```bash
|
||||
mv vercel_PRODUCTION.json vercel.json; next build
|
||||
next build
|
||||
```
|
||||
|
||||
### Change the Personas
|
||||
### Change the Personas (v1.x only)
|
||||
|
||||
Edit the `src/data.ts` file to customize personas. This file houses the default personas. You can add, remove, or modify these to meet your project's needs.
|
||||
|
||||
@@ -55,6 +52,21 @@ Adapt the UI to match your project's aesthetic, incorporate new features, or exc
|
||||
- [ ] Modify `src/common/app.config.tsx` to alter the application's name
|
||||
- [ ] Update `src/common/app.nav.tsx` to revise the navigation bar
|
||||
|
||||
### Add a Message of the Day
|
||||
|
||||
You can display a temporary announcement banner at the top of the app using the `NEXT_PUBLIC_MOTD` environment variable.
|
||||
|
||||
- Set this variable in your deployment environment
|
||||
- The message supports template variables:
|
||||
- `{{app_build_hash}}`: Current git commit hash
|
||||
- `{{app_build_pkgver}}`: Package version
|
||||
- `{{app_build_time}}`: Build timestamp as date
|
||||
- `{{app_deployment_type}}`: Deployment type (local, docker, vercel, etc.)
|
||||
- Users can dismiss the message (until next page refresh)
|
||||
- Use it for version announcements, maintenance notices, or feature highlights
|
||||
|
||||
Example: `NEXT_PUBLIC_MOTD=🚀 New features available in {{app_build_pkgver}}! Try the improved Beam.`
|
||||
|
||||
## Testing & Deployment
|
||||
|
||||
Test your application thoroughly using local development (refer to README.md for local build instructions). Deploy using your preferred hosting service. big-AGI supports deployment on platforms like Vercel, Docker, or any Node.js-compatible service, especially those supporting NextJS's "Edge Runtime."
|
||||
@@ -65,7 +77,16 @@ Test your application thoroughly using local development (refer to README.md for
|
||||
|
||||
## Debugging
|
||||
|
||||
We introduced the `/info/debug` page that provides a detailed overview of the application's environment, including the API keys, environment variables, and other configuration settings.
|
||||
The application includes a client-side logging system. You can view recent logs via the UI (Settings > Tools > Logs).
|
||||
|
||||
For deeper debugging during development:
|
||||
|
||||
1. **Debug Page**: Access the `/info/debug` page for an overview of the application's environment, configuration, API status, and environment variables available to the client.
|
||||
2. **Conditional Breakpoints**: To automatically pause execution in your browser's developer tools when critical errors (`error`, `critical`, `DEV` levels) are logged to the console, set the following environment variable in your local `.env.local` file and restart your development server:
|
||||
```bash
|
||||
NEXT_PUBLIC_DEBUG_BREAKS=true
|
||||
```
|
||||
This allows you to inspect the application state at the exact moment an important error occurs. This feature only works in development mode (`npm run dev`) and requires the environment variable to be explicitly set to `true`.
|
||||
|
||||
<br/>
|
||||
|
||||
|
||||
+40
-23
@@ -2,8 +2,9 @@
|
||||
|
||||
The open-source big-AGI project provides support for the following analytics services:
|
||||
|
||||
- **Vercel Analytics**: automatic when deployed to Vercel
|
||||
- **Google Analytics 4**: manual setup required
|
||||
- **PostHog Analytics**: manual setup required
|
||||
- **Vercel Analytics**: automatic when deployed to Vercel
|
||||
|
||||
The following is a quick overview of the Analytics options for the deployers of this open-source project.
|
||||
big-AGI is deployed by many large-scale and enterprise users through various ways (custom builds, Docker, Vercel, Cloudflare, etc.),
|
||||
@@ -11,6 +12,36 @@ and this guide is for its customization.
|
||||
|
||||
## Service Configuration
|
||||
|
||||
### Google Analytics 4
|
||||
|
||||
- Why: user engagement and retention, performance insights, personalization, content optimization
|
||||
- What: https://support.google.com/analytics/answer/11593727
|
||||
|
||||
Google Analytics 4 (GA4) is a powerful tool for understanding user behavior and engagement.
|
||||
This can help optimize big-AGI, understanding which features are needed/used and which aren't.
|
||||
|
||||
To enable Google Analytics 4, you need to set the `NEXT_PUBLIC_GA4_MEASUREMENT_ID` environment variable
|
||||
before starting the local build or the docker build (i.e. at build time), at which point the
|
||||
server/container will be able to report analytics to your Google Analytics 4 property.
|
||||
|
||||
As of Feb 27, 2024, this feature is in development.
|
||||
|
||||
### PostHog Analytics
|
||||
|
||||
- Why: feature usage tracking, user journeys, conversion optimization, product analytics
|
||||
- What: page views, page leave events, user interactions, and deployment context
|
||||
|
||||
PostHog provides comprehensive product analytics with privacy controls. It helps understand how users interact with big-AGI's features, identify opportunities for improvement, and optimize the user experience.
|
||||
|
||||
To enable PostHog, set the `NEXT_PUBLIC_POSTHOG_KEY` environment variable at build time. PostHog is configured with tracking optimization and privacy in mind:
|
||||
|
||||
- Uses a proxy endpoint (`/a/ph`) to avoid ad blockers
|
||||
- Respects user opt-out preferences via local storage
|
||||
- Tracks only essential information without PII
|
||||
- Adds deployment context for better segmentation
|
||||
|
||||
The implementation follows PostHog's best practices for Next.js applications and includes manual page view tracking for proper single-page application support.
|
||||
|
||||
### Vercel Analytics
|
||||
|
||||
- Why: understand coarse traction, and identify deployment issues - all without tracking individual users
|
||||
@@ -31,33 +62,19 @@ const MyApp = ({ Component, emotionCache, pageProps }: MyAppProps) => <>
|
||||
</>;
|
||||
```
|
||||
|
||||
When big-AGI is served on Vercel hosts, the ```process.env.NEXT_PUBLIC_VERCEL_URL``` environment variable is trueish, and
|
||||
When big-AGI is served on Vercel hosts, the `process.env.NEXT_PUBLIC_VERCEL_URL` environment variable is trueish, and
|
||||
analytics will be sent by default to the Vercel Analytics service which is deployed by Vercel IF configured from the
|
||||
Vercel project dashboard.
|
||||
|
||||
In summary: to turn it on: activate the `Analytics` service in the Vercel project dashboard.
|
||||
|
||||
### Google Analytics 4
|
||||
|
||||
- Why: user engagement and retention, performance insights, personalization, content optimization
|
||||
- What: https://support.google.com/analytics/answer/11593727
|
||||
|
||||
Google Analytics 4 (GA4) is a powerful tool for understanding user behavior and engagement.
|
||||
This can help optimize big-AGI, understanding which features are needed/used and which aren't.
|
||||
|
||||
To enable Google Analytics 4, you need to set the `NEXT_PUBLIC_GA4_MEASUREMENT_ID` environment variable
|
||||
before starting the local build or the docker build (i.e. at build time), at which point the
|
||||
server/container will be able to report analytics to your Google Analytics 4 property.
|
||||
|
||||
As of Feb 27, 2024, this feature is in development.
|
||||
|
||||
## Configurations
|
||||
|
||||
| Scope | Default | Description / Instructions |
|
||||
|-----------------------------------------------------------------------------------------|------------------|-------------------------------------------------------------------------------------------------------------------------|
|
||||
| Your source builds of big-AGI | None | **Vercel**: enable Vercel Analytics from the dashboard. · **Google Analytics**: set environment variable at build time. |
|
||||
| Your docker builds of big-AGI | None | **Vercel**: n/a. · **Google Analytics**: set environment variable at `docker build` time. |
|
||||
| [big-agi.com](https://big-agi.com) | Vercel + Google | The main website ([privacy policy](https://big-agi.com/privacy)) hosted for free for anyone. |
|
||||
| [official Docker packages](https://github.com/enricoros/big-AGI/pkgs/container/big-agi) | Google Analytics | **Vercel**: n/a · **Google Analytics**: set to the big-agi.com Google Analytics for analytics and improvements. |
|
||||
| Scope | Default | Description / Instructions |
|
||||
|-------------------------------------------------------------------------------------------------------------------------|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Your **Source** builds of big-AGI | None | **Google Analytics**: set environment variable at build time · **PostHog**: set environment variable at build time · **Vercel**: enable Vercel Analytics from the dashboard |
|
||||
| Your **Docker** builds of big-AGI | None | (**Vercel**: n/a) · **Google Analytics**: set environment variable at `docker build` time · **PostHog**: set environment variable at `docker build` time. |
|
||||
| [get.big-agi.com](https://get.big-agi.com) (**Big-AGI 1.x Legacy**) | Vercel + Google + PostHog | The main website ([privacy policy](https://big-agi.com/privacy)) hosted for free for anyone. |
|
||||
| [prebuilt Docker packages](https://github.com/enricoros/big-AGI/pkgs/container/big-agi) (**Big-AGI 1.x**, 'latest' tag) | Google Analytics | **Vercel**: n/a · **Google Analytics**: set to the big-agi.com Google Analytics for analytics and improvements · **PostHog**: n/a |
|
||||
|
||||
Note: this information is updated as of Feb 27, 2024 and can change at any time.
|
||||
Note: this information is updated as of March 3, 2025 and can change at any time.
|
||||
@@ -31,6 +31,12 @@ file.
|
||||
|
||||
### Official Images: [ghcr.io/enricoros/big-agi](https://github.com/enricoros/big-agi/pkgs/container/big-agi)
|
||||
|
||||
#### Available Tags
|
||||
|
||||
- **`:latest`** / **`:stable`** - Latest stable release (recommended)
|
||||
- **`:development`** - Main branch (bleeding edge)
|
||||
- **`:v2.0.0`** - Specific versions
|
||||
|
||||
#### Run using *docker* 🚀
|
||||
|
||||
```bash
|
||||
|
||||
@@ -19,7 +19,6 @@ services:
|
||||
- .env
|
||||
environment:
|
||||
- PUPPETEER_WSS_ENDPOINT=ws://browserless:3000
|
||||
command: [ "next", "start", "-p", "3000" ]
|
||||
depends_on:
|
||||
- browserless
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
This document provides an explanation of the environment variables used in the big-AGI application.
|
||||
|
||||
**All variables are optional**; and _UI options_ take precedence over _backend environment variables_,
|
||||
which take place over _defaults_. This file is kept in sync with [`../src/server/env.mjs`](../src/server/env.mjs).
|
||||
which take precedence over _defaults_. This file is kept in sync with [`../src/server/env.ts`](../src/server/env.ts).
|
||||
|
||||
### Setting Environment Variables
|
||||
|
||||
@@ -23,6 +23,8 @@ MDB_URI=
|
||||
OPENAI_API_KEY=
|
||||
OPENAI_API_HOST=
|
||||
OPENAI_API_ORG_ID=
|
||||
ALIBABA_API_HOST=
|
||||
ALIBABA_API_KEY=
|
||||
AZURE_OPENAI_API_ENDPOINT=
|
||||
AZURE_OPENAI_API_KEY=
|
||||
ANTHROPIC_API_KEY=
|
||||
@@ -33,6 +35,7 @@ GROQ_API_KEY=
|
||||
LOCALAI_API_HOST=
|
||||
LOCALAI_API_KEY=
|
||||
MISTRAL_API_KEY=
|
||||
MOONSHOT_API_KEY=
|
||||
OLLAMA_API_HOST=
|
||||
OPENPIPE_API_KEY=
|
||||
OPENROUTER_API_KEY=
|
||||
@@ -54,17 +57,18 @@ GOOGLE_CSE_ID=
|
||||
ELEVENLABS_API_KEY=
|
||||
ELEVENLABS_API_HOST=
|
||||
ELEVENLABS_VOICE_ID=
|
||||
# Text-To-Image: Prodia
|
||||
PRODIA_API_KEY=
|
||||
|
||||
# Backend HTTP Basic Authentication (see `deploy-authentication.md` for turning on authentication)
|
||||
HTTP_BASIC_AUTH_USERNAME=
|
||||
HTTP_BASIC_AUTH_PASSWORD=
|
||||
|
||||
|
||||
# Frontend variables
|
||||
# Frontend variables
|
||||
NEXT_PUBLIC_MOTD=
|
||||
NEXT_PUBLIC_GA4_MEASUREMENT_ID=
|
||||
NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID=
|
||||
NEXT_PUBLIC_PLANTUML_SERVER_URL=
|
||||
NEXT_PUBLIC_POSTHOG_KEY=
|
||||
```
|
||||
|
||||
## Backend Variables
|
||||
@@ -88,8 +92,13 @@ requiring the user to enter an API key
|
||||
| `OPENAI_API_KEY` | API key for OpenAI | Recommended |
|
||||
| `OPENAI_API_HOST` | Changes the backend host for the OpenAI vendor, to enable platforms such as Helicone and CloudFlare AI Gateway | Optional |
|
||||
| `OPENAI_API_ORG_ID` | Sets the "OpenAI-Organization" header field to support organization users | Optional |
|
||||
| `ALIBABA_API_HOST` | The Alibaba AI OpenAI-compatible endpoint | Optional |
|
||||
| `ALIBABA_API_KEY` | The API key for Alibaba AI | Optional |
|
||||
| `AZURE_OPENAI_API_ENDPOINT` | Azure OpenAI endpoint - host only, without the path | Optional, but if set `AZURE_OPENAI_API_KEY` must also be set |
|
||||
| `AZURE_OPENAI_API_KEY` | Azure OpenAI API key, see [config-azure-openai.md](config-azure-openai.md) | Optional, but if set `AZURE_OPENAI_API_ENDPOINT` must also be set |
|
||||
| `AZURE_OPENAI_DISABLE_V1` | Disables the next-generation v1 API for GPT-5-like models (set to 'true' to disable) | Optional, defaults to enabled |
|
||||
| `AZURE_OPENAI_API_VERSION` | API version for traditional deployment-based endpoints | Optional, defaults to '2025-04-01-preview' |
|
||||
| `AZURE_DEPLOYMENTS_API_VERSION` | API version for the deployments listing endpoint | Optional, defaults to '2023-03-15-preview' |
|
||||
| `ANTHROPIC_API_KEY` | The API key for Anthropic | Optional |
|
||||
| `ANTHROPIC_API_HOST` | Changes the backend host for the Anthropic vendor, to enable platforms such as AWS Bedrock | Optional |
|
||||
| `DEEPSEEK_API_KEY` | The API key for Deepseek AI | Optional |
|
||||
@@ -98,6 +107,7 @@ requiring the user to enter an API key
|
||||
| `LOCALAI_API_HOST` | Sets the URL of the LocalAI server, or defaults to http://127.0.0.1:8080 | Optional |
|
||||
| `LOCALAI_API_KEY` | The (Optional) API key for LocalAI | Optional |
|
||||
| `MISTRAL_API_KEY` | The API key for Mistral | Optional |
|
||||
| `MOONSHOT_API_KEY` | The API key for Moonshot AI | Optional |
|
||||
| `OLLAMA_API_HOST` | Changes the backend host for the Ollama vendor. See [config-local-ollama.md](config-local-ollama.md) | |
|
||||
| `OPENPIPE_API_KEY` | The API key for OpenPipe | Optional |
|
||||
| `OPENROUTER_API_KEY` | The API key for OpenRouter | Optional |
|
||||
@@ -123,12 +133,11 @@ Enable the app to Talk, Draw, and Google things up.
|
||||
|
||||
| Variable | Description |
|
||||
|:---------------------------|:------------------------------------------------------------------------------------------------------------------------|
|
||||
| **Text-To-Speech** | [ElevenLabs](https://elevenlabs.io/) is a high quality speech synthesis service |
|
||||
| **Text-To-Speech** | ElevenLabs, Inworld, OpenAI TTS, LocalAI, and browser Web Speech API are supported |
|
||||
| `ELEVENLABS_API_KEY` | ElevenLabs API Key - used for calls, etc. |
|
||||
| `ELEVENLABS_API_HOST` | Custom host for ElevenLabs |
|
||||
| `ELEVENLABS_VOICE_ID` | Default voice ID for ElevenLabs |
|
||||
| **Text-To-Image** | [Prodia](https://prodia.com/) is a reliable image generation service |
|
||||
| `PRODIA_API_KEY` | Prodia API Key - used with '/imagine ...' |
|
||||
| | *Note: OpenAI TTS and LocalAI TTS reuse credentials from your configured LLM services (no separate env vars needed)* |
|
||||
| **Google Custom Search** | [Google Programmable Search Engine](https://programmablesearchengine.google.com/about/) produces links to pages |
|
||||
| `GOOGLE_CLOUD_API_KEY` | Google Cloud API Key, used with the '/react' command - [Link to GCP](https://console.cloud.google.com/apis/credentials) |
|
||||
| `GOOGLE_CSE_ID` | Google Custom/Programmable Search Engine ID - [Link to PSE](https://programmablesearchengine.google.com/) |
|
||||
@@ -142,10 +151,14 @@ Enable the app to Talk, Draw, and Google things up.
|
||||
|
||||
The value of these variables are passed to the frontend (Web UI) - make sure they do not contain secrets.
|
||||
|
||||
| Variable | Description |
|
||||
|:----------------------------------|:-----------------------------------------------------------------------------------------|
|
||||
| `NEXT_PUBLIC_GA4_MEASUREMENT_ID` | The measurement ID for Google Analytics 4. (see [deploy-analytics](deploy-analytics.md)) |
|
||||
| `NEXT_PUBLIC_PLANTUML_SERVER_URL` | The URL of the PlantUML server, used for rendering UML diagrams. (code in RenderCode.tsx) |
|
||||
| Variable | Description |
|
||||
|:----------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `NEXT_PUBLIC_DEBUG_BREAKS` | (optional, development) When set to 'true', enables automatic debugger breaks on DEV/error/critical logs in development builds |
|
||||
| `NEXT_PUBLIC_MOTD` | Message of the Day - displays a dismissible banner at the top of the app (see [customizations](customizations.md) for the template variables). Example: 🔔 Welcome to our deployment! Version {{app_build_pkgver}} built on {{app_build_time}}. |
|
||||
| `NEXT_PUBLIC_GA4_MEASUREMENT_ID` | (optional) The measurement ID for Google Analytics 4. (see [deploy-analytics](deploy-analytics.md)) |
|
||||
| `NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID` | (optional) Google OAuth Client ID for Drive Picker. Can reuse `AUTH_GOOGLE_ID`. See [Google Drive](config-feature-google-drive.md) |
|
||||
| `NEXT_PUBLIC_PLANTUML_SERVER_URL` | The URL of the PlantUML server, used for rendering UML diagrams. Allows using custom local servers. |
|
||||
| `NEXT_PUBLIC_POSTHOG_KEY` | (optional) Key for PostHog analytics. (see [deploy-analytics](deploy-analytics.md)) |
|
||||
|
||||
> Important: these variables must be set at build time, which is required by Next.js to pass them to the frontend.
|
||||
> This is in contrast to the backend variables, which can be set when starting the local server/container.
|
||||
|
||||
@@ -0,0 +1,99 @@
|
||||
# Big-AGI Data Ownership Guide
|
||||
|
||||
Big-AGI is a **client-first** web application, which means it prioritizes speed and data ownership compared to cloud apps.
|
||||
Your *API keys*, *chat history*, and *settings* live in your
|
||||
browser's [local storage](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage), not
|
||||
on cloud servers.
|
||||
|
||||
You can use Big-AGI in two ways:
|
||||
|
||||
1. Run it yourself (open-source)
|
||||
2. Use big-agi.com (hosted service)
|
||||
|
||||
This guide explains how the open-source version handles your data. You can verify everything in [the source code](https://github.com/enricoros/big-agi).
|
||||
|
||||
## Client-Side Storage
|
||||
|
||||
Within Big-AGI almost all chat/keys data is handled client-side in your browser using two
|
||||
standard browser storage mechanisms:
|
||||
|
||||
- **Local Storage**: API keys, settings, and configurations ([learn more](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage))
|
||||
- **IndexedDB**: Chat history and larger files ([learn more](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API))
|
||||
|
||||
The Big-AGI backend mainly passes requests to AI services (OpenAI, Anthropic, etc.). It doesn't store your data, except for the chat-sharing function if used.
|
||||
|
||||
You can see your data in your browser's local storage and IndexedDB - try it yourself:
|
||||
|
||||
1. In Chrome: Open DevTools (press F12 on Windows, ⌘ + ⌥ + I on Mac)
|
||||
2. Click 'Application' > 'Local Storage'
|
||||
3. See your settings and API keys
|
||||
|
||||

|
||||
|
||||
### What This Means For You
|
||||
|
||||
Storing data in your browser means:
|
||||
|
||||
- Your data stays on **one device/browser only**
|
||||
- Clearing browser data **erases your chats** - make backups
|
||||
- Anyone using your browser can see your chats and keys
|
||||
- Running your own server needs technical skills
|
||||
|
||||
### Local Device Identifier
|
||||
|
||||
Big-AGI generates a _device identifier_ that combines timestamp and random components, stored only on your device. This identifier:
|
||||
|
||||
- Is used only for the **optional sync functionality** between your devices (not yet ready)
|
||||
- Helps maintain data consistency when using Big-AGI across multiple devices
|
||||
- Remains completely local unless you explicitly enable sync
|
||||
- Is not used for tracking, analytics, or telemetry
|
||||
- Can be deleted anytime by clearing local storage
|
||||
- Is fully transparent - see the implementation in `src/common/stores/store-client.ts`
|
||||
|
||||
## How Data Flows
|
||||
|
||||
AI interactions in Big-AGI, such as chats, AI titles, text to speech, browsing, flow through three components:
|
||||
|
||||
1. **Browser** (client/installed App) - Stores your keys & data locally
|
||||
2. **Backend** (routing server) - Passes requests to AI services
|
||||
3. **AI Services** - Where the actual AI processing happens
|
||||
|
||||
### Self-Deployed Version: Your Infrastructure
|
||||
|
||||
You run the server. Your data only leaves when making AI requests.
|
||||
The keys and chats are under your control and pass through your code, and are sent to
|
||||
the upstream AI services on a per-request basis.
|
||||
|
||||

|
||||
|
||||
### Web Version: Using big-agi.com
|
||||
|
||||
Your data passes through the hosted Big-AGI edge network to reach AI services. The keys
|
||||
and chats pass through Big-AGI's edge network to reach the AI services on a per-request basis,
|
||||
and then are sent to the upstream AI services.
|
||||
|
||||

|
||||
|
||||
## Security Best Practices
|
||||
|
||||
**Basic Security**:
|
||||
|
||||
- **Never share API keys**
|
||||
- **Don't use shared computers**
|
||||
- Use private browsing for one-off sessions
|
||||
- Use trusted networks
|
||||
- Back up your data
|
||||
|
||||
**When Running Your Own Server**:
|
||||
|
||||
- Use [environment variables](environment-variables.md) for API keys
|
||||
- Run on trusted infrastructure
|
||||
- Keep your installation updated
|
||||
|
||||
## TL;DR
|
||||
|
||||
Your API keys and chats stay in your browser. The server only passes requests to AI services.
|
||||
|
||||
Use big-agi.com for convenience, or [run it yourself](installation.md) for full control.
|
||||
|
||||
Need help? Join our [Discord](https://discord.gg/MkH4qj2Jp9) or open a [GitHub issue](https://github.com/enricoros/big-agi/issues).
|
||||
@@ -0,0 +1,28 @@
|
||||
# Frequently Asked Questions
|
||||
|
||||
Quick answers to common questions about Big-AGI. For detailed documentation, see our [Website Docs](https://big-agi.com/docs).
|
||||
|
||||
### Versions
|
||||
|
||||
<details open>
|
||||
<summary><b>How do I check my Big-AGI version?</b></summary>
|
||||
|
||||
You can see the version in the _News_ section of the app, as per the image below.
|
||||
|
||||

|
||||
</details>
|
||||
|
||||
<details open>
|
||||
<summary><b>How do I verify my Vercel deployment version?</b></summary>
|
||||
|
||||
You can go to the **deployments** section of your Vercel project, and at a quick glance see
|
||||
what is the latest deployment status, time, and link to the source code.
|
||||
|
||||

|
||||
|
||||
Each deployment links directly to its source code commit.
|
||||
</details>
|
||||
|
||||
---
|
||||
|
||||
Missing something? [Open an issue](https://github.com/enricoros/big-agi/issues/new) or [join our Discord](https://discord.gg/MkH4qj2Jp9).
|
||||
@@ -136,11 +136,6 @@ Deploy big-AGI on a Kubernetes cluster for enhanced scalability and management.
|
||||
|
||||
For more detailed instructions on Kubernetes deployment, including updating and troubleshooting, refer to our [Kubernetes Deployment Guide](deploy-k8s.md).
|
||||
|
||||
### Midori AI Subsystem for Docker Deployment
|
||||
|
||||
Follow the instructions found on [Midori AI Subsystem Site](https://io.midori-ai.xyz/subsystem/manager/)
|
||||
for your host OS. After completing the setup process, install the Big-AGI docker backend to the Midori AI Subsystem.
|
||||
|
||||
## Enterprise-Grade Installation
|
||||
|
||||
For businesses seeking a fully-managed, scalable solution, consider our managed installations.
|
||||
@@ -151,6 +146,6 @@ Enjoy all the features of big-AGI without the hassle of infrastructure managemen
|
||||
Join our vibrant community of developers, researchers, and AI enthusiasts. Share your projects, get help, and collaborate with others.
|
||||
|
||||
- [Discord Community](https://discord.gg/MkH4qj2Jp9)
|
||||
- [Twitter](https://twitter.com/yourusername)
|
||||
- [Twitter](https://twitter.com/enricoros)
|
||||
|
||||
For any questions or inquiries, please don't hesitate to [reach out to our team](mailto:hello@big-agi.com).
|
||||
|
||||
@@ -16,6 +16,8 @@ stringData:
|
||||
OPENAI_API_KEY: ""
|
||||
OPENAI_API_HOST: ""
|
||||
OPENAI_API_ORG_ID: ""
|
||||
ALIBABA_API_HOST: ""
|
||||
ALIBABA_API_KEY: ""
|
||||
AZURE_OPENAI_API_ENDPOINT: ""
|
||||
AZURE_OPENAI_API_KEY: ""
|
||||
ANTHROPIC_API_KEY: ""
|
||||
@@ -26,6 +28,7 @@ stringData:
|
||||
LOCALAI_API_HOST: ""
|
||||
LOCALAI_API_KEY: ""
|
||||
MISTRAL_API_KEY: ""
|
||||
MOONSHOT_API_KEY: ""
|
||||
OLLAMA_API_HOST: ""
|
||||
OPENPIPE_API_KEY: ""
|
||||
OPENROUTER_API_KEY: ""
|
||||
@@ -44,6 +47,3 @@ stringData:
|
||||
ELEVENLABS_API_KEY: ""
|
||||
ELEVENLABS_API_HOST: ""
|
||||
ELEVENLABS_VOICE_ID: ""
|
||||
|
||||
# Text-To-Image: Prodia
|
||||
PRODIA_API_KEY: ""
|
||||
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 55 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 62 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 234 KiB |
@@ -0,0 +1,17 @@
|
||||
import { defineConfig } from "eslint/config";
|
||||
import path from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import js from "@eslint/js";
|
||||
import { FlatCompat } from "@eslint/eslintrc";
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
const compat = new FlatCompat({
|
||||
baseDirectory: __dirname,
|
||||
recommendedConfig: js.configs.recommended,
|
||||
allConfig: js.configs.all
|
||||
});
|
||||
|
||||
export default defineConfig([{
|
||||
extends: compat.extends("next/core-web-vitals"),
|
||||
}]);
|
||||
@@ -0,0 +1,38 @@
|
||||
# Knowledge Base
|
||||
|
||||
Internal documentation for Big-AGI architecture and systems, for use by AI agents and developers.
|
||||
|
||||
**Structure:**
|
||||
- `/kb/modules/` - Core business logic (e.g. AIX)
|
||||
- `/kb/systems/` - Infrastructure (routing, startup)
|
||||
|
||||
## Index
|
||||
|
||||
### Modules Documentation
|
||||
|
||||
#### AIX - AI Communication Framework
|
||||
- **[AIX.md](modules/AIX.md)** - AIX streaming architecture documentation
|
||||
- **[AIX-callers-analysis.md](modules/AIX-callers-analysis.md)** - Analysis of AIX entry points, call chains, common and different rendering, error handling, etc.
|
||||
|
||||
#### CSF - Client-Side Fetch
|
||||
- **[CSF.md](systems/client-side-fetch.md)** - Direct browser-to-API communication for LLM requests
|
||||
|
||||
### Systems Documentation
|
||||
|
||||
#### Core Platform Systems
|
||||
- **[app-routing.md](systems/app-routing.md)** - Next.js routing, provider stack, and display state hierarchy
|
||||
- **[LLM-parameters-system.md](systems/LLM-parameters-system.md)** - Language model parameter flow across the system
|
||||
|
||||
## Guidelines
|
||||
|
||||
### Writing Style
|
||||
|
||||
- **Direct and factual** - No marketing language
|
||||
- **Present tense** - "AIX handles streaming" not "AIX will handle"
|
||||
- **Active voice** - "The system processes" not "Processing is done by"
|
||||
- **Concrete examples** - Show actual code/config when helpful, briefly
|
||||
|
||||
### Maintenance
|
||||
|
||||
- Remove outdated information when detected!
|
||||
- Keep cross-references current when files move
|
||||
@@ -0,0 +1,144 @@
|
||||
# AIX Chat Generation Calls Analysis
|
||||
|
||||
This document analyzes all AIX function callers and their patterns for message removal, placeholder handling, and error management.
|
||||
|
||||
## AIX Function Architecture
|
||||
|
||||
### Three-Tier Call Hierarchy
|
||||
|
||||
**Core AIX Functions** (Direct tRPC API callers):
|
||||
- `aixChatGenerateContent_DMessage_FromConversation` - 8 callers (conversation streaming)
|
||||
- `aixChatGenerateContent_DMessage` - 6 callers (direct request/response)
|
||||
- `aixChatGenerateText_Simple` - 12 callers (text-only utilities)
|
||||
|
||||
**Utility Layer** (Hooks & Functions):
|
||||
- Conversation management, persona processing, content generation utilities
|
||||
|
||||
**UI Layer** (React Components):
|
||||
- User-facing interfaces with rich error states and fallback mechanisms
|
||||
|
||||
## Core Function Callers Analysis
|
||||
|
||||
### Conversation-Based Callers (`_FromConversation`)
|
||||
|
||||
| **Caller** | **Context** | **Message Removal** | **Placeholder** | **Error Handling** |
|
||||
|------------|-------------|-------------------|----------------|-------------------|
|
||||
| **Chat Persona** | `'conversation'` | `messageWasInterruptedAtStart()` → `removeMessage()` | None | Error fragments |
|
||||
| **Beam Scatter** | `'beam-scatter'` | `messageWasInterruptedAtStart()` → empty message | `SCATTER_PLACEHOLDER` | Ray status update |
|
||||
| **Beam Gather** | `'beam-gather'` | `messageWasInterruptedAtStart()` → clear fragments | `GATHER_PLACEHOLDER` | Re-throw errors |
|
||||
| **Beam Follow-up** | `'beam-followup'` | `messageWasInterruptedAtStart()` → remove message | `FOLLOWUP_PLACEHOLDER` | Status updates |
|
||||
| **ScratchChat** | `'scratch-chat'` | `aborted && !fragments` → array removal | `SCRATCH_CHAT_PLACEHOLDER` | Error fragments |
|
||||
| **Telephone** | `'call'` | None | None | Basic handling |
|
||||
| **ReAct Agent** | `'chat-react-turn'` | None | None | Append errors |
|
||||
| **Variform** | `'_DEV_'` | None | None | Throw errors |
|
||||
|
||||
### Direct Request Callers (`aixChatGenerateContent_DMessage`)
|
||||
|
||||
| **Caller** | **Context** | **Message Removal** | **Error Handling** |
|
||||
|------------|-------------|-------------------|-------------------|
|
||||
| **Auto Follow-ups** | `'chat-followup-*'` | `fragmentDelete()` on failure | `fragmentReplace()` with error |
|
||||
| **Gen CR Diffs** | `'aifn-gen-cr-diffs'` | None | State-based handling |
|
||||
| **Code Fixup** | `'fixup-code'` | None | Throw errors |
|
||||
| **Attachment Prompts** | `'chat-attachment-prompts'` | None | Throw errors |
|
||||
|
||||
### Text-Only Utilities (`aixChatGenerateText_Simple`)
|
||||
|
||||
| **Utility** | **Purpose** | **Error Strategy** | **Called By** |
|
||||
|-------------|-------------|-------------------|---------------|
|
||||
| **conversationTitle** | Auto-generate chat titles | Try/catch with fallback | UI components |
|
||||
| **conversationSummary** | Generate summaries | Try/catch with fallback | Chat drawer |
|
||||
| **useStreamChatText** | Generic text streaming | Error state management | FlattenerModal |
|
||||
| **useLLMChain** | Multi-step processing | Step-by-step handling | Persona creation |
|
||||
| **imaginePromptFromText** | Text → image prompts | Simple propagation | Image generation |
|
||||
| **aifnBeamGenerateBriefing** | Beam summaries | Null return on error | Beam completion |
|
||||
| **useAifnPersonaGenIdentity** | Extract persona identity | Query error handling | Persona flows |
|
||||
| **DiagramsModal** | Generate diagrams | Component error state | Manual generation |
|
||||
|
||||
## Message Removal Patterns
|
||||
|
||||
### 1. Complete Message Removal
|
||||
- **Chat Persona**: `messageWasInterruptedAtStart()` → `messageEditor.removeMessage()`
|
||||
- **ScratchChat**: `outcome === 'aborted' && !fragments?.length` → array removal
|
||||
- **Trigger**: Message aborted before any content generated
|
||||
|
||||
### 2. Fragment-Level Management
|
||||
- **Beam Gather**: Clear fragments array but keep message structure
|
||||
- **Auto Follow-ups**: Delete specific placeholder fragments on failure
|
||||
- **Purpose**: Maintain message structure while removing failed content
|
||||
|
||||
### 3. Empty Message Replacement
|
||||
- **Beam Scatter**: Replace with `createDMessageEmpty()` but preserve ray structure
|
||||
- **Purpose**: Keep UI structure intact while indicating failure
|
||||
|
||||
### 4. No Removal Strategy
|
||||
- **Text-only functions**: Use fallback values, error states, or null returns
|
||||
- **Simple callers**: Propagate errors upstream for handling
|
||||
|
||||
## Error Handling by Layer
|
||||
|
||||
### UI Layer (Components)
|
||||
- **Pattern**: Rich error states with user-facing messages
|
||||
- **Examples**: DiagramsModal, FlattenerModal
|
||||
- **Features**: Retry mechanisms, fallback UI, loading states
|
||||
|
||||
### Utility Layer (Hooks/Functions)
|
||||
- **Pattern**: Graceful degradation with fallbacks
|
||||
- **Examples**: conversationTitle, conversationSummary
|
||||
- **Features**: Silent failures, default values, try/catch blocks
|
||||
|
||||
### Core Layer (Direct API)
|
||||
- **Pattern**: Minimal handling, error propagation
|
||||
- **Examples**: Code Fixup, Attachment Prompts
|
||||
- **Features**: Assumes upstream error handling
|
||||
|
||||
## Key Implementation Details
|
||||
|
||||
### Message Removal Detection
|
||||
```typescript
|
||||
// Core detection logic
|
||||
function messageWasInterruptedAtStart(message: Pick<DMessage, 'generator' | 'fragments'>): boolean {
|
||||
return message.generator?.tokenStopReason === 'client-abort' && message.fragments.length === 0;
|
||||
}
|
||||
```
|
||||
|
||||
### Placeholder Management
|
||||
- **Initialization**: `createPlaceholderVoidFragment(placeholderText)`
|
||||
- **Replacement**: During streaming updates or on completion
|
||||
- **Cleanup**: Delete on error to avoid stale content
|
||||
|
||||
### Context Patterns
|
||||
- **Production**: `'conversation'`, `'beam-scatter'`, `'scratch-chat'`
|
||||
- **Features**: `'chat-followup-*'`, `'fixup-code'`, `'ai-diagram'`
|
||||
- **Development**: `'_DEV_'`
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Message Removal
|
||||
- Use `messageWasInterruptedAtStart()` for consistent detection
|
||||
- Only remove messages with no content that were client-aborted
|
||||
- Consider UI context when choosing removal vs. clearing strategy
|
||||
|
||||
### Error Handling
|
||||
- **Fragment-level**: Use `messageEditor.fragmentReplace()` with error fragments
|
||||
- **Message-level**: Use `messageEditor.removeMessage()` or array removal
|
||||
- **Status-level**: Update component state for UI feedback
|
||||
|
||||
### Placeholder Management
|
||||
- Initialize with descriptive placeholders using `createPlaceholderVoidFragment()`
|
||||
- Replace during streaming updates
|
||||
- Clean up on error to prevent stale content
|
||||
|
||||
## Architectural Insights
|
||||
|
||||
1. **Layered Error Handling**: Sophistication increases closer to UI
|
||||
2. **Context Specialization**: Different contexts for different use cases
|
||||
3. **Streaming vs Non-Streaming**: Conversation functions stream, utilities typically don't
|
||||
4. **Message vs Fragment Management**: Different strategies for different UI needs
|
||||
|
||||
The most sophisticated handling is in **Beam modules** and **Chat Persona** with comprehensive removal logic, while simpler callers rely on upstream error handling.
|
||||
|
||||
## Code References
|
||||
|
||||
- **Core function**: `src/modules/aix/client/aix.client.ts:aixChatGenerateContent_DMessage_FromConversation`
|
||||
- **Removal check**: `src/common/stores/chat/chat.message.ts:388:messageWasInterruptedAtStart()`
|
||||
- **Placeholder creation**: `src/common/stores/chat/chat.fragments.ts:createPlaceholderVoidFragment()`
|
||||
@@ -0,0 +1,190 @@
|
||||
# AIX
|
||||
|
||||
AIX is a client/server library for integrating advanced AI capabilities into web applications.
|
||||
|
||||
## Overview
|
||||
|
||||
AIX provides real-time, type-safe communication between a Typescript application and AI providers.
|
||||
|
||||
Built with tRPC, it manages the lifecycle of AI-generated content from request to rendering, supporting both streaming and non-streaming AI providers.
|
||||
|
||||
## Features
|
||||
|
||||
- Content Generation
|
||||
- Multi-Modal streaming/non-streaming
|
||||
- Throttled batching and error handling
|
||||
- Server-side timeout/retry
|
||||
- Function Calling and Code Execution
|
||||
- Complex AI Workflows (future)
|
||||
- Embeddings / Information Retrieval / Image Manipulation (future)
|
||||
|
||||
## AIX Providers support
|
||||
|
||||
| Service | Chat | Function Calling | Multi-Modal Input | Cont. (1) | Streaming | Idiosyncratic |
|
||||
|------------|------------|------------------|-------------------|-----------|-----------|---------------|
|
||||
| Alibaba | ✅ | ✅ | | ✅ | Yes + 📦 | |
|
||||
| Anthropic | ✅ | ✅ + Parallel | Img: ✅ | ✅ | Yes + 📦 | |
|
||||
| Azure | ✅ | ✅ | | ✅ | Yes + 📦 | |
|
||||
| Deepseek | ✅ | ❌ (rejected) | | ✅ | Yes + 📦 | |
|
||||
| Gemini | ✅ | ✅ + Parallel | Img: ✅ | ✅ | Yes + 📦 | Code ex.: ✅ |
|
||||
| Groq | ✅ | ✅ + Parallel | | ✅ | Yes + 📦 | |
|
||||
| LM Studio | ✅ | ❌ (not working) | | ❌ | Yes + 📦 | |
|
||||
| Local AI | ✅ | ✅ | | ❌ | Yes + 📦 | |
|
||||
| Mistral | ✅ | ✅ | | ✅ | Yes + 📦 | |
|
||||
| OpenAI | ✅ | ✅ + Parallel | Img: ✅ | ✅ | Yes + 📦 | |
|
||||
| OpenPipe | ✅ | ✅ | Img: ✅ | ✅ | Yes + 📦 | |
|
||||
| OpenRouter | ✅ | ❌ (inconsistent) | | ✅ | Yes + 📦 | |
|
||||
| Perplexity | ✅ | ❌ (rejected) | | ✅ | Yes + 📦 | |
|
||||
| TogetherAI | ✅ | ✅ | | ✅ | Yes + 📦 | |
|
||||
| xAI | | | | | | |
|
||||
| Z.ai | ✅ | ✅ | Img: ✅ | ✅ | Yes + 📦 | Thinking mode |
|
||||
| Ollama (2) | ❌ (broken) | ? | | | | |
|
||||
|
||||
Notes:
|
||||
|
||||
- 1: Continuation marks: a. sends reason=max-tokens (streaming/non-streaming), b. TBA
|
||||
- 2: Ollama has not been ported to AIX yet due to the custom APIs.
|
||||
|
||||
## 1. System Architecture
|
||||
|
||||
The subsystem comprises three main components:
|
||||
|
||||
1. **Client (e.g. Next.js Frontend)**
|
||||
|
||||
- Initiates requests
|
||||
- Renders AI-generated content in real-time
|
||||
- Reconstructs streamed data
|
||||
|
||||
2. **Server (e.g. Next.js Backend)**
|
||||
|
||||
- Acts as an intermediary between client and AI providers
|
||||
- Handles request preparation, dispatching, and response processing
|
||||
- Streams responses back to the client
|
||||
|
||||
3. **Upstream AI Providers**
|
||||
|
||||
- Generate AI content based on requests
|
||||
|
||||
### ChatGenerate workflow:
|
||||
|
||||
1. Request Initialization: AIX Client prepares and sends request (systemInstruction, messages=AixWire_Parts[], etc.) to AIX Server
|
||||
2. Dispatch Preparation: AIX Server prepares for upstream communication
|
||||
3. AI Provider Interaction: AIX Server communicates with AI Provider (streaming or non-streaming)
|
||||
4. Data Decoding, Transformation and Transmission: AIX Server sends AixWire_Particles to AIX Client
|
||||
5. Client-side Processing: Client's ContentReassembler processes AixWire_Particles into a list (likely a single) of multi-fragment (DMessageContentFragment[]) messages
|
||||
6. Completion: AIX Server sends 'done' control message, AIX Client finalizes data update
|
||||
7. Error Handling: AIX Server sends specific error messages when necessary
|
||||
|
||||
## 2. Files and Folders
|
||||
|
||||
AIX is organized into the following files and folders:
|
||||
|
||||
1. Client-Side (`/client/`):
|
||||
|
||||
- `aix.client.ts`: Main client-side entry point for AIX operations.
|
||||
- `aix.client.chatGenerateRequest.ts`: Handles conversion of chat messages to AIX-compatible format (AixWire_Content, AixWire_Parts, etc.).
|
||||
|
||||
2. Server-Side (`/server/`):
|
||||
|
||||
- API (`/server/api/`) - Client to Server communication:
|
||||
- `aix.router.ts`: Defines the tRPC router for AIX operations.
|
||||
- `aix.wiretypes.ts`: Contains Zod schemas for types and calls incoming from the client (AixWire_Parts, AixWire_Content, AixWire_Tooling, AixWire_API, ...), and outgoing (AixWire_Particles)
|
||||
|
||||
- Dispatch (`/server/dispatch/`) - Server to AI Provider communication:
|
||||
- `/server/dispatch/chatGenerate/`: Content Generation with chat-style inputs:
|
||||
- `./adapters/`: Adapters for creating API requests for different AI protocols (Anthropic, Gemini, OpenAI).
|
||||
- `./parsers/`: Parsers for streaming/non-streaming responses from different AI protocols (the same three).
|
||||
- `chatGenerate.dispatch.ts`: Creates a pipeline to execute Chat Generation to a specific provider.
|
||||
- `ChatGenerateTransmitter.ts`: Used to serialize and transmit AixWire_Particles to the client.
|
||||
- `/server/dispatch/wiretypes/`: AI provider Wire Types:
|
||||
- Type definitions for different AI providers/protocols (Anthropic, Gemini, OpenAI).
|
||||
- `stream.demuxers.ts`: Handles demuxing of different stream formats.
|
||||
|
||||
## 3. Architecture Diagram
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant AIX Client
|
||||
participant AIX Server
|
||||
participant PartTransmitter
|
||||
participant AI Provider
|
||||
AIX Client ->> AIX Client: Initialize ContentReassembler
|
||||
AIX Client ->> AIX Client: Convert DMessage*Part to AixWire_Parts
|
||||
AIX Client ->> AIX Server: Send messages (arrays of AixWire_Parts)
|
||||
AIX Server ->> AIX Server: Prepare Dispatch (Upstream request, demux, parsing)
|
||||
|
||||
alt Dispatch Preparation Error
|
||||
AIX Server ->> AIX Client: Send `dispatch-prepare` error message
|
||||
else Dispatch Fetch
|
||||
AIX Server ->> AI Provider: Send AI-provider specific stream/non-stream request
|
||||
AIX Server ->> AIX Client: Send 'start' control message
|
||||
AIX Server ->> PartTransmitter: Initialize part particle serialization
|
||||
|
||||
alt Streaming AI Provider
|
||||
loop Until stream end or error
|
||||
AI Provider ->> AIX Server: Stream response chunk
|
||||
AIX Server ->> AIX Server: Demux chunk into DispatchEvents
|
||||
loop For each AI-provider specific DispatchEvent
|
||||
AIX Server ->> AIX Server: Parse DispatchEvent
|
||||
AIX Server ->> PartTransmitter: (Parser) Calls serialization functions
|
||||
PartTransmitter ->> PartTransmitter: Generate and throttle AixWire_PartParticles
|
||||
PartTransmitter -->> AIX Server: Yield AixWire_PartParticle
|
||||
end
|
||||
AIX Server ->> AIX Client: Send accumulated AixWire_PartParticles
|
||||
end
|
||||
AIX Server ->> PartTransmitter: Request any remaining particles
|
||||
PartTransmitter -->> AIX Server: Yield any final AixWire_PartParticles
|
||||
AIX Server ->> AIX Client: Send final AixWire_PartParticles (if any)
|
||||
else Non-Streaming AI Provider
|
||||
AI Provider ->> AIX Server: Send AI-provider specific complete response
|
||||
alt AI-provider specific full-response parser
|
||||
AIX Server ->> AIX Server: Parse full response
|
||||
AIX Server ->> PartTransmitter: Call particle serialization functions
|
||||
PartTransmitter ->> PartTransmitter: Generate AixWire_PartParticle
|
||||
PartTransmitter -->> AIX Server: Yield ALL AixWire_PartParticle
|
||||
end
|
||||
AIX Server ->> AIX Client: Send all AixWire_PartParticles
|
||||
end
|
||||
AIX Server ->> AIX Client: Send 'done' control message
|
||||
loop For each received batch of particles
|
||||
AIX Client ->> AIX Client: ContentReassembler processes particles into DMessage*Part
|
||||
alt DMessageTextPart
|
||||
AIX Client ->> AIX Client: Update UI with text content
|
||||
else DMessageImageRefPart
|
||||
AIX Client ->> AIX Client: Load and display image
|
||||
else DMessageToolInvocationPart
|
||||
AIX Client ->> AIX Client: Process tool invocation (dev only)
|
||||
else DMessageToolResponsePart
|
||||
AIX Client ->> AIX Client: Process tool response (dev only)
|
||||
else DMessageErrorPart
|
||||
AIX Client ->> AIX Client: Display error message
|
||||
else DMessageDocPart
|
||||
AIX Client ->> AIX Client: Process and display document
|
||||
else DMetaPlaceholderPart
|
||||
AIX Client ->> AIX Client: Handle placeholder (non-submitted)
|
||||
end
|
||||
end
|
||||
AIX Client ->> AIX Client: Finalize data update
|
||||
end
|
||||
|
||||
alt Error Handling
|
||||
AIX Server ->> AIX Client: Send 'error' specific control messages
|
||||
end
|
||||
|
||||
note over AIX Server, AI Provider: Server-side Timeout/Retry mechanism
|
||||
loop Retry on timeout (server-side)
|
||||
AIX Server ->> AI Provider: Retry request
|
||||
end
|
||||
|
||||
note over AIX Client: Client-side Timeout mechanism
|
||||
AIX Client ->> AIX Client: Timeout if no response received within set time
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 2025-03-14 Update
|
||||
AIX is used in production in Big-AGI and is stable and performant.
|
||||
The code is tightly coupled with the tRPC framework and the rest of our codebase,
|
||||
so it is not recommended to use it outside of our ecosystem.
|
||||
|
||||
For a great Typescript alternative we recommend the Vercel AI SDK.
|
||||
@@ -0,0 +1,131 @@
|
||||
# LLM Parameters System
|
||||
|
||||
This document describes how parameters flow through Big-AGI's LLM parameters system, from definition to API invocation.
|
||||
|
||||
## System Overview
|
||||
|
||||
The LLM parameters system operates across five layers that transform parameters from global definitions to vendor-specific API calls. Each layer serves a specific purpose in the parameter resolution pipeline.
|
||||
|
||||
## Parameter Flow Architecture
|
||||
|
||||
### Layer 1: Parameter Registry
|
||||
**File**: `src/common/stores/llms/llms.parameters.ts`
|
||||
|
||||
The `DModelParameterRegistry` defines all available parameters with their constraints and metadata. Each parameter includes type information, validation rules, and default behavior.
|
||||
|
||||
**Example**: `llmVndOaiReasoningEffort4` defines a 4-value enum with 'medium' as the required fallback.
|
||||
|
||||
**Default Value System**: The registry supports multiple default mechanisms:
|
||||
- `initialValue` - Parameter's base default (e.g., `llmVndOaiRestoreMarkdown: true`)
|
||||
- `requiredFallback` - Fallback for required parameters (e.g., `llmTemperature: 0.5`)
|
||||
- `nullable` - Parameters that can be explicitly null to skip API transmission
|
||||
|
||||
### Layer 2: Model Specifications
|
||||
**File**: `src/modules/llms/server/llm.server.types.ts`
|
||||
|
||||
Models declare which parameters they support through `parameterSpecs` arrays. Each spec can override registry defaults:
|
||||
|
||||
```typescript
|
||||
parameterSpecs: [
|
||||
{ paramId: 'llmVndOaiReasoningEffort4' },
|
||||
{ paramId: 'llmVndAntThinkingBudget', initialValue: 1024 }, // Override default
|
||||
{ paramId: 'llmVndGeminiThinkingBudget', rangeOverride: [0, 8192] }, // Custom range
|
||||
]
|
||||
```
|
||||
|
||||
**Parameter Visibility**: The `hidden` flag removes parameters from the UI while keeping them functional. Models can also mark parameters as `required`.
|
||||
|
||||
### Layer 3: Client Configuration
|
||||
|
||||
The system provides two UI configurators with different scopes:
|
||||
|
||||
#### Full Model Configuration Dialog
|
||||
**File**: `src/modules/llms/models-modal/LLMParametersEditor.tsx`
|
||||
Shows all non-hidden parameters from model's `parameterSpecs`. Used in the models modal for complete configuration.
|
||||
|
||||
#### ChatPanel Quick Controls
|
||||
**File**: `src/apps/chat/components/layout-panel/ChatPanelModelParameters.tsx`
|
||||
Shows only parameters that are:
|
||||
- In model's `parameterSpecs`
|
||||
- Listed in `_interestingParameters` array
|
||||
- Not marked as `hidden`
|
||||
|
||||
**Value Resolution**: Both UIs use `getAllModelParameterValues()` to merge:
|
||||
1. **Fallback values** - Required parameters get their `requiredFallback` values
|
||||
2. **Initial values** - Model's `initialParameters` (populated during model creation)
|
||||
3. **User values** - User's `userParameters` (highest priority)
|
||||
|
||||
### Layer 4: AIX Translation
|
||||
**File**: `src/modules/aix/client/aix.client.ts`
|
||||
|
||||
The AIX client transforms DLLM parameters to wire protocol format. This layer handles parameter precedence rules and name transformations:
|
||||
|
||||
```
|
||||
// Parameter precedence: newer 4-value version takes priority over 3-value
|
||||
...((llmVndOaiReasoningEffort4 || llmVndOaiReasoningEffort) ?
|
||||
{ vndOaiReasoningEffort: llmVndOaiReasoningEffort4 || llmVndOaiReasoningEffort } : {})
|
||||
```
|
||||
|
||||
**Client Options**: The system supports parameter overrides through `llmOptionsOverride` and complete replacement via `llmUserParametersReplacement`.
|
||||
|
||||
### Layer 5: Vendor Adaptation
|
||||
**Files**: `src/modules/aix/server/dispatch/chatGenerate/adapters/*.ts`
|
||||
|
||||
Server-side adapters translate AIX parameters to vendor APIs. Each vendor may interpret parameters differently:
|
||||
|
||||
- **OpenAI**: `vndOaiReasoningEffort` → `reasoning_effort`
|
||||
- **Perplexity**: Reuses OpenAI parameter format
|
||||
- **OpenAI Responses API**: Maps to structured reasoning config with additional logic
|
||||
|
||||
## Parameter Initialization Process
|
||||
|
||||
When a model is loaded:
|
||||
|
||||
1. **Model Creation**: `modelDescriptionToDLLM()` creates the DLLM with empty `initialParameters`
|
||||
2. **Initial Value Application**: `applyModelParameterInitialValues()` populates initial values from:
|
||||
- Model spec `initialValue` (highest priority)
|
||||
- Registry `initialValue` (fallback)
|
||||
3. **Runtime Resolution**: `getAllModelParameterValues()` creates final parameter set:
|
||||
- Required fallbacks (for missing required parameters)
|
||||
- Initial parameters (model defaults)
|
||||
- User parameters (user overrides)
|
||||
|
||||
## Special Parameter Behaviors
|
||||
|
||||
**Hidden Parameters**: Parameters like `llmRef` are marked `hidden: true` in the registry and never appear in the UI, but remain functional for system use.
|
||||
|
||||
**Nullable Parameters**: Parameters with `nullable` configuration can be explicitly set to `null` to prevent transmission to the API, distinct from being undefined.
|
||||
|
||||
**Range Overrides**: Models can override parameter ranges (e.g., different Gemini models support different thinking budget ranges).
|
||||
|
||||
**Parameter Interactions**: The UI implements business logic like disabling web search when reasoning effort is 'minimal'.
|
||||
|
||||
## Type Safety Mechanisms
|
||||
|
||||
The system maintains type safety through:
|
||||
- `DModelParameterId` union from registry keys
|
||||
- `DModelParameterValue<T>` conditional types for values
|
||||
- `DModelParameterSpecAny` interfaces for specifications
|
||||
- Runtime validation via Zod schemas at API boundaries
|
||||
|
||||
## Model Variant Pattern
|
||||
|
||||
Some vendors use model variants to enable features, for instance:
|
||||
- **Anthropic**: Creates separate `idVariant: 'thinking'` entries forcing value of hidden parameters
|
||||
- **Google/OpenAI**: Parameters directly on base models
|
||||
|
||||
## Migration and Compatibility
|
||||
|
||||
The architecture supports parameter evolution:
|
||||
- **Version Coexistence**: Both `llmVndOaiReasoningEffort` and `llmVndOaiReasoningEffort4` exist simultaneously
|
||||
- **Precedence Rules**: Newer parameters take priority during AIX translation
|
||||
- **Graceful Degradation**: Unknown parameters log warnings but don't break functionality
|
||||
|
||||
## Key Implementation Files
|
||||
|
||||
- **Registry**: `src/common/stores/llms/llms.parameters.ts`
|
||||
- **Specifications**: `src/modules/llms/server/llm.server.types.ts`
|
||||
- **UI Controls**: `src/modules/llms/models-modal/LLMParametersEditor.tsx`
|
||||
- **AIX Translation**: `src/modules/aix/client/aix.client.ts`
|
||||
- **Wire Types**: `src/modules/aix/server/api/aix.wiretypes.ts`
|
||||
- **Vendor Adapters**: `src/modules/aix/server/dispatch/chatGenerate/adapters/*.ts`
|
||||
@@ -0,0 +1,151 @@
|
||||
# Big-AGI Routing & Display States
|
||||
|
||||
This document describes the routing architecture and display state hierarchy in Big-AGI, from top-level providers down to component-level states.
|
||||
|
||||
## Overview
|
||||
|
||||
Big-AGI uses Next.js Pages Router with a provider stack that determines what users see based on application state and configuration.
|
||||
|
||||
## Quick Reference: Route Configurations
|
||||
|
||||
| Route | Purpose | Key Features |
|
||||
|-------|---------|--------------|
|
||||
| `/` | Main chat app | Default application |
|
||||
| `/call` | Voice interface | Voice-to-voice AI conversations |
|
||||
| `/personas` | Persona management | Create and manage AI personas |
|
||||
| ... | | |
|
||||
|
||||
## Decision Flow Diagram
|
||||
|
||||
The routing decisions follow a hierarchy from system-level provider configuration down to component-level states.
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
Start([Navigate to Route]) --> Root[_app.tsx]
|
||||
|
||||
Root --> Theme[ProviderTheming]
|
||||
Theme --> Error[ErrorBoundary]
|
||||
Error --> Bootstrap[ProviderBootstrapLogic]
|
||||
|
||||
Bootstrap --> BootCheck{Bootstrap Checks}
|
||||
BootCheck -->|News| News[↗️ /news]
|
||||
BootCheck -->|Continue| Router{Router}
|
||||
|
||||
Router -->|/| Chat[Chat App]
|
||||
Router -->|/personas,/call,/beam...| OtherApps[Other Apps]
|
||||
Router -->|/news| NewsApp[News App]
|
||||
|
||||
Chat --> ChatStates{Chat States}
|
||||
|
||||
ChatStates -->|No Models| ZeroModels[🟡 Setup Models]
|
||||
ChatStates -->|No Conv| ZeroConv[🟡 Select Chat]
|
||||
ChatStates -->|No Msgs| PersonaGrid[Choose Persona]
|
||||
ChatStates -->|Ready| Active[🟢 Active Chat]
|
||||
|
||||
Active --> Features[Features:<br/>• Chat Bar<br/>• Beam Mode<br/>• Attachments]
|
||||
|
||||
style ZeroModels fill:#fff4cc
|
||||
style ZeroConv fill:#fff4cc
|
||||
style Active fill:#ccffcc
|
||||
style Chat fill:#f0f8ff
|
||||
style OtherApps fill:#f0f8ff
|
||||
style NewsApp fill:#f0f8ff
|
||||
```
|
||||
|
||||
## Display State Hierarchy
|
||||
|
||||
```
|
||||
_app.tsx (Root)
|
||||
├── ProviderTheming ← Always Applied
|
||||
├── ErrorBoundary ← Always Applied
|
||||
├── ProviderBootstrapLogic ← Always Applied
|
||||
│ ├── Tiktoken preload & Model auto-config
|
||||
│ ├── Storage maintenance & cleanup
|
||||
│ └── News Redirect (if conditions met)
|
||||
│
|
||||
└── Page Component
|
||||
├── AppChat (/) → Default app
|
||||
│ ├── CMLZeroModels → If no models configured
|
||||
│ ├── CMLZeroConversation → If no conversation selected
|
||||
│ └── PersonaGrid → If conversation empty
|
||||
│
|
||||
└── Other Apps → Personas, Call, Draw, News, Beam
|
||||
```
|
||||
|
||||
## Provider Stack
|
||||
|
||||
| Provider | Purpose | Key Functions |
|
||||
|----------|---------|---------------|
|
||||
| **ProviderTheming** | UI theme management | Theme switching, CSS variables |
|
||||
| **ErrorBoundary** | Error handling | Catches and displays errors gracefully |
|
||||
| **ProviderBootstrapLogic** | App initialization | • Tiktoken preload<br>• Model auto-config<br>• Storage cleanup<br>• News redirect logic |
|
||||
|
||||
For detailed initialization sequence and provider functions, see [app-startup-sequence.md](app-startup-sequence.md), if present.
|
||||
|
||||
## Application Routes
|
||||
|
||||
### Primary Apps
|
||||
- `/` → AppChat (default)
|
||||
- `/call` → Voice call interface
|
||||
- `/beam` → Multi-model reasoning
|
||||
- `/draw` → Image generation
|
||||
- `/personas` → Personas app
|
||||
- `/news` → News/updates
|
||||
|
||||
### Zero States
|
||||
|
||||
#### Chat App Zero States
|
||||
|
||||
**CMLZeroModels**
|
||||
- **Location**: `/src/apps/chat/components/messages-list/CMLZeroModels.tsx`
|
||||
- **Triggered**: No LLM sources configured
|
||||
- **Shows**: Welcome screen with "Setup Models" button
|
||||
|
||||
**CMLZeroConversation**
|
||||
- **Location**: `/src/apps/chat/components/messages-list/CMLZeroConversation.tsx`
|
||||
- **Triggered**: No conversation selected
|
||||
- **Shows**: "Select/create conversation" prompt
|
||||
|
||||
**PersonaGrid**
|
||||
- **App**: Chat (when conversation is empty)
|
||||
- **Triggered**: Conversation exists but has no messages
|
||||
- **Shows**: Persona selector interface
|
||||
|
||||
#### Feature-Specific Zero States
|
||||
|
||||
**Beam Tutorial**
|
||||
- **Feature**: Beam (multi-model reasoning)
|
||||
- **Component**: `ExplainerCarousel`
|
||||
- **Triggered**: First-time Beam usage
|
||||
- **Shows**: Interactive feature walkthrough
|
||||
|
||||
## Common Scenarios
|
||||
|
||||
### New User First Visit
|
||||
1. Navigates to `/` → Provider stack loads
|
||||
2. Bootstrap runs → No news redirect (first visit)
|
||||
3. Chat loads → **CMLZeroModels** (no models configured)
|
||||
4. User clicks "Setup Models" → Configuration flow
|
||||
|
||||
### Returning User with Saved State
|
||||
1. Navigates to `/` → Provider stack loads
|
||||
2. IndexedDB restores state → Previous conversation loaded
|
||||
3. Chat loads → **Active chat interface** (bypasses all zero states)
|
||||
4. All messages and context preserved from last session
|
||||
|
||||
### Shared Chat Viewer
|
||||
1. Navigates to `/link/chat/[id]` → Full provider stack
|
||||
2. Views read-only chat → May see "Import" option
|
||||
3. If importing → Checks for duplicates, creates new local conversation
|
||||
|
||||
## Storage System
|
||||
|
||||
Big-AGI uses a local-first architecture:
|
||||
- **Zustand** for reactive state management
|
||||
- **IndexedDB** for persistent storage via Zustand persist middleware
|
||||
- **Version-based migrations** for data structure upgrades
|
||||
|
||||
Key stores:
|
||||
- `app-chats`: Conversations and messages (IndexedDB)
|
||||
- `app-llms`: Model configurations (IndexedDB)
|
||||
- `app-ui`: UI preferences (localStorage)
|
||||
@@ -0,0 +1,13 @@
|
||||
# CSF - Client-Side Fetch
|
||||
|
||||
Client-Side Fetch (CSF) enables direct browser-to-API communication, bypassing the server for LLM requests. When enabled, the browser makes requests directly to vendor APIs (e.g., `api.openai.com`, `api.groq.com`) instead of routing through the Next.js server. This reduces latency, decreases server load, and is particularly useful for local models where the browser can communicate directly with Ollama or LM Studio.
|
||||
|
||||
## Implementation
|
||||
|
||||
CSF is implemented as an opt-in setting stored as `csf: boolean` in each vendor's service settings. The vendor interface exposes `csfAvailable?: (setup) => boolean` to determine if CSF can be enabled (typically checking if an API key or host is configured). The actual execution happens in `aix.client.direct-chatGenerate.ts` which dynamically imports when CSF is active, making direct fetch calls using the same wire protocols as the server.
|
||||
|
||||
All 17 supported vendors (OpenAI, Anthropic, Gemini, Ollama, LocalAI, Deepseek, Groq, Mistral, xAI, OpenRouter, Perplexity, Together AI, Alibaba, Moonshot, OpenPipe, LM Studio, Z.ai) support CSF. Cloud vendors require CORS support from the API provider (all tested vendors return `access-control-allow-origin: *`). Local vendors (Ollama, LocalAI, LM Studio) require CORS to be enabled on the local server.
|
||||
|
||||
## UI
|
||||
|
||||
The CSF toggle appears in each vendor's setup panel under "Advanced" settings, labeled "Direct Connection". It becomes visible when the prerequisites are met (API key present for cloud vendors, host configured for local vendors). The setting is managed through `useModelServiceClientSideFetch` hook which provides `csfAvailable`, `csfActive`, `csfToggle`, and `csfReset` for UI consumption.
|
||||
@@ -1,85 +0,0 @@
|
||||
import { readFile } from 'node:fs/promises';

// Build information: exposed to the client bundle via NEXT_PUBLIC_* env vars,
// read back at runtime by the app's release/build-info code.
process.env.NEXT_PUBLIC_BUILD_HASH = 'big-agi-2-dev';
// package.json version, read at config-evaluation time (top-level await is valid in .mjs)
process.env.NEXT_PUBLIC_BUILD_PKGVER = JSON.parse('' + await readFile(new URL('./package.json', import.meta.url))).version;
process.env.NEXT_PUBLIC_BUILD_TIMESTAMP = new Date().toISOString();
console.log(` 🧠 \x1b[1mbig-AGI\x1b[0m v${process.env.NEXT_PUBLIC_BUILD_PKGVER} (@${process.env.NEXT_PUBLIC_BUILD_HASH})`);

// Non-default build types, selected via the BIG_AGI_BUILD env var:
//  - 'standalone': self-contained server output (e.g. for Docker)
//  - 'static' -> Next.js 'export': fully static HTML export
//  - undefined: regular Next.js build
const buildType =
  process.env.BIG_AGI_BUILD === 'standalone' ? 'standalone'
    : process.env.BIG_AGI_BUILD === 'static' ? 'export'
      : undefined;

buildType && console.log(` 🧠 big-AGI: building for ${buildType}...\n`);

/** @type {import('next').NextConfig} */
let nextConfig = {
  reactStrictMode: true,

  // [exports] https://nextjs.org/docs/advanced-features/static-html-export
  // Only applied for non-default builds (standalone/export)
  ...buildType && {
    output: buildType,
    distDir: 'dist',

    // disable image optimization for exports
    images: { unoptimized: true },

    // Optional: Change links `/me` -> `/me/` and emit `/me.html` -> `/me/index.html`
    // trailingSlash: true,
  },

  // [puppeteer] https://github.com/puppeteer/puppeteer/issues/11052
  // NOTE: we may not be needing this anymore, as we use '@cloudflare/puppeteer'
  serverExternalPackages: ['puppeteer-core'],

  webpack: (config, { isServer }) => {
    // @mui/joy: anything material gets redirected to Joy
    config.resolve.alias['@mui/material'] = '@mui/joy';

    // @dqbd/tiktoken: enable asynchronous WebAssembly
    config.experiments = {
      asyncWebAssembly: true,
      layers: true,
    };

    // fix warnings for async functions in the browser (https://github.com/vercel/next.js/issues/64792)
    // (client-side bundles only)
    if (!isServer) {
      config.output.environment = { ...config.output.environment, asyncFunction: true };
    }

    // prevent too many small chunks (40kb min) on 'client' packs (not 'server' or 'edge-server')
    // noinspection JSUnresolvedReference
    if (typeof config.optimization.splitChunks === 'object' && config.optimization.splitChunks.minSize) {
      // noinspection JSUnresolvedReference
      config.optimization.splitChunks.minSize = 40 * 1024;
    }

    return config;
  },

  // Note: disabled to check whether the project becomes slower with this
  // modularizeImports: {
  //   '@mui/icons-material': {
  //     transform: '@mui/icons-material/{{member}}',
  //   },
  // },

  // Uncomment the following to leave console messages in production
  // compiler: {
  //   removeConsole: false,
  // },
};

// Validate environment variables, if set at build time. Will be actually read and used at runtime.
// This is the reason both this file and the server/env.mjs files have this extension.
await import('./src/server/env.mjs');

// conditionally enable the nextjs bundle analyzer (ANALYZE_BUNDLE env var)
if (process.env.ANALYZE_BUNDLE) {
  const { default: withBundleAnalyzer } = await import('@next/bundle-analyzer');
  nextConfig = withBundleAnalyzer({ openAnalyzer: true })(nextConfig);
}

export default nextConfig;
|
||||
+160
@@ -0,0 +1,160 @@
|
||||
import type { NextConfig } from 'next';
|
||||
import type { WebpackConfigContext } from 'next/dist/server/config-shared';
|
||||
import { execSync } from 'node:child_process';
|
||||
import { readFileSync } from 'node:fs';
|
||||
|
||||
// Build information: from CI, or git commit hash
|
||||
let buildHash = process.env.NEXT_PUBLIC_BUILD_HASH || process.env.GITHUB_SHA || process.env.VERCEL_GIT_COMMIT_SHA; // Docker or custom, GitHub Actions, Vercel
|
||||
try {
|
||||
// fallback to local git commit hash
|
||||
if (!buildHash)
|
||||
buildHash = execSync('git rev-parse --short HEAD').toString().trim();
|
||||
} catch {
|
||||
// final fallback
|
||||
buildHash = '2-dev';
|
||||
}
|
||||
// The following are used by/available to Release.buildInfo(...)
|
||||
process.env.NEXT_PUBLIC_BUILD_HASH = (buildHash || '').slice(0, 10);
|
||||
process.env.NEXT_PUBLIC_BUILD_PKGVER = JSON.parse('' + readFileSync(new URL('./package.json', import.meta.url))).version;
|
||||
process.env.NEXT_PUBLIC_BUILD_TIMESTAMP = new Date().toISOString();
|
||||
process.env.NEXT_PUBLIC_DEPLOYMENT_TYPE = process.env.NEXT_PUBLIC_DEPLOYMENT_TYPE || (process.env.VERCEL_ENV ? `vercel-${process.env.VERCEL_ENV}` : 'local'); // Docker or custom, Vercel
|
||||
console.log(` 🧠 \x1b[1mbig-AGI\x1b[0m v${process.env.NEXT_PUBLIC_BUILD_PKGVER} (@${process.env.NEXT_PUBLIC_BUILD_HASH})`);
|
||||
|
||||
// Non-default build types
|
||||
const buildType =
|
||||
process.env.BIG_AGI_BUILD === 'standalone' ? 'standalone' as const
|
||||
: process.env.BIG_AGI_BUILD === 'static' ? 'export' as const
|
||||
: undefined;
|
||||
|
||||
buildType && console.log(` 🧠 big-AGI: building for ${buildType}...\n`);
|
||||
|
||||
/** @type {import('next').NextConfig} */
|
||||
let nextConfig: NextConfig = {
|
||||
reactStrictMode: !process.env.NO_STRICT_MODE, // default: enabled
|
||||
|
||||
// [exports] https://nextjs.org/docs/advanced-features/static-html-export
|
||||
...(buildType && {
|
||||
output: buildType,
|
||||
distDir: 'dist',
|
||||
|
||||
// disable image optimization for exports
|
||||
images: { unoptimized: true },
|
||||
|
||||
// Optional: Change links `/me` -> `/me/` and emit `/me.html` -> `/me/index.html`
|
||||
// trailingSlash: true,
|
||||
}),
|
||||
|
||||
// [puppeteer] https://github.com/puppeteer/puppeteer/issues/11052
|
||||
// NOTE: we may not be needing this anymore, as we use '@cloudflare/puppeteer'
|
||||
serverExternalPackages: ['puppeteer-core'],
|
||||
|
||||
webpack: (config: any, { isServer, webpack /*, dev, nextRuntime*/ }: WebpackConfigContext) => {
|
||||
// @mui/joy: anything material gets redirected to Joy
|
||||
config.resolve.alias['@mui/material'] = '@mui/joy';
|
||||
|
||||
// @dqbd/tiktoken: enable asynchronous WebAssembly
|
||||
config.experiments = {
|
||||
asyncWebAssembly: true,
|
||||
layers: true,
|
||||
};
|
||||
|
||||
// client-side bundling
|
||||
if (!isServer) {
|
||||
/**
|
||||
* AIX client-side
|
||||
* We replace certain server-only modules with client-side mocks, to reuse the exact same imports
|
||||
* while avoiding importing server-only code which would break the build or break at runtime.
|
||||
*/
|
||||
const serverToClientMocks: ReadonlyArray<[RegExp, string]> = [
|
||||
[/\/posthog\.server/, '/posthog.client-mock'],
|
||||
[/\/env\.server/, '/env.client-mock'],
|
||||
];
|
||||
config.plugins = [
|
||||
...config.plugins,
|
||||
...serverToClientMocks.map(([pattern, replacement]) =>
|
||||
new webpack.NormalModuleReplacementPlugin(pattern, (resource: any) => {
|
||||
// console.log(' 🧠 [WEBPACK REPLACEMENT]:', resource.request, '->', resource.request.replace(pattern, replacement));
|
||||
resource.request = resource.request.replace(pattern, replacement);
|
||||
}),
|
||||
),
|
||||
];
|
||||
|
||||
// cosmetic: fix warnings for (absent!) top-level awaits in the browser (https://github.com/vercel/next.js/issues/64792)
|
||||
config.output.environment = { ...config.output.environment, asyncFunction: true };
|
||||
}
|
||||
|
||||
// prevent too many small chunks (40kb min) on 'client' packs (not 'server' or 'edge-server')
|
||||
// noinspection JSUnresolvedReference
|
||||
if (typeof config.optimization.splitChunks === 'object' && config.optimization.splitChunks.minSize) {
|
||||
// noinspection JSUnresolvedReference
|
||||
config.optimization.splitChunks.minSize = 40 * 1024;
|
||||
}
|
||||
|
||||
return config;
|
||||
},
|
||||
|
||||
// Optional Analytics > PostHog
|
||||
skipTrailingSlashRedirect: true, // required to support PostHog trailing slash API requests
|
||||
async rewrites() {
|
||||
return [
|
||||
{
|
||||
source: '/a/ph/static/:path*',
|
||||
destination: 'https://us-assets.i.posthog.com/static/:path*',
|
||||
},
|
||||
{
|
||||
source: '/a/ph/:path*',
|
||||
destination: 'https://us.i.posthog.com/:path*',
|
||||
},
|
||||
{
|
||||
source: '/a/ph/decide',
|
||||
destination: 'https://us.i.posthog.com/decide',
|
||||
},
|
||||
{
|
||||
source: '/a/ph/flags',
|
||||
destination: 'https://us.i.posthog.com/flags',
|
||||
},
|
||||
];
|
||||
},
|
||||
|
||||
// Note: disabled to check whether the project becomes slower with this
|
||||
// modularizeImports: {
|
||||
// '@mui/icons-material': {
|
||||
// transform: '@mui/icons-material/{{member}}',
|
||||
// },
|
||||
// },
|
||||
|
||||
// Uncomment the following leave console messages in production
|
||||
// compiler: {
|
||||
// removeConsole: false,
|
||||
// },
|
||||
};
|
||||
|
||||
// Validate environment variables at build time, if required. Server env vars will be actually read and used at runtime (cloud/edge).
|
||||
import { env as validateEnv } from '~/server/env.server';
|
||||
void validateEnv; // Triggers env validation - throws if required vars are missing
|
||||
|
||||
// PostHog error reporting with source maps for production builds
|
||||
import { withPostHogConfig } from '@posthog/nextjs-config';
|
||||
if (process.env.POSTHOG_API_KEY && process.env.POSTHOG_ENV_ID) {
|
||||
console.log(' 🧠 \x1b[1mbig-AGI\x1b[0m: building with PostHog issue reporting and source maps...');
|
||||
nextConfig = withPostHogConfig(nextConfig, {
|
||||
personalApiKey: process.env.POSTHOG_API_KEY,
|
||||
envId: process.env.POSTHOG_ENV_ID,
|
||||
host: 'https://us.i.posthog.com', // backtrace upload host
|
||||
logLevel: 'error', // lowered, too noisy
|
||||
sourcemaps: {
|
||||
enabled: process.env.NODE_ENV === 'production',
|
||||
project: 'big-agi',
|
||||
version: process.env.NEXT_PUBLIC_BUILD_HASH,
|
||||
deleteAfterUpload: false, // false: leave them in the tree, which would also help debugging of open-source installs
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// conditionally enable the nextjs bundle analyzer
|
||||
import withBundleAnalyzer from '@next/bundle-analyzer';
|
||||
if (process.env.ANALYZE_BUNDLE) {
|
||||
nextConfig = withBundleAnalyzer({ openAnalyzer: true })(nextConfig) as NextConfig;
|
||||
}
|
||||
|
||||
export default nextConfig;
|
||||
Generated
+4507
-1562
File diff suppressed because it is too large
Load Diff
+58
-66
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "big-agi",
|
||||
"version": "1.91.0",
|
||||
"version": "2.0.3",
|
||||
"private": true,
|
||||
"author": "Enrico Ros <enrico.ros@gmail.com>",
|
||||
"repository": "https://github.com/enricoros/big-agi",
|
||||
@@ -12,9 +12,11 @@
|
||||
"start": "next start",
|
||||
"lint": "next lint",
|
||||
"postinstall": "prisma generate --no-hints",
|
||||
"gen:icon-sprites": "node tools/develop/gen-icon-sprites/generate-llm-sprites.ts",
|
||||
"db:push": "prisma db push",
|
||||
"db:studio": "prisma studio",
|
||||
"vercel:env:pull": "npx vercel env pull .env.development.local"
|
||||
"vercel:env:pull": "npx vercel env pull .env.development.local",
|
||||
"sharp:win32_x64": "npm install --os=win32 --cpu=x64 sharp"
|
||||
},
|
||||
"prisma": {
|
||||
"schema": "src/server/prisma/schema.prisma"
|
||||
@@ -27,84 +29,74 @@
|
||||
"@emotion/cache": "^11.14.0",
|
||||
"@emotion/react": "^11.14.0",
|
||||
"@emotion/server": "^11.11.0",
|
||||
"@emotion/styled": "^11.14.0",
|
||||
"@mui/icons-material": "^5.16.14",
|
||||
"@mui/joy": "^5.0.0-beta.51",
|
||||
"@mui/material": "^5.16.14",
|
||||
"@next/bundle-analyzer": "^15.1.4",
|
||||
"@next/third-parties": "^15.1.4",
|
||||
"@emotion/styled": "^11.14.1",
|
||||
"@googleworkspace/drive-picker-react": "^0.2.0",
|
||||
"@mui/icons-material": "^5.18.0",
|
||||
"@mui/joy": "^5.0.0-beta.52",
|
||||
"@next/bundle-analyzer": "~15.1.12",
|
||||
"@prisma/client": "~5.22.0",
|
||||
"@t3-oss/env-nextjs": "^0.11.1",
|
||||
"@tanstack/react-query": "^5.63.0",
|
||||
"@trpc/client": "11.0.0-rc.688",
|
||||
"@trpc/next": "11.0.0-rc.688",
|
||||
"@trpc/react-query": "11.0.0-rc.688",
|
||||
"@trpc/server": "11.0.0-rc.688",
|
||||
"@vercel/analytics": "^1.4.1",
|
||||
"@vercel/speed-insights": "^1.1.0",
|
||||
"browser-fs-access": "^0.35.0",
|
||||
"cheerio": "^1.0.0",
|
||||
"dexie": "^4.0.10",
|
||||
"dexie-react-hooks": "^1.1.7",
|
||||
"diff": "^7.0.0",
|
||||
"eventsource-parser": "^3.0.0",
|
||||
"idb-keyval": "^6.2.1",
|
||||
"mammoth": "^1.9.0",
|
||||
"nanoid": "^5.0.9",
|
||||
"next": "^15.1.4",
|
||||
"@tanstack/react-query": "5.90.10",
|
||||
"@tanstack/react-virtual": "^3.13.18",
|
||||
"@trpc/client": "11.5.1",
|
||||
"@trpc/next": "11.5.1",
|
||||
"@trpc/react-query": "11.5.1",
|
||||
"@trpc/server": "11.5.1",
|
||||
"@vercel/analytics": "^1.6.1",
|
||||
"@vercel/speed-insights": "^1.3.1",
|
||||
"browser-fs-access": "^0.38.0",
|
||||
"cheerio": "^1.1.2",
|
||||
"csv-stringify": "^6.6.0",
|
||||
"dexie": "~4.0.11",
|
||||
"dexie-react-hooks": "~1.1.7",
|
||||
"diff": "^8.0.3",
|
||||
"eventemitter3": "^5.0.4",
|
||||
"idb-keyval": "^6.2.2",
|
||||
"mammoth": "^1.11.0",
|
||||
"nanoid": "^5.1.6",
|
||||
"next": "~15.1.12",
|
||||
"nprogress": "^0.2.0",
|
||||
"pdfjs-dist": "4.10.38",
|
||||
"plantuml-encoder": "^1.4.0",
|
||||
"prismjs": "^1.29.0",
|
||||
"pdfjs-dist": "5.4.54",
|
||||
"posthog-js": "^1.341.0",
|
||||
"posthog-node": "^5.24.10",
|
||||
"prismjs": "^1.30.0",
|
||||
"puppeteer-core": "^24.36.1",
|
||||
"react": "^18.3.1",
|
||||
"react-csv": "^2.2.2",
|
||||
"react-dom": "^18.3.1",
|
||||
"react-hook-form": "^7.54.2",
|
||||
"react-katex": "^3.0.1",
|
||||
"react-markdown": "^9.0.3",
|
||||
"react-player": "^2.16.0",
|
||||
"react-resizable-panels": "^2.1.7",
|
||||
"react-timeago": "^7.2.0",
|
||||
"react-hook-form": "^7.71.1",
|
||||
"react-markdown": "^10.1.0",
|
||||
"react-player": "^3.4.0",
|
||||
"react-resizable-panels": "^3.0.6",
|
||||
"react-timeago": "^8.3.0",
|
||||
"rehype-katex": "^7.0.1",
|
||||
"remark-gfm": "^4.0.0",
|
||||
"remark-gfm": "^4.0.1",
|
||||
"remark-mark-highlight": "^0.1.1",
|
||||
"remark-math": "^6.0.0",
|
||||
"sharp": "^0.33.5",
|
||||
"superjson": "^2.2.2",
|
||||
"tesseract.js": "^6.0.0",
|
||||
"tiktoken": "^1.0.18",
|
||||
"turndown": "^7.2.0",
|
||||
"zod": "^3.24.1",
|
||||
"zod-to-json-schema": "^3.24.1",
|
||||
"zustand": "^5.0.3"
|
||||
"sharp": "^0.34.5",
|
||||
"superjson": "^2.2.6",
|
||||
"tesseract.js": "^7.0.0",
|
||||
"tiktoken": "^1.0.22",
|
||||
"turndown": "^7.2.2",
|
||||
"zod": "^4.3.6",
|
||||
"zustand": "5.0.7"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/diff": "^7.0.0",
|
||||
"@types/node": "^22.10.5",
|
||||
"@posthog/nextjs-config": "~1.6.4",
|
||||
"@types/node": "^25.2.0",
|
||||
"@types/nprogress": "^0.2.3",
|
||||
"@types/plantuml-encoder": "^1.4.2",
|
||||
"@types/prismjs": "^1.26.5",
|
||||
"@types/react": "^18.3.18",
|
||||
"@types/react-beautiful-dnd": "^13.1.8",
|
||||
"@types/react": "^19.2.11",
|
||||
"@types/react-csv": "^1.1.10",
|
||||
"@types/react-dom": "^18.3.5",
|
||||
"@types/react-katex": "^3.0.4",
|
||||
"@types/react-timeago": "^4.1.7",
|
||||
"@types/turndown": "^5.0.5",
|
||||
"cross-env": "^7.0.3",
|
||||
"eslint": "^9.17.0",
|
||||
"eslint-config-next": "^15.1.4",
|
||||
"prettier": "^3.4.2",
|
||||
"@types/react-dom": "^19.2.3",
|
||||
"@types/turndown": "^5.0.6",
|
||||
"cross-env": "^10.1.0",
|
||||
"eslint": "^9.39.2",
|
||||
"eslint-config-next": "~15.1.12",
|
||||
"prettier": "^3.8.1",
|
||||
"prisma": "~5.22.0",
|
||||
"puppeteer-core": "^23.11.1",
|
||||
"typescript": "^5.7.3"
|
||||
"tsx": "^4.21.0",
|
||||
"typescript": "^5.9.3"
|
||||
},
|
||||
"engines": {
|
||||
"node": "^22.0.0 || ^20.0.0"
|
||||
},
|
||||
"overrides": {
|
||||
"@types/react": "^18.3.18",
|
||||
"@types/react-dom": "^18.3.5",
|
||||
"uri-js": "npm:uri-js-replace"
|
||||
"node": "^24.0.0 || ^22.0.0 || ^20.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
+19
-9
@@ -1,12 +1,17 @@
|
||||
import * as React from 'react';
|
||||
import Head from 'next/head';
|
||||
import dynamic from 'next/dynamic';
|
||||
import { MyAppProps } from 'next/app';
|
||||
import { Analytics as VercelAnalytics } from '@vercel/analytics/next';
|
||||
import { SpeedInsights as VercelSpeedInsights } from '@vercel/speed-insights/next';
|
||||
|
||||
import { Brand } from '~/common/app.config';
|
||||
import { apiQuery } from '~/common/util/trpc.client';
|
||||
|
||||
|
||||
// [server-client-safe] dynamic imports to avoid webpack bundling issues with next/navigation
|
||||
const VercelAnalytics = dynamic(() => import('@vercel/analytics/next').then(mod => mod.Analytics), { ssr: false });
|
||||
const VercelSpeedInsights = dynamic(() => import('@vercel/speed-insights/next').then(mod => mod.SpeedInsights), { ssr: false });
|
||||
|
||||
|
||||
import 'katex/dist/katex.min.css';
|
||||
import '~/common/styles/CodePrism.css';
|
||||
import '~/common/styles/GithubMarkdown.css';
|
||||
@@ -14,6 +19,7 @@ import '~/common/styles/NProgress.css';
|
||||
import '~/common/styles/agi.effects.css';
|
||||
import '~/common/styles/app.styles.css';
|
||||
|
||||
import { ErrorBoundary } from '~/common/components/ErrorBoundary';
|
||||
import { Is } from '~/common/util/pwaUtils';
|
||||
import { OverlaysInsert } from '~/common/layout/overlays/OverlaysInsert';
|
||||
import { ProviderBackendCapabilities } from '~/common/providers/ProviderBackendCapabilities';
|
||||
@@ -21,7 +27,8 @@ import { ProviderBootstrapLogic } from '~/common/providers/ProviderBootstrapLogi
|
||||
import { ProviderSingleTab } from '~/common/providers/ProviderSingleTab';
|
||||
import { ProviderTheming } from '~/common/providers/ProviderTheming';
|
||||
import { SnackbarInsert } from '~/common/components/snackbar/SnackbarInsert';
|
||||
import { hasGoogleAnalytics, OptionalGoogleAnalytics } from '~/common/components/GoogleAnalytics';
|
||||
import { hasGoogleAnalytics, OptionalGoogleAnalytics } from '~/common/components/3rdparty/GoogleAnalytics';
|
||||
import { hasPostHogAnalytics, OptionalPostHogAnalytics } from '~/common/components/3rdparty/PostHogAnalytics';
|
||||
|
||||
|
||||
const Big_AGI_App = ({ Component, emotionCache, pageProps }: MyAppProps) => {
|
||||
@@ -42,18 +49,21 @@ const Big_AGI_App = ({ Component, emotionCache, pageProps }: MyAppProps) => {
|
||||
<ProviderSingleTab>
|
||||
<ProviderBackendCapabilities>
|
||||
{/* ^ Backend capabilities & SSR boundary */}
|
||||
<ProviderBootstrapLogic>
|
||||
<SnackbarInsert />
|
||||
{getLayout(<Component {...pageProps} />)}
|
||||
<OverlaysInsert />
|
||||
</ProviderBootstrapLogic>
|
||||
<ErrorBoundary outer>
|
||||
<ProviderBootstrapLogic>
|
||||
<SnackbarInsert />
|
||||
{getLayout(<Component {...pageProps} />)}
|
||||
<OverlaysInsert />
|
||||
</ProviderBootstrapLogic>
|
||||
</ErrorBoundary>
|
||||
</ProviderBackendCapabilities>
|
||||
</ProviderSingleTab>
|
||||
</ProviderTheming>
|
||||
|
||||
{hasGoogleAnalytics && <OptionalGoogleAnalytics />}
|
||||
{hasPostHogAnalytics && <OptionalPostHogAnalytics />}
|
||||
{Is.Deployment.VercelFromFrontend && <VercelAnalytics debug={false} />}
|
||||
{Is.Deployment.VercelFromFrontend && <VercelSpeedInsights debug={false} sampleRate={1 / 2} />}
|
||||
{hasGoogleAnalytics && <OptionalGoogleAnalytics />}
|
||||
|
||||
</>;
|
||||
};
|
||||
|
||||
+4
-1
@@ -100,6 +100,10 @@ MyDocument.getInitialProps = async (ctx: DocumentContext) => {
|
||||
});
|
||||
|
||||
const initialProps = await Document.getInitialProps(ctx);
|
||||
|
||||
// Inject the comment before the HTML tag
|
||||
initialProps.html = `<!-- ❤ Built with Big-AGI -->\n${initialProps.html}`;
|
||||
|
||||
// This is important. It prevents Emotion to render invalid HTML.
|
||||
// See https://github.com/mui/material-ui/issues/26561#issuecomment-855286153
|
||||
const emotionStyles = extractCriticalToChunks(initialProps.html);
|
||||
@@ -107,7 +111,6 @@ MyDocument.getInitialProps = async (ctx: DocumentContext) => {
|
||||
<style
|
||||
data-emotion={`${style.key} ${style.ids.join(' ')}`}
|
||||
key={style.key}
|
||||
// eslint-disable-next-line react/no-danger
|
||||
dangerouslySetInnerHTML={{ __html: style.css }}
|
||||
/>
|
||||
));
|
||||
|
||||
@@ -18,18 +18,18 @@ import { ROUTE_APP_CHAT, ROUTE_INDEX } from '~/common/app.routes';
|
||||
import { Release } from '~/common/app.release';
|
||||
|
||||
// capabilities access
|
||||
import { useCapabilityBrowserSpeechRecognition, useCapabilityElevenLabs, useCapabilityTextToImage } from '~/common/components/useCapabilities';
|
||||
import { useCapabilityBrowserSpeechRecognition, useCapabilityTextToImage } from '~/common/components/useCapabilities';
|
||||
|
||||
// stores access
|
||||
import { getLLMsDebugInfo } from '~/common/stores/llms/store-llms';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { useFolderStore } from '~/common/stores/folders/store-chat-folders';
|
||||
import { useLogicSherpaStore } from '~/common/logic/store-logic-sherpa';
|
||||
import { useUXLabsStore } from '~/common/state/store-ux-labs';
|
||||
import { useUXLabsStore } from '~/common/stores/store-ux-labs';
|
||||
|
||||
// utils access
|
||||
import { BrowserLang, clientHostName, Is, isPwa } from '~/common/util/pwaUtils';
|
||||
import { getGA4MeasurementId } from '~/common/components/GoogleAnalytics';
|
||||
import { getGA4MeasurementId } from '~/common/components/3rdparty/GoogleAnalytics';
|
||||
import { prettyTimestampForFilenames } from '~/common/util/timeUtils';
|
||||
import { supportsClipboardRead } from '~/common/util/clipboardUtils';
|
||||
import { supportsScreenCapture } from '~/common/util/screenCaptureUtils';
|
||||
@@ -95,7 +95,6 @@ function AppDebug() {
|
||||
const cProduct = {
|
||||
capabilities: {
|
||||
mic: useCapabilityBrowserSpeechRecognition(),
|
||||
elevenLabs: useCapabilityElevenLabs(),
|
||||
textToImage: useCapabilityTextToImage(),
|
||||
},
|
||||
models: getLLMsDebugInfo(),
|
||||
@@ -109,7 +108,6 @@ function AppDebug() {
|
||||
reloads: usageCount,
|
||||
},
|
||||
release: {
|
||||
app: Release.App,
|
||||
build: frontendBuild,
|
||||
},
|
||||
};
|
||||
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 2.3 MiB |
File diff suppressed because one or more lines are too long
@@ -10,7 +10,6 @@ import { createBeamVanillaStore } from '~/modules/beam/store-beam_vanilla';
|
||||
import { OptimaToolbarIn } from '~/common/layout/optima/portals/OptimaPortalsIn';
|
||||
import { createDConversation, DConversation } from '~/common/stores/chat/chat.conversation';
|
||||
import { createDMessageTextContent, DMessage } from '~/common/stores/chat/chat.message';
|
||||
import { getChatLLMId } from '~/common/stores/llms/store-llms';
|
||||
import { useIsMobile } from '~/common/components/useMatchMedia';
|
||||
|
||||
|
||||
@@ -21,8 +20,8 @@ function initTestConversation(): DConversation {
|
||||
return conversation;
|
||||
}
|
||||
|
||||
function initTestBeamStore(messages: DMessage[], beamStore: BeamStoreApi = createBeamVanillaStore()): BeamStoreApi {
|
||||
beamStore.getState().open(messages, getChatLLMId(), false, (content) => alert(content));
|
||||
function initTestBeamStore(messages: DMessage[], beamStore: BeamStoreApi): BeamStoreApi {
|
||||
beamStore.getState().open(messages, null, false, (content) => alert(content));
|
||||
return beamStore;
|
||||
}
|
||||
|
||||
|
||||
@@ -6,15 +6,17 @@ import ChatIcon from '@mui/icons-material/Chat';
|
||||
import CheckRoundedIcon from '@mui/icons-material/CheckRounded';
|
||||
import CloseRoundedIcon from '@mui/icons-material/CloseRounded';
|
||||
import MicIcon from '@mui/icons-material/Mic';
|
||||
import RecordVoiceOverTwoToneIcon from '@mui/icons-material/RecordVoiceOverTwoTone';
|
||||
import WarningRoundedIcon from '@mui/icons-material/WarningRounded';
|
||||
|
||||
import { useSpeexGlobalEngine } from '~/modules/speex/store-module-speex';
|
||||
|
||||
import { PhVoice } from '~/common/components/icons/phosphor/PhVoice';
|
||||
import { animationColorRainbow } from '~/common/util/animUtils';
|
||||
import { navigateBack } from '~/common/app.routes';
|
||||
import { optimaOpenPreferences } from '~/common/layout/optima/useOptima';
|
||||
import { useCapabilityBrowserSpeechRecognition, useCapabilityElevenLabs } from '~/common/components/useCapabilities';
|
||||
import { useCapabilityBrowserSpeechRecognition } from '~/common/components/useCapabilities';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { useUICounter } from '~/common/state/store-ui';
|
||||
import { useUICounter } from '~/common/stores/store-ui';
|
||||
|
||||
|
||||
function StatusCard(props: { icon: React.JSX.Element, hasIssue: boolean, text: string, button?: React.JSX.Element }) {
|
||||
@@ -45,7 +47,7 @@ export function CallWizard(props: { strict?: boolean, conversationId: string | n
|
||||
|
||||
// external state
|
||||
const recognition = useCapabilityBrowserSpeechRecognition();
|
||||
const synthesis = useCapabilityElevenLabs();
|
||||
const speexGlobalEngine = useSpeexGlobalEngine();
|
||||
const chatIsEmpty = useChatStore(state => {
|
||||
if (!props.conversationId)
|
||||
return false;
|
||||
@@ -56,17 +58,18 @@ export function CallWizard(props: { strict?: boolean, conversationId: string | n
|
||||
|
||||
// derived state
|
||||
const outOfTheBlue = !props.conversationId;
|
||||
const overriddenEmptyChat = chatEmptyOverride || !chatIsEmpty;
|
||||
const overriddenEmptyChat = outOfTheBlue || chatEmptyOverride || !chatIsEmpty;
|
||||
const overriddenRecognition = recognitionOverride || recognition.mayWork;
|
||||
const allGood = overriddenEmptyChat && overriddenRecognition && synthesis.mayWork;
|
||||
const fatalGood = overriddenRecognition && synthesis.mayWork;
|
||||
const synthesisShallWork = !!speexGlobalEngine;
|
||||
const allGood = overriddenEmptyChat && overriddenRecognition && synthesisShallWork;
|
||||
const fatalGood = overriddenRecognition && synthesisShallWork;
|
||||
|
||||
|
||||
const handleOverrideChatEmpty = React.useCallback(() => setChatEmptyOverride(true), []);
|
||||
|
||||
const handleOverrideRecognition = React.useCallback(() => setRecognitionOverride(true), []);
|
||||
|
||||
const handleConfigureElevenLabs = React.useCallback(() => optimaOpenPreferences('voice'), []);
|
||||
const handleConfigureVoice = React.useCallback(() => optimaOpenPreferences('voice'), []);
|
||||
|
||||
const handleFinishButton = React.useCallback(() => {
|
||||
if (!allGood)
|
||||
@@ -128,17 +131,17 @@ export function CallWizard(props: { strict?: boolean, conversationId: string | n
|
||||
|
||||
{/* Text to Speech status */}
|
||||
<StatusCard
|
||||
icon={<RecordVoiceOverTwoToneIcon />}
|
||||
icon={<PhVoice />}
|
||||
text={
|
||||
(synthesis.mayWork ? 'Voice synthesis should be ready.' : 'There might be an issue with ElevenLabs voice synthesis.')
|
||||
+ (synthesis.isConfiguredServerSide ? '' : (synthesis.isConfiguredClientSide ? '' : ' Please add your API key in the settings.'))
|
||||
(synthesisShallWork ? 'Voice synthesis should be ready.' : 'There might be an issue with voice synthesis.')
|
||||
// + (synthesis.isConfiguredServerSide ? '' : (synthesis.isConfiguredClientSide ? '' : ' Please add your API key in the settings.'))
|
||||
}
|
||||
button={synthesis.mayWork ? undefined : (
|
||||
<Button variant='outlined' onClick={handleConfigureElevenLabs} sx={{ mx: 1 }}>
|
||||
button={synthesisShallWork ? undefined : (
|
||||
<Button variant='outlined' onClick={handleConfigureVoice} sx={{ mx: 1 }}>
|
||||
Configure
|
||||
</Button>
|
||||
)}
|
||||
hasIssue={!synthesis.mayWork}
|
||||
hasIssue={!synthesisShallWork}
|
||||
/>
|
||||
|
||||
{/*<Typography>*/}
|
||||
|
||||
+35
-28
@@ -5,11 +5,11 @@ import { Avatar, Box, Card, CardContent, Chip, IconButton, Link as MuiLink, List
|
||||
import CallIcon from '@mui/icons-material/Call';
|
||||
|
||||
import { GitHubProjectIssueCard } from '~/common/components/GitHubProjectIssueCard';
|
||||
import { OptimaPanelGroup } from '~/common/layout/optima/panel/OptimaPanelGroup';
|
||||
import { OptimaPanelGroupedList } from '~/common/layout/optima/panel/OptimaPanelGroupedList';
|
||||
import { OptimaPanelIn } from '~/common/layout/optima/portals/OptimaPortalsIn';
|
||||
import { animationShadowRingLimey } from '~/common/util/animUtils';
|
||||
import { conversationTitle, DConversation, DConversationId } from '~/common/stores/chat/chat.conversation';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { useSetOptimaAppMenu } from '~/common/layout/optima/useOptima';
|
||||
|
||||
import type { AppCallIntent } from './AppCall';
|
||||
import { MockPersona, useMockPersonas } from './state/useMockPersonas';
|
||||
@@ -210,7 +210,7 @@ function useConversationsByPersona() {
|
||||
}
|
||||
|
||||
|
||||
export function Contacts(props: { setCallIntent: (intent: AppCallIntent) => void }) {
|
||||
function ContactsMenuItems() {
|
||||
|
||||
// external state
|
||||
const {
|
||||
@@ -218,36 +218,43 @@ export function Contacts(props: { setCallIntent: (intent: AppCallIntent) => void
|
||||
showConversations, toggleShowConversations,
|
||||
showSupport, toggleShowSupport,
|
||||
} = useAppCallStore();
|
||||
|
||||
return (
|
||||
<OptimaPanelGroupedList title='Contacts Settings'>
|
||||
|
||||
<MenuItem onClick={toggleGrayUI}>
|
||||
Grayed UI
|
||||
<Switch checked={grayUI} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem onClick={toggleShowConversations}>
|
||||
Conversations
|
||||
<Switch checked={showConversations} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem onClick={toggleShowSupport}>
|
||||
Show Support
|
||||
<Switch checked={showSupport} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
</OptimaPanelGroupedList>
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
export function Contacts(props: { setCallIntent: (intent: AppCallIntent) => void }) {
|
||||
|
||||
// external state
|
||||
const { personas } = useMockPersonas();
|
||||
const { grayUI, showConversations, showSupport } = useAppCallStore();
|
||||
const conversationsByPersona = useConversationsByPersona();
|
||||
|
||||
|
||||
// pluggable UI
|
||||
|
||||
const menuItems = React.useMemo(() => <OptimaPanelGroup title='Contacts Settings'>
|
||||
|
||||
<MenuItem onClick={toggleGrayUI}>
|
||||
Grayed UI
|
||||
<Switch checked={grayUI} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem onClick={toggleShowConversations}>
|
||||
Conversations
|
||||
<Switch checked={showConversations} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem onClick={toggleShowSupport}>
|
||||
Show Support
|
||||
<Switch checked={showSupport} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
</OptimaPanelGroup>, [grayUI, showConversations, showSupport, toggleGrayUI, toggleShowConversations, toggleShowSupport]);
|
||||
|
||||
useSetOptimaAppMenu(menuItems, 'CallUI-Contacts');
|
||||
|
||||
|
||||
return <>
|
||||
|
||||
{/* -> Panel */}
|
||||
<OptimaPanelIn><ContactsMenuItems /></OptimaPanelIn>
|
||||
|
||||
{/* Header "Call AGI" */}
|
||||
<Box sx={{
|
||||
my: 6,
|
||||
@@ -310,7 +317,7 @@ export function Contacts(props: { setCallIntent: (intent: AppCallIntent) => void
|
||||
issue={354}
|
||||
text='Call App: Support thread and compatibility matrix'
|
||||
note={<>
|
||||
Voice input uses the HTML Web Speech API, and speech output requires an ElevenLabs API Key.
|
||||
Voice input uses the HTML Web Speech API.
|
||||
</>}
|
||||
// note2='Please report any issues you encounter'
|
||||
sx={{
|
||||
|
||||
+46
-57
@@ -7,31 +7,31 @@ import CallEndIcon from '@mui/icons-material/CallEnd';
|
||||
import CallIcon from '@mui/icons-material/Call';
|
||||
import MicIcon from '@mui/icons-material/Mic';
|
||||
import MicNoneIcon from '@mui/icons-material/MicNone';
|
||||
import RecordVoiceOverTwoToneIcon from '@mui/icons-material/RecordVoiceOverTwoTone';
|
||||
|
||||
import { ScrollToBottom } from '~/common/scroll-to-bottom/ScrollToBottom';
|
||||
import { ScrollToBottomButton } from '~/common/scroll-to-bottom/ScrollToBottomButton';
|
||||
import { useChatLLMDropdown } from '../chat/components/layout-bar/useLLMDropdown';
|
||||
|
||||
import { SystemPurposeId, SystemPurposes } from '../../data';
|
||||
import { elevenLabsSpeakText } from '~/modules/elevenlabs/elevenlabs.client';
|
||||
import { AixChatGenerateContent_DMessage, aixChatGenerateContent_DMessage_FromConversation } from '~/modules/aix/client/aix.client';
|
||||
import { useElevenLabsVoiceDropdown } from '~/modules/elevenlabs/useElevenLabsVoiceDropdown';
|
||||
|
||||
import { aixChatGenerateContent_DMessage_FromConversation, AixChatGenerateContent_DMessageGuts } from '~/modules/aix/client/aix.client';
|
||||
import { speakText } from '~/modules/speex/speex.client';
|
||||
|
||||
import type { OptimaBarControlMethods } from '~/common/layout/optima/bar/OptimaBarDropdown';
|
||||
import { AudioPlayer } from '~/common/util/audio/AudioPlayer';
|
||||
import { Link } from '~/common/components/Link';
|
||||
import { OptimaPanelGroup } from '~/common/layout/optima/panel/OptimaPanelGroup';
|
||||
import { OptimaToolbarIn } from '~/common/layout/optima/portals/OptimaPortalsIn';
|
||||
import { OptimaPanelGroupedList } from '~/common/layout/optima/panel/OptimaPanelGroupedList';
|
||||
import { OptimaPanelIn, OptimaToolbarIn } from '~/common/layout/optima/portals/OptimaPortalsIn';
|
||||
import { PhVoice } from '~/common/components/icons/phosphor/PhVoice';
|
||||
import { SpeechResult, useSpeechRecognition } from '~/common/components/speechrecognition/useSpeechRecognition';
|
||||
import { clipboardInterceptCtrlCForCleanup } from '~/common/util/clipboardUtils';
|
||||
import { conversationTitle, remapMessagesSysToUsr } from '~/common/stores/chat/chat.conversation';
|
||||
import { createDMessageFromFragments, createDMessageTextContent, DMessage, messageFragmentsReduceText } from '~/common/stores/chat/chat.message';
|
||||
import { createDMessageFromFragments, createDMessageTextContent, DMessage, messageFragmentsReduceText, messageWasInterruptedAtStart } from '~/common/stores/chat/chat.message';
|
||||
import { createErrorContentFragment } from '~/common/stores/chat/chat.fragments';
|
||||
import { launchAppChat, navigateToIndex } from '~/common/app.routes';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { useGlobalShortcuts } from '~/common/components/shortcuts/useGlobalShortcuts';
|
||||
import { usePlayUrl } from '~/common/util/audio/usePlayUrl';
|
||||
import { useSetOptimaAppMenu } from '~/common/layout/optima/useOptima';
|
||||
|
||||
import type { AppCallIntent } from './AppCall';
|
||||
import { CallAvatar } from './components/CallAvatar';
|
||||
@@ -41,22 +41,17 @@ import { CallStatus } from './components/CallStatus';
|
||||
import { useAppCallStore } from './state/store-app-call';
|
||||
|
||||
|
||||
function CallMenuItems(props: {
|
||||
function CallMenu(props: {
|
||||
pushToTalk: boolean,
|
||||
setPushToTalk: (pushToTalk: boolean) => void,
|
||||
override: boolean,
|
||||
setOverride: (overridePersonaVoice: boolean) => void,
|
||||
}) {
|
||||
|
||||
// external state
|
||||
const { grayUI, toggleGrayUI } = useAppCallStore();
|
||||
const { voicesDropdown } = useElevenLabsVoiceDropdown(false, !props.override);
|
||||
|
||||
const handlePushToTalkToggle = () => props.setPushToTalk(!props.pushToTalk);
|
||||
|
||||
const handleChangeVoiceToggle = () => props.setOverride(!props.override);
|
||||
|
||||
return <OptimaPanelGroup title='Call'>
|
||||
return <OptimaPanelGroupedList title='Call'>
|
||||
|
||||
<MenuItem onClick={handlePushToTalkToggle}>
|
||||
<ListItemDecorator>{props.pushToTalk ? <MicNoneIcon /> : <MicIcon />}</ListItemDecorator>
|
||||
@@ -64,17 +59,6 @@ function CallMenuItems(props: {
|
||||
<Switch checked={props.pushToTalk} onChange={handlePushToTalkToggle} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem onClick={handleChangeVoiceToggle}>
|
||||
<ListItemDecorator><RecordVoiceOverTwoToneIcon /></ListItemDecorator>
|
||||
Change Voice
|
||||
<Switch checked={props.override} onChange={handleChangeVoiceToggle} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem>
|
||||
<ListItemDecorator>{' '}</ListItemDecorator>
|
||||
{voicesDropdown}
|
||||
</MenuItem>
|
||||
|
||||
<ListDivider />
|
||||
|
||||
<MenuItem onClick={toggleGrayUI}>
|
||||
@@ -86,7 +70,7 @@ function CallMenuItems(props: {
|
||||
Voice Calls Feedback
|
||||
</MenuItem>
|
||||
|
||||
</OptimaPanelGroup>;
|
||||
</OptimaPanelGroupedList>;
|
||||
}
|
||||
|
||||
|
||||
@@ -99,7 +83,6 @@ export function Telephone(props: {
|
||||
const [avatarClickCount, setAvatarClickCount] = React.useState<number>(0);// const [micMuted, setMicMuted] = React.useState(false);
|
||||
const [callElapsedTime, setCallElapsedTime] = React.useState<string>('00:00');
|
||||
const [callMessages, setCallMessages] = React.useState<DMessage[]>([]);
|
||||
const [overridePersonaVoice, setOverridePersonaVoice] = React.useState<boolean>(false);
|
||||
const [personaTextInterim, setPersonaTextInterim] = React.useState<string | null>(null);
|
||||
const [pushToTalk, setPushToTalk] = React.useState(true);
|
||||
const [stage, setStage] = React.useState<'ring' | 'declined' | 'connected' | 'ended'>('ring');
|
||||
@@ -107,7 +90,7 @@ export function Telephone(props: {
|
||||
const responseAbortController = React.useRef<AbortController | null>(null);
|
||||
|
||||
// external state
|
||||
const { chatLLMId, chatLLMDropdown } = useChatLLMDropdown(llmDropdownRef);
|
||||
const { chatLLMId: modelId, chatLLMDropdown: modelDropdown } = useChatLLMDropdown(llmDropdownRef);
|
||||
const { chatTitle, reMessages } = useChatStore(useShallow(state => {
|
||||
const conversation = props.callIntent.conversationId
|
||||
? state.conversations.find(conversation => conversation.id === props.callIntent.conversationId) ?? null
|
||||
@@ -119,7 +102,7 @@ export function Telephone(props: {
|
||||
}));
|
||||
const persona = SystemPurposes[props.callIntent.personaId as SystemPurposeId] ?? undefined;
|
||||
const personaCallStarters = persona?.call?.starters ?? undefined;
|
||||
const personaVoiceId = overridePersonaVoice ? undefined : (persona?.voices?.elevenLabs?.voiceId ?? undefined);
|
||||
// const personaVoiceSelector = React.useMemo(() => personaGetVoiceSelector(persona), [persona]);
|
||||
const personaSystemMessage = persona?.systemMessage ?? undefined;
|
||||
|
||||
// hooks and speech
|
||||
@@ -166,7 +149,6 @@ export function Telephone(props: {
|
||||
};
|
||||
|
||||
// [E] pickup -> seed message and call timer
|
||||
// FIXME: Overriding the voice will reset the call - not a desired behavior
|
||||
React.useEffect(() => {
|
||||
if (!isConnected) return;
|
||||
|
||||
@@ -186,11 +168,14 @@ export function Telephone(props: {
|
||||
|
||||
setCallMessages([createDMessageTextContent('assistant', firstMessage)]); // [state] set assistant:hello message
|
||||
|
||||
// fire/forget
|
||||
void elevenLabsSpeakText(firstMessage, personaVoiceId, true, true);
|
||||
// fire/forget - use 'fast' priority for real-time conversation
|
||||
void speakText(firstMessage,
|
||||
undefined,
|
||||
{ label: 'Call', priority: 'fast' },
|
||||
);
|
||||
|
||||
return () => clearInterval(interval);
|
||||
}, [isConnected, personaCallStarters, personaVoiceId]);
|
||||
}, [isConnected, personaCallStarters]);
|
||||
|
||||
// [E] persona streaming response - upon new user message
|
||||
React.useEffect(() => {
|
||||
@@ -226,7 +211,7 @@ export function Telephone(props: {
|
||||
}
|
||||
|
||||
// bail if no llm selected
|
||||
if (!chatLLMId) return;
|
||||
if (!modelId) return;
|
||||
|
||||
|
||||
// Call Message Generation Prompt
|
||||
@@ -249,33 +234,40 @@ export function Telephone(props: {
|
||||
setPersonaTextInterim('💭...');
|
||||
|
||||
aixChatGenerateContent_DMessage_FromConversation(
|
||||
chatLLMId,
|
||||
modelId,
|
||||
callSystemInstruction,
|
||||
callGenerationInputHistory,
|
||||
'call',
|
||||
callMessages[0].id,
|
||||
{ abortSignal: responseAbortController.current.signal },
|
||||
(update: AixChatGenerateContent_DMessage, _isDone: boolean) => {
|
||||
(update: AixChatGenerateContent_DMessageGuts, _isDone: boolean) => {
|
||||
const updatedText = messageFragmentsReduceText(update.fragments).trim();
|
||||
if (updatedText)
|
||||
setPersonaTextInterim(finalText = updatedText);
|
||||
},
|
||||
).then((status) => {
|
||||
|
||||
// don't add the message to conversation if it was interrupted with no content
|
||||
if (messageWasInterruptedAtStart(status.lastDMessage))
|
||||
return;
|
||||
|
||||
// whether status.outcome === 'success' or not, we get a valid DMessage, eventually with Error Fragments inside
|
||||
const fullMessage = createDMessageFromFragments('assistant', status.lastDMessage.fragments);
|
||||
fullMessage.generator = status.lastDMessage.generator;
|
||||
setCallMessages(messages => [...messages, fullMessage]); // [state] append assistant:call_response
|
||||
|
||||
// fire/forget
|
||||
// fire/forget - use 'fast' priority for real-time conversation
|
||||
if (status.outcome === 'success' && finalText?.length >= 1)
|
||||
void elevenLabsSpeakText(finalText, personaVoiceId, true, true);
|
||||
void speakText(finalText,
|
||||
undefined,
|
||||
{ label: 'Call', priority: 'fast' },
|
||||
);
|
||||
|
||||
}).catch((err: DOMException) => {
|
||||
if (err?.name !== 'AbortError') {
|
||||
// create an error message to explain the exception
|
||||
const errorMesage = createDMessageFromFragments('assistant', [createErrorContentFragment(err.message || err.toString())]);
|
||||
setCallMessages(messages => [...messages, errorMesage]); // [state] append assistant:call_response-ERROR
|
||||
const errorMessage = createDMessageFromFragments('assistant', [createErrorContentFragment(err.message || err.toString())]);
|
||||
setCallMessages(messages => [...messages, errorMessage]); // [state] append assistant:call_response-ERROR
|
||||
}
|
||||
}).finally(() => {
|
||||
setPersonaTextInterim(null);
|
||||
@@ -285,7 +277,7 @@ export function Telephone(props: {
|
||||
responseAbortController.current?.abort();
|
||||
responseAbortController.current = null;
|
||||
};
|
||||
}, [isConnected, callMessages, chatLLMId, personaVoiceId, personaSystemMessage, reMessages]);
|
||||
}, [callMessages, isConnected, modelId, personaSystemMessage, reMessages]);
|
||||
|
||||
// [E] Message interrupter
|
||||
const abortTrigger = isConnected && recognitionState.hasSpeech;
|
||||
@@ -311,22 +303,19 @@ export function Telephone(props: {
|
||||
const isMicEnabled = recognitionState.isAvailable;
|
||||
const isTTSEnabled = true;
|
||||
const isEnabled = isMicEnabled && isTTSEnabled;
|
||||
|
||||
|
||||
// pluggable UI
|
||||
|
||||
const menuItems = React.useMemo(() =>
|
||||
<CallMenuItems
|
||||
pushToTalk={pushToTalk} setPushToTalk={setPushToTalk}
|
||||
override={overridePersonaVoice} setOverride={setOverridePersonaVoice} />
|
||||
, [overridePersonaVoice, pushToTalk],
|
||||
);
|
||||
|
||||
useSetOptimaAppMenu(menuItems, 'CallUI-Call');
|
||||
const micErrorMessage = recognitionState.errorMessage;
|
||||
|
||||
|
||||
return <>
|
||||
<OptimaToolbarIn>{chatLLMDropdown}</OptimaToolbarIn>
|
||||
|
||||
{/* -> Toolbar */}
|
||||
<OptimaToolbarIn>{modelDropdown}</OptimaToolbarIn>
|
||||
{/* -> Panel */}
|
||||
<OptimaPanelIn>
|
||||
<CallMenu
|
||||
pushToTalk={pushToTalk} setPushToTalk={setPushToTalk}
|
||||
/>
|
||||
</OptimaPanelIn>
|
||||
|
||||
<Typography
|
||||
level='h1'
|
||||
@@ -350,7 +339,7 @@ export function Telephone(props: {
|
||||
callerName={isConnected ? undefined : personaName}
|
||||
statusText={isRinging ? '' /*'is calling you'*/ : isDeclined ? 'call declined' : isEnded ? 'call ended' : callElapsedTime}
|
||||
regardingText={chatTitle}
|
||||
micError={!isMicEnabled} speakError={!isTTSEnabled}
|
||||
micError={!isMicEnabled} micErrorMessage={micErrorMessage} speakError={!isTTSEnabled}
|
||||
/>
|
||||
|
||||
{/* Live Transcript, w/ streaming messages, audio indication, etc. */}
|
||||
@@ -372,7 +361,7 @@ export function Telephone(props: {
|
||||
|
||||
<ScrollToBottom stickToBottomInitial>
|
||||
|
||||
<Box sx={{ minHeight: '100%', p: 1, display: 'flex', flexDirection: 'column', gap: 1 }}>
|
||||
<Box onCopy={clipboardInterceptCtrlCForCleanup} sx={{ minHeight: '100%', p: 1, display: 'flex', flexDirection: 'column', gap: 1 }}>
|
||||
|
||||
{/* Call Messages [] */}
|
||||
{callMessages.map((message) =>
|
||||
|
||||
@@ -16,7 +16,7 @@ export function CallStatus(props: {
|
||||
callerName?: string,
|
||||
statusText: string,
|
||||
regardingText: string | null,
|
||||
micError: boolean, speakError: boolean,
|
||||
micError: boolean, micErrorMessage: string | null, speakError: boolean,
|
||||
// llmComponent?: React.JSX.Element,
|
||||
}) {
|
||||
return (
|
||||
@@ -37,7 +37,7 @@ export function CallStatus(props: {
|
||||
</Typography>}
|
||||
|
||||
{props.micError && <InlineError
|
||||
severity='danger' error='Looks like this Browser may not support speech recognition. You can try Chrome on Windows or Android instead.' />}
|
||||
severity='danger' error={props.micErrorMessage || 'Looks like this Browser may not support speech recognition. You can try Chrome on Windows or Android instead.'} />}
|
||||
|
||||
{props.speakError && <InlineError
|
||||
severity='danger' error='Text-to-speech does not appear to be configured. Please set it up in Preferences > Voice.' />}
|
||||
|
||||
+159
-88
@@ -2,15 +2,14 @@ import * as React from 'react';
|
||||
import { Panel, PanelGroup, PanelResizeHandle } from 'react-resizable-panels';
|
||||
|
||||
import type { SxProps } from '@mui/joy/styles/types';
|
||||
import { useTheme } from '@mui/joy';
|
||||
import { Box, useTheme } from '@mui/joy';
|
||||
|
||||
import { DEV_MODE_SETTINGS } from '../settings-modal/UxLabsSettings';
|
||||
import { DiagramConfig, DiagramsModal } from '~/modules/aifn/digrams/DiagramsModal';
|
||||
import { FlattenerModal } from '~/modules/aifn/flatten/FlattenerModal';
|
||||
import { TradeConfig, TradeModal } from '~/modules/trade/TradeModal';
|
||||
|
||||
import type { DiagramConfig } from '~/modules/aifn/digrams/DiagramsModal';
|
||||
import type { TradeConfig } from '~/modules/trade/TradeModal';
|
||||
import { downloadSingleChat, importConversationsFromFilesAtRest, openConversationsAtRestPicker } from '~/modules/trade/trade.client';
|
||||
import { imaginePromptFromTextOrThrow } from '~/modules/aifn/imagine/imaginePromptFromText';
|
||||
import { elevenLabsSpeakText } from '~/modules/elevenlabs/elevenlabs.client';
|
||||
import { useAreBeamsOpen } from '~/modules/beam/store-beam.hooks';
|
||||
import { useCapabilityTextToImage } from '~/modules/t2i/t2i.client';
|
||||
|
||||
@@ -18,9 +17,10 @@ import type { DConversation, DConversationId } from '~/common/stores/chat/chat.c
|
||||
import type { OptimaBarControlMethods } from '~/common/layout/optima/bar/OptimaBarDropdown';
|
||||
import { ConfirmationModal } from '~/common/components/modals/ConfirmationModal';
|
||||
import { ConversationsManager } from '~/common/chat-overlay/ConversationsManager';
|
||||
import { LLM_IF_ANT_PromptCaching, LLM_IF_OAI_Vision } from '~/common/stores/llms/llms.types';
|
||||
import { OptimaDrawerIn, OptimaToolbarIn } from '~/common/layout/optima/portals/OptimaPortalsIn';
|
||||
import { PanelResizeInset } from '~/common/components/panes/GoodPanelResizeHandler';
|
||||
import { ErrorBoundary } from '~/common/components/ErrorBoundary';
|
||||
import { getLLMContextTokens, LLM_IF_ANT_PromptCaching, LLM_IF_OAI_Vision } from '~/common/stores/llms/llms.types';
|
||||
import { OptimaDrawerIn, OptimaPanelIn, OptimaToolbarIn } from '~/common/layout/optima/portals/OptimaPortalsIn';
|
||||
import { PanelResizeInset } from '~/common/components/PanelResizeInset';
|
||||
import { Release } from '~/common/app.release';
|
||||
import { ScrollToBottom } from '~/common/scroll-to-bottom/ScrollToBottom';
|
||||
import { ScrollToBottomButton } from '~/common/scroll-to-bottom/ScrollToBottomButton';
|
||||
@@ -28,28 +28,31 @@ import { ShortcutKey, useGlobalShortcuts } from '~/common/components/shortcuts/u
|
||||
import { WorkspaceIdProvider } from '~/common/stores/workspace/WorkspaceIdProvider';
|
||||
import { addSnackbar, removeSnackbar } from '~/common/components/snackbar/useSnackbarsStore';
|
||||
import { createDMessageFromFragments, createDMessagePlaceholderIncomplete, DMessageMetadata, duplicateDMessageMetadata } from '~/common/stores/chat/chat.message';
|
||||
import { createErrorContentFragment, createTextContentFragment, DMessageAttachmentFragment, DMessageContentFragment, duplicateDMessageFragmentsNoVoid } from '~/common/stores/chat/chat.fragments';
|
||||
import { createErrorContentFragment, createTextContentFragment, DMessageAttachmentFragment, DMessageContentFragment, duplicateDMessageFragments } from '~/common/stores/chat/chat.fragments';
|
||||
import { gcChatImageAssets } from '~/common/stores/chat/chat.gc';
|
||||
import { getChatLLMId } from '~/common/stores/llms/store-llms';
|
||||
import { getConversation, getConversationSystemPurposeId, useConversation } from '~/common/stores/chat/store-chats';
|
||||
import { optimaActions, optimaOpenModels, optimaOpenPreferences, useSetOptimaAppMenu } from '~/common/layout/optima/useOptima';
|
||||
import { themeBgAppChatComposer } from '~/common/app.theme';
|
||||
import { useChatLLM } from '~/common/stores/llms/llms.hooks';
|
||||
import { optimaActions, optimaOpenModels, optimaOpenPreferences } from '~/common/layout/optima/useOptima';
|
||||
import { useFolderStore } from '~/common/stores/folders/store-chat-folders';
|
||||
import { useIsMobile, useIsTallScreen } from '~/common/components/useMatchMedia';
|
||||
import { useLLM } from '~/common/stores/llms/llms.hooks';
|
||||
import { useModelDomain } from '~/common/stores/llms/hooks/useModelDomain';
|
||||
import { useOverlayComponents } from '~/common/layout/overlays/useOverlayComponents';
|
||||
import { useRouterQuery } from '~/common/app.routes';
|
||||
import { useUXLabsStore } from '~/common/state/store-ux-labs';
|
||||
import { useUIComplexityIsMinimal } from '~/common/stores/store-ui';
|
||||
import { useUXLabsStore } from '~/common/stores/store-ux-labs';
|
||||
|
||||
import { ChatPane } from './components/layout-pane/ChatPane';
|
||||
import { ChatBarAltBeam } from './components/layout-bar/ChatBarAltBeam';
|
||||
import { ChatBarBeam } from './components/layout-bar/ChatBarBeam';
|
||||
import { ChatBarAltTitle } from './components/layout-bar/ChatBarAltTitle';
|
||||
import { ChatBarDropdowns } from './components/layout-bar/ChatBarDropdowns';
|
||||
import { ChatBarChat } from './components/layout-bar/ChatBarChat';
|
||||
import { ChatBeamWrapper } from './components/ChatBeamWrapper';
|
||||
import { ChatDrawerMemo } from './components/layout-drawer/ChatDrawer';
|
||||
import { ChatMessageList } from './components/ChatMessageList';
|
||||
import { Composer } from './components/composer/Composer';
|
||||
import { usePanesManager } from './components/panes/usePanesManager';
|
||||
import { PaneTitleOverlay } from './components/PaneTitleOverlay';
|
||||
import { useComposerAutoHide } from './components/composer/useComposerAutoHide';
|
||||
import { usePanesManager } from './components/panes/store-panes-manager';
|
||||
|
||||
import type { ChatExecuteMode } from './execute-mode/execute-mode.types';
|
||||
|
||||
@@ -74,24 +77,52 @@ const chatMessageListSx: SxProps = {
|
||||
flexGrow: 1,
|
||||
};
|
||||
|
||||
/*const chatMessageListBrandedSx: SxProps = {
|
||||
flexGrow: 1,
|
||||
backgroundBlendMode: 'soft-light',
|
||||
backgroundColor: themeBgApp,
|
||||
backgroundImage: 'url(https://...)',
|
||||
backgroundPosition: 'center',
|
||||
backgroundRepeat: 'no-repeat',
|
||||
backgroundSize: 'contain',
|
||||
} as const;*/
|
||||
|
||||
const chatBeamWrapperSx: SxProps = {
|
||||
flexGrow: 1,
|
||||
// we added these after removing the minSize={20} (%) from the containing panel.
|
||||
minWidth: '18rem',
|
||||
// minHeight: 'calc(100vh - 69px - var(--AGI-Nav-width))',
|
||||
};
|
||||
|
||||
const composerOpenSx: SxProps = {
|
||||
zIndex: 21, // just to allocate a surface, and potentially have a shadow
|
||||
// NOTE: disabled on 2025-03-05: conflicts with the GlobalDragOverlay's
|
||||
// zIndex: 21, // just to allocate a surface, and potentially have a shadow
|
||||
minWidth: { md: 480 }, // don't get compresses too much on desktop
|
||||
backgroundColor: themeBgAppChatComposer,
|
||||
// backgroundColor: themeBgAppChatComposer, // inlined in the Composer
|
||||
transition: 'background-color 0.5s ease-out',
|
||||
borderTop: `1px solid`,
|
||||
borderTopColor: 'rgba(var(--joy-palette-neutral-mainChannel, 99 107 116) / 0.4)',
|
||||
// hack: eats the bottom of the last message (as it has a 1px divider)
|
||||
mt: '-1px',
|
||||
};
|
||||
// NOTE: commented on 2024-05-13, as other content was stepping on the border due to it and missing zIndex
|
||||
// mt: '-1px',
|
||||
} as const;
|
||||
|
||||
const composerClosedSx: SxProps = {
|
||||
display: 'none',
|
||||
};
|
||||
const composerOpenMobileSx: SxProps = {
|
||||
zIndex: 21, // allocates the surface, possibly enables shadow if we like
|
||||
py: 0.5, // have some breathing room
|
||||
// boxShadow: '0px -1px 8px -2px rgba(0, 0, 0, 0.4)',
|
||||
...composerOpenSx,
|
||||
} as const;
|
||||
|
||||
// const composerClosedSx: SxProps = {
|
||||
// display: 'none',
|
||||
// };
|
||||
|
||||
|
||||
// Lazy-loaded Modals
|
||||
const DiagramsModalLazy = React.lazy(() => import('~/modules/aifn/digrams/DiagramsModal').then(module => ({ default: module.DiagramsModal })));
|
||||
const FlattenerModalLazy = React.lazy(() => import('~/modules/aifn/flatten/FlattenerModal').then(module => ({ default: module.FlattenerModal })));
|
||||
const TradeModalLazy = React.lazy(() => import('~/modules/trade/TradeModal').then(module => ({ default: module.TradeModal })));
|
||||
|
||||
|
||||
export function AppChat() {
|
||||
@@ -111,21 +142,25 @@ export function AppChat() {
|
||||
|
||||
// external state
|
||||
const theme = useTheme();
|
||||
const [composerHasContent, setComposerHasContent] = React.useState(false);
|
||||
|
||||
const isMobile = useIsMobile();
|
||||
const isTallScreen = useIsTallScreen();
|
||||
|
||||
const isZenMode = useUIComplexityIsMinimal();
|
||||
|
||||
const intent = useRouterQuery<Partial<AppChatIntent>>();
|
||||
|
||||
const showAltTitleBar = useUXLabsStore(state => DEV_MODE_SETTINGS && state.labsChatBarAlt === 'title');
|
||||
|
||||
const { chatLLM } = useChatLLM();
|
||||
const { domainModelId: chatLLMId } = useModelDomain('primaryChat');
|
||||
const chatLLM = useLLM(chatLLMId) ?? null;
|
||||
|
||||
const {
|
||||
// state
|
||||
chatPanes,
|
||||
focusedPaneConversationId, // <-- key
|
||||
focusedPaneIndex,
|
||||
focusedPaneConversationId,
|
||||
// actions
|
||||
navigateHistoryInFocusedPane,
|
||||
openConversationInFocusedPane,
|
||||
@@ -147,10 +182,10 @@ export function AppChat() {
|
||||
}, [chatPanes]);
|
||||
|
||||
const beamsOpens = useAreBeamsOpen(paneBeamStores);
|
||||
const beamOpenStoreInFocusedPane = React.useMemo(() => {
|
||||
const open = focusedPaneIndex !== null ? (beamsOpens?.[focusedPaneIndex] ?? false) : false;
|
||||
return open ? paneBeamStores?.[focusedPaneIndex!] ?? null : null;
|
||||
}, [beamsOpens, focusedPaneIndex, paneBeamStores]);
|
||||
const beamOpenStoreInFocusedPane = focusedPaneIndex === null ? null
|
||||
: !beamsOpens?.[focusedPaneIndex] ? null
|
||||
: paneBeamStores?.[focusedPaneIndex] ?? null;
|
||||
const focusedChatBeamOpen = focusedPaneIndex !== null && !!beamsOpens?.[focusedPaneIndex];
|
||||
|
||||
const {
|
||||
// focused
|
||||
@@ -171,7 +206,7 @@ export function AppChat() {
|
||||
// const focusedConversationWorkspaceId = workspaceForConversationIdentity(focusedPaneConversationId);
|
||||
//// const focusedConversationWorkspace = useWorkspaceIdForConversation(focusedPaneConversationId);
|
||||
|
||||
const { mayWork: capabilityHasT2I } = useCapabilityTextToImage();
|
||||
const { mayWork: capabilityHasT2I, mayEdit: capabilityHasT2IEdit } = useCapabilityTextToImage();
|
||||
|
||||
const activeFolderId = useFolderStore(({ enableFolders, folders }) => {
|
||||
const activeFolderId = enableFolders ? _activeFolderId : null;
|
||||
@@ -179,6 +214,9 @@ export function AppChat() {
|
||||
return activeFolder?.id ?? null;
|
||||
});
|
||||
|
||||
// Composer Auto-hiding
|
||||
const forceComposerHide = !!beamOpenStoreInFocusedPane /* || !focusedPaneConversationId */; // auto-hide when no chat (the 'please select a conversation...' state) doesn't feel good
|
||||
const composerAutoHide = useComposerAutoHide(forceComposerHide, composerHasContent);
|
||||
|
||||
// Window actions
|
||||
|
||||
@@ -211,7 +249,7 @@ export function AppChat() {
|
||||
else if (outcome === 'err-t2i-unconfigured')
|
||||
optimaOpenPreferences('draw');
|
||||
else if (outcome === 'err-no-persona')
|
||||
addSnackbar({ key: 'chat-no-persona', message: 'No persona selected.', type: 'issue' });
|
||||
addSnackbar({ key: 'chat-no-persona', message: 'No persona selected.', type: 'issue', overrides: { autoHideDuration: 4000 } });
|
||||
else if (outcome === 'err-no-conversation')
|
||||
addSnackbar({ key: 'chat-no-conversation', message: 'No active conversation.', type: 'issue' });
|
||||
else if (outcome === 'err-no-last-message')
|
||||
@@ -237,7 +275,7 @@ export function AppChat() {
|
||||
// create the user:message
|
||||
// NOTE: this can lead to multiple chat messages with data refs that are referring to the same dblobs,
|
||||
// however, we already got transferred ownership of the dblobs at this point.
|
||||
const userMessage = createDMessageFromFragments('user', duplicateDMessageFragmentsNoVoid(fragments)); // [chat] create user:message to send per-chat
|
||||
const userMessage = createDMessageFromFragments('user', duplicateDMessageFragments(fragments, true)); // [chat] create user:message to send per-chat
|
||||
if (metadata) userMessage.metadata = duplicateDMessageMetadata(metadata);
|
||||
|
||||
ConversationsManager.getHandler(conversation.id).messageAppend(userMessage); // [chat] append user message in each conversation
|
||||
@@ -307,11 +345,6 @@ export function AppChat() {
|
||||
});
|
||||
}, [handleExecuteAndOutcome]);
|
||||
|
||||
const handleTextSpeak = React.useCallback(async (text: string): Promise<void> => {
|
||||
await elevenLabsSpeakText(text, undefined, true, true);
|
||||
}, []);
|
||||
|
||||
|
||||
// Chat actions
|
||||
|
||||
const handleConversationNewInFocusedPane = React.useCallback((forceNoRecycle: boolean, isIncognito: boolean) => {
|
||||
@@ -329,9 +362,10 @@ export function AppChat() {
|
||||
useFolderStore.getState().addConversationToFolder(activeFolderId, conversationId);
|
||||
|
||||
// focus the composer
|
||||
composerTextAreaRef.current?.focus();
|
||||
if (!isMobile)
|
||||
composerTextAreaRef.current?.focus();
|
||||
|
||||
}, [activeFolderId, focusedPaneConversationId, handleOpenConversationInFocusedPane, prependNewConversation, recycleNewConversationId]);
|
||||
}, [activeFolderId, focusedPaneConversationId, handleOpenConversationInFocusedPane, isMobile, prependNewConversation, recycleNewConversationId]);
|
||||
|
||||
const handleConversationImportDialog = React.useCallback(() => setTradeConfig({ dir: 'import' }), []);
|
||||
|
||||
@@ -432,15 +466,15 @@ export function AppChat() {
|
||||
const barAltTitle = showAltTitleBar ? focusedChatTitle ?? 'No Chat' : null;
|
||||
|
||||
const focusedBarContent = React.useMemo(() => beamOpenStoreInFocusedPane
|
||||
? <ChatBarAltBeam beamStore={beamOpenStoreInFocusedPane} isMobile={isMobile} />
|
||||
? <ChatBarBeam conversationTitle={focusedChatTitle ?? 'No Chat'} beamStore={beamOpenStoreInFocusedPane} isMobile={isMobile} />
|
||||
: (barAltTitle === null)
|
||||
? <ChatBarDropdowns conversationId={focusedPaneConversationId} llmDropdownRef={llmDropdownRef} personaDropdownRef={personaDropdownRef} />
|
||||
? <ChatBarChat conversationId={focusedPaneConversationId} llmDropdownRef={llmDropdownRef} personaDropdownRef={personaDropdownRef} />
|
||||
: <ChatBarAltTitle conversationId={focusedPaneConversationId} conversationTitle={barAltTitle} />
|
||||
, [barAltTitle, beamOpenStoreInFocusedPane, focusedPaneConversationId, isMobile],
|
||||
, [barAltTitle, beamOpenStoreInFocusedPane, focusedChatTitle, focusedPaneConversationId, isMobile],
|
||||
);
|
||||
|
||||
|
||||
// Disabled by default, as it lags the opening of the drawer and immediatly vanishes during the closing animation
|
||||
// Disabled by default, as it lags the opening of the drawer and immediately vanishes during the closing animation
|
||||
const isDrawerOpen = true; // useOptimaDrawerOpen();
|
||||
|
||||
const drawerContent = React.useMemo(() => !isDrawerOpen ? null :
|
||||
@@ -450,6 +484,7 @@ export function AppChat() {
|
||||
activeFolderId={activeFolderId}
|
||||
chatPanesConversationIds={paneUniqueConversationIds}
|
||||
disableNewButton={disableNewButton}
|
||||
focusedChatBeamOpen={focusedChatBeamOpen}
|
||||
onConversationActivate={handleOpenConversationInFocusedPane}
|
||||
onConversationBranch={handleConversationBranch}
|
||||
onConversationNew={handleConversationNewInFocusedPane}
|
||||
@@ -458,10 +493,10 @@ export function AppChat() {
|
||||
onConversationsImportDialog={handleConversationImportDialog}
|
||||
setActiveFolderId={setActiveFolderId}
|
||||
/>,
|
||||
[activeFolderId, disableNewButton, focusedPaneConversationId, handleConversationBranch, handleConversationExport, handleConversationImportDialog, handleConversationNewInFocusedPane, handleDeleteConversations, handleOpenConversationInFocusedPane, isDrawerOpen, paneUniqueConversationIds],
|
||||
[activeFolderId, disableNewButton, focusedChatBeamOpen, focusedPaneConversationId, handleConversationBranch, handleConversationExport, handleConversationImportDialog, handleConversationNewInFocusedPane, handleDeleteConversations, handleOpenConversationInFocusedPane, isDrawerOpen, paneUniqueConversationIds],
|
||||
);
|
||||
|
||||
const focusedMenuItems = React.useMemo(() =>
|
||||
const focusedChatPanelContent = React.useMemo(() => !focusedPaneConversationId ? null :
|
||||
<ChatPane
|
||||
conversationId={focusedPaneConversationId}
|
||||
disableItems={!focusedPaneConversationId || isFocusedChatEmpty}
|
||||
@@ -477,8 +512,6 @@ export function AppChat() {
|
||||
[focusedPaneConversationId, handleConversationBranch, handleConversationFlatten, handleConversationReset, hasConversations, isFocusedChatEmpty, isMessageSelectionMode, isMobile, isTallScreen],
|
||||
);
|
||||
|
||||
useSetOptimaAppMenu(focusedMenuItems, 'AppChat');
|
||||
|
||||
|
||||
// Effects
|
||||
|
||||
@@ -486,7 +519,7 @@ export function AppChat() {
|
||||
React.useEffect(() => {
|
||||
// Debug: open a null chat
|
||||
if (Release.IsNodeDevBuild && intent.initialConversationId === 'null')
|
||||
openConversationInFocusedPane(null! /* for debugging purporse */);
|
||||
openConversationInFocusedPane(null! /* for debugging purpose */);
|
||||
// Open the initial conversation if set
|
||||
else if (intent.initialConversationId)
|
||||
openConversationInFocusedPane(intent.initialConversationId);
|
||||
@@ -578,8 +611,11 @@ export function AppChat() {
|
||||
|
||||
|
||||
return <>
|
||||
<OptimaDrawerIn>{drawerContent}</OptimaDrawerIn>
|
||||
|
||||
{/* -> Toolbar, -> Drawer, -> Panel*/}
|
||||
<OptimaToolbarIn>{focusedBarContent}</OptimaToolbarIn>
|
||||
<OptimaDrawerIn>{drawerContent}</OptimaDrawerIn>
|
||||
<OptimaPanelIn>{focusedChatPanelContent}</OptimaPanelIn>
|
||||
|
||||
<PanelGroup
|
||||
direction={(isMobile || isTallScreen) ? 'vertical' : 'horizontal'}
|
||||
@@ -596,20 +632,22 @@ export function AppChat() {
|
||||
const _panesCount = chatPanes.length;
|
||||
const _keyAndId = `chat-pane-${pane.paneId}`;
|
||||
const _sepId = `sep-pane-${idx}`;
|
||||
return <WorkspaceIdProvider conversationId={_paneIsFocused ? _paneConversationId : null} key={_keyAndId}>
|
||||
return <WorkspaceIdProvider conversationId={_paneIsFocused ? _paneConversationId : null} key={_keyAndId}><ErrorBoundary>
|
||||
|
||||
<Panel
|
||||
id={_keyAndId}
|
||||
order={idx}
|
||||
collapsible={chatPanes.length === 2}
|
||||
defaultSize={(_panesCount === 3 && idx === 1) ? 34 : Math.round(100 / _panesCount)}
|
||||
minSize={20}
|
||||
// minSize={20 /* IMPORTANT: this forces a reflow even on a simple on hover */}
|
||||
onClick={(event) => {
|
||||
const setFocus = chatPanes.length < 2 || !event.altKey;
|
||||
setFocusedPaneIndex(setFocus ? idx : -1);
|
||||
// Alt + Click: undocumented feature to clear focus
|
||||
if (event.altKey && chatPanes.length > 1)
|
||||
return setFocusedPaneIndex(-1);
|
||||
setFocusedPaneIndex(idx);
|
||||
}}
|
||||
onCollapse={() => {
|
||||
// NOTE: despite the delay to try to let the draggin settle, there seems to be an issue with the Pane locking the screen
|
||||
// NOTE: despite the delay to try to let the dragging settle, there seems to be an issue with the Pane locking the screen
|
||||
// setTimeout(() => removePane(idx), 50);
|
||||
// more than 2 will result in an assertion from the framework
|
||||
if (chatPanes.length === 2) removePane(idx);
|
||||
@@ -618,28 +656,45 @@ export function AppChat() {
|
||||
// for anchoring the scroll button in place
|
||||
position: 'relative',
|
||||
...(isMultiPane ? {
|
||||
marginBottom: '1px', // compensates for the -1px in `composerOpenSx` for the Composer offset
|
||||
borderRadius: '0.375rem',
|
||||
border: `2px solid ${_paneIsFocused
|
||||
borderStyle: 'solid',
|
||||
borderColor: _paneIsFocused
|
||||
? ((willMulticast || !isMultiConversationId) ? theme.palette.primary.solidBg : theme.palette.primary.solidBg)
|
||||
: ((willMulticast || !isMultiConversationId) ? theme.palette.primary.softActiveBg : theme.palette.background.level1)}`,
|
||||
: ((willMulticast || !isMultiConversationId) ? theme.palette.primary.softActiveBg : theme.palette.divider),
|
||||
borderWidth: '2px',
|
||||
// borderBottomWidth: '3px',
|
||||
// DISABLED on 2024-03-13, it gets in the way quite a lot
|
||||
// filter: (!willMulticast && !_paneIsFocused)
|
||||
// ? (!isMultiConversationId ? 'grayscale(66.67%)' /* clone of the same */ : 'grayscale(66.67%)')
|
||||
// : undefined,
|
||||
// 2025-02-27: didn't try, here's another version
|
||||
// filter: _paneIsFocused ? 'none' : 'brightness(0.94) saturate(0.9)',
|
||||
} : {
|
||||
// NOTE: this is a workaround for the 'stuck-after-collapse-close' issue. We will collapse the 'other' pane, which
|
||||
// will get it removed (onCollapse), and somehow this pane will be stuck with a pointerEvents: 'none' style, which de-facto
|
||||
// disables further interaction with the chat. This is a workaround to re-enable the pointer events.
|
||||
// The root cause seems to be a Dragstate not being reset properly, however the pointerEvents has been set since 0.0.56 while
|
||||
// The root cause seems to be a Drag state not being reset properly, however the pointerEvents has been set since 0.0.56 while
|
||||
// it was optional before: https://github.com/bvaughn/react-resizable-panels/issues/241
|
||||
pointerEvents: 'auto',
|
||||
}),
|
||||
...((_paneIsIncognito && {
|
||||
backgroundColor: theme.palette.background.level3,
|
||||
backgroundImage: 'repeating-linear-gradient(45deg, rgba(0,0,0,0.03), rgba(0,0,0,0.03) 10px, transparent 10px, transparent 20px)',
|
||||
})),
|
||||
}}
|
||||
>
|
||||
|
||||
{isMultiPane && !isZenMode && (
|
||||
<PaneTitleOverlay
|
||||
paneIdx={idx}
|
||||
conversationId={_paneConversationId}
|
||||
isFocused={_paneIsFocused}
|
||||
isIncognito={_paneIsIncognito}
|
||||
onConversationDelete={handleDeleteConversations}
|
||||
/>
|
||||
)}
|
||||
|
||||
<ScrollToBottom
|
||||
bootToBottom
|
||||
stickToBottomInitial
|
||||
@@ -653,7 +708,7 @@ export function AppChat() {
|
||||
conversationHandler={_paneChatHandler}
|
||||
capabilityHasT2I={capabilityHasT2I}
|
||||
chatLLMAntPromptCaching={chatLLM?.interfaces?.includes(LLM_IF_ANT_PromptCaching) ?? false}
|
||||
chatLLMContextTokens={chatLLM?.contextTokens ?? null}
|
||||
chatLLMContextTokens={getLLMContextTokens(chatLLM) ?? null}
|
||||
chatLLMSupportsImages={chatLLM?.interfaces?.includes(LLM_IF_OAI_Vision) ?? false}
|
||||
fitScreen={isMobile || isMultiPane}
|
||||
isMobile={isMobile}
|
||||
@@ -664,7 +719,6 @@ export function AppChat() {
|
||||
onConversationNew={handleConversationNewInFocusedPane}
|
||||
onTextDiagram={handleTextDiagram}
|
||||
onTextImagine={handleImagineFromText}
|
||||
onTextSpeak={handleTextSpeak}
|
||||
sx={chatMessageListSx}
|
||||
/>
|
||||
)}
|
||||
@@ -691,50 +745,67 @@ export function AppChat() {
|
||||
</PanelResizeHandle>
|
||||
)}
|
||||
|
||||
</WorkspaceIdProvider>;
|
||||
</ErrorBoundary></WorkspaceIdProvider>;
|
||||
})}
|
||||
|
||||
</PanelGroup>
|
||||
|
||||
<Composer
|
||||
isMobile={isMobile}
|
||||
chatLLM={chatLLM}
|
||||
composerTextAreaRef={composerTextAreaRef}
|
||||
targetConversationId={focusedPaneConversationId}
|
||||
capabilityHasT2I={capabilityHasT2I}
|
||||
isMulticast={!isMultiConversationId ? null : isComposerMulticast}
|
||||
isDeveloperMode={isFocusedChatDeveloper}
|
||||
onAction={handleComposerAction}
|
||||
onConversationsImportFromFiles={handleConversationsImportFromFiles}
|
||||
onTextImagine={handleImagineFromText}
|
||||
setIsMulticast={setIsComposerMulticast}
|
||||
sx={beamOpenStoreInFocusedPane ? composerClosedSx : composerOpenSx}
|
||||
/>
|
||||
{/* Composer with auto-hide */}
|
||||
<Box {...composerAutoHide.compressorProps}>
|
||||
<div style={composerAutoHide.compressibleStyle}>
|
||||
<Composer
|
||||
isMobile={isMobile}
|
||||
chatLLM={chatLLM}
|
||||
composerTextAreaRef={composerTextAreaRef}
|
||||
targetConversationId={focusedPaneConversationId}
|
||||
capabilityHasT2I={capabilityHasT2I}
|
||||
capabilityHasT2IEdit={capabilityHasT2IEdit}
|
||||
isMulticast={!isMultiConversationId ? null : isComposerMulticast}
|
||||
isDeveloperMode={isFocusedChatDeveloper}
|
||||
onAction={handleComposerAction}
|
||||
onConversationBeamEdit={handleMessageBeamLastInFocusedPane}
|
||||
onConversationsImportFromFiles={handleConversationsImportFromFiles}
|
||||
onTextImagine={handleImagineFromText}
|
||||
setIsMulticast={setIsComposerMulticast}
|
||||
onComposerHasContent={setComposerHasContent}
|
||||
sx={isMobile ? composerOpenMobileSx : composerOpenSx}
|
||||
/>
|
||||
</div>
|
||||
</Box>
|
||||
|
||||
{/* Hover zone for auto-hide */}
|
||||
{!forceComposerHide && composerAutoHide.isHidden && <Box {...composerAutoHide.detectorProps} />}
|
||||
|
||||
{/* Diagrams */}
|
||||
{!!diagramConfig && (
|
||||
<DiagramsModal
|
||||
config={diagramConfig}
|
||||
onClose={() => setDiagramConfig(null)}
|
||||
/>
|
||||
<React.Suspense fallback={null}>
|
||||
<DiagramsModalLazy
|
||||
config={diagramConfig}
|
||||
onClose={() => setDiagramConfig(null)}
|
||||
/>
|
||||
</React.Suspense>
|
||||
)}
|
||||
|
||||
{/* Flatten */}
|
||||
{!!flattenConversationId && (
|
||||
<FlattenerModal
|
||||
conversationId={flattenConversationId}
|
||||
onConversationBranch={handleConversationBranch}
|
||||
onClose={() => setFlattenConversationId(null)}
|
||||
/>
|
||||
<React.Suspense fallback={null}>
|
||||
<FlattenerModalLazy
|
||||
conversationId={flattenConversationId}
|
||||
onConversationBranch={handleConversationBranch}
|
||||
onClose={() => setFlattenConversationId(null)}
|
||||
/>
|
||||
</React.Suspense>
|
||||
)}
|
||||
|
||||
{/* Import / Export */}
|
||||
{!!tradeConfig && (
|
||||
<TradeModal
|
||||
config={tradeConfig}
|
||||
onConversationActivate={handleOpenConversationInFocusedPane}
|
||||
onClose={() => setTradeConfig(null)}
|
||||
/>
|
||||
<React.Suspense fallback={null}>
|
||||
<TradeModalLazy
|
||||
config={tradeConfig}
|
||||
onConversationActivate={handleOpenConversationInFocusedPane}
|
||||
onClose={() => setTradeConfig(null)}
|
||||
/>
|
||||
</React.Suspense>
|
||||
)}
|
||||
|
||||
</>;
|
||||
|
||||
@@ -1,19 +1,41 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import type { SxProps } from '@mui/joy/styles/types';
|
||||
import { Box, Modal, ModalClose } from '@mui/joy';
|
||||
import { Box, IconButton, Modal } from '@mui/joy';
|
||||
import CloseFullscreenIcon from '@mui/icons-material/CloseFullscreen';
|
||||
|
||||
import { BeamStoreApi, useBeamStore } from '~/modules/beam/store-beam.hooks';
|
||||
import { BeamView } from '~/modules/beam/BeamView';
|
||||
|
||||
import { GoodTooltip } from '~/common/components/GoodTooltip';
|
||||
import { ScrollToBottom } from '~/common/scroll-to-bottom/ScrollToBottom';
|
||||
import { themeZIndexBeamView } from '~/common/app.theme';
|
||||
|
||||
|
||||
/*const overlaySx: SxProps = {
|
||||
position: 'absolute',
|
||||
inset: 0,
|
||||
zIndex: themeZIndexBeamView, // stay on top of Message > Chips (:1), and Overlays (:2) - note: Desktop Drawer (:26)
|
||||
}*/
|
||||
const beamWrapperStyles = {
|
||||
|
||||
wrapper: {
|
||||
position: 'absolute',
|
||||
inset: 0,
|
||||
backgroundColor: 'background.level2', // darker than the expected Level1, for a change
|
||||
} as const,
|
||||
|
||||
closeContainer: {
|
||||
position: 'absolute',
|
||||
top: '0.25rem',
|
||||
// left: '0.25rem',
|
||||
left: { xs: 'calc(50% - 3rem)', md: '50%' }, // center on desktop, a bit left (for the islands) on mobile
|
||||
// transform: 'translate(-50%, 0)',
|
||||
zIndex: themeZIndexBeamView, // stay on top of Message > Chips (:1), and Overlays (:2) - note: Desktop Drawer (:26)
|
||||
} as const,
|
||||
|
||||
closeButton: {
|
||||
// color: 'white',
|
||||
// borderRadius: '25%',
|
||||
boxShadow: 'md',
|
||||
} as const,
|
||||
|
||||
} as const;
|
||||
|
||||
|
||||
export function ChatBeamWrapper(props: {
|
||||
@@ -40,15 +62,22 @@ export function ChatBeamWrapper(props: {
|
||||
|
||||
return isMaximized ? (
|
||||
<Modal open onClose={handleUnMaximize}>
|
||||
<Box sx={{
|
||||
backgroundColor: 'background.level1',
|
||||
position: 'absolute',
|
||||
inset: 0,
|
||||
}}>
|
||||
<Box sx={beamWrapperStyles.wrapper}>
|
||||
|
||||
<ScrollToBottom disableAutoStick>
|
||||
{beamView}
|
||||
</ScrollToBottom>
|
||||
<ModalClose sx={{ color: 'white', backgroundColor: 'background.surface', boxShadow: 'xs', mr: 2 }} />
|
||||
|
||||
{/* Modal-Close-alike */}
|
||||
<Box sx={beamWrapperStyles.closeContainer}>
|
||||
<GoodTooltip title='Exit maximized mode'>
|
||||
<IconButton variant='solid' onClick={handleUnMaximize} sx={beamWrapperStyles.closeButton}>
|
||||
<CloseFullscreenIcon />
|
||||
{/*<CloseRoundedIcon />*/}
|
||||
</IconButton>
|
||||
</GoodTooltip>
|
||||
</Box>
|
||||
|
||||
</Box>
|
||||
</Modal>
|
||||
) : (
|
||||
|
||||
@@ -7,17 +7,18 @@ import { Box, List } from '@mui/joy';
|
||||
import type { SystemPurposeExample } from '../../../data';
|
||||
|
||||
import type { DiagramConfig } from '~/modules/aifn/digrams/DiagramsModal';
|
||||
import { speakText } from '~/modules/speex/speex.client';
|
||||
|
||||
import type { ConversationHandler } from '~/common/chat-overlay/ConversationHandler';
|
||||
import type { DLLMContextTokens } from '~/common/stores/llms/llms.types';
|
||||
import { DConversationId, excludeSystemMessages } from '~/common/stores/chat/chat.conversation';
|
||||
import { ShortcutKey, useGlobalShortcuts } from '~/common/components/shortcuts/useGlobalShortcuts';
|
||||
import { clipboardInterceptCtrlCForCleanup } from '~/common/util/clipboardUtils';
|
||||
import { convertFilesToDAttachmentFragments } from '~/common/attachment-drafts/attachment.pipeline';
|
||||
import { createDMessageFromFragments, createDMessageTextContent, DMessage, DMessageId, DMessageUserFlag, DMetaReferenceItem, MESSAGE_FLAG_AIX_SKIP } from '~/common/stores/chat/chat.message';
|
||||
import { createDMessageFromFragments, createDMessageTextContent, DMessage, DMessageId, DMessageUserFlag, DMetaReferenceItem, MESSAGE_FLAG_AIX_SKIP, messageHasUserFlag } from '~/common/stores/chat/chat.message';
|
||||
import { createTextContentFragment, DMessageFragment, DMessageFragmentId } from '~/common/stores/chat/chat.fragments';
|
||||
import { openFileForAttaching } from '~/common/components/ButtonAttachFiles';
|
||||
import { optimaOpenPreferences } from '~/common/layout/optima/useOptima';
|
||||
import { useBrowserTranslationWarning } from '~/common/components/useIsBrowserTranslating';
|
||||
import { useCapabilityElevenLabs } from '~/common/components/useCapabilities';
|
||||
import { useChatOverlayStore } from '~/common/chat-overlay/store-perchat_vanilla';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { useScrollToBottom } from '~/common/scroll-to-bottom/useScrollToBottom';
|
||||
@@ -40,7 +41,7 @@ export function ChatMessageList(props: {
|
||||
conversationHandler: ConversationHandler | null,
|
||||
capabilityHasT2I: boolean,
|
||||
chatLLMAntPromptCaching: boolean,
|
||||
chatLLMContextTokens: number | null,
|
||||
chatLLMContextTokens: DLLMContextTokens,
|
||||
chatLLMSupportsImages: boolean,
|
||||
fitScreen: boolean,
|
||||
isMobile: boolean,
|
||||
@@ -50,7 +51,6 @@ export function ChatMessageList(props: {
|
||||
onConversationNew: (forceNoRecycle: boolean, isIncognito: boolean) => void,
|
||||
onTextDiagram: (diagramConfig: DiagramConfig | null) => void,
|
||||
onTextImagine: (conversationId: DConversationId, selectedText: string) => Promise<void>,
|
||||
onTextSpeak: (selectedText: string) => Promise<void>,
|
||||
setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void,
|
||||
sx?: SxProps,
|
||||
}) {
|
||||
@@ -64,7 +64,6 @@ export function ChatMessageList(props: {
|
||||
const { notifyBooting } = useScrollToBottom();
|
||||
const danger_experimentalHtmlWebUi = useChatAutoSuggestHTMLUI();
|
||||
const [showSystemMessages] = useChatShowSystemMessages();
|
||||
const optionalTranslationWarning = useBrowserTranslationWarning();
|
||||
const { conversationMessages, historyTokenCount } = useChatStore(useShallow(({ conversations }) => {
|
||||
const conversation = conversations.find(conversation => conversation.id === props.conversationId);
|
||||
return {
|
||||
@@ -76,10 +75,9 @@ export function ChatMessageList(props: {
|
||||
_composerInReferenceToCount: state.inReferenceTo?.length ?? 0,
|
||||
ephemerals: state.ephemerals?.length ? state.ephemerals : null,
|
||||
})));
|
||||
const { mayWork: isSpeakable } = useCapabilityElevenLabs();
|
||||
|
||||
// derived state
|
||||
const { conversationHandler, conversationId, capabilityHasT2I, onConversationBranch, onConversationExecuteHistory, onTextDiagram, onTextImagine, onTextSpeak } = props;
|
||||
const { conversationHandler, conversationId, capabilityHasT2I, onConversationBranch, onConversationExecuteHistory, onTextDiagram, onTextImagine } = props;
|
||||
const composerCanAddInReferenceTo = _composerInReferenceToCount < 5;
|
||||
const composerHasInReferenceto = _composerInReferenceToCount > 0;
|
||||
|
||||
@@ -118,9 +116,9 @@ export function ChatMessageList(props: {
|
||||
}
|
||||
}, [conversationHandler, conversationId, onConversationExecuteHistory, props.chatLLMSupportsImages]);
|
||||
|
||||
const handleMessageContinue = React.useCallback(async (_messageId: DMessageId /* Ignored for now */) => {
|
||||
const handleMessageContinue = React.useCallback(async (_messageId: DMessageId /* Ignored for now */, continueText: null | string) => {
|
||||
if (conversationId && conversationHandler) {
|
||||
conversationHandler.messageAppend(createDMessageTextContent('user', 'Continue')); // [chat] append user:Continue
|
||||
conversationHandler.messageAppend(createDMessageTextContent('user', continueText || 'Continue')); // [chat] append user:Continue (or custom text, likely from an 'option')
|
||||
await onConversationExecuteHistory(conversationId);
|
||||
}
|
||||
}, [conversationHandler, conversationId, onConversationExecuteHistory]);
|
||||
@@ -137,8 +135,8 @@ export function ChatMessageList(props: {
|
||||
|
||||
const handleMessageBeam = React.useCallback(async (messageId: DMessageId) => {
|
||||
// Message option menu Beam
|
||||
if (!conversationId || !props.conversationHandler || !props.conversationHandler.isValid()) return;
|
||||
const inputHistory = props.conversationHandler.historyViewHeadOrThrow('chat-beam-message');
|
||||
if (!conversationId || !conversationHandler || !conversationHandler.isValid()) return;
|
||||
const inputHistory = conversationHandler.historyViewHeadOrThrow('chat-beam-message');
|
||||
if (!inputHistory.length) return;
|
||||
|
||||
// TODO: replace the Persona and Auto-Cache-hint in the history?
|
||||
@@ -151,52 +149,52 @@ export function ChatMessageList(props: {
|
||||
// assistant: do an in-place beam
|
||||
if (lastTruncatedMessage.role === 'assistant') {
|
||||
if (truncatedHistory.length >= 2)
|
||||
props.conversationHandler.beamInvoke(truncatedHistory.slice(0, -1), [lastTruncatedMessage], lastTruncatedMessage.id);
|
||||
conversationHandler.beamInvoke(truncatedHistory.slice(0, -1), [lastTruncatedMessage], lastTruncatedMessage.id);
|
||||
} else if (lastTruncatedMessage.role === 'user') {
|
||||
// user: truncate and append (but if the next message is an assistant message, import it)
|
||||
const possibleNextMessage = inputHistory[truncatedHistory.length];
|
||||
if (possibleNextMessage?.role === 'assistant')
|
||||
props.conversationHandler.beamInvoke(truncatedHistory, [possibleNextMessage], null);
|
||||
conversationHandler.beamInvoke(truncatedHistory, [possibleNextMessage], null);
|
||||
else
|
||||
props.conversationHandler.beamInvoke(truncatedHistory, [], null);
|
||||
conversationHandler.beamInvoke(truncatedHistory, [], null);
|
||||
}
|
||||
}, [conversationId, props.conversationHandler]);
|
||||
}, [conversationHandler, conversationId]);
|
||||
|
||||
const handleMessageBranch = React.useCallback((messageId: DMessageId) => {
|
||||
conversationId && onConversationBranch(conversationId, messageId, true);
|
||||
}, [conversationId, onConversationBranch]);
|
||||
|
||||
const handleMessageTruncate = React.useCallback((messageId: DMessageId) => {
|
||||
props.conversationHandler?.historyTruncateTo(messageId, 0);
|
||||
}, [props.conversationHandler]);
|
||||
conversationHandler?.historyTruncateTo(messageId, 0);
|
||||
}, [conversationHandler]);
|
||||
|
||||
const handleMessageDelete = React.useCallback((messageId: DMessageId) => {
|
||||
props.conversationHandler?.messagesDelete([messageId]);
|
||||
}, [props.conversationHandler]);
|
||||
conversationHandler?.messagesDelete([messageId]);
|
||||
}, [conversationHandler]);
|
||||
|
||||
const handleMessageAppendFragment = React.useCallback((messageId: DMessageId, fragment: DMessageFragment) => {
|
||||
props.conversationHandler?.messageFragmentAppend(messageId, fragment, false, false);
|
||||
}, [props.conversationHandler]);
|
||||
conversationHandler?.messageFragmentAppend(messageId, fragment, false, false);
|
||||
}, [conversationHandler]);
|
||||
|
||||
const handleMessageDeleteFragment = React.useCallback((messageId: DMessageId, fragmentId: DMessageFragmentId) => {
|
||||
props.conversationHandler?.messageFragmentDelete(messageId, fragmentId, false, true);
|
||||
}, [props.conversationHandler]);
|
||||
conversationHandler?.messageFragmentDelete(messageId, fragmentId, false, true);
|
||||
}, [conversationHandler]);
|
||||
|
||||
const handleMessageReplaceFragment = React.useCallback((messageId: DMessageId, fragmentId: DMessageFragmentId, newFragment: DMessageFragment) => {
|
||||
props.conversationHandler?.messageFragmentReplace(messageId, fragmentId, newFragment, false);
|
||||
}, [props.conversationHandler]);
|
||||
conversationHandler?.messageFragmentReplace(messageId, fragmentId, newFragment, true);
|
||||
}, [conversationHandler]);
|
||||
|
||||
const handleMessageToggleUserFlag = React.useCallback((messageId: DMessageId, userFlag: DMessageUserFlag, _maxPerConversation?: number) => {
|
||||
props.conversationHandler?.messageToggleUserFlag(messageId, userFlag, true /* touch */);
|
||||
conversationHandler?.messageToggleUserFlag(messageId, userFlag, true /* touch */);
|
||||
// Note: we don't support 'maxPerConversation' yet, which is supposed to turn off the flag from the beginning if it's too numerous
|
||||
// if (_maxPerConversation) {
|
||||
// ...
|
||||
// }
|
||||
}, [props.conversationHandler]);
|
||||
}, [conversationHandler]);
|
||||
|
||||
const handleAddInReferenceTo = React.useCallback((item: DMetaReferenceItem) => {
|
||||
props.conversationHandler?.overlayActions.addInReferenceTo(item);
|
||||
}, [props.conversationHandler]);
|
||||
conversationHandler?.overlayActions.addInReferenceTo(item);
|
||||
}, [conversationHandler]);
|
||||
|
||||
const handleTextDiagram = React.useCallback(async (messageId: DMessageId, text: string) => {
|
||||
conversationId && onTextDiagram({ conversationId: conversationId, messageId, text });
|
||||
@@ -213,16 +211,29 @@ export function ChatMessageList(props: {
|
||||
}, [capabilityHasT2I, conversationId, onTextImagine]);
|
||||
|
||||
const handleTextSpeak = React.useCallback(async (text: string) => {
|
||||
if (!isSpeakable)
|
||||
return optimaOpenPreferences('voice');
|
||||
// sandwich the speaking with the indicator
|
||||
setIsSpeaking(true);
|
||||
await onTextSpeak(text);
|
||||
const result = await speakText(text, undefined, { label: 'Chat speak' });
|
||||
setIsSpeaking(false);
|
||||
}, [isSpeakable, onTextSpeak]);
|
||||
|
||||
// open voice preferences
|
||||
if (!result.success && (result.errorType === 'tts-no-engine' || result.errorType === 'tts-unconfigured'))
|
||||
optimaOpenPreferences('voice');
|
||||
}, []);
|
||||
|
||||
|
||||
// operate on the local selection set
|
||||
|
||||
const areAllSelectedMessagesHidden = React.useMemo(() => {
|
||||
if (selectedMessages.size === 0) return false;
|
||||
for (const messageId of selectedMessages) {
|
||||
const message = conversationMessages.find(m => m.id === messageId);
|
||||
if (message && !messageHasUserFlag(message, MESSAGE_FLAG_AIX_SKIP))
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}, [selectedMessages, conversationMessages]);
|
||||
|
||||
const handleSelectAll = (selected: boolean) => {
|
||||
const newSelected = new Set<string>();
|
||||
if (selected)
|
||||
@@ -238,15 +249,15 @@ export function ChatMessageList(props: {
|
||||
};
|
||||
|
||||
const handleSelectionDelete = React.useCallback(() => {
|
||||
props.conversationHandler?.messagesDelete(Array.from(selectedMessages));
|
||||
conversationHandler?.messagesDelete(Array.from(selectedMessages));
|
||||
setSelectedMessages(new Set());
|
||||
}, [props.conversationHandler, selectedMessages]);
|
||||
}, [conversationHandler, selectedMessages]);
|
||||
|
||||
const handleSelectionHide = React.useCallback(() => {
|
||||
const handleSelectionToggleVisibility = React.useCallback(() => {
|
||||
for (let selectedMessage of Array.from(selectedMessages))
|
||||
props.conversationHandler?.messageSetUserFlag(selectedMessage, MESSAGE_FLAG_AIX_SKIP, true, true);
|
||||
conversationHandler?.messageSetUserFlag(selectedMessage, MESSAGE_FLAG_AIX_SKIP, !areAllSelectedMessagesHidden, true);
|
||||
setSelectedMessages(new Set());
|
||||
}, [props.conversationHandler, selectedMessages]);
|
||||
}, [conversationHandler, selectedMessages, areAllSelectedMessagesHidden]);
|
||||
|
||||
const { isMessageSelectionMode, setIsMessageSelectionMode } = props;
|
||||
|
||||
@@ -282,6 +293,10 @@ export function ChatMessageList(props: {
|
||||
p: 0,
|
||||
...props.sx,
|
||||
|
||||
// we added these after removing the minSize={20} (%) from the containing panel.
|
||||
minWidth: '18rem',
|
||||
// minHeight: '180px', // not need for this, as it's already an overflow scrolling container, so one can reduce it to a pixel
|
||||
|
||||
// fix for the double-border on the last message (one by the composer, one to the bottom of the message)
|
||||
// marginBottom: '-1px',
|
||||
|
||||
@@ -309,9 +324,7 @@ export function ChatMessageList(props: {
|
||||
);
|
||||
|
||||
return (
|
||||
<List role='chat-messages-list' sx={listSx}>
|
||||
|
||||
{optionalTranslationWarning}
|
||||
<List role='chat-messages-list' sx={listSx} onCopy={clipboardInterceptCtrlCForCleanup}>
|
||||
|
||||
{props.isMessageSelectionMode && (
|
||||
<MessagesSelectionHeader
|
||||
@@ -320,7 +333,8 @@ export function ChatMessageList(props: {
|
||||
onClose={() => props.setIsMessageSelectionMode(false)}
|
||||
onSelectAll={handleSelectAll}
|
||||
onDeleteMessages={handleSelectionDelete}
|
||||
onHideMessages={handleSelectionHide}
|
||||
onToggleVisibility={handleSelectionToggleVisibility}
|
||||
areAllMessagesHidden={areAllSelectedMessagesHidden}
|
||||
/>
|
||||
)}
|
||||
|
||||
@@ -365,7 +379,7 @@ export function ChatMessageList(props: {
|
||||
onMessageTruncate={handleMessageTruncate}
|
||||
onTextDiagram={handleTextDiagram}
|
||||
onTextImagine={capabilityHasT2I ? handleTextImagine : undefined}
|
||||
onTextSpeak={isSpeakable ? handleTextSpeak : undefined}
|
||||
onTextSpeak={handleTextSpeak}
|
||||
/>
|
||||
|
||||
);
|
||||
|
||||
@@ -13,7 +13,7 @@ import { ScaledTextBlockRenderer } from '~/modules/blocks/ScaledTextBlockRendere
|
||||
import type { DEphemeral } from '~/common/chat-overlay/store-perchat-ephemerals_slice';
|
||||
import { ConversationHandler } from '~/common/chat-overlay/ConversationHandler';
|
||||
import { adjustContentScaling, ContentScaling, lineHeightChatTextMd } from '~/common/app.theme';
|
||||
import { useUIPreferencesStore } from '~/common/state/store-ui';
|
||||
import { useUIPreferencesStore } from '~/common/stores/store-ui';
|
||||
|
||||
|
||||
// State Pane
|
||||
|
||||
@@ -0,0 +1,194 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, IconButton, Sheet } from '@mui/joy';
|
||||
import ClearIcon from '@mui/icons-material/Clear';
|
||||
import DeleteForeverIcon from '@mui/icons-material/DeleteForever';
|
||||
import EditRoundedIcon from '@mui/icons-material/EditRounded';
|
||||
import OpenInFullIcon from '@mui/icons-material/OpenInFull';
|
||||
|
||||
import type { DConversationId } from '~/common/stores/chat/chat.conversation';
|
||||
import { InlineTextarea } from '~/common/components/InlineTextarea';
|
||||
import { TooltipOutlined } from '~/common/components/TooltipOutlined';
|
||||
import { useConversationTitle } from '~/common/stores/chat/hooks/useConversationTitle';
|
||||
|
||||
import { panesManagerActions } from './panes/store-panes-manager';
|
||||
|
||||
|
||||
// configuration
|
||||
const ENABLE_DELETE = false;
|
||||
|
||||
|
||||
const _styles = {
|
||||
tileBar: {
|
||||
position: 'absolute',
|
||||
top: 0,
|
||||
left: '50%',
|
||||
transform: 'translateX(-50%)',
|
||||
zIndex: 10,
|
||||
padding: '0 0.125rem 0.125rem',
|
||||
fontSize: 'sm',
|
||||
fontWeight: 'md',
|
||||
borderBottomLeftRadius: '8px',
|
||||
borderBottomRightRadius: '8px',
|
||||
// boxShadow: 'xs',
|
||||
// border: '1px solid',
|
||||
// borderColor: 'background.popup',
|
||||
borderTop: 'none',
|
||||
maxWidth: '78%',
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
gap: 1,
|
||||
} as const,
|
||||
titleBarIncognito: {
|
||||
backgroundImage: 'repeating-linear-gradient(45deg, rgba(0,0,0,0.1), rgba(0,0,0,0.1) 10px, transparent 10px, transparent 20px)',
|
||||
backgroundColor: 'neutral.solidBg',
|
||||
} as const,
|
||||
title: {
|
||||
flex: 1,
|
||||
overflow: 'hidden',
|
||||
textOverflow: 'ellipsis',
|
||||
whiteSpace: 'nowrap',
|
||||
cursor: 'pointer',
|
||||
minWidth: '2.75rem',
|
||||
textAlign: 'center',
|
||||
} as const,
|
||||
toolButton: {
|
||||
'--IconButton-size': '1.5rem',
|
||||
backgroundColor: 'transparent',
|
||||
opacity: 0.5,
|
||||
transition: 'opacity 0.1s',
|
||||
'&:hover': {
|
||||
opacity: 1,
|
||||
},
|
||||
} as const,
|
||||
toolIcon: {} as const,
|
||||
toolIconLg: {
|
||||
fontSize: 'lg',
|
||||
} as const,
|
||||
} as const;
|
||||
|
||||
|
||||
export function PaneTitleOverlay(props: {
|
||||
paneIdx: number,
|
||||
conversationId: DConversationId | null,
|
||||
isFocused: boolean,
|
||||
isIncognito: boolean,
|
||||
onConversationDelete: (conversationIds: DConversationId[], bypassConfirmation: boolean) => void,
|
||||
}) {
|
||||
|
||||
// state
|
||||
const [editingTitle, setEditingTitle] = React.useState(false);
|
||||
|
||||
// external state
|
||||
const { title, setUserTitle } = useConversationTitle(props.conversationId);
|
||||
// if (!title || title?.length < 3)
|
||||
// return null;
|
||||
|
||||
|
||||
// close tabs handlers
|
||||
|
||||
const handleCloseThis = React.useCallback(() => {
|
||||
panesManagerActions().removePane(props.paneIdx);
|
||||
}, [props.paneIdx]);
|
||||
|
||||
const handleCloseOthers = React.useCallback(() => {
|
||||
panesManagerActions().removeOtherPanes(props.paneIdx);
|
||||
}, [props.paneIdx]);
|
||||
|
||||
|
||||
// title handles
|
||||
|
||||
const handleTitleEditBegin = React.useCallback(() => {
|
||||
setEditingTitle(true);
|
||||
}, []);
|
||||
|
||||
const handleTitleEditChange = React.useCallback((newTitle: string) => {
|
||||
setUserTitle(newTitle);
|
||||
setEditingTitle(false);
|
||||
}, [setUserTitle]);
|
||||
|
||||
const handleTitleEditEnd = React.useCallback(() => {
|
||||
setEditingTitle(false);
|
||||
}, []);
|
||||
|
||||
|
||||
// delete handlers
|
||||
|
||||
const { onConversationDelete } = props;
|
||||
|
||||
const handleDeleteClicked = React.useCallback((event: React.MouseEvent) => {
|
||||
event.stopPropagation();
|
||||
if (props.conversationId)
|
||||
onConversationDelete([props.conversationId], event.shiftKey);
|
||||
}, [onConversationDelete, props.conversationId]);
|
||||
|
||||
|
||||
// don't render if not focused
|
||||
// if (!props.isFocused)
|
||||
// return null;
|
||||
|
||||
const hasTitle = title && title.length > 0;
|
||||
const color = props.isFocused ? 'primary' : 'neutral';
|
||||
const variantO = props.isFocused ? 'solid' : 'outlined';
|
||||
const variantP = props.isFocused ? 'solid' : 'plain';
|
||||
|
||||
return (
|
||||
<Sheet
|
||||
color={color}
|
||||
variant={variantO}
|
||||
sx={!props.isIncognito ? _styles.tileBar : { ..._styles.tileBar, ..._styles.titleBarIncognito }}
|
||||
>
|
||||
{/* Close Others*/}
|
||||
{/*<TooltipOutlined title='Close Other Tabs'>*/}
|
||||
{!editingTitle && <IconButton title='Close Other Tabs' size='sm' color={color} variant={variantP} onClick={handleCloseOthers} sx={_styles.toolButton}>
|
||||
<OpenInFullIcon sx={_styles.toolIcon} />
|
||||
</IconButton>}
|
||||
{/*</TooltipOutlined>*/}
|
||||
|
||||
{/* Title */}
|
||||
{editingTitle ? (
|
||||
<InlineTextarea
|
||||
initialText={title || ''}
|
||||
placeholder='Chat title...'
|
||||
invertedColors
|
||||
centerText
|
||||
onEdit={handleTitleEditChange}
|
||||
onCancel={handleTitleEditEnd}
|
||||
sx={{
|
||||
// flexGrow: 1,
|
||||
// minWidth: 120,
|
||||
mx: { md: 1 },
|
||||
}}
|
||||
/>
|
||||
) : !!props.conversationId && <>
|
||||
{hasTitle && <Box sx={_styles.title} onClick={handleTitleEditBegin}>
|
||||
{title}
|
||||
</Box>}
|
||||
{!hasTitle && <Box fontStyle='italic' onClick={handleTitleEditBegin}>
|
||||
untitled
|
||||
</Box>}
|
||||
{!hasTitle && <TooltipOutlined title='Edit Chat Title'>
|
||||
<IconButton title='' size='sm' color={color} variant={variantP} onClick={handleTitleEditBegin} sx={_styles.toolButton}>
|
||||
<EditRoundedIcon sx={_styles.toolIcon} />
|
||||
</IconButton>
|
||||
</TooltipOutlined>}
|
||||
</>}
|
||||
|
||||
{/* Delete This */}
|
||||
{ENABLE_DELETE && hasTitle && !!props.conversationId && (
|
||||
<TooltipOutlined title='Delete Chat (Shift+Click to bypass confirmation)'>
|
||||
<IconButton size='sm' variant={variantP} onClick={handleDeleteClicked} sx={_styles.toolButton}>
|
||||
<DeleteForeverIcon />
|
||||
</IconButton>
|
||||
</TooltipOutlined>
|
||||
)}
|
||||
|
||||
{/* Close This */}
|
||||
{/*<TooltipOutlined title='Close'>*/}
|
||||
{!editingTitle && <IconButton title='Close Tab' size='sm' color={color} variant={variantP} onClick={handleCloseThis} sx={_styles.toolButton}>
|
||||
<ClearIcon sx={_styles.toolIconLg} />
|
||||
</IconButton>}
|
||||
{/*</TooltipOutlined>*/}
|
||||
</Sheet>
|
||||
);
|
||||
}
|
||||
@@ -1,19 +1,17 @@
|
||||
import * as React from 'react';
|
||||
import { useShallow } from 'zustand/react/shallow';
|
||||
|
||||
import type { SxProps } from '@mui/joy/styles/types';
|
||||
import { Box, IconButton, styled, Typography } from '@mui/joy';
|
||||
import { Box, IconButton, Typography } from '@mui/joy';
|
||||
import CloseRoundedIcon from '@mui/icons-material/CloseRounded';
|
||||
import ExpandLessIcon from '@mui/icons-material/ExpandLess';
|
||||
import MinimizeIcon from '@mui/icons-material/Minimize';
|
||||
|
||||
// import { isMacUser } from '~/common/util/pwaUtils';
|
||||
import type { ShortcutObject } from '~/common/components/shortcuts/useGlobalShortcuts';
|
||||
import { ShortcutKey, ShortcutObject } from '~/common/components/shortcuts/useGlobalShortcuts';
|
||||
import { ConfirmationModal } from '~/common/components/modals/ConfirmationModal';
|
||||
import { GoodTooltip } from '~/common/components/GoodTooltip';
|
||||
import { useGlobalShortcutsStore } from '~/common/components/shortcuts/store-global-shortcuts';
|
||||
import { useOverlayComponents } from '~/common/layout/overlays/useOverlayComponents';
|
||||
import { useUXLabsStore } from '~/common/state/store-ux-labs';
|
||||
import { useUXLabsStore } from '~/common/stores/store-ux-labs';
|
||||
|
||||
|
||||
// configuration
|
||||
@@ -27,12 +25,92 @@ const hideButtonTooltip = (
|
||||
</Box>
|
||||
);
|
||||
|
||||
const hideButtonSx: SxProps = {
|
||||
'--IconButton-size': '28px',
|
||||
'--Icon-fontSize': '16px',
|
||||
'--Icon-color': 'var(--joy-palette-text-tertiary)',
|
||||
mr: -0.5,
|
||||
};
|
||||
const _styles = {
|
||||
|
||||
bar: {
|
||||
borderBottom: '1px solid',
|
||||
// borderBottomColor: 'var(--joy-palette-divider)',
|
||||
borderBottomColor: 'rgba(var(--joy-palette-neutral-mainChannel) / 0.1)',
|
||||
// borderTopColor: 'rgba(var(--joy-palette-neutral-mainChannel, 99 107 116) / 0.4)',
|
||||
// backgroundColor: 'var(--joy-palette-background-surface)',
|
||||
// paddingBlock: '0.25rem',
|
||||
paddingInline: '0.5rem',
|
||||
// layout
|
||||
display: 'flex',
|
||||
flexFlow: 'row nowrap',
|
||||
columnGap: '1.5rem', // space between shortcuts
|
||||
lineHeight: '1em',
|
||||
// animation: `${animateAppear} 0.3s ease-out`,
|
||||
// transition: 'all 0.2s ease',
|
||||
// '&:hover': {
|
||||
// backgroundColor: 'var(--joy-palette-background-level1)',
|
||||
// },
|
||||
} as const,
|
||||
|
||||
hideButton: {
|
||||
'--IconButton-size': '28px',
|
||||
'--Icon-fontSize': '16px',
|
||||
'--Icon-color': 'var(--joy-palette-text-tertiary)',
|
||||
mr: -0.5,
|
||||
} as const,
|
||||
|
||||
shortcut: {
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
whiteSpace: 'nowrap',
|
||||
gap: '2px', // space between modifiers
|
||||
marginBlock: '0.25rem',
|
||||
// transition: 'transform 0.2s ease',
|
||||
// '&:hover': {
|
||||
// transform: 'scale(1.05)',
|
||||
// },
|
||||
'&:hover > div': {
|
||||
backgroundColor: 'background.level1',
|
||||
} as const,
|
||||
cursor: 'pointer',
|
||||
[`&[aria-disabled="true"]`]: {
|
||||
opacity: 0.5,
|
||||
pointerEvents: 'none',
|
||||
} as const,
|
||||
} as const,
|
||||
|
||||
itemKeyGroup: {
|
||||
fontSize: 'xs',
|
||||
fontWeight: 'md',
|
||||
outline: '1px solid',
|
||||
outlineColor: 'neutral.outlinedBorder',
|
||||
borderRadius: 'xs',
|
||||
// backgroundColor: 'var(--joy-palette-neutral-outlinedBorder)',
|
||||
backgroundColor: 'background.popup',
|
||||
// boxShadow: 'inset 2px 0px 4px -2px var(--joy-palette-background-backdrop)',
|
||||
boxShadow: 'xs',
|
||||
// minWidth: '1rem',
|
||||
paddingBlock: '2px',
|
||||
paddingInline: '1px',
|
||||
// pointerEvents: 'none',
|
||||
cursor: 'pointer',
|
||||
transition: 'background-color 1s ease',
|
||||
display: 'flex',
|
||||
textAlign: 'center',
|
||||
// Remove the gap and use dividers instead
|
||||
gap: 0,
|
||||
'& > span': {
|
||||
position: 'relative',
|
||||
paddingInline: '4px',
|
||||
minWidth: '1rem',
|
||||
'&:not(:last-child)': {
|
||||
borderRight: '1px solid',
|
||||
borderRightColor: 'neutral.outlinedBorder',
|
||||
},
|
||||
},
|
||||
} as const,
|
||||
|
||||
itemIcon: {
|
||||
fontSize: 'md',
|
||||
} as const,
|
||||
|
||||
} as const;
|
||||
|
||||
|
||||
// const animateAppear = keyframes`
|
||||
// from {
|
||||
@@ -45,64 +123,6 @@ const hideButtonSx: SxProps = {
|
||||
// }
|
||||
// `;
|
||||
|
||||
const StatusBarContainer = styled(Box)({
|
||||
borderBottom: '1px solid',
|
||||
// borderBottomColor: 'var(--joy-palette-divider)',
|
||||
borderBottomColor: 'rgba(var(--joy-palette-neutral-mainChannel) / 0.1)',
|
||||
// borderTopColor: 'rgba(var(--joy-palette-neutral-mainChannel, 99 107 116) / 0.4)',
|
||||
// backgroundColor: 'var(--joy-palette-background-surface)',
|
||||
// paddingBlock: '0.25rem',
|
||||
paddingInline: '0.5rem',
|
||||
// layout
|
||||
display: 'flex',
|
||||
flexFlow: 'row nowrap',
|
||||
columnGap: '1.5rem', // space between shortcuts
|
||||
lineHeight: '1em',
|
||||
// animation: `${animateAppear} 0.3s ease-out`,
|
||||
// transition: 'all 0.2s ease',
|
||||
// '&:hover': {
|
||||
// backgroundColor: 'var(--joy-palette-background-level1)',
|
||||
// },
|
||||
});
|
||||
|
||||
const ShortcutContainer = styled(Box)({
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
whiteSpace: 'nowrap',
|
||||
gap: '2px', // space between modifiers
|
||||
marginBlock: '0.25rem',
|
||||
// transition: 'transform 0.2s ease',
|
||||
// '&:hover': {
|
||||
// transform: 'scale(1.05)',
|
||||
// },
|
||||
'&:hover > div': {
|
||||
backgroundColor: 'var(--joy-palette-background-level1)',
|
||||
},
|
||||
cursor: 'pointer',
|
||||
[`&[aria-disabled="true"]`]: {
|
||||
opacity: 0.5,
|
||||
pointerEvents: 'none',
|
||||
}
|
||||
});
|
||||
|
||||
const ShortcutKey = styled(Box)({
|
||||
fontSize: 'var(--joy-fontSize-xs)',
|
||||
fontWeight: 'var(--joy-fontWeight-md)',
|
||||
border: '1px solid',
|
||||
borderColor: 'var(--joy-palette-neutral-outlinedBorder)',
|
||||
borderRadius: 'var(--joy-radius-xs)',
|
||||
// backgroundColor: 'var(--joy-palette-neutral-outlinedBorder)',
|
||||
backgroundColor: 'var(--joy-palette-background-popup)',
|
||||
// boxShadow: 'inset 2px 0px 4px -2px var(--joy-palette-background-backdrop)',
|
||||
boxShadow: 'var(--joy-shadow-xs)',
|
||||
// minWidth: '1rem',
|
||||
paddingBlock: '1px',
|
||||
paddingInline: '4px',
|
||||
// pointerEvents: 'none',
|
||||
cursor: 'pointer',
|
||||
transition: 'background-color 1s ease',
|
||||
});
|
||||
|
||||
|
||||
// Display mac-style shortcuts on windows as well
|
||||
const displayMacModifiers = true;
|
||||
@@ -118,6 +138,8 @@ function _platformAwareModifier(symbol: 'Ctrl' | 'Alt' | 'Shift') {
|
||||
}
|
||||
}
|
||||
|
||||
const ShortcutItemMemo = React.memo(ShortcutItem);
|
||||
|
||||
function ShortcutItem(props: { shortcut: ShortcutObject }) {
|
||||
|
||||
const handleClicked = React.useCallback(() => {
|
||||
@@ -126,17 +148,24 @@ function ShortcutItem(props: { shortcut: ShortcutObject }) {
|
||||
}, [props.shortcut]);
|
||||
|
||||
return (
|
||||
<ShortcutContainer onClick={!props.shortcut.disabled ? handleClicked : undefined} aria-disabled={props.shortcut.disabled}>
|
||||
{!!props.shortcut.ctrl && <ShortcutKey>{_platformAwareModifier('Ctrl')}</ShortcutKey>}
|
||||
{!!props.shortcut.shift && <ShortcutKey>{_platformAwareModifier('Shift')}</ShortcutKey>}
|
||||
{/*{!!props.shortcut.altForNonMac && <ShortcutKey onClick={handleClicked}>{_platformAwareModifier('Alt')}</ShortcutKey>}*/}
|
||||
<ShortcutKey>{props.shortcut.key === 'Escape' ? 'Esc' : props.shortcut.key === 'Enter' ? '↵' : props.shortcut.key.toUpperCase()}</ShortcutKey>
|
||||
<Box
|
||||
onClick={!props.shortcut.disabled ? handleClicked : undefined}
|
||||
aria-disabled={props.shortcut.disabled}
|
||||
sx={_styles.shortcut}
|
||||
>
|
||||
<Box sx={_styles.itemKeyGroup}>
|
||||
{!!props.shortcut.ctrl && <span>{_platformAwareModifier('Ctrl')}</span>}
|
||||
{!!props.shortcut.shift && <span>{_platformAwareModifier('Shift')}</span>}
|
||||
{/*{!!props.shortcut.altForNonMac && <span>{_platformAwareModifier('Alt')}</span>}*/}
|
||||
<span>{props.shortcut.key === 'Escape' ? 'Esc' : props.shortcut.key === 'Enter' ? '↵' : props.shortcut.key.toUpperCase()}</span>
|
||||
</Box>
|
||||
<Typography level='body-xs'>{props.shortcut.description}</Typography>
|
||||
{props.shortcut.endDecoratorIcon && <props.shortcut.endDecoratorIcon sx={{ fontSize: 'md' }} />}
|
||||
</ShortcutContainer>
|
||||
{!!props.shortcut.endDecoratorIcon && <props.shortcut.endDecoratorIcon sx={_styles.itemIcon} />}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
export const StatusBarMemo = React.memo(StatusBar);
|
||||
|
||||
export function StatusBar(props: { toggleMinimized?: () => void, isMinimized?: boolean }) {
|
||||
|
||||
@@ -148,18 +177,34 @@ export function StatusBar(props: { toggleMinimized?: () => void, isMinimized?: b
|
||||
// external state
|
||||
const labsShowShortcutBar = useUXLabsStore(state => state.labsShowShortcutBar);
|
||||
const shortcuts = useGlobalShortcutsStore(useShallow(state => {
|
||||
// get visible shortcuts
|
||||
let visibleShortcuts = !labsShowShortcutBar ? [] : state.getAllShortcuts().filter(shortcut => !!shortcut.description);
|
||||
|
||||
// filter by highest level if levels are present
|
||||
const maxLevel = Math.max(...visibleShortcuts.map(s => s.level ?? 0));
|
||||
if (maxLevel > 0)
|
||||
visibleShortcuts = visibleShortcuts.filter(s => s.level === maxLevel);
|
||||
|
||||
visibleShortcuts.sort((a, b) => {
|
||||
// if they don't have a 'shift', they are sorted first
|
||||
if (a.shift !== b.shift)
|
||||
return a.shift ? 1 : -1;
|
||||
// (Hack) If the description is 'Beam', it goes last
|
||||
if (a.description === 'Beam Edit')
|
||||
return 1;
|
||||
// alphabetical for the rest
|
||||
// 1. First by level
|
||||
if ((a.level ?? 0) !== (b.level ?? 0))
|
||||
return (b.level ?? 0) - (a.level ?? 0);
|
||||
|
||||
// 2. Then by modifiers presence (no modifiers first)
|
||||
const aModifiers = (a.ctrl ? 1 : 0) + (a.shift ? 1 : 0);
|
||||
const bModifiers = (b.ctrl ? 1 : 0) + (b.shift ? 1 : 0);
|
||||
if (aModifiers !== bModifiers)
|
||||
return aModifiers - bModifiers;
|
||||
|
||||
// 3a. Special case for ShortcutKey.Esc, at the beginning
|
||||
if (a.key === ShortcutKey.Esc) return -1;
|
||||
if (b.key === ShortcutKey.Esc) return 1;
|
||||
|
||||
// 3. Special case for 'Beam Edit'
|
||||
if (a.description === 'Beam Edit') return 1;
|
||||
if (b.description === 'Beam Edit') return -1;
|
||||
|
||||
// 4. Finally alphabetically by key
|
||||
return a.key.localeCompare(b.key);
|
||||
});
|
||||
return visibleShortcuts;
|
||||
@@ -202,27 +247,30 @@ export function StatusBar(props: { toggleMinimized?: () => void, isMinimized?: b
|
||||
return null;
|
||||
|
||||
return (
|
||||
<StatusBarContainer aria-label='Status bar'>
|
||||
<Box
|
||||
aria-label='Shortcuts and status bar'
|
||||
sx={_styles.bar}
|
||||
>
|
||||
|
||||
{(!props.toggleMinimized || !COMPOSER_ENABLE_MINIMIZE) && !props.isMinimized ? (
|
||||
// Close Button
|
||||
<GoodTooltip variantOutlined arrow placement='top' title={hideButtonTooltip}>
|
||||
<IconButton size='sm' sx={hideButtonSx} onClick={handleHideShortcuts}>
|
||||
<IconButton size='sm' onClick={handleHideShortcuts} sx={_styles.hideButton}>
|
||||
<CloseRoundedIcon />
|
||||
</IconButton>
|
||||
</GoodTooltip>
|
||||
) : (
|
||||
// Minimize / Maximize Button - note the Maximize icon would be more correct, but also less discoverable
|
||||
<IconButton size='sm' sx={hideButtonSx} onClick={props.toggleMinimized}>
|
||||
<IconButton size='sm' onClick={props.toggleMinimized} sx={_styles.hideButton}>
|
||||
{props.isMinimized ? <ExpandLessIcon /> : <MinimizeIcon />}
|
||||
</IconButton>
|
||||
)}
|
||||
|
||||
{/* Show all shortcuts */}
|
||||
{shortcuts.map((shortcut, idx) => (
|
||||
<ShortcutItem key={shortcut.key + idx} shortcut={shortcut} />
|
||||
<ShortcutItemMemo key={shortcut.key + idx} shortcut={shortcut} />
|
||||
))}
|
||||
|
||||
</StatusBarContainer>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -127,7 +127,7 @@ export function CameraCaptureModal(props: {
|
||||
|
||||
const handleVideoDownloadClicked = React.useCallback(async () => {
|
||||
if (!videoRef.current) return;
|
||||
await downloadVideoFrame(videoRef.current, 'camera', 'image/jpeg', 0.98);
|
||||
await downloadVideoFrame(videoRef.current, 'camera', 'image/jpeg', 0.98).catch(alert);
|
||||
}, [videoRef]);
|
||||
|
||||
|
||||
@@ -220,7 +220,7 @@ export function CameraCaptureModal(props: {
|
||||
backdropFilter: 'none', // using none because this is heavy
|
||||
// backdropFilter: 'blur(4px)',
|
||||
// backgroundColor: 'rgba(11 13 14 / 0.75)',
|
||||
backgroundColor: 'rgba(var(--joy-palette-neutral-darkChannel) / 0.5)',
|
||||
backgroundColor: 'rgba(var(--joy-palette-neutral-darkChannel) / 0.67)',
|
||||
},
|
||||
},
|
||||
}}
|
||||
|
||||
@@ -2,12 +2,10 @@ import * as React from 'react';
|
||||
import { useShallow } from 'zustand/react/shallow';
|
||||
import type { FileWithHandle } from 'browser-fs-access';
|
||||
|
||||
import { Box, Button, ButtonGroup, Card, Dropdown, Grid, IconButton, Menu, MenuButton, MenuItem, Textarea, Tooltip, Typography } from '@mui/joy';
|
||||
import { Box, Button, ButtonGroup, Card, Dropdown, Grid, IconButton, Menu, MenuButton, MenuItem, Textarea, Typography } from '@mui/joy';
|
||||
import { ColorPaletteProp, SxProps, VariantProp } from '@mui/joy/styles/types';
|
||||
import AddCircleOutlineIcon from '@mui/icons-material/AddCircleOutline';
|
||||
import AutoAwesomeIcon from '@mui/icons-material/AutoAwesome';
|
||||
import ExpandLessIcon from '@mui/icons-material/ExpandLess';
|
||||
import FormatPaintTwoToneIcon from '@mui/icons-material/FormatPaintTwoTone';
|
||||
import PsychologyIcon from '@mui/icons-material/Psychology';
|
||||
import SendIcon from '@mui/icons-material/Send';
|
||||
import StopOutlinedIcon from '@mui/icons-material/StopOutlined';
|
||||
@@ -19,35 +17,35 @@ import { useChatAutoSuggestAttachmentPrompts, useChatMicTimeoutMsValue } from '.
|
||||
import { useAgiAttachmentPrompts } from '~/modules/aifn/agiattachmentprompts/useAgiAttachmentPrompts';
|
||||
import { useBrowseCapability } from '~/modules/browse/store-module-browsing';
|
||||
|
||||
import { DLLM, LLM_IF_OAI_Vision } from '~/common/stores/llms/llms.types';
|
||||
import { DLLM, getLLMContextTokens, LLM_IF_OAI_Vision } from '~/common/stores/llms/llms.types';
|
||||
import { llmChatPricing_adjusted } from '~/common/stores/llms/llms.pricing';
|
||||
import { AudioGenerator } from '~/common/util/audio/AudioGenerator';
|
||||
import { AudioPlayer } from '~/common/util/audio/AudioPlayer';
|
||||
import { ButtonAttachFilesMemo, openFileForAttaching } from '~/common/components/ButtonAttachFiles';
|
||||
import { ChatBeamIcon } from '~/common/components/icons/ChatBeamIcon';
|
||||
import { ConfirmationModal } from '~/common/components/modals/ConfirmationModal';
|
||||
import { ConversationsManager } from '~/common/chat-overlay/ConversationsManager';
|
||||
import { DMessageMetadata, DMetaReferenceItem, messageFragmentsReduceText } from '~/common/stores/chat/chat.message';
|
||||
import { DMessageId, DMessageMetadata, DMetaReferenceItem, messageFragmentsReduceText } from '~/common/stores/chat/chat.message';
|
||||
import { ShortcutKey, ShortcutObject, useGlobalShortcuts } from '~/common/components/shortcuts/useGlobalShortcuts';
|
||||
import { addSnackbar } from '~/common/components/snackbar/useSnackbarsStore';
|
||||
import { animationEnterBelow } from '~/common/util/animUtils';
|
||||
import { browserSpeechRecognitionCapability, PLACEHOLDER_INTERIM_TRANSCRIPT, SpeechResult, useSpeechRecognition } from '~/common/components/speechrecognition/useSpeechRecognition';
|
||||
import { DConversationId } from '~/common/stores/chat/chat.conversation';
|
||||
import { copyToClipboard, supportsClipboardRead } from '~/common/util/clipboardUtils';
|
||||
import { createTextContentFragment, DMessageAttachmentFragment, DMessageContentFragment, duplicateDMessageFragmentsNoVoid } from '~/common/stores/chat/chat.fragments';
|
||||
import { estimateTextTokens, glueForMessageTokens, marshallWrapDocFragments } from '~/common/stores/chat/chat.tokens';
|
||||
import { createTextContentFragment, DMessageAttachmentFragment, DMessageContentFragment, duplicateDMessageFragments } from '~/common/stores/chat/chat.fragments';
|
||||
import { glueForMessageTokens, marshallWrapDocFragments } from '~/common/stores/chat/chat.tokens';
|
||||
import { isValidConversation, useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { getModelParameterValueOrThrow } from '~/common/stores/llms/llms.parameters';
|
||||
import { launchAppCall, removeQueryParam, useRouterQuery } from '~/common/app.routes';
|
||||
import { lineHeightTextareaMd } from '~/common/app.theme';
|
||||
import { lineHeightTextareaMd, themeBgAppChatComposer } from '~/common/app.theme';
|
||||
import { optimaOpenPreferences } from '~/common/layout/optima/useOptima';
|
||||
import { platformAwareKeystrokes } from '~/common/components/KeyStroke';
|
||||
import { supportsScreenCapture } from '~/common/util/screenCaptureUtils';
|
||||
import { useChatComposerOverlayStore } from '~/common/chat-overlay/store-perchat_vanilla';
|
||||
import { useComposerStartupText, useLogicSherpaStore } from '~/common/logic/store-logic-sherpa';
|
||||
import { useDebouncer } from '~/common/components/useDebouncer';
|
||||
import { useOverlayComponents } from '~/common/layout/overlays/useOverlayComponents';
|
||||
import { useUICounter, useUIPreferencesStore } from '~/common/state/store-ui';
|
||||
import { useUXLabsStore } from '~/common/state/store-ux-labs';
|
||||
import { useUICounter, useUIPreferencesStore } from '~/common/stores/store-ui';
|
||||
import { useUXLabsStore } from '~/common/stores/store-ux-labs';
|
||||
|
||||
import type { ActileItem } from './actile/ActileProvider';
|
||||
import { providerAttachmentLabels } from './actile/providerAttachmentLabels';
|
||||
@@ -57,6 +55,7 @@ import { useActileManager } from './actile/useActileManager';
|
||||
|
||||
import type { AttachmentDraftId } from '~/common/attachment-drafts/attachment.types';
|
||||
import { LLMAttachmentDraftsAction, LLMAttachmentsList } from './llmattachments/LLMAttachmentsList';
|
||||
import { PhPaintBrush } from '~/common/components/icons/phosphor/PhPaintBrush';
|
||||
import { useAttachmentDrafts } from '~/common/attachment-drafts/useAttachmentDrafts';
|
||||
import { useLLMAttachmentDrafts } from './llmattachments/useLLMAttachmentDrafts';
|
||||
|
||||
@@ -65,23 +64,30 @@ import { chatExecuteModeCanAttach, useChatExecuteMode } from '../../execute-mode
|
||||
|
||||
import { ButtonAttachCameraMemo, useCameraCaptureModalDialog } from './buttons/ButtonAttachCamera';
|
||||
import { ButtonAttachClipboardMemo } from './buttons/ButtonAttachClipboard';
|
||||
import { ButtonAttachGoogleDriveMemo } from './buttons/ButtonAttachGoogleDrive';
|
||||
import { ButtonAttachScreenCaptureMemo } from './buttons/ButtonAttachScreenCapture';
|
||||
import { ButtonAttachWebMemo } from './buttons/ButtonAttachWeb';
|
||||
import { hasGoogleDriveCapability, useGoogleDrivePicker } from '~/common/attachment-drafts/useGoogleDrivePicker';
|
||||
import { ButtonBeamMemo } from './buttons/ButtonBeam';
|
||||
import { ButtonCallMemo } from './buttons/ButtonCall';
|
||||
import { ButtonGroupDrawRepeat } from './buttons/ButtonGroupDrawRepeat';
|
||||
import { ButtonMicContinuationMemo } from './buttons/ButtonMicContinuation';
|
||||
import { ButtonMicMemo } from './buttons/ButtonMic';
|
||||
import { ButtonMultiChatMemo } from './buttons/ButtonMultiChat';
|
||||
import { ButtonOptionsDraw } from './buttons/ButtonOptionsDraw';
|
||||
import { ComposerTextAreaActions } from './textarea/ComposerTextAreaActions';
|
||||
import { StatusBar } from '../StatusBar';
|
||||
import { ComposerTextAreaDrawActions } from './textarea/ComposerTextAreaDrawActions';
|
||||
import { StatusBarMemo } from '../StatusBar';
|
||||
import { TokenBadgeMemo } from './tokens/TokenBadge';
|
||||
import { TokenProgressbarMemo } from './tokens/TokenProgressbar';
|
||||
import { useComposerDragDrop } from './useComposerDragDrop';
|
||||
import { useTextTokenCount } from './tokens/useTextTokenCounter';
|
||||
import { useWebInputModal } from './WebInputModal';
|
||||
|
||||
|
||||
// configuration
|
||||
const zIndexComposerOverlayMic = 10;
|
||||
const SHOW_TIPS_AFTER_RELOADS = 25;
|
||||
|
||||
|
||||
const paddingBoxSx: SxProps = {
|
||||
@@ -101,20 +107,24 @@ const minimizedSx: SxProps = {
|
||||
export function Composer(props: {
|
||||
isMobile: boolean;
|
||||
chatLLM: DLLM | null;
|
||||
composerTextAreaRef: React.RefObject<HTMLTextAreaElement>;
|
||||
composerTextAreaRef: React.RefObject<HTMLTextAreaElement | null>;
|
||||
targetConversationId: DConversationId | null;
|
||||
capabilityHasT2I: boolean;
|
||||
capabilityHasT2IEdit: boolean;
|
||||
isMulticast: boolean | null;
|
||||
isDeveloperMode: boolean;
|
||||
onAction: (conversationId: DConversationId, chatExecuteMode: ChatExecuteMode, fragments: (DMessageContentFragment | DMessageAttachmentFragment)[], metadata?: DMessageMetadata) => boolean;
|
||||
onConversationBeamEdit: (conversationId: DConversationId, editMessageId?: DMessageId) => Promise<void>;
|
||||
onConversationsImportFromFiles: (files: File[]) => Promise<void>;
|
||||
onTextImagine: (conversationId: DConversationId, text: string) => void;
|
||||
setIsMulticast: (on: boolean) => void;
|
||||
onComposerHasContent: (hasContent: boolean) => void;
|
||||
sx?: SxProps;
|
||||
}) {
|
||||
|
||||
// state
|
||||
const [composeText, debouncedText, setComposeText] = useDebouncer('', 300, 1200, true);
|
||||
const [composeText, setComposeText] = React.useState('');
|
||||
const [drawRepeat, setDrawRepeat] = React.useState(1);
|
||||
const [micContinuation, setMicContinuation] = React.useState(false);
|
||||
const [speechInterimResult, setSpeechInterimResult] = React.useState<SpeechResult | null>(null);
|
||||
const [sendStarted, setSendStarted] = React.useState(false);
|
||||
@@ -135,12 +145,13 @@ export function Composer(props: {
|
||||
labsShowCost: state.labsShowCost,
|
||||
labsShowShortcutBar: state.labsShowShortcutBar,
|
||||
})));
|
||||
const timeToShowTips = useLogicSherpaStore(state => state.usageCount >= 5);
|
||||
const timeToShowTips = useLogicSherpaStore(state => state.usageCount >= SHOW_TIPS_AFTER_RELOADS);
|
||||
const { novel: explainShiftEnter, touch: touchShiftEnter } = useUICounter('composer-shift-enter');
|
||||
const { novel: explainAltEnter, touch: touchAltEnter } = useUICounter('composer-alt-enter');
|
||||
const { novel: explainCtrlEnter, touch: touchCtrlEnter } = useUICounter('composer-ctrl-enter');
|
||||
const [startupText, setStartupText] = useComposerStartupText();
|
||||
const enterIsNewline = useUIPreferencesStore(state => state.enterIsNewline);
|
||||
const composerQuickButton = useUIPreferencesStore(state => state.composerQuickButton);
|
||||
const chatMicTimeoutMs = useChatMicTimeoutMsValue();
|
||||
const { assistantAbortible, systemPurposeId, tokenCount: _historyTokenCount, abortConversationTemp } = useChatStore(useShallow(state => {
|
||||
const conversation = state.conversations.find(_c => _c.id === props.targetConversationId);
|
||||
@@ -170,7 +181,7 @@ export function Composer(props: {
|
||||
const enableLoadURLsInComposer = hasComposerBrowseCapability && !composeText.startsWith('/');
|
||||
|
||||
// user message for attachments
|
||||
const { onConversationsImportFromFiles } = props;
|
||||
const { onConversationBeamEdit, onConversationsImportFromFiles } = props;
|
||||
const handleFilterAGIFile = React.useCallback(async (file: File): Promise<boolean> =>
|
||||
await showPromisedOverlay('composer-open-or-attach', { rejectWithValue: false }, ({ onResolve, onUserReject }) => (
|
||||
<ConfirmationModal
|
||||
@@ -186,11 +197,12 @@ export function Composer(props: {
|
||||
)), [onConversationsImportFromFiles, showPromisedOverlay]);
|
||||
|
||||
// attachments-overlay: comes from the attachments slice of the conversation overlay
|
||||
const showChatAttachments = chatExecuteModeCanAttach(chatExecuteMode, props.capabilityHasT2IEdit);
|
||||
const {
|
||||
/* items */ attachmentDrafts,
|
||||
/* append */ attachAppendClipboardItems, attachAppendDataTransfer, attachAppendEgoFragments, attachAppendFile, attachAppendUrl,
|
||||
/* append */ attachAppendClipboardItems, attachAppendCloudFile, attachAppendDataTransfer, attachAppendEgoFragments, attachAppendFile, attachAppendUrl,
|
||||
/* take */ attachmentsRemoveAll, attachmentsTakeAllFragments, attachmentsTakeFragmentsByType,
|
||||
} = useAttachmentDrafts(conversationOverlayStore, enableLoadURLsInComposer, chatLLMSupportsImages, handleFilterAGIFile);
|
||||
} = useAttachmentDrafts(conversationOverlayStore, enableLoadURLsInComposer, chatLLMSupportsImages, handleFilterAGIFile, showChatAttachments === 'only-images');
|
||||
|
||||
// attachments derived state
|
||||
const llmAttachmentDraftsCollection = useLLMAttachmentDrafts(attachmentDrafts, props.chatLLM, chatLLMSupportsImages);
|
||||
@@ -208,7 +220,8 @@ export function Composer(props: {
|
||||
const isMobile = props.isMobile;
|
||||
const isDesktop = !props.isMobile;
|
||||
const noConversation = !targetConversationId;
|
||||
const showChatAttachments = chatExecuteModeCanAttach(chatExecuteMode);
|
||||
|
||||
const composerTextSuffix = chatExecuteMode === 'generate-image' && isDesktop && drawRepeat > 1 ? ` x${drawRepeat}` : '';
|
||||
|
||||
const micIsRunning = !!speechInterimResult;
|
||||
// more mic way below, as we use complex hooks
|
||||
@@ -216,18 +229,14 @@ export function Composer(props: {
|
||||
|
||||
// tokens derived state
|
||||
|
||||
const tokensComposerTextDebounced = React.useMemo(() => {
|
||||
return (debouncedText && props.chatLLM)
|
||||
? estimateTextTokens(debouncedText, props.chatLLM, 'composer text')
|
||||
: 0;
|
||||
}, [props.chatLLM, debouncedText]);
|
||||
let tokensComposer = tokensComposerTextDebounced + (llmAttachmentDraftsCollection.llmTokenCountApprox || 0);
|
||||
const tokensComposerTextDebounced = useTextTokenCount(composeText, props.chatLLM, 800, 1600);
|
||||
let tokensComposer = (tokensComposerTextDebounced ?? 0) + (llmAttachmentDraftsCollection.llmTokenCountApprox || 0);
|
||||
if (props.chatLLM && tokensComposer > 0)
|
||||
tokensComposer += glueForMessageTokens(props.chatLLM);
|
||||
const tokensHistory = _historyTokenCount;
|
||||
const tokensResponseMax = getModelParameterValueOrThrow('llmResponseTokens', props.chatLLM?.initialParameters, props.chatLLM?.userParameters, 0) ?? 0;
|
||||
const tokenLimit = props.chatLLM?.contextTokens || 0;
|
||||
const tokenChatPricing = props.chatLLM?.pricing?.chat;
|
||||
const tokenLimit = getLLMContextTokens(props.chatLLM) ?? 0;
|
||||
const tokenChatPricing = React.useMemo(() => llmChatPricing_adjusted(props.chatLLM), [props.chatLLM]);
|
||||
|
||||
|
||||
// Effect: load initial text if queued up (e.g. by /link/share_targetF)
|
||||
@@ -238,6 +247,13 @@ export function Composer(props: {
|
||||
}
|
||||
}, [setComposeText, setStartupText, startupText]);
|
||||
|
||||
// Effect: notify the parent of presence/absence of content
|
||||
const isContentful = composeText.length > 0 || !!attachmentDrafts.length;
|
||||
const { onComposerHasContent } = props;
|
||||
React.useEffect(() => {
|
||||
onComposerHasContent?.(isContentful);
|
||||
}, [isContentful, onComposerHasContent]);
|
||||
|
||||
|
||||
// Overlay actions
|
||||
|
||||
@@ -298,9 +314,9 @@ export function Composer(props: {
|
||||
// prepare the fragments: content (if any) and attachments (if allowed, and any)
|
||||
const fragments: (DMessageContentFragment | DMessageAttachmentFragment)[] = [];
|
||||
if (composerText)
|
||||
fragments.push(createTextContentFragment(composerText));
|
||||
fragments.push(createTextContentFragment(composerText + composerTextSuffix));
|
||||
|
||||
const canAttach = chatExecuteModeCanAttach(_chatExecuteMode);
|
||||
const canAttach = chatExecuteModeCanAttach(_chatExecuteMode, props.capabilityHasT2IEdit);
|
||||
if (canAttach) {
|
||||
const attachmentFragments = await attachmentsTakeAllFragments('global', 'app-chat');
|
||||
fragments.push(...attachmentFragments);
|
||||
@@ -319,7 +335,7 @@ export function Composer(props: {
|
||||
if (enqueued)
|
||||
_handleClearText();
|
||||
return enqueued;
|
||||
}, [attachmentsTakeAllFragments, confirmProceedIfAttachmentsNotSupported, _handleClearText, inReferenceTo, onAction, targetConversationId]);
|
||||
}, [targetConversationId, confirmProceedIfAttachmentsNotSupported, composerTextSuffix, props.capabilityHasT2IEdit, inReferenceTo, onAction, _handleClearText, attachmentsTakeAllFragments]);
|
||||
|
||||
const handleSendAction = React.useCallback(async (chatExecuteMode: ChatExecuteMode, composerText: string): Promise<boolean> => {
|
||||
setSendStarted(true);
|
||||
@@ -445,8 +461,13 @@ export function Composer(props: {
|
||||
addSnackbar({ key: 'chat-mic-running', message: 'Please wait for the microphone to finish.', type: 'info' });
|
||||
return;
|
||||
}
|
||||
await handleSendAction('beam-content', composeText); // 'beam' button
|
||||
}, [composeText, handleSendAction, micIsRunning]);
|
||||
if (composeText) {
|
||||
await handleSendAction('beam-content', composeText); // 'beam' button
|
||||
} else {
|
||||
if (targetConversationId)
|
||||
void onConversationBeamEdit(targetConversationId); // beam-edit conversation
|
||||
}
|
||||
}, [composeText, handleSendAction, micIsRunning, onConversationBeamEdit, targetConversationId]);
|
||||
|
||||
const handleStopClicked = React.useCallback(() => {
|
||||
targetConversationId && abortConversationTemp(targetConversationId);
|
||||
@@ -493,7 +514,7 @@ export function Composer(props: {
|
||||
const cHandler = ConversationsManager.getHandler(conversationId);
|
||||
const messageToEmbed = cHandler.historyFindMessageOrThrow(messageId);
|
||||
if (messageToEmbed) {
|
||||
const fragmentsCopy = duplicateDMessageFragmentsNoVoid(messageToEmbed.fragments); // [attach] deep copy a message's fragments to attach to ego
|
||||
const fragmentsCopy = duplicateDMessageFragments(messageToEmbed.fragments, true); // [attach] deep copy a message's fragments to attach to ego
|
||||
if (fragmentsCopy.length) {
|
||||
const chatTitle = cHandler.title() ?? '';
|
||||
const messageText = messageFragmentsReduceText(fragmentsCopy);
|
||||
@@ -527,6 +548,9 @@ export function Composer(props: {
|
||||
|
||||
// Enter: primary action
|
||||
if (e.key === 'Enter') {
|
||||
// Skip if composing (e.g., CJK input methods) - issue #784
|
||||
if (e.nativeEvent.isComposing)
|
||||
return;
|
||||
|
||||
// Alt (Windows) or Option (Mac) + Enter: append the message instead of sending it
|
||||
if (e.altKey && !e.metaKey && !e.ctrlKey) {
|
||||
@@ -600,7 +624,9 @@ export function Composer(props: {
|
||||
links.forEach(link => void attachAppendUrl('input-link', link.url));
|
||||
}, [attachAppendUrl]);
|
||||
|
||||
const { openWebInputDialog, webInputDialogComponent } = useWebInputModal(handleAttachWebLinks);
|
||||
const { openWebInputDialog, webInputDialogComponent } = useWebInputModal(handleAttachWebLinks, composeText);
|
||||
|
||||
const { openGoogleDrivePicker, googleDrivePickerComponent } = useGoogleDrivePicker(attachAppendCloudFile, isMobile);
|
||||
|
||||
|
||||
// Attachments Down
|
||||
@@ -630,8 +656,12 @@ export function Composer(props: {
|
||||
const composerShortcuts: ShortcutObject[] = [];
|
||||
if (showChatAttachments) {
|
||||
composerShortcuts.push({ key: 'f', ctrl: true, shift: true, action: () => openFileForAttaching(true, handleAttachFiles), description: 'Attach File' });
|
||||
composerShortcuts.push({ key: 'l', ctrl: true, shift: true, action: openWebInputDialog, description: 'Attach Link' });
|
||||
if (supportsClipboardRead())
|
||||
composerShortcuts.push({ key: 'v', ctrl: true, shift: true, action: attachAppendClipboardItems, description: 'Attach Clipboard' });
|
||||
// Future: keep reactive state here to support Live Screen Capture and more
|
||||
// if (labsAttachScreenCapture && supportsScreenCapture)
|
||||
// composerShortcuts.push({ key: 's', ctrl: true, shift: true, action: openScreenCaptureDialog, description: 'Attach Screen Capture' });
|
||||
}
|
||||
if (recognitionState.isActive) {
|
||||
composerShortcuts.push({ key: 'm', ctrl: true, action: handleFinishMicAndSend, description: 'Mic · Send', disabled: !recognitionState.hasSpeech || sendStarted, endDecoratorIcon: TelegramIcon as any, level: 4 });
|
||||
@@ -650,7 +680,7 @@ export function Composer(props: {
|
||||
}, description: 'Microphone',
|
||||
});
|
||||
return composerShortcuts;
|
||||
}, [attachAppendClipboardItems, handleAttachFiles, handleFinishMicAndSend, recognitionState.hasSpeech, recognitionState.isActive, sendStarted, showChatAttachments, toggleRecognition]));
|
||||
}, [attachAppendClipboardItems, handleAttachFiles, handleFinishMicAndSend, openWebInputDialog, recognitionState.hasSpeech, recognitionState.isActive, sendStarted, showChatAttachments, toggleRecognition]));
|
||||
|
||||
|
||||
// ...
|
||||
@@ -662,7 +692,7 @@ export function Composer(props: {
|
||||
const isDraw = chatExecuteMode === 'generate-image';
|
||||
|
||||
const showChatInReferenceTo = !!inReferenceTo?.length;
|
||||
const showChatExtras = isText && !showChatInReferenceTo;
|
||||
const showChatExtras = isText && !showChatInReferenceTo && !assistantAbortible && composerQuickButton !== 'off';
|
||||
|
||||
const sendButtonVariant: VariantProp = (isAppend || (isMobile && isTextBeam)) ? 'outlined' : 'solid';
|
||||
|
||||
@@ -678,13 +708,15 @@ export function Composer(props: {
|
||||
: isAppend ? <SendIcon sx={{ fontSize: 18 }} />
|
||||
: isReAct ? <PsychologyIcon />
|
||||
: isTextBeam ? <ChatBeamIcon /> /* <GavelIcon /> */
|
||||
: isDraw ? <FormatPaintTwoToneIcon />
|
||||
: isDraw ? <PhPaintBrush />
|
||||
: <TelegramIcon />;
|
||||
|
||||
const beamButtonColor: ColorPaletteProp | undefined =
|
||||
!llmAttachmentDraftsCollection.canAttachAllFragments ? 'warning'
|
||||
: undefined;
|
||||
|
||||
const showTint: ColorPaletteProp | undefined = isDraw ? 'warning' : isReAct ? 'success' : undefined;
|
||||
|
||||
// stable randomization of the /verb, between '/draw', '/react'
|
||||
const placeholderAction = React.useMemo(() => {
|
||||
const actions: string[] = ['/react'];
|
||||
@@ -704,13 +736,13 @@ export function Composer(props: {
|
||||
+ (recognitionState.isAvailable ? ' · ramble' : '')
|
||||
+ '...';
|
||||
|
||||
if (isDesktop && timeToShowTips) {
|
||||
if (isDesktop && timeToShowTips && !isDraw) {
|
||||
if (explainShiftEnter)
|
||||
textPlaceholder += !enterIsNewline ? '\n\n💡 Shift + Enter to add a new line' : '\n\n💡 Shift + Enter to send';
|
||||
else if (explainAltEnter)
|
||||
textPlaceholder += platformAwareKeystrokes('\n\n💡 Tip: Alt + Enter to just append the message');
|
||||
textPlaceholder += !enterIsNewline ? '\n\n⏎ Shift + Enter to add a new line' : '\n\n➤ Shift + Enter to send';
|
||||
// else if (explainAltEnter)
|
||||
// textPlaceholder += platformAwareKeystrokes('\n\n⭳ Tip: Alt + Enter to just append the message');
|
||||
else if (explainCtrlEnter)
|
||||
textPlaceholder += platformAwareKeystrokes('\n\n💡 Tip: Ctrl + Enter to beam');
|
||||
textPlaceholder += platformAwareKeystrokes('\n\n⫷ Tip: Ctrl + Enter to beam');
|
||||
}
|
||||
|
||||
const stableGridSx: SxProps = React.useMemo(() => ({
|
||||
@@ -721,9 +753,14 @@ export function Composer(props: {
|
||||
}), [dragContainerSx]);
|
||||
|
||||
return (
|
||||
<Box aria-label='User Message' component='section' sx={props.sx}>
|
||||
<Box
|
||||
aria-label='New Message'
|
||||
component='section'
|
||||
bgcolor={showTint ? `var(--joy-palette-${showTint}-softBg)` : themeBgAppChatComposer}
|
||||
sx={props.sx}
|
||||
>
|
||||
|
||||
{!isMobile && labsShowShortcutBar && <StatusBar toggleMinimized={handleToggleMinimized} isMinimized={isMinimized} />}
|
||||
{!isMobile && labsShowShortcutBar && <StatusBarMemo toggleMinimized={handleToggleMinimized} isMinimized={isMinimized} />}
|
||||
|
||||
{/* This container is here just to let the potential statusbar fill the whole space, so we moved the padding here and not in the parent */}
|
||||
<Box sx={(!isMinimized || isMobile || !labsShowShortcutBar) ? paddingBoxSx : minimizedSx}>
|
||||
@@ -744,13 +781,16 @@ export function Composer(props: {
|
||||
<Box sx={{ flexGrow: 0, display: 'grid', gap: 1, alignSelf: 'flex-start' }}>
|
||||
|
||||
{/* [mobile] Mic button */}
|
||||
{recognitionState.isAvailable && <ButtonMicMemo variant={micVariant} color={micColor} errorMessage={recognitionState.errorMessage} onClick={handleToggleMic} />}
|
||||
{recognitionState.isAvailable && <ButtonMicMemo variant={micVariant} color={micColor === 'danger' ? 'danger' : showTint || micColor} errorMessage={recognitionState.errorMessage} onClick={handleToggleMic} />}
|
||||
|
||||
{/* Responsive Camera OCR button */}
|
||||
{showChatAttachments && <ButtonAttachCameraMemo isMobile onOpenCamera={openCamera} />}
|
||||
{showChatAttachments && <ButtonAttachCameraMemo color={showTint} isMobile onOpenCamera={openCamera} />}
|
||||
|
||||
{/* [mobile] Attach file button (in draw with image mode) */}
|
||||
{showChatAttachments === 'only-images' && <ButtonAttachFilesMemo color={showTint} isMobile onAttachFiles={handleAttachFiles} fullWidth multiple />}
|
||||
|
||||
{/* [mobile] [+] button */}
|
||||
{showChatAttachments && (
|
||||
{showChatAttachments === true && (
|
||||
<Dropdown>
|
||||
<MenuButton slots={{ root: IconButton }}>
|
||||
<AddCircleOutlineIcon />
|
||||
@@ -767,6 +807,11 @@ export function Composer(props: {
|
||||
<ButtonAttachWebMemo disabled={!hasComposerBrowseCapability} onOpenWebInput={openWebInputDialog} />
|
||||
</MenuItem>
|
||||
|
||||
{/* Responsive Google Drive button */}
|
||||
{hasGoogleDriveCapability && <MenuItem>
|
||||
<ButtonAttachGoogleDriveMemo onOpenGoogleDrivePicker={openGoogleDrivePicker} fullWidth />
|
||||
</MenuItem>}
|
||||
|
||||
{/* Responsive Paste button */}
|
||||
{supportsClipboardRead() && <MenuItem>
|
||||
<ButtonAttachClipboardMemo onAttachClipboard={attachAppendClipboardItems} />
|
||||
@@ -791,19 +836,22 @@ export function Composer(props: {
|
||||
{/*</FormHelperText>*/}
|
||||
|
||||
{/* Responsive Open Files button */}
|
||||
<ButtonAttachFilesMemo onAttachFiles={handleAttachFiles} fullWidth multiple />
|
||||
<ButtonAttachFilesMemo color={showTint} onAttachFiles={handleAttachFiles} fullWidth multiple />
|
||||
|
||||
{/* Responsive Web button */}
|
||||
<ButtonAttachWebMemo disabled={!hasComposerBrowseCapability} onOpenWebInput={openWebInputDialog} />
|
||||
{showChatAttachments !== 'only-images' && <ButtonAttachWebMemo color={showTint} disabled={!hasComposerBrowseCapability} onOpenWebInput={openWebInputDialog} />}
|
||||
|
||||
{/* Responsive Google Drive button */}
|
||||
{hasGoogleDriveCapability && showChatAttachments !== 'only-images' && <ButtonAttachGoogleDriveMemo color={showTint} onOpenGoogleDrivePicker={openGoogleDrivePicker} />}
|
||||
|
||||
{/* Responsive Paste button */}
|
||||
{supportsClipboardRead() && <ButtonAttachClipboardMemo onAttachClipboard={attachAppendClipboardItems} />}
|
||||
{supportsClipboardRead() && showChatAttachments !== 'only-images' && <ButtonAttachClipboardMemo color={showTint} onAttachClipboard={attachAppendClipboardItems} />}
|
||||
|
||||
{/* Responsive Screen Capture button */}
|
||||
{labsAttachScreenCapture && supportsScreenCapture && <ButtonAttachScreenCaptureMemo onAttachScreenCapture={handleAttachScreenCapture} />}
|
||||
{labsAttachScreenCapture && supportsScreenCapture && <ButtonAttachScreenCaptureMemo color={showTint} onAttachScreenCapture={handleAttachScreenCapture} />}
|
||||
|
||||
{/* Responsive Camera OCR button */}
|
||||
{labsCameraDesktop && <ButtonAttachCameraMemo onOpenCamera={openCamera} />}
|
||||
{labsCameraDesktop && <ButtonAttachCameraMemo color={showTint} onOpenCamera={openCamera} />}
|
||||
|
||||
</Box>)}
|
||||
|
||||
@@ -827,8 +875,8 @@ export function Composer(props: {
|
||||
<Textarea
|
||||
variant='outlined'
|
||||
color={isDraw ? 'warning' : isReAct ? 'success' : undefined}
|
||||
autoFocus
|
||||
minRows={isMobile ? 4 : agiAttachmentPrompts.hasData ? 3 : showChatInReferenceTo ? 4 : 5}
|
||||
autoFocus={isDesktop}
|
||||
minRows={isMobile ? 3.5 : isDraw ? 4 : agiAttachmentPrompts.hasData ? 3 : showChatInReferenceTo ? 4 : 5}
|
||||
maxRows={isMobile ? 8 : 10}
|
||||
placeholder={textPlaceholder}
|
||||
value={composeText}
|
||||
@@ -837,8 +885,12 @@ export function Composer(props: {
|
||||
onPasteCapture={handleAttachCtrlV}
|
||||
// onFocusCapture={handleFocusModeOn}
|
||||
// onBlurCapture={handleFocusModeOff}
|
||||
endDecorator={
|
||||
<ComposerTextAreaActions
|
||||
endDecorator={isDraw
|
||||
? <ComposerTextAreaDrawActions
|
||||
composerText={composeText}
|
||||
onReplaceText={setComposeText}
|
||||
/>
|
||||
: <ComposerTextAreaActions
|
||||
agiAttachmentPrompts={agiAttachmentPrompts}
|
||||
inReferenceTo={inReferenceTo}
|
||||
onAppendAndSend={handleAppendTextAndSend}
|
||||
@@ -847,6 +899,7 @@ export function Composer(props: {
|
||||
}
|
||||
slotProps={{
|
||||
textarea: {
|
||||
tabIndex: !recognitionState.isActive ? undefined : -1,
|
||||
height: '100%',
|
||||
enterKeyHint: enterIsNewline ? 'enter' : 'send',
|
||||
sx: {
|
||||
@@ -858,17 +911,17 @@ export function Composer(props: {
|
||||
}}
|
||||
sx={{
|
||||
height: '100%',
|
||||
backgroundColor: 'background.level1',
|
||||
backgroundColor: showTint ? undefined : 'background.level1',
|
||||
'&:focus-within': { backgroundColor: 'background.popup', '.within-composer-focus': { backgroundColor: 'background.popup' } },
|
||||
lineHeight: lineHeightTextareaMd,
|
||||
}} />
|
||||
|
||||
{!showChatInReferenceTo && tokenLimit > 0 && (tokensComposer > 0 || (tokensHistory + tokensResponseMax) > 0) && (
|
||||
{!showChatInReferenceTo && !isDraw && tokenLimit > 0 && (tokensComposer > 0 || (tokensHistory + tokensResponseMax) > 0) && (
|
||||
<TokenProgressbarMemo chatPricing={tokenChatPricing} direct={tokensComposer} history={tokensHistory} responseMax={tokensResponseMax} limit={tokenLimit} />
|
||||
)}
|
||||
|
||||
{!showChatInReferenceTo && tokenLimit > 0 && (
|
||||
<TokenBadgeMemo hideBelowDollars={0.0001} chatPricing={tokenChatPricing} direct={tokensComposer} history={tokensHistory} responseMax={tokensResponseMax} limit={tokenLimit} showCost={labsShowCost} enableHover={!isMobile} showExcess absoluteBottomRight />
|
||||
{!showChatInReferenceTo && !isDraw && tokenLimit > 0 && (
|
||||
<TokenBadgeMemo hideBelowDollars={0.01} chatPricing={tokenChatPricing} direct={tokensComposer} history={tokensHistory} responseMax={tokensResponseMax} limit={tokenLimit} showCost={labsShowCost} enableHover={!isMobile} showExcess absoluteBottomRight />
|
||||
)}
|
||||
|
||||
</Box>
|
||||
@@ -936,7 +989,7 @@ export function Composer(props: {
|
||||
fontStyle: 'italic',
|
||||
},
|
||||
}}>
|
||||
{!!debouncedText && <span className='preceding'>{debouncedText.endsWith(' ') ? debouncedText : debouncedText + ' '}</span>}
|
||||
{!!composeText && <span className='preceding'>{composeText.endsWith(' ') ? composeText : composeText + ' '}</span>}
|
||||
{speechInterimResult.transcript}
|
||||
<span className={speechInterimResult.interimTranscript === PLACEHOLDER_INTERIM_TRANSCRIPT ? 'placeholder' : 'interim'}>{speechInterimResult.interimTranscript}</span>
|
||||
</Typography>
|
||||
@@ -971,7 +1024,9 @@ export function Composer(props: {
|
||||
|
||||
{/* [mobile] bottom-corner secondary button */}
|
||||
{isMobile && (showChatExtras
|
||||
? <ButtonCallMemo isMobile disabled={noConversation || noLLM} onClick={handleCallClicked} />
|
||||
? (composerQuickButton === 'call'
|
||||
? <ButtonCallMemo isMobile disabled={noConversation || noLLM} onClick={handleCallClicked} />
|
||||
: <ButtonBeamMemo isMobile disabled={noConversation /*|| noLLM*/} color={beamButtonColor} hasContent={!!composeText} onClick={handleSendTextBeamClicked} />)
|
||||
: isDraw
|
||||
? <ButtonOptionsDraw isMobile onClick={handleDrawOptionsClicked} sx={{ mr: { xs: 1, md: 2 } }} />
|
||||
: <IconButton disabled sx={{ mr: { xs: 1, md: 2 } }} />
|
||||
@@ -991,7 +1046,7 @@ export function Composer(props: {
|
||||
<Button
|
||||
key='composer-act'
|
||||
fullWidth
|
||||
disabled={noConversation || noLLM}
|
||||
disabled={noConversation /* || noLLM*/}
|
||||
loading={sendStarted}
|
||||
loadingPosition='end'
|
||||
onClick={handleSendClicked}
|
||||
@@ -1022,16 +1077,17 @@ export function Composer(props: {
|
||||
{/*</Tooltip>}*/}
|
||||
|
||||
{/* [Draw] Imagine */}
|
||||
{isDraw && !!composeText && <Tooltip title='Generate an image prompt'>
|
||||
<IconButton variant='outlined' disabled={noConversation || noLLM} onClick={handleTextImagineClicked}>
|
||||
<AutoAwesomeIcon />
|
||||
</IconButton>
|
||||
</Tooltip>}
|
||||
{/* NOTE: disabled: as we have prompt enhancement in the TextArea (Draw Mode) already */}
|
||||
{/*{isDraw && !!composeText && <Tooltip title='Generate an image prompt'>*/}
|
||||
{/* <IconButton variant='outlined' disabled={noConversation || noLLM} onClick={handleTextImagineClicked}>*/}
|
||||
{/* <AutoAwesomeIcon />*/}
|
||||
{/* </IconButton>*/}
|
||||
{/*</Tooltip>}*/}
|
||||
|
||||
{/* Mode expander */}
|
||||
<IconButton
|
||||
variant={assistantAbortible ? 'soft' : isDraw ? undefined : undefined}
|
||||
disabled={noConversation || noLLM || chatExecuteMenuShown}
|
||||
variant={chatExecuteMenuShown ? 'outlined' : assistantAbortible ? 'soft' : isDraw ? undefined : undefined}
|
||||
disabled={noConversation /*|| chatExecuteMenuShown*/}
|
||||
onClick={showChatExecuteMenu}
|
||||
>
|
||||
<ExpandLessIcon />
|
||||
@@ -1042,7 +1098,7 @@ export function Composer(props: {
|
||||
{isDesktop && showChatExtras && !assistantAbortible && (
|
||||
<ButtonBeamMemo
|
||||
color={beamButtonColor}
|
||||
disabled={noConversation || noLLM}
|
||||
disabled={noConversation /*|| noLLM*/}
|
||||
hasContent={!!composeText}
|
||||
onClick={handleSendTextBeamClicked}
|
||||
/>
|
||||
@@ -1050,6 +1106,9 @@ export function Composer(props: {
|
||||
|
||||
</Box>
|
||||
|
||||
{/* [desktop] Draw mode N buttons */}
|
||||
{isDesktop && isDraw && <ButtonGroupDrawRepeat drawRepeat={drawRepeat} setDrawRepeat={setDrawRepeat} />}
|
||||
|
||||
{/* [desktop] Multicast switch (under the Chat button) */}
|
||||
{isDesktop && props.isMulticast !== null && <ButtonMultiChatMemo multiChat={props.isMulticast} onSetMultiChat={props.setIsMulticast} />}
|
||||
|
||||
@@ -1080,6 +1139,9 @@ export function Composer(props: {
|
||||
{/* Camera (when open) */}
|
||||
{cameraCaptureComponent}
|
||||
|
||||
{/* Google Drive Picker (when open) */}
|
||||
{googleDrivePickerComponent}
|
||||
|
||||
{/* Web Input Dialog (when open) */}
|
||||
{webInputDialogComponent}
|
||||
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
import * as React from 'react';
|
||||
import { Controller, useFieldArray, useForm } from 'react-hook-form';
|
||||
|
||||
import { Box, Button, FormControl, FormHelperText, IconButton, Input, Stack, Typography } from '@mui/joy';
|
||||
import { Box, Button, Chip, FormControl, FormHelperText, IconButton, Input, Stack, Typography } from '@mui/joy';
|
||||
import AddCircleOutlineRoundedIcon from '@mui/icons-material/AddCircleOutlineRounded';
|
||||
import AddIcon from '@mui/icons-material/Add';
|
||||
import BrowserUpdatedOutlinedIcon from '@mui/icons-material/BrowserUpdatedOutlined';
|
||||
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
|
||||
import LanguageRoundedIcon from '@mui/icons-material/LanguageRounded';
|
||||
import YouTubeIcon from '@mui/icons-material/YouTube';
|
||||
@@ -11,7 +13,7 @@ import { extractYoutubeVideoIDFromURL } from '~/modules/youtube/youtube.utils';
|
||||
|
||||
import { GoodModal } from '~/common/components/modals/GoodModal';
|
||||
import { addSnackbar } from '~/common/components/snackbar/useSnackbarsStore';
|
||||
import { asValidURL } from '~/common/util/urlUtils';
|
||||
import { asValidURL, extractUrlsFromText } from '~/common/util/urlUtils';
|
||||
|
||||
|
||||
// configuration
|
||||
@@ -26,8 +28,25 @@ type WebInputModalInputs = {
|
||||
links: WebInputData[];
|
||||
}
|
||||
|
||||
const _styles = {
|
||||
|
||||
ytIcon: {
|
||||
color: 'red',
|
||||
} as const,
|
||||
|
||||
chipLink: {
|
||||
ml: 'auto',
|
||||
pr: 1.125,
|
||||
// '--Chip-radius': '4px',
|
||||
// whiteSpace: 'break-spaces',
|
||||
// gap: 1.5,
|
||||
} as const,
|
||||
|
||||
} as const;
|
||||
|
||||
|
||||
function WebInputModal(props: {
|
||||
composerText?: string,
|
||||
onClose: () => void,
|
||||
onWebLinks: (urls: WebInputData[]) => void,
|
||||
}) {
|
||||
@@ -35,13 +54,31 @@ function WebInputModal(props: {
|
||||
// state
|
||||
const { control: formControl, handleSubmit: formHandleSubmit, formState: { isValid: formIsValid, isDirty: formIsDirty } } = useForm<WebInputModalInputs>({
|
||||
values: { links: [{ url: '' }] },
|
||||
// mode: 'onChange', // validate on change
|
||||
mode: 'onChange', // validate on change
|
||||
});
|
||||
const { fields: formFields, append: formFieldsAppend, remove: formFieldsRemove } = useFieldArray({ control: formControl, name: 'links' });
|
||||
const { fields: formFields, append: formFieldsAppend, remove: formFieldsRemove, update: formFieldsUpdate } = useFieldArray({ control: formControl, name: 'links' });
|
||||
const firstInputRef = React.useRef<HTMLInputElement>(null);
|
||||
|
||||
// derived
|
||||
const urlFieldCount = formFields.length;
|
||||
const canAddMoreUrls = urlFieldCount < MAX_URLS;
|
||||
|
||||
// [effect] auto-focus first input
|
||||
React.useEffect(() => {
|
||||
setTimeout(() => {
|
||||
if (firstInputRef.current)
|
||||
firstInputRef.current.focus();
|
||||
}, 0);
|
||||
}, []);
|
||||
|
||||
|
||||
// memos
|
||||
|
||||
const extractedComposerUrls = React.useMemo(() => {
|
||||
return !props.composerText ? null : extractUrlsFromText(props.composerText);
|
||||
}, [props.composerText]);
|
||||
|
||||
const extractedUrlsCount = extractedComposerUrls?.length ?? 0;
|
||||
|
||||
// handlers
|
||||
|
||||
@@ -70,6 +107,46 @@ function WebInputModal(props: {
|
||||
}, [handleClose, onWebLinks]);
|
||||
|
||||
|
||||
// const handleAddUrl = React.useCallback((newUrl: string) => {
|
||||
// // bail if can't add
|
||||
// if (!canAddMoreUrls)
|
||||
// return addSnackbar({ key: 'max-urls', message: `Maximum ${MAX_URLS} URLs allowed`, type: 'precondition-fail' });
|
||||
//
|
||||
// // bail if already in
|
||||
// const exists = formFields.some(({ url }) => url === newUrl);
|
||||
// if (exists)
|
||||
// return addSnackbar({ key: 'duplicate-url', message: 'URL already added', type: 'info' });
|
||||
//
|
||||
// // replace the first empty field, or append
|
||||
// const emptyFieldIndex = formFields.findIndex(field => !field.url.trim());
|
||||
// if (emptyFieldIndex >= 0)
|
||||
// formFieldsUpdate(emptyFieldIndex, { url: newUrl });
|
||||
// else
|
||||
// formFieldsAppend({ url: newUrl });
|
||||
// }, [canAddMoreUrls, formFields, formFieldsAppend, formFieldsUpdate]);
|
||||
|
||||
|
||||
const handleAddAllUrls = React.useCallback(() => {
|
||||
if (!extractedComposerUrls) return;
|
||||
|
||||
// new URLs that are not already in the form
|
||||
const newURLs = extractedComposerUrls.filter(url => !formFields.some(field => field.url.trim() === url));
|
||||
if (!newURLs.length) return;
|
||||
|
||||
// find empty fields first
|
||||
for (let i = 0; i < formFields.length; i++) {
|
||||
const field = formFields[i];
|
||||
if (!field.url.trim()) {
|
||||
formFieldsUpdate(i, { url: newURLs.shift()! });
|
||||
if (!newURLs.length) break;
|
||||
}
|
||||
}
|
||||
|
||||
// append remaining
|
||||
newURLs.forEach(url => formFieldsAppend({ url }));
|
||||
}, [extractedComposerUrls, formFields, formFieldsAppend, formFieldsUpdate]);
|
||||
|
||||
|
||||
return (
|
||||
<GoodModal
|
||||
open
|
||||
@@ -89,6 +166,26 @@ function WebInputModal(props: {
|
||||
{/*You can add up to {MAX_URLS} URLs.*/}
|
||||
</Typography>
|
||||
|
||||
|
||||
{/* Modified URLs section */}
|
||||
{!!extractedUrlsCount && (
|
||||
<Box sx={{ display: 'flex', alignItems: 'center', gap: 1 }}>
|
||||
<Typography level='title-sm' startDecorator={<BrowserUpdatedOutlinedIcon />}>
|
||||
{extractedUrlsCount} URL{extractedUrlsCount > 1 ? 's' : ''} in your message
|
||||
{/*{extractedUrlsCount} URL{extractedUrlsCount > 1 ? 's' : ''} found in your message*/}
|
||||
</Typography>
|
||||
<Chip
|
||||
variant='soft'
|
||||
onClick={handleAddAllUrls}
|
||||
startDecorator={<AddCircleOutlineRoundedIcon />}
|
||||
sx={_styles.chipLink}
|
||||
>
|
||||
Add
|
||||
</Chip>
|
||||
</Box>
|
||||
)}
|
||||
|
||||
|
||||
<form onSubmit={formHandleSubmit(handleSubmit)}>
|
||||
<Stack spacing={1}>
|
||||
{formFields.map((field, index) => (
|
||||
@@ -101,12 +198,16 @@ function WebInputModal(props: {
|
||||
<FormControl error={!!error}>
|
||||
<Box sx={{ display: 'flex', gap: 1 }}>
|
||||
<Input
|
||||
autoFocus={index === 0}
|
||||
required={index === 0}
|
||||
placeholder='https://...'
|
||||
endDecorator={extractYoutubeVideoIDFromURL(value) ? <YouTubeIcon sx={{ color: 'red' }} /> : undefined}
|
||||
endDecorator={extractYoutubeVideoIDFromURL(value) ? <YouTubeIcon sx={_styles.ytIcon} /> : undefined}
|
||||
value={value}
|
||||
onChange={onChange}
|
||||
slotProps={index !== 0 ? undefined : {
|
||||
input: {
|
||||
ref: firstInputRef,
|
||||
},
|
||||
}}
|
||||
sx={{ flex: 1 }}
|
||||
/>
|
||||
{urlFieldCount > 1 && (
|
||||
@@ -133,7 +234,7 @@ function WebInputModal(props: {
|
||||
{formIsDirty && <Button
|
||||
color='neutral'
|
||||
variant='soft'
|
||||
disabled={urlFieldCount >= MAX_URLS}
|
||||
disabled={!canAddMoreUrls}
|
||||
onClick={() => formFieldsAppend({ url: '' })}
|
||||
startDecorator={<AddIcon />}
|
||||
>
|
||||
@@ -147,7 +248,7 @@ function WebInputModal(props: {
|
||||
disabled={!formIsValid || !formIsDirty}
|
||||
sx={{ minWidth: 160, ml: 'auto' }}
|
||||
>
|
||||
Add {urlFieldCount > 1 ? `(${urlFieldCount})` : ''}
|
||||
Import {urlFieldCount > 1 ? `(${urlFieldCount})` : ''}
|
||||
</Button>
|
||||
|
||||
</Box>
|
||||
@@ -158,15 +259,20 @@ function WebInputModal(props: {
|
||||
}
|
||||
|
||||
|
||||
export function useWebInputModal(onAttachWebLinks: (urls: WebInputData[]) => void) {
|
||||
export function useWebInputModal(onAttachWebLinks: (urls: WebInputData[]) => void, composerText?: string) {
|
||||
|
||||
// state
|
||||
const [open, setOpen] = React.useState(false);
|
||||
const composerTextRef = React.useRef(composerText);
|
||||
|
||||
// copy the text to a ref, constantly - we just care about a recent snapshot, but don't want to invalidate hooks
|
||||
composerTextRef.current = composerText;
|
||||
|
||||
const openWebInputDialog = React.useCallback(() => setOpen(true), []);
|
||||
|
||||
const webInputDialogComponent = React.useMemo(() => open && (
|
||||
<WebInputModal
|
||||
composerText={composerTextRef.current}
|
||||
onClose={() => setOpen(false)}
|
||||
onWebLinks={onAttachWebLinks}
|
||||
/>
|
||||
|
||||
@@ -38,6 +38,7 @@ export function ActilePopup(props: {
|
||||
maxHeightGapPx={320}
|
||||
minWidth={320}
|
||||
noBottomPadding
|
||||
noAutoFocus={true /* we control keyboard navigation */}
|
||||
noTopPadding
|
||||
>
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ import type { ActileItem, ActileProvider } from './ActileProvider';
|
||||
import { ActilePopup } from './ActilePopup';
|
||||
|
||||
|
||||
export const useActileManager = (providers: ActileProvider[], anchorRef: React.RefObject<HTMLElement>) => {
|
||||
export const useActileManager = (providers: ActileProvider[], anchorRef: React.RefObject<HTMLElement | null>) => {
|
||||
|
||||
// state
|
||||
const [popupOpen, setPopupOpen] = React.useState(false);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
|
||||
import { Box, Button, ColorPaletteProp, IconButton, Tooltip } from '@mui/joy';
|
||||
import AddAPhotoIcon from '@mui/icons-material/AddAPhoto';
|
||||
import CameraAltOutlinedIcon from '@mui/icons-material/CameraAltOutlined';
|
||||
|
||||
@@ -12,6 +12,7 @@ import { CameraCaptureModal } from '../CameraCaptureModal';
|
||||
export const ButtonAttachCameraMemo = React.memo(ButtonAttachCamera);
|
||||
|
||||
function ButtonAttachCamera(props: {
|
||||
color?: ColorPaletteProp,
|
||||
isMobile?: boolean,
|
||||
disabled?: boolean,
|
||||
fullWidth?: boolean,
|
||||
@@ -19,7 +20,7 @@ function ButtonAttachCamera(props: {
|
||||
onOpenCamera: () => void,
|
||||
}) {
|
||||
return props.isMobile ? (
|
||||
<IconButton disabled={props.disabled} onClick={props.onOpenCamera}>
|
||||
<IconButton color={props.color} disabled={props.disabled} onClick={props.onOpenCamera}>
|
||||
<AddAPhotoIcon />
|
||||
</IconButton>
|
||||
) : (
|
||||
@@ -30,8 +31,8 @@ function ButtonAttachCamera(props: {
|
||||
</Box>
|
||||
)}>
|
||||
<Button
|
||||
variant='plain'
|
||||
color='neutral'
|
||||
variant={props.color ? 'soft' : 'plain'}
|
||||
color={props.color || 'neutral'}
|
||||
disabled={props.disabled}
|
||||
fullWidth={props.fullWidth}
|
||||
startDecorator={<CameraAltOutlinedIcon />}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
|
||||
import { Box, Button, ColorPaletteProp, IconButton, Tooltip } from '@mui/joy';
|
||||
import ContentPasteGoIcon from '@mui/icons-material/ContentPasteGo';
|
||||
|
||||
import { KeyStroke } from '~/common/components/KeyStroke';
|
||||
@@ -10,6 +10,7 @@ import { buttonAttachSx } from '~/common/components/ButtonAttachFiles';
|
||||
export const ButtonAttachClipboardMemo = React.memo(ButtonAttachClipboard);
|
||||
|
||||
function ButtonAttachClipboard(props: {
|
||||
color?: ColorPaletteProp,
|
||||
isMobile?: boolean,
|
||||
disabled?: boolean,
|
||||
fullWidth?: boolean,
|
||||
@@ -17,7 +18,7 @@ function ButtonAttachClipboard(props: {
|
||||
onAttachClipboard: () => void,
|
||||
}) {
|
||||
return props.isMobile ? (
|
||||
<IconButton disabled={props.disabled} onClick={props.onAttachClipboard}>
|
||||
<IconButton color={props.color} disabled={props.disabled} onClick={props.onAttachClipboard}>
|
||||
<ContentPasteGoIcon />
|
||||
</IconButton>
|
||||
) : (
|
||||
@@ -29,8 +30,8 @@ function ButtonAttachClipboard(props: {
|
||||
</Box>
|
||||
)}>
|
||||
<Button
|
||||
variant='plain'
|
||||
color='neutral'
|
||||
variant={props.color ? 'soft' : 'plain'}
|
||||
color={props.color || 'neutral'}
|
||||
disabled={props.disabled}
|
||||
fullWidth={props.fullWidth}
|
||||
startDecorator={<ContentPasteGoIcon />}
|
||||
|
||||
@@ -0,0 +1,49 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, ColorPaletteProp, IconButton, Tooltip } from '@mui/joy';
|
||||
import AddToDriveRoundedIcon from '@mui/icons-material/AddToDriveRounded';
|
||||
|
||||
import { buttonAttachSx } from '~/common/components/ButtonAttachFiles';
|
||||
import { KeyStroke } from '~/common/components/KeyStroke';
|
||||
|
||||
|
||||
export const ButtonAttachGoogleDriveMemo = React.memo(ButtonAttachGoogleDrive);
|
||||
|
||||
function ButtonAttachGoogleDrive(props: {
|
||||
color?: ColorPaletteProp,
|
||||
isMobile?: boolean,
|
||||
disabled?: boolean,
|
||||
fullWidth?: boolean,
|
||||
noToolTip?: boolean,
|
||||
onOpenGoogleDrivePicker: () => void,
|
||||
}) {
|
||||
|
||||
const button = props.isMobile ? (
|
||||
<IconButton color={props.color} disabled={props.disabled} onClick={props.onOpenGoogleDrivePicker}>
|
||||
<AddToDriveRoundedIcon />
|
||||
</IconButton>
|
||||
) : (
|
||||
<Button
|
||||
variant={props.color ? 'soft' : 'plain'}
|
||||
color={props.color || 'neutral'}
|
||||
disabled={props.disabled}
|
||||
fullWidth={props.fullWidth}
|
||||
startDecorator={<AddToDriveRoundedIcon />}
|
||||
onClick={props.onOpenGoogleDrivePicker}
|
||||
sx={buttonAttachSx.desktop}
|
||||
>
|
||||
Drive
|
||||
</Button>
|
||||
);
|
||||
|
||||
return (props.noToolTip || props.isMobile) ? button : (
|
||||
<Tooltip arrow disableInteractive placement='top-start' title={
|
||||
<Box sx={buttonAttachSx.tooltip}>
|
||||
<b>Add from Google Drive</b><br />
|
||||
Attach files from your Drive
|
||||
</Box>
|
||||
}>
|
||||
{button}
|
||||
</Tooltip>
|
||||
);
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
|
||||
import { Box, Button, ColorPaletteProp, IconButton, Tooltip } from '@mui/joy';
|
||||
import AddRoundedIcon from '@mui/icons-material/AddRounded';
|
||||
|
||||
import { buttonAttachSx } from '~/common/components/ButtonAttachFiles';
|
||||
@@ -9,6 +9,7 @@ import { buttonAttachSx } from '~/common/components/ButtonAttachFiles';
|
||||
export const ButtonAttachNewMemo = React.memo(ButtonAttachNew);
|
||||
|
||||
function ButtonAttachNew(props: {
|
||||
color?: ColorPaletteProp,
|
||||
isMobile?: boolean,
|
||||
disabled?: boolean,
|
||||
fullWidth?: boolean,
|
||||
@@ -16,7 +17,7 @@ function ButtonAttachNew(props: {
|
||||
onAttachNew: () => void,
|
||||
}) {
|
||||
return props.isMobile ? (
|
||||
<IconButton disabled={props.disabled} onClick={props.onAttachNew}>
|
||||
<IconButton color={props.color} disabled={props.disabled} onClick={props.onAttachNew}>
|
||||
<AddRoundedIcon />
|
||||
</IconButton>
|
||||
) : (
|
||||
@@ -29,15 +30,15 @@ function ButtonAttachNew(props: {
|
||||
</Box>
|
||||
)}>
|
||||
<Button
|
||||
variant='plain'
|
||||
color='neutral'
|
||||
variant={props.color ? 'soft' : 'plain'}
|
||||
color={props.color || 'neutral'}
|
||||
disabled={props.disabled}
|
||||
fullWidth={props.fullWidth}
|
||||
startDecorator={<AddRoundedIcon />}
|
||||
onClick={props.onAttachNew}
|
||||
sx={buttonAttachSx.desktop}
|
||||
>
|
||||
New
|
||||
Note
|
||||
</Button>
|
||||
</Tooltip>
|
||||
);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Button, IconButton, Tooltip } from '@mui/joy';
|
||||
import { Box, Button, ColorPaletteProp, IconButton, Tooltip } from '@mui/joy';
|
||||
import ScreenshotMonitorIcon from '@mui/icons-material/ScreenshotMonitor';
|
||||
|
||||
import { Is } from '~/common/util/pwaUtils';
|
||||
@@ -11,6 +11,7 @@ import { takeScreenCapture } from '~/common/util/screenCaptureUtils';
|
||||
export const ButtonAttachScreenCaptureMemo = React.memo(ButtonAttachScreenCapture);
|
||||
|
||||
function ButtonAttachScreenCapture(props: {
|
||||
color?: ColorPaletteProp,
|
||||
isMobile?: boolean,
|
||||
disabled?: boolean,
|
||||
fullWidth?: boolean,
|
||||
@@ -41,7 +42,7 @@ function ButtonAttachScreenCapture(props: {
|
||||
|
||||
|
||||
return props.isMobile ? (
|
||||
<IconButton disabled={props.disabled} onClick={handleTakeScreenCapture}>
|
||||
<IconButton color={props.color} disabled={props.disabled} onClick={handleTakeScreenCapture}>
|
||||
<ScreenshotMonitorIcon />
|
||||
</IconButton>
|
||||
) : (
|
||||
@@ -55,8 +56,8 @@ function ButtonAttachScreenCapture(props: {
|
||||
</Box>
|
||||
)}>
|
||||
<Button
|
||||
variant={capturing ? 'solid' : 'plain'}
|
||||
color={!!error ? 'danger' : 'neutral'}
|
||||
variant={capturing ? 'solid' : props.color ? 'soft' : 'plain'}
|
||||
color={!!error ? 'danger' : props.color || 'neutral'}
|
||||
disabled={props.disabled}
|
||||
fullWidth={props.fullWidth}
|
||||
loading={capturing}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user