Compare commits
1137 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 6ae440d252 | |||
| c0c724afc1 | |||
| a265112ce1 | |||
| 75605ed408 | |||
| ad38ff4157 | |||
| 08c60e53b1 | |||
| d0dcb2ac02 | |||
| fbeb604b26 | |||
| c4f3b1df77 | |||
| 5a1f9caaac | |||
| 2fc70d5e95 | |||
| 43adadef78 | |||
| 96f6e7628b | |||
| 32ad82bcee | |||
| 3d72aec369 | |||
| d244ee2cca | |||
| cc8a235ae3 | |||
| ae348812de | |||
| 6053636f66 | |||
| f2e2aee672 | |||
| 11cbb2bbf0 | |||
| 30bd19d6ce | |||
| d0b5c02062 | |||
| 771192e406 | |||
| 13f502bd76 | |||
| 11055b12ca | |||
| d0ea96eec0 | |||
| 02eafc03f1 | |||
| 33d07a0313 | |||
| 763b852148 | |||
| d5b0617fd7 | |||
| e3ce83674c | |||
| 5cc5df6909 | |||
| 11d8cf8996 | |||
| eae578970e | |||
| e076953c6a | |||
| 5c455591ea | |||
| 19b3dcd927 | |||
| 702e27edbf | |||
| 7c872de9af | |||
| 53b18143e7 | |||
| d812813aac | |||
| 9505b7fd7f | |||
| 9e07822598 | |||
| 6d6604a043 | |||
| 64d5071eb4 | |||
| 4a29ff0b19 | |||
| 6acab83ac5 | |||
| a3391b46ec | |||
| 9d021a0ea9 | |||
| 5b35435136 | |||
| 38b1cd1e4b | |||
| 50e4bf30f2 | |||
| 6f8d6462b9 | |||
| 596bb1ccc6 | |||
| 8023d4fd7e | |||
| 5808c5ae27 | |||
| 0945bc1e74 | |||
| c82ea978da | |||
| 9184e28691 | |||
| 59784af72c | |||
| 8feb1881b9 | |||
| 62747e07f1 | |||
| 934511a21f | |||
| e36b71db9c | |||
| 924cd7018f | |||
| d5e91f9ce7 | |||
| f1ad8cd55e | |||
| d177c73642 | |||
| 011bcf8ccd | |||
| 7d0e5809e1 | |||
| b369148057 | |||
| 2e0105b5ed | |||
| 3f24ade8e6 | |||
| 9cdaf26174 | |||
| 3b2c604615 | |||
| 223689316b | |||
| 6456a0de0c | |||
| 57458fb32f | |||
| b2521060cc | |||
| 13b6a1ba7e | |||
| ec81d802d5 | |||
| f6eca257d6 | |||
| e744b1afcd | |||
| bfcae972f7 | |||
| 360f886c37 | |||
| 305c278e1c | |||
| ccfcf6235f | |||
| 62f7d92bb2 | |||
| f8915141c8 | |||
| 7e1e4af19b | |||
| 439c462a9b | |||
| 95aa71abd6 | |||
| 3c829cbf97 | |||
| 29a31d5ca3 | |||
| 4a8bb24c0f | |||
| 6b6c3afe0c | |||
| fd41388584 | |||
| b418b69dc3 | |||
| e1e2962a02 | |||
| f1662e174f | |||
| a73c55fc1f | |||
| 0aa923a99d | |||
| b75160bb2b | |||
| 3d515102a1 | |||
| b857cc18d8 | |||
| 4737d962db | |||
| 7ba71078a8 | |||
| bee0fa8751 | |||
| 5916dfb08d | |||
| 9d13b03923 | |||
| 48e6385ac7 | |||
| cf664ff486 | |||
| 5ccf8ba128 | |||
| 3cd5917207 | |||
| e2dcca274f | |||
| 7369e898af | |||
| 1e2c12fddb | |||
| 4f7369b940 | |||
| f566049890 | |||
| fbc2da8b09 | |||
| af70b39515 | |||
| e080d72e8a | |||
| fd24e3676a | |||
| 942cd461f5 | |||
| 9567e1cbaa | |||
| 2d5d31268e | |||
| b376608709 | |||
| 551e502caf | |||
| 9fb7fcd22f | |||
| 1cda7d195b | |||
| 4a02923dda | |||
| a8a45631c2 | |||
| eaa755d4ce | |||
| 872396a90e | |||
| 6b3a2772cc | |||
| f378733abe | |||
| 0cf8f0439d | |||
| ab53087b3a | |||
| b50923a3b7 | |||
| 1b4a8da313 | |||
| 31684c2fee | |||
| fedd4b1fda | |||
| a41667f427 | |||
| 021fa3b313 | |||
| b7ca69aa0e | |||
| 1efcadbf46 | |||
| 598a6a8e0b | |||
| 1cd441a2f5 | |||
| 783dc55d02 | |||
| 88418d1ed0 | |||
| 6a74d1900f | |||
| 5566e29bcc | |||
| 1f49195251 | |||
| c5e15ece14 | |||
| 7ceb176d70 | |||
| b93bd1bd0b | |||
| 088133ec37 | |||
| 784766442d | |||
| e014a7c828 | |||
| 224e745a71 | |||
| 28ef74f1e9 | |||
| 70091ac39b | |||
| cc1011659d | |||
| 7eaa4a11bd | |||
| 495f25e2d4 | |||
| f2396000f2 | |||
| 77533aa385 | |||
| 01b2bf6fa3 | |||
| 6d7843805e | |||
| 0a593fb2c6 | |||
| 57f277f269 | |||
| 6924e02a17 | |||
| f4b645fd78 | |||
| fdb46d3072 | |||
| 858e9d3cb3 | |||
| 52a9dc7bec | |||
| 16fbd3b6a3 | |||
| aa09e60f5f | |||
| 3b2983831d | |||
| 16e69d0d0b | |||
| 548f52c770 | |||
| 8adac0d193 | |||
| c0d3c6c982 | |||
| c1516e7be0 | |||
| 8473894be2 | |||
| d5e2fbed0e | |||
| 2dfa78fbe0 | |||
| dff83c5ede | |||
| 483f483c4a | |||
| f780daf1b1 | |||
| 5e6e5bf017 | |||
| bfe2882ac3 | |||
| 0574be04f4 | |||
| 53b5da8cb8 | |||
| 5387b17c36 | |||
| 0e854b8772 | |||
| d23f247a8c | |||
| ce13c04e96 | |||
| e55fbe9ad0 | |||
| e5a11af6d2 | |||
| 76f21f8c96 | |||
| ea4d9afff2 | |||
| d884970a02 | |||
| ee11787dcc | |||
| 13e1ba977f | |||
| 7137ebdda2 | |||
| 9b71b08fe1 | |||
| 45a18edac0 | |||
| f1b1ca0a5f | |||
| 0c1718bf9c | |||
| a934ca548e | |||
| 2896bd7287 | |||
| 5ad103a8a2 | |||
| 16916db247 | |||
| 669eb1414f | |||
| 6ed8529d6a | |||
| bb36dbc4b9 | |||
| f9e38c7220 | |||
| 2b5a051a9e | |||
| 9793236941 | |||
| 497d1c9559 | |||
| 75c4fe5e67 | |||
| f4d3d3bd28 | |||
| 853aadaa0e | |||
| 8bf23e121c | |||
| cbffc3f6d5 | |||
| 52fc4ec5d8 | |||
| ab94579a30 | |||
| 43ddc79939 | |||
| 6938c6b8d0 | |||
| ba5d835248 | |||
| 510d58ba69 | |||
| c23b0770bf | |||
| cb4fdc56a5 | |||
| 3b28767212 | |||
| a1d6cb8cd0 | |||
| 0a094ef0b0 | |||
| 17c349af94 | |||
| 97f2a19227 | |||
| 6fc2415e5d | |||
| d68c131bbc | |||
| 0b6c217da6 | |||
| 432d78fc9d | |||
| 769ca1546a | |||
| 989684884c | |||
| a2b6554e73 | |||
| 28555445c9 | |||
| 20bddfe6c6 | |||
| 01243f7422 | |||
| 741edb499c | |||
| a3fd877a75 | |||
| 0c19c4c8ac | |||
| 9ad92c19a6 | |||
| c54185e6eb | |||
| 42fae2f915 | |||
| 48f4dd8573 | |||
| 396e3a4625 | |||
| 348915c420 | |||
| 157dadcae6 | |||
| 89b39b4bec | |||
| c42625c8aa | |||
| ac0e7ad738 | |||
| bdd92e69fc | |||
| f65178c08a | |||
| 3df40f18f8 | |||
| af007699ce | |||
| b8537bc4e7 | |||
| a4c3e57899 | |||
| 065069426b | |||
| 0d1cd45813 | |||
| 090032dccd | |||
| 987458ed63 | |||
| 32bc46c46b | |||
| f3a39ad5d2 | |||
| 98c95bf436 | |||
| a687ddd2a0 | |||
| 2bce8dc31e | |||
| 2c3597f0dd | |||
| 3570d9e9cf | |||
| cb8fab47af | |||
| 58cfff3912 | |||
| d2cdf36186 | |||
| 9237fbaad5 | |||
| c6a20c475f | |||
| 6e0bb6260e | |||
| 321c52351e | |||
| 13d91508c9 | |||
| 7a770659f3 | |||
| b734087d85 | |||
| ae354434e2 | |||
| ae16b03c7f | |||
| a1ac12761d | |||
| 1aabdd4394 | |||
| 0548f6b863 | |||
| 65fc40796b | |||
| 48af71d5f1 | |||
| cafcafb582 | |||
| 29da5383ed | |||
| ba50ff3b90 | |||
| 63a7dd1ce9 | |||
| 552ffb4257 | |||
| 87461fb73e | |||
| 22fac6f3c1 | |||
| 2932e8e89d | |||
| b7ea52701a | |||
| 6d8aa3e989 | |||
| 5a158155c5 | |||
| a30ec5d023 | |||
| eff9be3c99 | |||
| 5a17801c8e | |||
| 76651be12c | |||
| 5c93af6cdc | |||
| 3dbd5158c0 | |||
| 233d92b69d | |||
| bc6bf3195e | |||
| a71588777a | |||
| 8c9445d800 | |||
| 3cecf7c0b5 | |||
| e1128fa38f | |||
| 140412cb8b | |||
| 882b8629d7 | |||
| 7056866841 | |||
| cc6afa9190 | |||
| 93f075c270 | |||
| c2f991678c | |||
| b8c2f1b73b | |||
| 9b939c9a05 | |||
| 150c295370 | |||
| c5f23ce7ca | |||
| f7254fe8f6 | |||
| 32e3a4e547 | |||
| 3622155881 | |||
| 77cc8272c5 | |||
| acff0d0ef5 | |||
| 47cf6fe688 | |||
| 2b937719dd | |||
| 551faa47db | |||
| 692c1ebfda | |||
| 72c6f616f9 | |||
| 1da4b3653e | |||
| 8ef6d1667e | |||
| 961c0b581e | |||
| 3118228a68 | |||
| a47b9b0a55 | |||
| ae0b39c9c0 | |||
| 2d90947cb9 | |||
| 78c1c3bece | |||
| bbce30b24f | |||
| 92009ed6b4 | |||
| 54db3746c7 | |||
| 58c7012314 | |||
| baf0ca2682 | |||
| 191144b010 | |||
| 65d085d169 | |||
| a39e90003e | |||
| 013186a1ad | |||
| 6dd6fb0ce8 | |||
| db590a2b76 | |||
| e58088de24 | |||
| 88dfa60238 | |||
| 03fca4b9f8 | |||
| c5f7b8e0d2 | |||
| 1d18c56810 | |||
| e59e8780b6 | |||
| ea196bb22f | |||
| 47c2d19a70 | |||
| a11ab7cd7c | |||
| b7b25688ac | |||
| c77a6bb670 | |||
| 5c65e888d7 | |||
| 69932b17c9 | |||
| 7fbafa14a2 | |||
| 9b25d89d80 | |||
| 7fb65c260e | |||
| 97f8b03b19 | |||
| 53a71224e6 | |||
| f0ed480e81 | |||
| 8010ca3a6e | |||
| c844a0c319 | |||
| 11f2a22b2e | |||
| 11cdb72370 | |||
| fe09334783 | |||
| 8c7618be49 | |||
| 648ab3e188 | |||
| e5f498c310 | |||
| 278594b543 | |||
| 649bfdc957 | |||
| 251bbcfc5b | |||
| 70e73b2c81 | |||
| 72a93f9ffa | |||
| cc9a6db859 | |||
| 1814e71cbe | |||
| 06e21d6d9a | |||
| f53053d3f6 | |||
| 214983ee82 | |||
| 19e0d36204 | |||
| 64196b29ce | |||
| 5b2e0fbff2 | |||
| 8fa735401d | |||
| e1f8230bc9 | |||
| 47f1fcd3bf | |||
| 73d0f430fa | |||
| fc812654d1 | |||
| e84a9e46c0 | |||
| c354d146ae | |||
| ce2441affe | |||
| c695d4b6d4 | |||
| be7dc82b75 | |||
| 4b5519a134 | |||
| 3dd9e56708 | |||
| a78658aac7 | |||
| 65b46cfe79 | |||
| 5d20b63f98 | |||
| 54288bb2e2 | |||
| b3be1c6e91 | |||
| bdcc0fb09f | |||
| 635e54ae07 | |||
| 58fa4465ce | |||
| 0adc273e0f | |||
| f76d5fa8ea | |||
| 9615ff44af | |||
| db69516d5f | |||
| 6e93b125d5 | |||
| a187a89444 | |||
| a4c11646af | |||
| 0a73eb2ca6 | |||
| b25dc4dbea | |||
| a268f621eb | |||
| 247b3228f9 | |||
| 63541b37ec | |||
| 3d507741e4 | |||
| 86a3d86408 | |||
| 9ce61b6ea3 | |||
| a9d97b97bb | |||
| 87f0bf16fa | |||
| 5dcdff20d4 | |||
| 151117ed5e | |||
| b7e40cfb6b | |||
| 16be43edcc | |||
| 5fe3aa56cc | |||
| 9ed75a4d55 | |||
| 7fed742bab | |||
| 16b6c0dd43 | |||
| da6555dfc7 | |||
| 351d25170b | |||
| fce4f043a4 | |||
| 90fb3945a6 | |||
| b7d56afb52 | |||
| 23c8dc27cf | |||
| 5660b592de | |||
| 9b2f938b49 | |||
| 3a4f5ffa3d | |||
| 14b8350bf1 | |||
| e9ec1361ac | |||
| a283d034e1 | |||
| 5e8fd7ea4e | |||
| 121bbd0d6f | |||
| 2db5fd545b | |||
| 3dc94c7f23 | |||
| dafc5117d2 | |||
| 2297a20a15 | |||
| ca37803be3 | |||
| e3d2327d93 | |||
| 89f3e6f955 | |||
| e79b429c5e | |||
| c240f6bd5b | |||
| 33312e0fd9 | |||
| 53533d0f9d | |||
| 6b51a9f69b | |||
| 33e1f7e21f | |||
| 7e86104ef9 | |||
| a577823b48 | |||
| e59d6b089f | |||
| a8839b71ac | |||
| 6e7aa71b0d | |||
| 1486f61511 | |||
| d68a1c34bf | |||
| 8c2bbe2eb4 | |||
| 6fff438872 | |||
| db110a9957 | |||
| 0fd14db84c | |||
| ecce20d2bf | |||
| 1e8782a177 | |||
| 17e05cf5af | |||
| 28989f8828 | |||
| dd774eedfb | |||
| b828fc0c57 | |||
| d2e0fecfb7 | |||
| 1d0e789902 | |||
| 796aeb99a4 | |||
| f756ac5fc2 | |||
| 9b779e788f | |||
| e11ca878b6 | |||
| 8ebcff6483 | |||
| f8e23b4016 | |||
| d2217eb142 | |||
| 68274d827e | |||
| 76601c1d46 | |||
| b526998c8b | |||
| fcf5316aa1 | |||
| dffef1a6e9 | |||
| ec29c63cf3 | |||
| a35f259986 | |||
| 206345b451 | |||
| 622bde003e | |||
| 9a80b8870e | |||
| cdaf97226a | |||
| 3a66f50318 | |||
| 7b27f0ed22 | |||
| ba35840cbd | |||
| 7ab347523f | |||
| ddccd78269 | |||
| 77c781e7b8 | |||
| 26030c1efe | |||
| d8313f4d0a | |||
| 5225dc34e1 | |||
| b6d9393513 | |||
| 54f66da5d8 | |||
| ae3d4750f3 | |||
| 56cb1c6d24 | |||
| 371f02c869 | |||
| a450cdaa42 | |||
| 8989bf9a4f | |||
| d41ad780c5 | |||
| ed3a752912 | |||
| 358378c7e6 | |||
| 05097af27b | |||
| 15eb6a235d | |||
| 138b043f0f | |||
| 99557b46f5 | |||
| 4d42379374 | |||
| fb207d99b9 | |||
| 188a18d6ac | |||
| e81acdf0eb | |||
| 99ba47397a | |||
| 380e07aa9c | |||
| 6aa98da2f4 | |||
| 30d2416ba2 | |||
| 695fde6f8b | |||
| 989b4461e7 | |||
| 2d0ec4df8a | |||
| 42fe23a4cf | |||
| 66b79054df | |||
| 06a2fe3fcc | |||
| 7ffc8df247 | |||
| f934bad2e4 | |||
| 302c674d70 | |||
| c9231684f6 | |||
| 3a150c063f | |||
| 91e8da3a53 | |||
| 17a36e1fc3 | |||
| 7ff9e9f75c | |||
| 3fadba76ba | |||
| 7ea232f516 | |||
| 65831fa1e9 | |||
| 58ad0ece69 | |||
| d2c7261f74 | |||
| ac9e415d08 | |||
| 42646c1ee2 | |||
| 65f0cf4c8f | |||
| b207d61f78 | |||
| dcebd08f55 | |||
| bacc153cc8 | |||
| 331ecfeae5 | |||
| d3d45c82d4 | |||
| be641a43c3 | |||
| b00479ffbb | |||
| de305dbdb9 | |||
| 7958f87c24 | |||
| e5d6d9fc16 | |||
| 052f71b1bd | |||
| df08ec2b51 | |||
| a5c89a3edd | |||
| 52d26cb825 | |||
| c46741f733 | |||
| 3f63d03572 | |||
| 9129e9b507 | |||
| 911c2e8b27 | |||
| a2de6e358c | |||
| 3971bcedda | |||
| 725b08e021 | |||
| 49755abe8b | |||
| 45cfb14219 | |||
| fa656726ef | |||
| 784d6361f8 | |||
| 66782faba4 | |||
| 8c40fadc2e | |||
| 13b97e58f5 | |||
| 50c1b84f94 | |||
| df76ec7d6f | |||
| 47539c8d44 | |||
| 6022aeee50 | |||
| 7aaae21e0c | |||
| 69db13e4c4 | |||
| 8661bf6fc8 | |||
| 85562b5888 | |||
| 9a851e342f | |||
| 5278c04051 | |||
| 21a04212d5 | |||
| 005ad5b042 | |||
| e25c0dc006 | |||
| 09d38eb57c | |||
| 19361ac7cb | |||
| 85e97e984b | |||
| dcd7f65223 | |||
| 6ee231d271 | |||
| acd34f7b8d | |||
| e339262251 | |||
| acb06bcc6d | |||
| 25da2556ac | |||
| b2f7c6f204 | |||
| 5272fa972a | |||
| 91353ced8a | |||
| 3448267344 | |||
| 34c150924e | |||
| 617f7676ce | |||
| adaff91225 | |||
| 1597675f4e | |||
| 2f92c81bee | |||
| 1e0f11d064 | |||
| b26ddc422a | |||
| 813d95b898 | |||
| 4f3f7963d0 | |||
| 4d2209ca8d | |||
| 06e866a3e8 | |||
| aee6c85349 | |||
| cd141048f5 | |||
| 01ea8c7091 | |||
| ce08f6fc50 | |||
| b16fc0b0c1 | |||
| f69245adaa | |||
| da751c06ca | |||
| 6f92a2ec2c | |||
| bb42f3cd77 | |||
| 3fd4167335 | |||
| 89820b94ef | |||
| 99564e7fa1 | |||
| 908da13317 | |||
| a905a0d6e4 | |||
| e86a83a676 | |||
| 48dcdaaa57 | |||
| c1d0093d48 | |||
| bfa4ab46b1 | |||
| cb83f2ddb0 | |||
| 14a5e3a9b8 | |||
| 7cffa7931a | |||
| c83637118c | |||
| f4cd952b1c | |||
| 34d5a32fe5 | |||
| f1e5585337 | |||
| b7c7268806 | |||
| 7f49ddb2cc | |||
| 0e7c9e3d45 | |||
| 7e6a7a2e2a | |||
| 5cc2661375 | |||
| 1aefd6836c | |||
| ae6b9c5eed | |||
| 901db54fe9 | |||
| bf0068f015 | |||
| d7e974fff4 | |||
| c611066a58 | |||
| 67d3b21414 | |||
| 3e38a71893 | |||
| 9829f99055 | |||
| 17077e4c16 | |||
| f7aed8dea6 | |||
| 9c1d5d761e | |||
| 74f8e66a70 | |||
| f626d98fcf | |||
| c0235e212f | |||
| 4d21e136bc | |||
| 49f30a8e62 | |||
| 131b0c7351 | |||
| 4e3b1706cf | |||
| 9495a509e6 | |||
| c9b22215aa | |||
| e5e2b9b8b0 | |||
| 98d791810a | |||
| 7b107df84e | |||
| bbf6e289d3 | |||
| 81bbbdf652 | |||
| 0fe49cf5a9 | |||
| ca87e4b118 | |||
| 2ad13fb1b4 | |||
| cd245a9ef6 | |||
| 4a258a32eb | |||
| 5b8c9281f1 | |||
| 60c8952863 | |||
| 9a320d451c | |||
| a3f16cae1e | |||
| 76966ebd6c | |||
| fee11748df | |||
| 871079c23d | |||
| 15d39cea06 | |||
| 02d0948982 | |||
| b96623d558 | |||
| 3875d2d1d2 | |||
| 65f5e0cb0b | |||
| 82455eb7c5 | |||
| b44deb7404 | |||
| e1939e1a50 | |||
| 0e546ddbbb | |||
| c070af524f | |||
| a27178845b | |||
| 6a8463c82e | |||
| b5ba5b524c | |||
| 4cc953edd4 | |||
| 854ff927b9 | |||
| 60ed8b3298 | |||
| 3bb05f2bed | |||
| 38793b51a3 | |||
| 7d81a84c85 | |||
| e63f6156fe | |||
| f63105ef14 | |||
| ea392d2189 | |||
| 0da8930e26 | |||
| 19229b1bbc | |||
| 65bbca20ff | |||
| 852fba0d35 | |||
| e58643908d | |||
| 38fe0b84ff | |||
| 29194f7003 | |||
| 78fe408dcd | |||
| e024e73286 | |||
| 69dce7af16 | |||
| de0bd30eda | |||
| 86bdcea181 | |||
| faa6d12430 | |||
| 67636c6b92 | |||
| 19788fcf05 | |||
| e019344d09 | |||
| bce6669874 | |||
| bde43b751a | |||
| 10ce70f444 | |||
| f52b514ea7 | |||
| 87e9cca57b | |||
| 2ce1492394 | |||
| 0a5d4f670c | |||
| 732a66d740 | |||
| 8eae1f229f | |||
| 8526380448 | |||
| e80af8fc6c | |||
| dc58062e37 | |||
| 3d9ed088cd | |||
| 5900d2bd10 | |||
| 0cb09ffc44 | |||
| a13ddf490a | |||
| 5cc02d261a | |||
| a4c83d3a00 | |||
| c171c8494b | |||
| a6777d7c88 | |||
| 2e4316d71a | |||
| 36a215ce73 | |||
| 8e2d8b16c0 | |||
| c94a7efc8c | |||
| d94c184b68 | |||
| f948664e01 | |||
| be5b650cdd | |||
| e83c5ce5e3 | |||
| 0a1e712e1b | |||
| fea23844d3 | |||
| e6ee2d4f44 | |||
| abd3acca76 | |||
| 7e076cd3c1 | |||
| bf633e17ca | |||
| fcecdd3d3f | |||
| 783f5499b5 | |||
| 5fea852e1f | |||
| 47d5f47900 | |||
| 39b26340c5 | |||
| b381700d94 | |||
| 3551c66995 | |||
| cdd78f2477 | |||
| c2fd2ca716 | |||
| d71437b9f6 | |||
| 75ececb483 | |||
| c832a7bf11 | |||
| bad8e684ee | |||
| 4a2b9a3e40 | |||
| 0dc9a80c7a | |||
| 4cde3fe399 | |||
| f580044503 | |||
| 8ec54c5d16 | |||
| fc5cbc2e1f | |||
| 6bdb666f4f | |||
| 33886f988f | |||
| 77979e14cd | |||
| a942d5ff6f | |||
| f645e5641e | |||
| 7c568c5ce6 | |||
| 502706792e | |||
| bed02cc1ca | |||
| d2aa7e5710 | |||
| 7079eb6b72 | |||
| 366538f6c3 | |||
| 2a50347fc3 | |||
| 546938d152 | |||
| 9a21cd4c8f | |||
| 95a5d1cef3 | |||
| cd2a7f43cb | |||
| c40b2e3c8a | |||
| 1287330241 | |||
| 9666e58399 | |||
| ac29d925a3 | |||
| 47d812ae12 | |||
| 346e4fba38 | |||
| 89184978b2 | |||
| 1f67008765 | |||
| 09d58e26ed | |||
| 8c3d95cef8 | |||
| 70ba39ce68 | |||
| ed80d8e468 | |||
| dbd1d5a2a8 | |||
| bfdcb740cd | |||
| 0eecb410da | |||
| fa5c68c24d | |||
| a09c3fd728 | |||
| e803721e4b | |||
| 3d715cc18d | |||
| 1eb355a221 | |||
| ec84b3ac91 | |||
| f8c34847ae | |||
| 366453d9f7 | |||
| 31b621ab3b | |||
| fcdc3266af | |||
| 6f251269cf | |||
| fb0b1d0549 | |||
| c3dcc74dc2 | |||
| acfbb10907 | |||
| ce580611bd | |||
| c01b1dabb5 | |||
| 1c7f70022a | |||
| 7e658b5efd | |||
| eee1a9a506 | |||
| da647c0e7d | |||
| 9f0d6bb17e | |||
| 2d59095ab1 | |||
| 3a0e4226e9 | |||
| 9c4f337267 | |||
| 844dbd8c2f | |||
| b483f3f322 | |||
| e00956a1a7 | |||
| 8b3201b74c | |||
| dfa3d42162 | |||
| 29722b6b32 | |||
| 9af365d116 | |||
| ee70bea6ae | |||
| a08b13ce3b | |||
| bc871bd2ac | |||
| 367dc9c662 | |||
| 1f3dca1ff7 | |||
| 25aea07acd | |||
| 77306840a8 | |||
| 79a61e1f2e | |||
| 263f33eb65 | |||
| dab170b317 | |||
| e47a0b7fcd | |||
| aeb4538bff | |||
| e5b9c259d9 | |||
| bdfed023c7 | |||
| 1c5ea78b44 | |||
| d7f689c0d5 | |||
| 00a341ab4b | |||
| fca848d82f | |||
| 43fbf90c51 | |||
| 2e1b6ae346 | |||
| 490f8bdac3 | |||
| 675474127c | |||
| 503e3f8aa6 | |||
| e56bfcb600 | |||
| 47553cb1e8 | |||
| 2d4c0e9c64 | |||
| 87d9309a8e | |||
| f35545a1b1 | |||
| 9e7a7b0d9b | |||
| 2931be7493 | |||
| dcaf30161a | |||
| cb21970040 | |||
| 4bc97c18dd | |||
| e86269cf53 | |||
| ef94c709e3 | |||
| e092790ea0 | |||
| 5364bbe6a6 | |||
| 7bce4dd234 | |||
| 02b4f444db | |||
| 2ebd629e4f | |||
| 2f61a4bb61 | |||
| b87acc5954 | |||
| 1a4628455a | |||
| e928186669 | |||
| 146391f142 | |||
| 5d265364e3 | |||
| e3ea589b13 | |||
| 26cf66be20 | |||
| 84b0e03551 | |||
| 66882b527a | |||
| 30519e4405 | |||
| 6942b7a226 | |||
| 9c19a3da25 | |||
| 7136dd2a8a | |||
| eaca40c238 | |||
| 88011d4705 | |||
| 049976aa81 | |||
| 52bcf0eff8 | |||
| 100c949d40 | |||
| aeef988e7c | |||
| 9fcfffb1c6 | |||
| a1e0c015bb | |||
| e0b0f8f764 | |||
| 4916ffd2d0 | |||
| a47422f975 | |||
| a97cfb87cc | |||
| 95e478d1d5 | |||
| 838751e93a | |||
| e82f5816b0 | |||
| 3c9e80b43c | |||
| b6d6c0d136 | |||
| 16458af245 | |||
| a8478b12f4 | |||
| d9073dd6e6 | |||
| 1f4e6dfd34 | |||
| e8cc60dc62 | |||
| 6406c8577b | |||
| 4b170a09dc | |||
| b00bc2e1e2 | |||
| 87f6bfd52e | |||
| 51d732f5bc | |||
| 8d545b8cf7 | |||
| 54e689c054 | |||
| 03a26300a9 | |||
| 983b9e09a6 | |||
| f9ebd6473f | |||
| 4b3279f062 | |||
| e90b318b74 | |||
| 2c7ee6676f | |||
| ff690493c9 | |||
| f5b526fc4b | |||
| 05b05bed67 | |||
| b2faa48f52 | |||
| 8d6e7a8cfd | |||
| ec983adb0b | |||
| 4c437e6204 | |||
| 1eb99b1936 | |||
| 0a10a983d4 | |||
| 6879976163 | |||
| 3066a4fff5 | |||
| bcec8babb4 | |||
| 5ccee6c6be | |||
| 7b72925a1f | |||
| 993dbb0e12 | |||
| 56c030994a | |||
| 9b91735684 | |||
| da50c44276 | |||
| b77bae8ff2 | |||
| 6a67c9485d | |||
| 19850c3531 | |||
| 34363f81e9 | |||
| 6d0ef76a66 | |||
| f66c88446a | |||
| 3df16c36d3 | |||
| f9069d46c3 | |||
| 8ad3a034a6 | |||
| bd7393978a | |||
| 7d66b55253 | |||
| 6c9d0c88d6 | |||
| b6871accac | |||
| 7c74821ffb | |||
| 84f39ff871 | |||
| 160937e674 | |||
| 1f4dea6f3c | |||
| da93630544 | |||
| df3d77dcee | |||
| 7ae45c3e2b | |||
| fd0e205ec1 | |||
| 9cf981d6d4 | |||
| 65a55af804 | |||
| ef296e6336 | |||
| 2c9d509311 | |||
| e9bf19e9da | |||
| 017fc5947d | |||
| 88d396666c | |||
| 84b5a8105b | |||
| bf9c0610a3 | |||
| a4d9312225 | |||
| d94a33179c | |||
| 0586243d88 | |||
| 8349704ec6 | |||
| 9c51f71d7d | |||
| ddd5ecf8ad | |||
| 77db6242d3 | |||
| b0b9fa21c5 | |||
| cb66af6438 | |||
| 4a4e516f37 | |||
| acbb4dac04 | |||
| 940f002d43 | |||
| 8d5dcaac11 | |||
| 73765990d2 | |||
| 62836d9468 | |||
| ba2f4115ce | |||
| 306a4aa2b2 | |||
| 4a0955b300 | |||
| faaf1e9848 | |||
| be850c7d6e | |||
| 65b5251738 | |||
| 70620412c9 | |||
| 6ed226c489 | |||
| 4e2b8964bf | |||
| 49871db568 | |||
| f4e3bcf55c | |||
| b58eecdbec | |||
| 2f736d097f | |||
| c9ff703592 | |||
| 40220a25de | |||
| 0e4010c1ee | |||
| dbbc2ead60 | |||
| 56e0bbeeca | |||
| 98c25d7b78 | |||
| c309aa21d6 | |||
| 8e6bf2b7a7 | |||
| c0474b72cf | |||
| 26bc0e98d7 | |||
| daa17dc72c | |||
| 2a6a7da245 | |||
| 399e7ad6c4 | |||
| 64b82059f2 | |||
| 4995e9b7fc | |||
| 8612138861 | |||
| 46dce05e10 | |||
| 3cfffa9440 | |||
| 690bd6f5c1 | |||
| 64c5ac444d | |||
| da833bbb8a | |||
| 7d785ed3dd | |||
| 674354fc79 | |||
| 3b0ca67441 | |||
| 706f5feb9c | |||
| 5628fc3951 | |||
| 5f8a8609f6 | |||
| 3c486f659b | |||
| ab91bc64a8 | |||
| 623d78c092 | |||
| 28b1ac7328 | |||
| ba1d027f24 | |||
| 7174029227 | |||
| e7e7464a14 | |||
| 4f502b6b8f | |||
| abbdddf329 | |||
| a54de2626d | |||
| f52df1c1b0 | |||
| 22e22440c9 | |||
| 7f869d2175 | |||
| 053e485c2d | |||
| 2ef75ec94b | |||
| 941168e14a | |||
| 50f3f0919a | |||
| b412236ad2 | |||
| 02cd5db630 | |||
| 9a7069caf7 | |||
| 7a61341d15 | |||
| 0376ab918a | |||
| 2a209ffb1a | |||
| 7b67116e2f | |||
| f75896f0e4 | |||
| 1bff4dd0bd | |||
| 54b2d289e2 | |||
| 37ae23a553 | |||
| 335457f0d7 | |||
| 511320982d | |||
| 98cc620ea4 | |||
| 736b59bdc7 | |||
| f968290d26 | |||
| 978fbcd428 | |||
| a3fe4bd818 | |||
| 4eacceac4e | |||
| 3f592ab28a | |||
| bb6a2d66a5 | |||
| 846360f8c1 | |||
| 17cb8451b0 | |||
| 1fd64cbcac | |||
| 3032891038 | |||
| 4f7dfe54ca | |||
| d24e9751db | |||
| f8d222ffa4 | |||
| 74cea9af7f | |||
| e39271b5cf | |||
| 8d222fd4d9 | |||
| 7aff555b61 | |||
| 50c6ecae66 | |||
| 4c8590dcdd | |||
| 03f9917518 | |||
| dc04b73e2d | |||
| fb926225d9 | |||
| 6f3b670662 | |||
| 768db4835a | |||
| a6b7f452b2 | |||
| f4f5224d6b | |||
| f4a0d4fbc0 | |||
| 161b93171f | |||
| 5249878e85 | |||
| 1a872646b6 | |||
| b17a582c57 | |||
| 6df2cf82ad | |||
| 3ea839903d | |||
| a132cab3bd | |||
| 986754b0c4 | |||
| a6113bc407 | |||
| b3a176db6c | |||
| b50d14590f | |||
| 375bfbce47 | |||
| e7dab34926 | |||
| 5776e773c0 | |||
| 704b27ea6d | |||
| 8afc5227b5 | |||
| 6d64347cae | |||
| 8663ff52dc | |||
| 9e6730f968 | |||
| 5e317a2c29 | |||
| 0209975a48 | |||
| 97a693f9eb | |||
| c90d75ab63 | |||
| 645b03ff60 | |||
| c55d43a726 | |||
| d72e30eea1 | |||
| a311d456b2 | |||
| 4d19840db8 | |||
| ba6d368226 | |||
| d3d526423f | |||
| 11ba2d6dec | |||
| 2c41453395 | |||
| 834f34c0ff | |||
| 1dcdb3ba3e | |||
| 6d514108dd | |||
| d9ef77f12d | |||
| 22465c1a4b | |||
| 2232d0cdc6 | |||
| b5f3dbf7c9 |
@@ -0,0 +1,38 @@
|
||||
# big-AGI non-code files
|
||||
/docs/
|
||||
README.md
|
||||
|
||||
# Node build artifacts
|
||||
/node_modules
|
||||
/.pnp
|
||||
.pnp.js
|
||||
|
||||
# next.js
|
||||
/.next/
|
||||
/out/
|
||||
|
||||
# production
|
||||
/build
|
||||
|
||||
# versioning
|
||||
.git/
|
||||
.github/
|
||||
|
||||
# IDEs
|
||||
.idea/
|
||||
|
||||
# debug
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
.pnpm-debug.log*
|
||||
|
||||
# local env files
|
||||
.env*.local
|
||||
|
||||
# vercel
|
||||
.vercel
|
||||
|
||||
# typescript
|
||||
*.tsbuildinfo
|
||||
next-env.d.ts
|
||||
@@ -1,21 +0,0 @@
|
||||
# [Recommended for local deployments] Backend API key for OpenAI, so that users don't need one (UI > this > '')
|
||||
OPENAI_API_KEY=
|
||||
# [Not needed] Set the backend host for the OpenAI API, to enable platforms such as Helicone (UI > this > api.openai.com)
|
||||
OPENAI_API_HOST=
|
||||
# [Not needed] Sets the "OpenAI-Organization" header field to support organization users (UI > this > '')
|
||||
OPENAI_API_ORG_ID=
|
||||
|
||||
# [Optional] Enables ElevenLabs credentials on the server side - for optional text-to-speech
|
||||
ELEVENLABS_API_KEY=
|
||||
ELEVENLABS_API_HOST=
|
||||
ELEVENLABS_VOICE_ID=
|
||||
|
||||
# [Optional] Prodia credentials on the server side - for optional image generation
|
||||
PRODIA_API_KEY=
|
||||
|
||||
# [Optional, Search] Google Cloud API Key
|
||||
# https://console.cloud.google.com/apis/credentials -
|
||||
GOOGLE_CLOUD_API_KEY=
|
||||
# [Optional, Search] Google Custom/Programmable Search Engine ID
|
||||
# https://programmablesearchengine.google.com/
|
||||
GOOGLE_CSE_ID=
|
||||
@@ -0,0 +1,13 @@
|
||||
# These are supported funding model platforms
|
||||
|
||||
github: enricoros # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
|
||||
patreon: # Replace with a single Patreon username
|
||||
open_collective: # Replace with a single Open Collective username
|
||||
ko_fi: # Replace with a single Ko-fi username
|
||||
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
|
||||
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
|
||||
liberapay: # Replace with a single Liberapay username
|
||||
issuehunt: # Replace with a single IssueHunt username
|
||||
otechie: # Replace with a single Otechie username
|
||||
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
|
||||
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
|
||||
@@ -0,0 +1,25 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Omg what's happening?
|
||||
title: "[BUG]"
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Describe the bug**
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
Where is it happening?
|
||||
- Which device [Mobile/Desktop, os version]:
|
||||
- Which browser:
|
||||
- Which website:
|
||||
|
||||
**To Reproduce**
|
||||
Steps to reproduce the behavior:
|
||||
|
||||
**Expected behavior**
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
**Screenshots / context**
|
||||
If applicable, please add screenshots or additional context
|
||||
@@ -0,0 +1,75 @@
|
||||
---
|
||||
name: Maintainers-Release
|
||||
about: Maintainers
|
||||
title: Release 1.2.3
|
||||
labels: ''
|
||||
assignees: enricoros
|
||||
|
||||
---
|
||||
|
||||
## Release checklist:
|
||||
|
||||
- [ ] Update the [Roadmap](https://github.com/users/enricoros/projects/4/views/2) calling out shipped features
|
||||
- [ ] Create and update a [Milestone](https://github.com/enricoros/big-agi/milestones) for the release
|
||||
- [ ] Assign this task
|
||||
- [ ] Assign all the shipped roadmap Issues
|
||||
- [ ] Assign the relevant [recently closed Isssues](https://github.com/enricoros/big-agi/issues?q=is%3Aclosed+sort%3Aupdated-desc)
|
||||
- Code changes:
|
||||
- [ ] Create a release branch 'release-x.y.z': `git checkout -b release-1.2.3`
|
||||
- [ ] Create a temporary tag `git tag v1.2.3 && git push opensource --tags`
|
||||
- [ ] Create a [New Draft GitHub Release](https://github.com/enricoros/big-agi/releases/new), and generate the automated changelog (for new contributors)
|
||||
- [ ] Update the release version in package.json, and `npm i`
|
||||
- [ ] Update in-app News [src/apps/news/news.data.tsx](/src/apps/news/news.data.tsx)
|
||||
- [ ] Update the in-app News version number
|
||||
- [ ] Update the readme with the new release
|
||||
- [ ] Copy the highlights to the [docs/changelog.md](/docs/changelog.md)
|
||||
- Release:
|
||||
- [ ] merge onto main
|
||||
- [ ] verify deployment on Vercel
|
||||
- [ ] verify container on GitHub Packages
|
||||
- create a GitHub release
|
||||
- [ ] name it 'vX.Y.Z'
|
||||
- [ ] copy the release notes and link appropriate artifacts
|
||||
- Announce:
|
||||
- [ ] Discord announcement
|
||||
- [ ] Twitter announcement
|
||||
|
||||
|
||||
## Links
|
||||
Milestone:
|
||||
Former release task:
|
||||
GitHub release:
|
||||
|
||||
|
||||
## Artifacts Generation
|
||||
|
||||
1) The following is my opensource application
|
||||
- paste README.md
|
||||
2) I am announcing a new version, 1.7.0. The following were the announcements for 1.6.0. Discord announcement, GitHub Release, in-app news.data.tsx, changelog.md.
|
||||
- paste the former: `discord announcement`, `GitHub release`, `news.data.tsx`, `changelog.md`
|
||||
3) The following is the new data I have for 1.7.0
|
||||
- paste the link to the milestone (closed) and each individual issue (content will be downloaded)
|
||||
- paste the git changelog `git log v1.6.0..v1.7.0 | clip`
|
||||
|
||||
|
||||
### news.data.TSX
|
||||
|
||||
```markdown
|
||||
I need the following from you:
|
||||
|
||||
1. a table summarizing all the new features in 1.2.3 (description, significance, usefulness, do not link the commit, but have the issue number), which will be used for the artifacts later
|
||||
2. after the table score each feature from a user impact and magnitude point of view
|
||||
3. Improve the table, in decreasing order of importance for features, fixing any detail that's missing, in particular check if there are commits of significance from a user or developer point of view, which are not contained in the table
|
||||
4. I want you then to update the news.data.tsx for the new release
|
||||
```
|
||||
|
||||
### GitHub release
|
||||
|
||||
Now paste the former release (or 1.5.0 which was accurate and great), including the new contributors and
|
||||
some stats (# of commits, etc.), and roll it for the new release.
|
||||
|
||||
### Discord announcement
|
||||
|
||||
```markdown
|
||||
Can you generate my 1.2.3 big-AGI discord announcement from the GitHub Release announcement, and the in-app News?
|
||||
```
|
||||
@@ -0,0 +1,17 @@
|
||||
---
|
||||
name: Roadmap request
|
||||
about: Suggest a roadmap item
|
||||
title: "[Roadmap]"
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Why**
|
||||
The reason behind the request - we love it to be framed for "users will be able to do x" rather than quick-aging hype-tech-of-the-day requests
|
||||
|
||||
**Concise description**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Requirements**
|
||||
If you can, please detail the changes you expect in UX, user workflows, technology, architecture (if not, the reviewers will do it for you)
|
||||
@@ -7,11 +7,15 @@
|
||||
# To get a newer version, you will need to update the SHA.
|
||||
# You can also reference a tag or branch, but the action may change without warning.
|
||||
|
||||
name: Create and publish a Docker image
|
||||
name: Create and publish Docker images
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ['main']
|
||||
branches:
|
||||
- main
|
||||
#- main-stable # Disabled as the v* tag is used for stable releases
|
||||
tags:
|
||||
- 'v*' # Trigger on version tags (e.g., v1.7.0)
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
@@ -26,7 +30,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Log in to the Container registry
|
||||
uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
|
||||
@@ -40,11 +44,17 @@ jobs:
|
||||
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
tags: |
|
||||
type=raw,value=development,enable=${{ github.ref == 'refs/heads/main' }}
|
||||
type=raw,value=stable,enable=${{ github.ref == 'refs/heads/main-stable' }}
|
||||
type=ref,event=tag # Use the tag name as a tag for tag builds
|
||||
type=semver,pattern={{version}} # Generate semantic versioning tags for tag builds
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
@@ -26,7 +26,8 @@ yarn-error.log*
|
||||
.pnpm-debug.log*
|
||||
|
||||
# local env files
|
||||
.env*.local
|
||||
.env
|
||||
.env.*
|
||||
|
||||
# vercel
|
||||
.vercel
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
{
|
||||
"singleAttributePerLine": false,
|
||||
"singleQuote": true,
|
||||
"trailingComma": "all",
|
||||
"endOfLine": "lf",
|
||||
"printWidth": 160
|
||||
}
|
||||
}
|
||||
@@ -1,41 +1,56 @@
|
||||
# Test
|
||||
FROM node:18-alpine as test-target
|
||||
ENV NODE_ENV=development
|
||||
ENV PATH $PATH:/usr/src/app/node_modules/.bin
|
||||
# Base
|
||||
FROM node:18-alpine AS base
|
||||
ENV NEXT_TELEMETRY_DISABLED 1
|
||||
|
||||
WORKDIR /usr/src/app
|
||||
# Dependencies
|
||||
FROM base AS deps
|
||||
WORKDIR /app
|
||||
|
||||
# Dependency files
|
||||
COPY package*.json ./
|
||||
COPY prisma ./prisma
|
||||
|
||||
# CI and release builds should use npm ci to fully respect the lockfile.
|
||||
# Local development may use npm install for opportunistic package updates.
|
||||
ARG npm_install_command=ci
|
||||
RUN npm $npm_install_command
|
||||
# Install dependencies, including dev (release builds should use npm ci)
|
||||
ENV NODE_ENV development
|
||||
RUN npm ci
|
||||
|
||||
# Builder
|
||||
FROM base AS builder
|
||||
WORKDIR /app
|
||||
|
||||
# Copy development deps and source
|
||||
COPY --from=deps /app/node_modules ./node_modules
|
||||
COPY . .
|
||||
|
||||
# Build
|
||||
FROM test-target as build-target
|
||||
ENV NODE_ENV=production
|
||||
|
||||
# Use build tools, installed as development packages, to produce a release build.
|
||||
# Build the application
|
||||
ENV NODE_ENV production
|
||||
RUN npm run build
|
||||
|
||||
# Reduce installed packages to production-only.
|
||||
# Reduce installed packages to production-only
|
||||
RUN npm prune --production
|
||||
|
||||
# Archive
|
||||
FROM node:18-alpine as archive-target
|
||||
ENV NODE_ENV=production
|
||||
ENV PATH $PATH:/usr/src/app/node_modules/.bin
|
||||
# Runner
|
||||
FROM base AS runner
|
||||
WORKDIR /app
|
||||
|
||||
WORKDIR /usr/src/app
|
||||
# As user
|
||||
RUN addgroup --system --gid 1001 nodejs
|
||||
RUN adduser --system --uid 1001 nextjs
|
||||
|
||||
# Include only the release build and production packages.
|
||||
COPY --from=build-target /usr/src/app/node_modules node_modules
|
||||
COPY --from=build-target /usr/src/app/.next .next
|
||||
# Copy Built app
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/public public
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next .next
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/node_modules node_modules
|
||||
|
||||
# Minimal ENV for production
|
||||
ENV NODE_ENV production
|
||||
ENV PATH $PATH:/app/node_modules/.bin
|
||||
|
||||
# Run as non-root user
|
||||
USER nextjs
|
||||
|
||||
# Expose port 3000 for the application to listen on
|
||||
EXPOSE 3000
|
||||
|
||||
CMD ["next", "start"]
|
||||
# Start the application
|
||||
CMD ["next", "start"]
|
||||
@@ -1,37 +1,87 @@
|
||||
# `BIG-AGI` 🤖💬
|
||||
# BIG-AGI 🧠✨
|
||||
|
||||
Welcome to `big-AGI`, FKA `nextjs-chatgpt-app`. 👋🎉
|
||||
Personal AGI App, powered by `OpenAI GPT-4` and beyond. Designed for smart humans and super-heroes,
|
||||
this responsive web app comes with Personas, Drawing, Code Execution, PDF imports, Voice support,
|
||||
data Rendering, AGI functions, chats and more. Show your friends some `#big-AGI-energy` 🚀
|
||||
Welcome to big-AGI 👋, the GPT application for professionals that need function, form,
|
||||
simplicity, and speed. Powered by the latest models from 7 vendors and
|
||||
open-source model servers, `big-AGI` offers best-in-class Voice and Chat with AI Personas,
|
||||
visualizations, coding, drawing, calling, and quite more -- all in a polished UX.
|
||||
|
||||
[](https://big-agi.com)
|
||||
Pros use big-AGI. 🚀 Developers love big-AGI. 🤖
|
||||
|
||||
[](https://big-agi.com)
|
||||
|
||||
Or fork & run on Vercel
|
||||
|
||||
[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-agi&env=OPENAI_API_KEY,OPENAI_API_HOST&envDescription=OpenAI%20KEY%20for%20your%20deployment.%20Set%20HOST%20only%20if%20non-default.)
|
||||
|
||||
## Useful 👊
|
||||
## 👉 [roadmap](https://github.com/users/enricoros/projects/4/views/2)
|
||||
|
||||
big-AGI is an open book; our **[public roadmap](https://github.com/users/enricoros/projects/4/views/2)**
|
||||
shows the current developments and future ideas.
|
||||
|
||||
- Got a suggestion? [_Add your roadmap ideas_](https://github.com/enricoros/big-agi/issues/new?&template=roadmap-request.md)
|
||||
- Want to contribute? [_Pick up a task!_](https://github.com/users/enricoros/projects/4/views/4) - _easy_ to _pro_
|
||||
|
||||
### What's New in 1.7.3 · Dec 13, 2023 · Attachment Theory 🌟
|
||||
|
||||
- **Attachments System Overhaul**: Drag, paste, link, snap, text, images, PDFs and more. [#251](https://github.com/enricoros/big-agi/issues/251)
|
||||
- **Desktop Webcam Capture**: Image capture now available as Labs feature. [#253](https://github.com/enricoros/big-agi/issues/253)
|
||||
- **Independent Browsing**: Full browsing support with Browserless. [Learn More](https://github.com/enricoros/big-agi/blob/main/docs/config-browse.md)
|
||||
- **Overheat LLMs**: Push the creativity with higher LLM temperatures. [#256](https://github.com/enricoros/big-agi/issues/256)
|
||||
- **Model Options Shortcut**: Quick adjust with `Ctrl+Shift+O`
|
||||
- Optimized Voice Input and Performance
|
||||
- Latest Ollama and Oobabooga models
|
||||
- For developers: **Password Protection**: HTTP Basic Auth. [Learn How](https://github.com/enricoros/big-agi/blob/main/docs/deploy-authentication.md)
|
||||
- [1.7.1]: Improved Ollama chats. [#270](https://github.com/enricoros/big-agi/issues/270)
|
||||
- [1.7.2]: OpenRouter login & free models 🎁
|
||||
- [1.7.3]: Mistral Platform support. [#273](https://github.com/enricoros/big-agi/issues/273)
|
||||
|
||||
### What's New in 1.6.0 - Nov 28, 2023
|
||||
|
||||
- **Web Browsing**: Download web pages within chats - [browsing guide](https://github.com/enricoros/big-agi/blob/main/docs/config-browse.md)
|
||||
- **Branching Discussions**: Create new conversations from any message
|
||||
- **Keyboard Navigation**: Swift chat navigation with new shortcuts (e.g. ctrl+alt+left/right)
|
||||
- **Performance Boost**: Faster rendering for a smoother experience
|
||||
- **UI Enhancements**: Refined interface based on user feedback
|
||||
- **New Features**: Anthropic Claude 2.1, `/help` command, and Flattener tool
|
||||
- **For Developers**: Code quality upgrades and snackbar notifications
|
||||
|
||||
### What's New in 1.5.0 - Nov 19, 2023
|
||||
|
||||
- **Continued Voice**: Engage with hands-free interaction for a seamless experience
|
||||
- **Visualization Tool**: Create data representations with our new visualization capabilities
|
||||
- **Ollama Local Models**: Leverage local models support with our comprehensive guide
|
||||
- **Text Tools**: Enjoy tools including highlight differences to refine your content
|
||||
- **Mermaid Diagramming**: Render complex diagrams with our Mermaid language support
|
||||
- **OpenAI 1106 Chat Models**: Experience the cutting-edge capabilities of the latest OpenAI models
|
||||
- **SDXL Support**: Enhance your image generation with SDXL support for Prodia
|
||||
- **Cloudflare OpenAI API Gateway**: Integrate with Cloudflare for a robust API gateway
|
||||
- **Helicone for Anthropic**: Utilize Helicone's tools for Anthropic models
|
||||
|
||||
Check out the [big-AGI open roadmap](https://github.com/users/enricoros/projects/4/views/2), or
|
||||
the [past releases changelog](docs/changelog.md).
|
||||
|
||||
## ✨ Key Features 👊
|
||||
|
||||

|
||||
[More](docs/pixels/big-AGI-compo2b.png), [screenshots](docs/pixels).
|
||||
|
||||
- Engaging AI Personas
|
||||
- Clean UX, w/ tokens counters
|
||||
- Privacy: user-owned API keys and localStorage
|
||||
- Human I/O: Advanced voice support (TTS, STT)
|
||||
- Machine I/O: PDF import & Summarization, code execution
|
||||
- Many more updates & integrations: ElevenLabs, Helicone, Paste.gg, Prodia
|
||||
- Coming up: automatic-AGI reasoning
|
||||
- **AI Personas**: Tailor your AI interactions with customizable personas
|
||||
- **Sleek UI/UX**: A smooth, intuitive, and mobile-responsive interface
|
||||
- **Efficient Interaction**: Voice commands, OCR, and drag-and-drop file uploads
|
||||
- **Multiple AI Models**: Choose from a variety of leading AI providers
|
||||
- **Privacy First**: Self-host and use your own API keys for full control
|
||||
- **Advanced Tools**: Execute code, import PDFs, and summarize documents
|
||||
- **Seamless Integrations**: Enhance functionality with various third-party services
|
||||
- **Open Roadmap**: Contribute to the progress of big-AGI
|
||||
|
||||
## Support 🙌
|
||||
## 💖 Support
|
||||
|
||||
[//]: # ([](https://discord.gg/MkH4qj2Jp9))
|
||||
[](https://discord.gg/MkH4qj2Jp9)
|
||||
|
||||
* Enjoy the hosted open-source app on [big-AGI.com](https://get.big-agi.com)
|
||||
* [Chat with us](https://discord.gg/MkH4qj2Jp9). We just started!
|
||||
* Deploy your [fork](https://github.com/enricoros/big-agi/fork) and surprise your friends with big-GPT
|
||||
energy!
|
||||
* Enjoy the hosted open-source app on [big-AGI.com](https://big-agi.com)
|
||||
* [Chat with us](https://discord.gg/MkH4qj2Jp9)
|
||||
* Deploy your [fork](https://github.com/enricoros/big-agi/fork) for your friends and family
|
||||
* send PRs! ...
|
||||
🎭[Editing Personas](https://github.com/enricoros/big-agi/issues/35),
|
||||
🧩[Reasoning Systems](https://github.com/enricoros/big-agi/issues/36),
|
||||
@@ -40,62 +90,14 @@ Or fork & run on Vercel
|
||||
|
||||
<br/>
|
||||
|
||||
## Latest Drops 🚀
|
||||
|
||||
#### 🚨 April: more #big-agi-energy
|
||||
|
||||
- 🎉 **[Google Search](docs/pixels/feature_react_google.png)** active in ReAct - add your keys to Settings > Google Search
|
||||
- 🎉 **[Reason+Act](docs/pixels/feature_react_turn_on.png)** preview feature - activate with 2-taps on the 'Chat' button
|
||||
- 🎉 **[Image Generation](docs/pixels/feature_imagine_command.png)** using Prodia (BYO Keys) - /imagine - or menu option
|
||||
- 🎉 **[Voice Synthesis](docs/pixels/feature_voice_1.png)** 📣 with ElevenLabs, including selection of custom voices
|
||||
- 🎉 **[Precise Token Counter](docs/pixels/feature_token_counter.png)** 📈 extra-useful to pack the context window
|
||||
- 🎉 **[Install Mobile APP](docs/pixels/feature_pwa.png)** 📲 looks like native (@harlanlewis)
|
||||
- 🎉 **[UI language](docs/pixels/feature_language.png)** with auto-detect, and future app language! (@tbodyston)
|
||||
- 🎉 **PDF Summarization** 🧩🤯 - ask questions to a PDF! (@fredliubojin)
|
||||
- 🎉 **Code Execution: [Codepen](https://codepen.io/)/[Replit](https://replit.com/)** 💻 (@harlanlewis)
|
||||
- 🎉 **[SVG Drawing](docs/pixels/feature_svg_drawing.png)** - draw with AI 🎨
|
||||
- 🎉 Chats: multiple chats, AI titles, Import/Export, Selection mode
|
||||
- 🎉 Rendering: Markdown, SVG, improved Code blocks
|
||||
- 🎉 Integrations: OpenAI organization ID
|
||||
- 🎉 [Cloudflare deployment instructions](docs/deploy-cloudflare.md),
|
||||
[awesome-agi](https://github.com/enricoros/awesome-agi)
|
||||
- 🎉 [Typing Avatars](docs/pixels/gif_typing_040123.gif) ⌨️
|
||||
<!-- p><a href="docs/pixels/gif_typing_040123.gif"><img src="docs/pixels/gif_typing_040123.gif" width='700' alt="New Typing Avatars"/></a></p -->
|
||||
|
||||
#### March: first release
|
||||
|
||||
- 🎉 **[AI Personas](docs/pixels/feature_purpose_two.png)** - including Code, Science, Corporate, and Chat 🎭
|
||||
- 🎉 **Privacy**: user-owned API keys 🔑 and localStorage 🛡️
|
||||
- 🎉 **Context** - Attach or [Drag & Drop files](docs/pixels/feature_drop_target.png) to add them to the prompt 📁
|
||||
- 🎉 **Syntax highlighting** - for multiple languages 🌈
|
||||
- 🎉 **Code Execution: Sandpack** -
|
||||
[now on branch](https://github.com/enricoros/big-agi/commit/f678a0d463d5e9cf0733f577e11bd612b7902d89) `variant-code-execution`
|
||||
- 🎉 Chat with GPT-4 and 3.5 Turbo 🧠💨
|
||||
- 🎉 Real-time streaming of AI responses ⚡
|
||||
- 🎉 **Voice Input** 🎙️ - works great on Chrome / Windows
|
||||
- 🎉 Integration: **[Paste.gg](docs/pixels/feature_paste_gg.png)** integration for chat sharing 📥
|
||||
- 🎉 Integration: **[Helicone](https://www.helicone.ai/)** integration for API observability 📊
|
||||
- 🌙 Dark mode - Wide mode ⛶
|
||||
|
||||
<br/>
|
||||
|
||||
## Why this? 💡
|
||||
|
||||
Because the official Chat ___lacks important features___, is ___more limited than the api___, at times
|
||||
___slow or unavailable___, and you cannot deploy it yourself, remix it, add features, or share it with
|
||||
your friends.
|
||||
Our users report that ___big-AGI is faster___, ___more reliable___, and ___feature-rich___
|
||||
with features that matter to them.
|
||||
|
||||

|
||||
|
||||
## Code 🧩
|
||||
## 🧩 Develop
|
||||
|
||||

|
||||

|
||||

|
||||
|
||||
Clone this repo, install the dependencies, and run the development server:
|
||||
Clone this repo, install the dependencies (all locally), and run the development server (which auto-watches the
|
||||
files for changes):
|
||||
|
||||
```bash
|
||||
git clone https://github.com/enricoros/big-agi.git
|
||||
@@ -104,12 +106,55 @@ npm install
|
||||
npm run dev
|
||||
```
|
||||
|
||||
Now the app should be running on `http://localhost:3000`
|
||||
The development app will be running on `http://localhost:3000`. Development builds have the advantage of not requiring
|
||||
a build step, but can be slower than production builds. Also, development builds won't have a timeout on edge functions.
|
||||
|
||||
### Integrations:
|
||||
## 🌐 Deploy manually
|
||||
|
||||
The _production_ build of the application is optimized for performance and is performed by the `npm run build` command,
|
||||
after installing the required dependencies.
|
||||
|
||||
```bash
|
||||
# .. repeat the steps above up to `npm install`, then:
|
||||
npm run build
|
||||
npm run start --port 3000
|
||||
```
|
||||
|
||||
The app will be running on the specified port, e.g. `http://localhost:3000`.
|
||||
|
||||
Want to deploy with username/password? See the [Authentication](docs/deploy-authentication.md) guide.
|
||||
|
||||
## 🐳 Deploy with Docker
|
||||
|
||||
For more detailed information on deploying with Docker, please refer to the [docker deployment documentation](docs/deploy-docker.md).
|
||||
|
||||
Build and run:
|
||||
|
||||
```bash
|
||||
docker build -t big-agi .
|
||||
docker run -d -p 3000:3000 big-agi
|
||||
```
|
||||
|
||||
Or run the official container:
|
||||
|
||||
- manually: `docker run -d -p 3000:3000 ghcr.io/enricoros/big-agi`
|
||||
- or, with docker-compose: `docker-compose up` or see [the documentation](docs/deploy-docker.md) for a composer file with integrated browsing
|
||||
|
||||
## ☁️ Deploy on Cloudflare Pages
|
||||
|
||||
Please refer to the [Cloudflare deployment documentation](docs/deploy-cloudflare.md).
|
||||
|
||||
## 🚀 Deploy on Vercel
|
||||
|
||||
Create your GitHub fork, create a Vercel project over that fork, and deploy it. Or press the button below for convenience.
|
||||
|
||||
[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-agi&env=OPENAI_API_KEY,OPENAI_API_HOST&envDescription=OpenAI%20KEY%20for%20your%20deployment.%20Set%20HOST%20only%20if%20non-default.)
|
||||
|
||||
## Integrations:
|
||||
|
||||
* Local models: Ollama, Oobabooga, LocalAi, etc.
|
||||
* [ElevenLabs](https://elevenlabs.io/) Voice Synthesis (bring your own voice too) - Settings > Text To Speech
|
||||
* [Helicone](https://www.helicone.ai/) LLM Observability Platform - Settings > Advanced > API Host: 'oai.hconeai.com'
|
||||
* [Helicone](https://www.helicone.ai/) LLM Observability Platform - Models > OpenAI > Advanced > API Host: 'oai.hconeai.com'
|
||||
* [Paste.gg](https://paste.gg/) Paste Sharing - Chat Menu > Share via paste.gg
|
||||
* [Prodia](https://prodia.com/) Image Generation - Settings > Image Generation > Api Key & Model
|
||||
|
||||
@@ -124,4 +169,4 @@ This project is licensed under the MIT License.
|
||||
|
||||
[//]: # ([](https://github.com/enricoros/big-agi/issues))
|
||||
|
||||
Made with 💙
|
||||
Made with 💙
|
||||
|
||||
@@ -0,0 +1,52 @@
|
||||
import { createEmptyReadableStream, safeErrorString, serverFetchOrThrow } from '~/server/wire';
|
||||
|
||||
import { elevenlabsAccess, elevenlabsVoiceId, ElevenlabsWire, speechInputSchema } from '~/modules/elevenlabs/elevenlabs.router';
|
||||
|
||||
|
||||
/* NOTE: Why does this file even exist?
|
||||
|
||||
This file is a workaround for a limitation in tRPC; it does not support ArrayBuffer responses,
|
||||
and that would force us to use base64 encoding for the audio data, which would be a waste of
|
||||
bandwidth. So instead, we use this file to make the request to ElevenLabs, and then return the
|
||||
response as an ArrayBuffer. Unfortunately this means duplicating the code in the server-side
|
||||
and client-side vs. the tRPC implementation. So at least we recycle the input structures.
|
||||
|
||||
*/
|
||||
const handler = async (req: Request) => {
|
||||
try {
|
||||
|
||||
// construct the upstream request
|
||||
const {
|
||||
elevenKey, text, voiceId, nonEnglish,
|
||||
streaming, streamOptimization,
|
||||
} = speechInputSchema.parse(await req.json());
|
||||
const path = `/v1/text-to-speech/${elevenlabsVoiceId(voiceId)}` + (streaming ? `/stream?optimize_streaming_latency=${streamOptimization || 1}` : '');
|
||||
const { headers, url } = elevenlabsAccess(elevenKey, path);
|
||||
const body: ElevenlabsWire.TTSRequest = {
|
||||
text: text,
|
||||
...(nonEnglish && { model_id: 'eleven_multilingual_v1' }),
|
||||
};
|
||||
|
||||
// elevenlabs POST
|
||||
const upstreamResponse: Response = await serverFetchOrThrow(url, 'POST', headers, body);
|
||||
|
||||
// NOTE: this is disabled, as we pass-through what we get upstream for speed, as it is not worthy
|
||||
// to wait for the entire audio to be downloaded before we send it to the client
|
||||
// if (!streaming) {
|
||||
// const audioArrayBuffer = await upstreamResponse.arrayBuffer();
|
||||
// return new NextResponse(audioArrayBuffer, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
|
||||
// }
|
||||
|
||||
// stream the data to the client
|
||||
const audioReadableStream = upstreamResponse.body || createEmptyReadableStream();
|
||||
return new Response(audioReadableStream, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
|
||||
|
||||
} catch (error: any) {
|
||||
const fetchOrVendorError = safeErrorString(error) + (error?.cause ? ' · ' + error.cause : '');
|
||||
console.log(`api/elevenlabs/speech: fetch issue: ${fetchOrVendorError}`);
|
||||
return new Response(`[Issue] elevenlabs: ${fetchOrVendorError}`, { status: 500 });
|
||||
}
|
||||
};
|
||||
|
||||
export const runtime = 'edge';
|
||||
export { handler as POST };
|
||||
@@ -0,0 +1,2 @@
|
||||
export const runtime = 'edge';
|
||||
export { openaiStreamingRelayHandler as POST } from '~/modules/llms/transports/server/openai/openai.streaming';
|
||||
@@ -0,0 +1,19 @@
|
||||
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
|
||||
|
||||
import { appRouterEdge } from '~/server/api/trpc.router-edge';
|
||||
import { createTRPCFetchContext } from '~/server/api/trpc.server';
|
||||
|
||||
const handlerEdgeRoutes = (req: Request) =>
|
||||
fetchRequestHandler({
|
||||
router: appRouterEdge,
|
||||
endpoint: '/api/trpc-edge',
|
||||
req,
|
||||
createContext: createTRPCFetchContext,
|
||||
onError:
|
||||
process.env.NODE_ENV === 'development'
|
||||
? ({ path, error }) => console.error(`❌ tRPC-edge failed on ${path ?? '<no-path>'}:`, error)
|
||||
: undefined,
|
||||
});
|
||||
|
||||
export const runtime = 'edge';
|
||||
export { handlerEdgeRoutes as GET, handlerEdgeRoutes as POST };
|
||||
@@ -0,0 +1,19 @@
|
||||
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
|
||||
|
||||
import { appRouterNode } from '~/server/api/trpc.router-node';
|
||||
import { createTRPCFetchContext } from '~/server/api/trpc.server';
|
||||
|
||||
const handlerNodeRoutes = (req: Request) =>
|
||||
fetchRequestHandler({
|
||||
router: appRouterNode,
|
||||
endpoint: '/api/trpc-node',
|
||||
req,
|
||||
createContext: createTRPCFetchContext,
|
||||
onError:
|
||||
process.env.NODE_ENV === 'development'
|
||||
? ({ path, error }) => console.error(`❌ tRPC-node failed on ${path ?? '<no-path>'}:`, error)
|
||||
: undefined,
|
||||
});
|
||||
|
||||
export const runtime = 'nodejs';
|
||||
export { handlerNodeRoutes as GET, handlerNodeRoutes as POST };
|
||||
@@ -0,0 +1,14 @@
|
||||
# Very simple docker-compose file to run the app on http://localhost:3000 (or http://127.0.0.1:3000).
|
||||
#
|
||||
# For more examples, such as running big-AGI alongside a web browsing service, see the `docs/docker` folder.
|
||||
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
big-agi:
|
||||
image: ghcr.io/enricoros/big-agi:main
|
||||
ports:
|
||||
- "3000:3000"
|
||||
env_file:
|
||||
- .env
|
||||
command: [ "next", "start", "-p", "3000" ]
|
||||
@@ -0,0 +1,126 @@
|
||||
## Changelog
|
||||
|
||||
This is a high-level changelog. It calls out some of the high-level features batched
|
||||
by release.
|
||||
|
||||
- For the live roadmap, please see [the GitHub project](https://github.com/users/enricoros/projects/4/views/2)
|
||||
|
||||
### 1.8.0 - Dec 2023
|
||||
|
||||
- work in progress: [big-AGI open roadmap](https://github.com/users/enricoros/projects/4/views/2), [help here](https://github.com/users/enricoros/projects/4/views/4)
|
||||
- milestone: [1.8.0](https://github.com/enricoros/big-agi/milestone/8)
|
||||
|
||||
### What's New in 1.7.3 · Dec 13, 2023 · Attachment Theory 🌟
|
||||
|
||||
- **Attachments System Overhaul**: Drag, paste, link, snap, text, images, PDFs and more. [#251](https://github.com/enricoros/big-agi/issues/251)
|
||||
- **Desktop Webcam Capture**: Image capture now available as Labs feature. [#253](https://github.com/enricoros/big-agi/issues/253)
|
||||
- **Independent Browsing**: Full browsing support with Browserless. [Learn More](https://github.com/enricoros/big-agi/blob/main/docs/config-browse.md)
|
||||
- **Overheat LLMs**: Push the creativity with higher LLM temperatures. [#256](https://github.com/enricoros/big-agi/issues/256)
|
||||
- **Model Options Shortcut**: Quick adjust with `Ctrl+Shift+O`
|
||||
- Optimized Voice Input and Performance
|
||||
- Latest Ollama and Oobabooga models
|
||||
- For developers: **Password Protection**: HTTP Basic Auth. [Learn How](https://github.com/enricoros/big-agi/blob/main/docs/deploy-authentication.md)
|
||||
- [1.7.1]: Improved Ollama chats. [#270](https://github.com/enricoros/big-agi/issues/270)
|
||||
- [1.7.2]: OpenRouter login & free models 🎁
|
||||
- [1.7.3]: Mistral Platform support. [#273](https://github.com/enricoros/big-agi/issues/273)
|
||||
|
||||
### What's New in 1.6.0 - Nov 28, 2023 · Surf's Up
|
||||
|
||||
- **Web Browsing**: Download web pages within chats - [browsing guide](https://github.com/enricoros/big-agi/blob/main/docs/config-browse.md)
|
||||
- **Branching Discussions**: Create new conversations from any message
|
||||
- **Keyboard Navigation**: Swift chat navigation with new shortcuts (e.g. ctrl+alt+left/right)
|
||||
- **Performance Boost**: Faster rendering for a smoother experience
|
||||
- **UI Enhancements**: Refined interface based on user feedback
|
||||
- **New Features**: Anthropic Claude 2.1, `/help` command, and Flattener tool
|
||||
- **For Developers**: Code quality upgrades and snackbar notifications
|
||||
|
||||
### What's New in 1.5.0 - Nov 19, 2023 · Loaded
|
||||
|
||||
- **Continued Voice**: Engage with hands-free interaction for a seamless experience
|
||||
- **Visualization Tool**: Create data representations with our new visualization capabilities
|
||||
- **Ollama Local Models**: Leverage local models support with our comprehensive guide
|
||||
- **Text Tools**: Enjoy tools including highlight differences to refine your content
|
||||
- **Mermaid Diagramming**: Render complex diagrams with our Mermaid language support
|
||||
- **OpenAI 1106 Chat Models**: Experience the cutting-edge capabilities of the latest OpenAI models
|
||||
- **SDXL Support**: Enhance your image generation with SDXL support for Prodia
|
||||
- **Cloudflare OpenAI API Gateway**: Integrate with Cloudflare for a robust API gateway
|
||||
- **Helicone for Anthropic**: Utilize Helicone's tools for Anthropic models
|
||||
|
||||
For Developers:
|
||||
|
||||
- Runtime Server-Side configuration: https://github.com/enricoros/big-agi/issues/189. Env vars are
|
||||
not required to be set at build time anymore. The frontend will roundtrip to the backend at the
|
||||
first request to get the configuration. See
|
||||
https://github.com/enricoros/big-agi/blob/main/src/modules/backend/backend.router.ts.
|
||||
- CloudFlare developers: please change the deployment command to
|
||||
`rm app/api/trpc-node/[trpc]/route.ts && npx @cloudflare/next-on-pages@1`,
|
||||
as we transitioned to the App router in NextJS 14. The documentation in
|
||||
[docs/deploy-cloudflare.md](../docs/deploy-cloudflare.md) is updated
|
||||
|
||||
### 1.4.0: Sept/Oct: scale OUT
|
||||
|
||||
- **Expanded Model Support**: Azure and [OpenRouter](https://openrouter.ai/docs#models) models, including gpt-4-32k
|
||||
- **Share and clone** conversations with public links
|
||||
- Removed the 20 chats hard limit ([Ashesh3](https://github.com/enricoros/big-agi/pull/158))
|
||||
- Latex Rendering
|
||||
- Augmented Chat modes (Labs)
|
||||
|
||||
### July/Aug: More Better Faster
|
||||
|
||||
- **Camera OCR** - real-world AI - take a picture of a text, and chat with it
|
||||
- **Anthropic models** support, e.g. Claude
|
||||
- **Backup/Restore** - save chats, and restore them later
|
||||
- **[Local model support with Oobabooga server](../docs/config-local-oobabooga)** - run your own LLMs!
|
||||
- **Flatten conversations** - conversations summarizer with 4 modes
|
||||
- **Fork conversations** - create a new chat, to try with different endings
|
||||
- New commands: /s to add a System message, and /a for an Assistant message
|
||||
- New Chat modes: Write-only - just appends the message, without assistant response
|
||||
- Fix STOP generation - in sync with the Vercel team to fix a long-standing NextJS issue
|
||||
- Fixes on the HTML block - particularly useful to see error pages
|
||||
|
||||
### June: scale UP
|
||||
|
||||
- **[New OpenAI Models](https://openai.com/blog/function-calling-and-other-api-updates) support** - 0613 models, including 16k and 32k
|
||||
- **Cleaner UI** - with rationalized Settings, Modals, and Configurators
|
||||
- **Dynamic Models Configurator** - easy connection with different model vendors
|
||||
- **Multiple Model Vendors Support** framework to support many LLM vendors
|
||||
- **Per-model Options** (temperature, tokens, etc.) for fine-tuning AI behavior to your needs
|
||||
- Support for GPT-4-32k
|
||||
- Improved Dialogs and Messages
|
||||
- Much Enhanced DX: TRPC integration, modularization, pluggable UI, etc
|
||||
|
||||
### April / May: more #big-agi-energy
|
||||
|
||||
- **[Google Search](../docs/pixels/feature_react_google.png)** active in ReAct - add your keys to Settings > Google
|
||||
Search
|
||||
- **[Reason+Act](../docs/pixels/feature_react_turn_on.png)** preview feature - activate with 2-taps on the 'Chat' button
|
||||
- **[Image Generation](../docs/pixels/feature_imagine_command.png)** using Prodia (BYO Keys) - /imagine - or menu option
|
||||
- **[Voice Synthesis](../docs/pixels/feature_voice_1.png)** 📣 with ElevenLabs, including selection of custom voices
|
||||
- **[Precise Token Counter](../docs/pixels/feature_token_counter.png)** 📈 extra-useful to pack the context window
|
||||
- **[Install Mobile APP](../docs/pixels/feature_pwa.png)** 📲 looks like native (@harlanlewis)
|
||||
- **[UI language](../docs/pixels/feature_language.png)** with auto-detect, and future app language! (@tbodyston)
|
||||
- **PDF Summarization** 🧩🤯 - ask questions to a PDF! (@fredliubojin)
|
||||
- **Code Execution: [Codepen](https://codepen.io/)/[Replit](https://replit.com/)** 💻 (@harlanlewis)
|
||||
- **[SVG Drawing](../docs/pixels/feature_svg_drawing.png)** - draw with AI 🎨
|
||||
- Chats: multiple chats, AI titles, Import/Export, Selection mode
|
||||
- Rendering: Markdown, SVG, improved Code blocks
|
||||
- Integrations: OpenAI organization ID
|
||||
- [Cloudflare deployment instructions](../docs/deploy-cloudflare.md),
|
||||
[awesome-agi](https://github.com/enricoros/awesome-agi)
|
||||
- [Typing Avatars](../docs/pixels/gif_typing_040123.gif) ⌨️
|
||||
<!-- p><a href="../docs/pixels/gif_typing_040123.gif"><img src="../docs/pixels/gif_typing_040123.gif" width='700' alt="New Typing Avatars"/></a></p -->
|
||||
|
||||
### March: first release
|
||||
|
||||
- **[AI Personas](../docs/pixels/feature_purpose_two.png)** - including Code, Science, Corporate, and Chat 🎭
|
||||
- **Privacy**: user-owned API keys 🔑 and localStorage 🛡️
|
||||
- **Context** - Attach or [Drag & Drop files](../docs/pixels/feature_drop_target.png) to add them to the prompt 📁
|
||||
- **Syntax highlighting** - for multiple languages 🌈
|
||||
- **Code Execution: Sandpack** -
|
||||
[now on branch](https://github.com/enricoros/big-agi/commit/f678a0d463d5e9cf0733f577e11bd612b7902d89) `variant-code-execution`
|
||||
- Chat with GPT-4 and 3.5 Turbo 🧠💨
|
||||
- Real-time streaming of AI responses ⚡
|
||||
- **Voice Input** 🎙️ - works great on Chrome / Windows
|
||||
- Integration: **[Paste.gg](../docs/pixels/feature_paste_gg.png)** integration for chat sharing 📥
|
||||
- Integration: **[Helicone](https://www.helicone.ai/)** integration for API observability 📊
|
||||
- 🌙 Dark mode - Wide mode ⛶
|
||||
@@ -0,0 +1,87 @@
|
||||
# Configuring Azure OpenAI Service with `big-AGI`
|
||||
|
||||
The entire procedure takes about 5 minutes and involves creating an Azure account,
|
||||
setting up the Azure OpenAI service, deploying models, and configuring `big-AGI`
|
||||
to access these models.
|
||||
|
||||
Please note that Azure operates on a 'pay-as-you-go' pricing model and requires
|
||||
credit card information tied to a 'subscription' to the Azure service.
|
||||
|
||||
## Configuring `big-AGI`
|
||||
|
||||
If you have an `API Endpoint` and `API Key`, you can configure big-AGI as follows:
|
||||
|
||||
1. Launch the `big-AGI` application
|
||||
2. Go to the **Models** settings
|
||||
3. Add a Vendor and select **Azure OpenAI**
|
||||
- Enter the Endpoint (e.g., 'https://your-openai-api-1234.openai.azure.com/')
|
||||
- Enter the API Key (e.g., 'fd5...........................ba')
|
||||
|
||||
The deployed models are now available in the application. If you don't have a configured
|
||||
Azure OpenAI service instance, continue with the next section.
|
||||
|
||||
## Setting Up Azure
|
||||
|
||||
### Step 1: Azure Account & Subscription
|
||||
|
||||
1. Create an account on [azure.microsoft.com](https://azure.microsoft.com/en-us/)
|
||||
2. Go to the [Azure Portal](https://portal.azure.com/)
|
||||
3. Click on **Create a resource** in the top left corner
|
||||
4. Search for **Subscription** and select **[Create Subscription](https://portal.azure.com/#create/Microsoft.Subscription)**
|
||||
- Fill in the required fields and click on **Create**
|
||||
- Note down the **Subscription ID** (e.g., `12345678-1234-1234-1234-123456789012`)
|
||||
|
||||
### Step 2: Apply for Azure OpenAI Service
|
||||
|
||||
We'll now be creating "OpenAI"-specific resources on Azure. This requires you to 'apply',
|
||||
and acceptance should be quick (even as low as minutes).
|
||||
|
||||
1. Visit [Azure OpenAI Service](https://aka.ms/azure-openai)
|
||||
2. Click on **Apply for access**
|
||||
- Fill in the required fields (including the subscription ID) and click on **Apply**
|
||||
|
||||
Once your application is accepted, you can create OpenAI resources on Azure.
|
||||
|
||||
### Step 3: Create Azure OpenAI Resource
|
||||
|
||||
For more information, see [Azure: Create and deploy OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal)
|
||||
|
||||
1. Click on **Create a resource** in the top left corner
|
||||
2. Search for **OpenAI** and select **[Create OpenAI](https://portal.azure.com/#create/Microsoft.CognitiveServicesOpenAI)**
|
||||
3. Fill in the necessary fields on the **Create OpenAI** page
|
||||

|
||||
- Select the subscription
|
||||
- Select a resource group or create a new one
|
||||
- Select the region. Note that the region determines the available models.
|
||||
> For instance, **Canada East** offers GPT-4-32k models. For the full list, see [GPT-4 models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models)
|
||||
- Name the service (e.g., `your-openai-api-1234`)
|
||||
- Select a pricing tier (e.g., `S0` for standard)
|
||||
- Select: "All networks, including the internet, can access this resource."
|
||||
- Click on **Review + create** and then **Create**
|
||||
|
||||
After creating the resource, you can access the API Keys and Endpoints. At any point, you can go to
|
||||
the OpenAI Service instance page to get this information.
|
||||
|
||||
- Click on **Go to resource**
|
||||
- Click on **Develop**
|
||||
- Copy the `Endpoint`, called "Language API", e.g. 'https://your-openai-api-1234.openai.azure.com/'
|
||||
- Copy `KEY 1`
|
||||
|
||||
### Step 4: Deploy Models
|
||||
|
||||
By default, Azure OpenAI resource instances don't have models available. You need to deploy the models you want to use.
|
||||
|
||||
1. Click on **Model Deployments > Manage Deployments**
|
||||
2. Click on **+Create New Deployment**
|
||||

|
||||
- Select the model you want to deploy
|
||||
- Optionally select a version
|
||||
- name the model, e.g., `gpt4-32k-0613`
|
||||
|
||||
Repeat as necessary for each model you want to deploy.
|
||||
|
||||
## Resources
|
||||
|
||||
- [Azure OpenAI Service Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/)
|
||||
- [Guide: Create an Azure OpenAI Resource](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal)
|
||||
- [Azure OpenAI Models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models)
|
||||
@@ -0,0 +1,87 @@
|
||||
# Browse Functionality in big-AGI 🌐
|
||||
|
||||
Allows users to load web pages across various components of `big-AGI`. This feature is supported by Puppeteer-based
|
||||
browsing services, which are the most common way to render web pages in a headless environment.
|
||||
|
||||
Once configured, the Browsing service provides this functionality:
|
||||
|
||||
- **Paste a URL**: Simply paste/drag a URL into the chat, and `big-AGI` will load and attach the page (very effective)
|
||||
- **Use /browse**: Type `/browse [URL]` in the chat to command `big-AGI` to load the specified web page
|
||||
- **ReAct**: ReAct will automatically use the `loadURL()` function whenever a URL is encountered
|
||||
|
||||
First of all, you need to procure a Puppeteer web browsing service endpoint. `big-AGI` supports services like:
|
||||
|
||||
| Service | Working | Type | Location | Special Features |
|
||||
|--------------------------------------------------------------------------------------|---------|-------------|----------------|---------------------------------------------|
|
||||
| [BrightData Scraping Browser](https://brightdata.com/products/scraping-browser) | Yes | Proprietary | Cloud | Advanced scraping tools, global IP pool |
|
||||
| [Cloudflare Browser Rendering](https://developers.cloudflare.com/browser-rendering/) | ? | Proprietary | Cloud | Integrated CDN, optimized browser rendering |
|
||||
| ⬇️ [Browserless 2.0](#-browserless-20) | Okay | OpenSource | Local (Docker) | Parallelism, debug viewer, advanced APIs |
|
||||
| ⬇️ [Your Chrome Browser (ALPHA)](#-your-own-chrome-browser) | Alpha | Proprietary | Local (Chrome) | Personal, experimental use (ALPHA!) |
|
||||
| other Puppeteer-based WSS Services | ? | Varied | Cloud/Local | Service-specific features |
|
||||
|
||||
## Configuration
|
||||
|
||||
1. **Procure an Endpoint**
|
||||
- Ensure that your browsing service is running (remote or local) and has a WebSocket endpoint available
|
||||
- Write down the address: `wss://${auth}@{some host}:{port}`, or ws:// for local services on your machine
|
||||
|
||||
2. **Configure `big-AGI`**
|
||||
   - Navigate to **Preferences** > **Tools** > **Browse**
|
||||
- Enter the 'wss://...' connection string provided by your browsing service
|
||||
|
||||
3. **Enable Features**: Choose which browse-related features you want to enable:
|
||||
- **Attach URLs**: Automatically load and attach a page when pasting a URL into the composer
|
||||
- **/browse Command**: Use the `/browse` command in the chat to load a web page
|
||||
- **ReAct**: Enable the `loadURL()` function in ReAct for advanced interactions
|
||||
|
||||
### 🌐 Browserless 2.0
|
||||
|
||||
[Browserless 2.0](https://github.com/browserless/browserless) is a Docker-based service that provides a headless
|
||||
browsing experience compatible with `big-AGI`. An open-source solution that simplifies web automation tasks,
|
||||
in a scalable manner.
|
||||
|
||||
Launch Browserless with:
|
||||
|
||||
```bash
|
||||
docker run -p 9222:3000 browserless/chrome:latest
|
||||
```
|
||||
|
||||
Now you can use the following connection string in `big-AGI`: `ws://127.0.0.1:9222`.
|
||||
You can also browse to [http://127.0.0.1:9222](http://127.0.0.1:9222) to see the Browserless debug viewer
|
||||
and configure some options.
|
||||
|
||||
Note: if you are using `docker-compose`, please see the
|
||||
[docker/docker-compose-browserless.yaml](docker/docker-compose-browserless.yaml) file for an example
|
||||
on how to run `big-AGI` and Browserless simultaneously in a single application.
|
||||
|
||||
### 🌐 Your own Chrome browser
|
||||
|
||||
***EXPERIMENTAL - UNTESTED*** - You can use your own Chrome browser as a browsing service, by configuring it to expose
|
||||
a WebSocket endpoint.
|
||||
|
||||
- close all the Chrome instances (on Windows, check the Task Manager if still running)
|
||||
- start Chrome with the following command line options (on Windows, you can edit the shortcut properties):
|
||||
- `--remote-debugging-port=9222`
|
||||
- go to http://localhost:9222/json/version and copy the `webSocketDebuggerUrl` value
|
||||
- it should be something like: `ws://localhost:9222/...`
|
||||
- paste the value into the Endpoint configuration (see point 2 in the configuration)
|
||||
|
||||
### Server-Side Configuration
|
||||
|
||||
You can set the Puppeteer WebSocket endpoint (`PUPPETEER_WSS_ENDPOINT`) in the deployment before running it.
|
||||
This is useful for self-hosted instances or when you want to pre-configure the endpoint for all users, and will
|
||||
allow you to skip points 2 and 3 above.
|
||||
|
||||
Always deploy your own user authentication, authorization and security solution. For this feature, the tRPC
|
||||
route that provides browsing service, shall be secured with a user authentication and authorization solution,
|
||||
to prevent unauthorized access to the browsing service.
|
||||
|
||||
## Support
|
||||
|
||||
If you encounter any issues or have questions about configuring the browse functionality, join our community on Discord for support and discussions.
|
||||
|
||||
[](https://discord.gg/MkH4qj2Jp9)
|
||||
|
||||
---
|
||||
|
||||
Enjoy the enhanced browsing experience within `big-AGI` and explore the web without ever leaving your chat!
|
||||
@@ -0,0 +1,34 @@
|
||||
# Local LLM integration with `localai`
|
||||
|
||||
Integrate local Large Language Models (LLMs) with [LocalAI](https://localai.io).
|
||||
|
||||
_Last updated Nov 7, 2023_
|
||||
|
||||
## Instructions
|
||||
|
||||
### LocalAI installation and configuration
|
||||
|
||||
Follow the guide at: https://localai.io/basics/getting_started/
|
||||
|
||||
For instance with [Use luna-ai-llama2 with docker compose](https://localai.io/basics/getting_started/#example-use-luna-ai-llama2-model-with-docker-compose):
|
||||
|
||||
- clone LocalAI
|
||||
- get the model
|
||||
- copy the prompt template
|
||||
- start docker
|
||||
- -> the server will be listening on `localhost:8080`
|
||||
- verify it works by going to [http://localhost:8080/v1/models](http://localhost:8080/v1/models) on
|
||||
your browser and seeing listed the model you downloaded
|
||||
|
||||
### Integrating LocalAI with big-AGI
|
||||
|
||||
- Go to Models > Add a model source of type: **LocalAI**
|
||||
- Enter the address: `http://localhost:8080` (default)
|
||||
- If running remotely, replace localhost with the IP of the machine. Make sure to use the **IP:Port** format
|
||||
- Load the models
|
||||
- Select model & Chat
|
||||
|
||||
> NOTE: LocalAI does not list details about the models. Every model is assumed to be
|
||||
> capable of chatting, and with a context window of 4096 tokens.
|
||||
> Please update the [src/modules/llms/transports/server/openai/models.data.ts](../src/modules/llms/transports/server/openai/models.data.ts)
|
||||
> file with the mapping information between LocalAI model IDs and names/descriptions/tokens, etc.
|
||||
@@ -0,0 +1,61 @@
|
||||
# Local LLM Integration with `text-web-ui` :llama:
|
||||
|
||||
Integrate local Large Language Models (LLMs) with
|
||||
[oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui),
|
||||
a specialized interface that includes a custom variant of the OpenAI API for a smooth integration process.
|
||||
|
||||
_Last updated on Dec 7, 2023_
|
||||
|
||||
### Components
|
||||
|
||||
The implementation of local LLMs involves the following components:
|
||||
|
||||
* **text-generation-webui**: A Python application with a Gradio web UI for operating Large Language Models.
|
||||
* **Local Large Language Models "LLMs"**: Use large language models on your personal computer with consumer-grade GPUs or CPUs.
|
||||
* **big-AGI**: An LLM UI that offers features such as Personas, OCR, Voice Support, Code Execution, AGI functions, and more.
|
||||
|
||||
## Instructions
|
||||
|
||||
This guide assumes that **big-AGI** is already installed on your system. Note that the text-generation-webui IP address must be accessible from the server running **big-AGI**.
|
||||
|
||||
### Text-web-ui Installation & Configuration:
|
||||
|
||||
1. Install [text-generation-webui](https://github.com/oobabooga/text-generation-webui#Installation):
|
||||
   - Follow the instructions on the official page (basically clone the repo and run a script) [~10 minutes]
|
||||
- Stop the Web UI as we need to modify the startup flags to enable the OpenAI API
|
||||
2. Enable the **openai extension**
|
||||
- Edit `CMD_FLAGS.txt`
|
||||
- Make sure that `--listen --api` is present and uncommented
|
||||
3. Restart text-generation-webui
|
||||
- Double-click on "start"
|
||||
- You should see something like:
|
||||
```
|
||||
2023-12-07 21:51:21 INFO:Loading the extension "openai"...
|
||||
2023-12-07 21:51:21 INFO:OpenAI-compatible API URL:
|
||||
|
||||
http://0.0.0.0:5000
|
||||
...
|
||||
INFO: Uvicorn running on http://0.0.0.0:5000 (Press CTRL+C to quit)
|
||||
Running on local URL: http://0.0.0.0:7860
|
||||
```
|
||||
- This shows that:
|
||||
- The Web UI is running on port 7860: http://127.0.0.1:7860
|
||||
- **The OpenAI API is running on port 5000: http://127.0.0.1:5000**
|
||||
4. Load your first model
|
||||
- Open the text-generation-webui at [127.0.0.1:7860](http://127.0.0.1:7860/)
|
||||
- Switch to the **Model** tab
|
||||
- Download, for instance, `TheBloke/Llama-2-7B-Chat-GPTQ`
|
||||
- Select the model once it's loaded
|
||||
|
||||
### Integrating text-web-ui with big-AGI:
|
||||
1. Integrating Text-Generation-WebUI with big-AGI:
|
||||
- Go to Models > Add a model source of type: **Oobabooga**
|
||||
- Enter the address: `http://127.0.0.1:5000`
|
||||
- If running remotely, replace 127.0.0.1 with the IP of the machine. Make sure to use the **IP:Port** format
|
||||
- Load the models
|
||||
- The active model must be selected and LOADED on the text-generation-webui as it doesn't support model switching or parallel requests.
|
||||
- Select model & Chat
|
||||
|
||||

|
||||
|
||||
Enjoy the privacy and flexibility of local LLMs with `big-AGI` and `text-generation-webui`!
|
||||
@@ -0,0 +1,86 @@
|
||||
# `Ollama` x `big-AGI` :llama:
|
||||
|
||||
This guide helps you connect [Ollama](https://ollama.ai) [models](https://ollama.ai/library) to
|
||||
[big-AGI](https://big-agi.com) for a professional AI/AGI operation and a good UI/Conversational
|
||||
experience. The integration brings the popular big-AGI features to Ollama, including: voice chats,
|
||||
editing tools, model switching, personas, and more.
|
||||
|
||||
_Last updated Dec 11, 2023_
|
||||
|
||||

|
||||
|
||||
## Quick Integration Guide
|
||||
|
||||
1. **Ensure Ollama API Server is Running**: Follow the official instructions to get Ollama up and running on your machine
|
||||
2. **Add Ollama as a Model Source**: In `big-AGI`, navigate to the **Models** section, select **Add a model source**, and choose **Ollama**
|
||||
3. **Enter Ollama Host URL**: Provide the Ollama Host URL where the API server is accessible (e.g., `http://localhost:11434`)
|
||||
4. **Refresh Model List**: Once connected, refresh the list of available models to include the Ollama models
|
||||
> Optional: use the Ollama Admin interface to see which models are available and 'Pull' them in your local machine. Note
|
||||
that this operation will likely timeout due to Edge Functions timeout on the big-AGI server while pulling, and
|
||||
you'll have to press the 'Pull' button again, until a green message appears.
|
||||
5. **Chat with Ollama models**: select an Ollama model and begin chatting with AI personas
|
||||
|
||||
### Ollama: Installation and Setup
|
||||
|
||||
For detailed instructions on setting up the Ollama API server, please refer to the
|
||||
[Ollama download page](https://ollama.ai/download) and [instructions for linux](https://github.com/jmorganca/ollama/blob/main/docs/linux.md).
|
||||
|
||||
### Visual Guide
|
||||
|
||||
* After adding the `Ollama` model vendor, entering the IP address of an Ollama server, and refreshing models:
|
||||
<img src="pixels/config-ollama-1-models.png" alt="config-local-ollama-1-models.png" style="max-width: 320px;">
|
||||
|
||||
* The `Ollama` admin panel, with the `Pull` button highlighted, after pulling the "Yi" model:
|
||||
<img src="pixels/config-ollama-2-admin-pull.png" alt="config-local-ollama-2-admin-pull.png" style="max-width: 320px;">
|
||||
|
||||
* You can now switch model/persona dynamically and text/voice chat with the models:
|
||||
<img src="pixels/config-ollama-3-chat.png" alt="config-local-ollama-3-chat.png" style="max-width: 320px;">
|
||||
|
||||
### Advanced: Model parameters
|
||||
|
||||
For users who wish to delve deeper into advanced settings, `big-AGI` offers additional configuration options, such
|
||||
as the model temperature, maximum tokens, etc.
|
||||
|
||||
### Advanced: Ollama under a reverse proxy
|
||||
|
||||
You can elegantly expose your Ollama server to the internet (and thus make it easier to use from your server-side
|
||||
big-AGI deployments) by exposing it on an http/https URL, such as: `https://yourdomain.com/ollama`
|
||||
|
||||
On Ubuntu Servers, you will need to install `nginx` and configure it to proxy requests to Ollama.
|
||||
|
||||
```bash
|
||||
sudo apt update
|
||||
sudo apt install nginx
|
||||
sudo apt install certbot python3-certbot-nginx
|
||||
sudo certbot --nginx -d yourdomain.com
|
||||
```
|
||||
|
||||
Then, edit the nginx configuration file `/etc/nginx/sites-enabled/default` and add the following block:
|
||||
|
||||
```nginx
|
||||
location /ollama/ {
|
||||
proxy_pass http://localhost:11434;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection 'upgrade';
|
||||
proxy_set_header Host $host;
|
||||
proxy_cache_bypass $http_upgrade;
|
||||
|
||||
# Disable buffering for the streaming responses
|
||||
proxy_buffering off;
|
||||
}
|
||||
```
|
||||
|
||||
Reach out to our community if you need help with this.
|
||||
|
||||
### Community and Support
|
||||
|
||||
Join our community to share your experiences, get help, and discuss best practices:
|
||||
|
||||
[](https://discord.gg/MkH4qj2Jp9)
|
||||
|
||||
|
||||
---
|
||||
|
||||
`big-AGI` is committed to providing a powerful, intuitive, and privacy-respecting AI experience.
|
||||
We are excited for you to explore the possibilities with Ollama models. Happy creating!
|
||||
@@ -0,0 +1,31 @@
|
||||
# OpenRouter Configuration
|
||||
|
||||
[OpenRouter](https://openrouter.ai) is a standalone, premium service
|
||||
that provides access to <Link href='https://openrouter.ai/docs#models' target='_blank'>exclusive AI models</Link>
|
||||
such as GPT-4 32k, Claude, and more. These models are typically not available to the public.
|
||||
This document details the process of integrating OpenRouter with big-AGI.
|
||||
|
||||
### 1. OpenRouter Account Setup and API Key Generation
|
||||
|
||||
1. Register for an OpenRouter account at [openrouter.ai](https://openrouter.ai) by clicking on Sign In > Continue with Google.
|
||||
2. Top up your account (minimum $5) by navigating to [openrouter.ai/account](https://openrouter.ai/account) > Add Credits > Pay with Stripe.
|
||||
3. Generate an API key at [openrouter.ai/keys](https://openrouter.ai/keys) > API Key > Generate API Key.
|
||||
- **Remember to copy and securely store your API key** - the key will not be displayed again and will be in the format `sk-or-v1-...`.
|
||||
- Keep the key confidential as it can be used to expend your credits.
|
||||
|
||||
### 2. Integrating OpenRouter with big-AGI
|
||||
|
||||
1. Launch big-AGI, and navigate to the AI **Models** settings.
|
||||
2. Add a Vendor, and select **OpenRouter**.
|
||||

|
||||
3. Input the API key into the **OpenRouter API Key** field, and load the Models.
|
||||

|
||||
4. OpenAI GPT4-32k and other models will now be accessible and selectable in the application.
|
||||
|
||||
### Pricing
|
||||
|
||||
OpenRouter independently manages its service and pricing and is not affiliated with big-AGI.
|
||||
For more detailed information, please visit [this page](https://openrouter.ai/docs#models).
|
||||
|
||||
Please note that running large models such as GPT-4 32k can be costly and may rapidly consume
|
||||
credits - a single prompt may cost $1 or more, at the time of writing.
|
||||
@@ -0,0 +1,45 @@
|
||||
# Authentication
|
||||
|
||||
`big-AGI` does not come with built-in authentication. To secure your deployment, you can implement authentication
|
||||
in one of the following ways:
|
||||
|
||||
1. Build `big-AGI` with support for ⬇️ [HTTP Authentication](#http-authentication)
|
||||
2. Utilize user authentication features provided by your ⬇️ [cloud deployment platform](#cloud-deployments-authentication)
|
||||
3. Develop a custom authentication solution
|
||||
|
||||
<br/>
|
||||
|
||||
### HTTP Authentication
|
||||
|
||||
[HTTP Basic Authentication](https://developer.mozilla.org/en-US/docs/Web/HTTP/Authentication) is a simple method
|
||||
to secure your application.
|
||||
|
||||
To enable it in `big-AGI`, you **must manually build the application**:
|
||||
|
||||
- Build `big-AGI` with HTTP authentication enabled:
|
||||
- Clone the repository
|
||||
- Rename `middleware_BASIC_AUTH.ts` to `middleware.ts`
|
||||
- Build: usual simple build procedure (e.g. [Deploy manually](../README.md#-deploy-manually) or [Deploying with Docker](deploy-docker.md))
|
||||
|
||||
- Configure the following [environment variables](environment-variables.md) before launching `big-AGI`:
|
||||
```dotenv
|
||||
HTTP_BASIC_AUTH_USERNAME=<your username>
|
||||
HTTP_BASIC_AUTH_PASSWORD=<your password>
|
||||
```
|
||||
|
||||
- Start the application 🔒
|
||||
|
||||
<br/>
|
||||
|
||||
### Cloud Deployments Authentication
|
||||
|
||||
> This approach allows you to enable authentication without rebuilding the application by using the features
|
||||
> provided by your cloud platform to manage user accounts and access.
|
||||
|
||||
Many cloud deployment platforms offer built-in authentication mechanisms. Refer to the platform's documentation
|
||||
for setup instructions:
|
||||
|
||||
1. [CloudFlare Access / Zero Trust](https://www.cloudflare.com/zero-trust/products/access/)
|
||||
2. [Vercel Authentication](https://vercel.com/docs/security/deployment-protection/methods-to-protect-deployments/vercel-authentication)
|
||||
3. [Vercel Password Protection](https://vercel.com/docs/security/deployment-protection/methods-to-protect-deployments/password-protection)
|
||||
4. Let us know when you test more solutions (Heroku, AWS IAM, Google IAP, etc.)
|
||||
@@ -1,55 +1,68 @@
|
||||
# Deploying Next.js App on Cloudflare Pages
|
||||
# Deploying a Next.js App on Cloudflare Pages
|
||||
|
||||
Follow these steps to deploy your Next.js app on Cloudflare Pages. This guide is based on
|
||||
the [official Cloudflare developer documentation](https://developers.cloudflare.com/pages/framework-guides/deploy-a-nextjs-site/),
|
||||
with a few additional steps.
|
||||
> WARNING: Cloudflare Pages does not support traditional NodeJS runtimes, but only Edge Runtime functions.
|
||||
>
|
||||
> In this project we use Prisma connected to serverless Postgres, which at the moment cannot run on
|
||||
> edge functions, so we cannot deploy this project on Cloudflare Pages.
|
||||
>
|
||||
> Workaround: Step 3.4. has been added below, to DELETE the NodeJS traditional runtime - which means that some
|
||||
> parts of this application will not work.
|
||||
> - [Side effects](https://github.com/enricoros/big-agi/blob/main/src/apps/chat/trade/server/trade.router.ts#L19):
|
||||
> Sharing functionality to DB, and import from ChatGPT share, and post to Paste.GG will not work
|
||||
> - See [Issue 174](https://github.com/enricoros/big-agi/issues/174).
|
||||
>
|
||||
> Longer term: follow [prisma/prisma: Support Edge Function deployments](https://github.com/prisma/prisma/issues/21394)
|
||||
> and convert the Node runtime to Edge runtime once Prisma supports it.
|
||||
|
||||
## Step 1: Fork the Repository
|
||||
This guide provides steps to deploy your Next.js app on Cloudflare Pages.
|
||||
It is based on the [official Cloudflare developer documentation](https://developers.cloudflare.com/pages/framework-guides/deploy-a-nextjs-site/),
|
||||
with some additional steps.
|
||||
|
||||
Fork the repository to your own GitHub account.
|
||||
## Step 1: Repository Forking
|
||||
|
||||
## Step 2: Connect Cloudflare Pages to Your GitHub Account
|
||||
Fork the repository to your personal GitHub account.
|
||||
|
||||
1. Go to the Cloudflare Pages section and click the `Create a project` button.
|
||||
2. Click `Connect To Git` and give Cloudflare Pages either All GitHub account Repo access or selected Repo access. We
|
||||
recommend using selected Repo access and selecting the forked repo from step 1.
|
||||
## Step 2: Linking Cloudflare Pages to Your GitHub Account
|
||||
|
||||
## Step 3: Setup Build and Deployments
|
||||
1. Navigate to the Cloudflare Pages section and click on the `Create a project` button.
|
||||
2. Click `Connect To Git` and grant Cloudflare Pages access to either all GitHub account repositories or selected repositories.
|
||||
We recommend using selected Repo access and selecting the forked repository from step 1.
|
||||
|
||||
1. Once you select the forked GitHub repo, click the `Begin Setup` button.
|
||||
2. On this page, set your `Project name`, `Production branch` (e.g., main), and your Build settings.
|
||||
3. Select `Next.js` from the `Framework preset` dropdown menu.
|
||||
4. Leave the preset filled Build command and Build output directory as preset defaults.
|
||||
5. Set `Environmental variables` (advanced) on this page to configure some variables as follows:
|
||||
## Step 3: Configuring Build and Deployments
|
||||
|
||||
| Variable | Value |
|
||||
|---------------------------|---------|
|
||||
| `GO_VERSION` | `1.16` |
|
||||
| `NEXT_TELEMETRY_DISABLED` | `1` |
|
||||
| `NODE_VERSION` | `17` |
|
||||
| `PHP_VERSION` | `7.4` |
|
||||
| `PYTHON_VERSION` | `3.7` |
|
||||
| `RUBY_VERSION` | `2.7.1` |
|
||||
1. After selecting the forked GitHub repository, click the **Begin Setup** button
|
||||
2. On this page, set your **Project name**, **Production branch** (e.g., main), and your Build settings
|
||||
3. Choose `Next.js` from the **Framework preset** dropdown menu
|
||||
4. Set a custom **Build Command**:
|
||||
- `rm app/api/trpc-node/[trpc]/route.ts && npx @cloudflare/next-on-pages@1`
|
||||
- see the tradeoffs for this deletion on the notice at the top
|
||||
5. Keep the **Build output directory** as default
|
||||
6. Click the **Save and Deploy** button
|
||||
|
||||
6. Click the `Save and Deploy` button.
|
||||
## Step 4: Monitoring the Deployment Process
|
||||
|
||||
## Step 4: Monitor the Deployment Process
|
||||
Observe the process as it initializes your build environment, clones the GitHub repository, builds the application, and deploys it
|
||||
to the Cloudflare Network. Once complete, proceed to the project you created.
|
||||
|
||||
Watch the process run to initialize your build environment, clone the GitHub repo, build the application, and deploy to
|
||||
the Cloudflare Network. Once that is done, proceed to the project you created.
|
||||
## Step 5: Required: Set the `nodejs_compat` compatibility flag
|
||||
|
||||
## Step 5: Set up a Custom Domain
|
||||
1. Navigate to the [Settings > Functions](https://dash.cloudflare.com/?to=/:account/pages/view/:pages-project/settings/functions) page of your newly created project
|
||||
2. Scroll to `Compatibility flags` and enter "`nodejs_compat`" for both **Production** and **Preview** environments.
|
||||
It should look like this: 
|
||||
3. Re-deploy your project for the new flags to take effect
|
||||
|
||||
## Step 6: (Optional) Custom Domain Configuration
|
||||
|
||||
Use the `Custom domains` tab to set up your domain via CNAME.
|
||||
|
||||
## Step 6: Configure Access Policy and Web Analytics
|
||||
## Step 7: (Optional) Access Policy and Web Analytics Configuration
|
||||
|
||||
Go to the `Settings` page and enable the following settings:
|
||||
Navigate to the `Settings` page and enable the following settings:
|
||||
|
||||
1. Access Policy: Restrict [preview deployments](https://developers.cloudflare.com/pages/platform/preview-deployments/)
|
||||
to members of your Cloudflare account via one-time pin and restrict primary `*.YOURPROJECT.pages.dev` domain.
|
||||
See [Cloudflare Pages known issues](https://developers.cloudflare.com/pages/platform/known-issues/#enabling-access-on-your-pagesdev-domain)
|
||||
for more information.
|
||||
Refer to [Cloudflare Pages known issues](https://developers.cloudflare.com/pages/platform/known-issues/#enabling-access-on-your-pagesdev-domain)
|
||||
for more details.
|
||||
2. Enable Web Analytics.
|
||||
|
||||
Now you have successfully deployed your Next.js app on Cloudflare Pages.
|
||||
Congratulations! You have successfully deployed your Next.js app on Cloudflare Pages.
|
||||
@@ -1,26 +1,60 @@
|
||||
# Deploy `big-AGI` with Docker 🐳
|
||||
# Deploying `big-AGI` with Docker
|
||||
|
||||
Deploy the big-AGI application using Docker containers for a consistent, efficient, and automated deployment process. Enjoy faster development cycles, easier collaboration, and seamless environment management. 🚀
|
||||
Utilize Docker containers to deploy the big-AGI application for an efficient and automated deployment process.
|
||||
Docker ensures faster development cycles, easier collaboration, and seamless environment management.
|
||||
|
||||
Docker is a platform for developing, packaging, and deploying applications as lightweight containers, ensuring consistent behavior across environments.
|
||||
## Build and run your container 🔧
|
||||
|
||||
## `big-AGI` Docker Components
|
||||
1. **Clone big-AGI**
|
||||
```bash
|
||||
git clone https://github.com/enricoros/big-agi.git
|
||||
cd big-agi
|
||||
```
|
||||
2. **Build the Docker Image**: Build a local docker image from the provided Dockerfile:
|
||||
```bash
|
||||
docker build -t big-agi .
|
||||
```
|
||||
3. **Run the Docker Container**: start a Docker container from the newly built image,
|
||||
and expose its http port 3000 to your `localhost:3000` using:
|
||||
```bash
|
||||
docker run -d -p 3000:3000 big-agi
|
||||
```
|
||||
4. Browse to [http://localhost:3000](http://localhost:3000)
|
||||
|
||||
The big-AGI repository includes a Dockerfile and a GitHub Actions workflow for building and publishing a Docker image of the application.
|
||||
## Documentation
|
||||
|
||||
The big-AGI repository includes a Dockerfile and a GitHub Actions workflow for building and publishing a
|
||||
Docker image of the application.
|
||||
|
||||
### Dockerfile
|
||||
|
||||
The [`Dockerfile`](../Dockerfile) sets up a Node.js environment, installs dependencies, and creates a production-ready version of the application.
|
||||
The [`Dockerfile`](../Dockerfile) describes how to create a Docker image. It establishes a Node.js environment,
|
||||
installs dependencies, and creates a production-ready version of the application as a local container.
|
||||
|
||||
### GitHub Actions Workflow
|
||||
### Official container images
|
||||
|
||||
The [`.github/workflows/docker-image.yml`](../.github/workflows/docker-image.yml) file automates building and publishing the Docker image when changes are pushed to the `main` branch.
|
||||
The [`.github/workflows/docker-image.yml`](../.github/workflows/docker-image.yml) file automates the
|
||||
building and publishing of the Docker images to the GitHub Container Registry (ghcr) when changes are
|
||||
pushed to the `main` branch.
|
||||
|
||||
## Deploy Steps
|
||||
Official pre-built containers: [ghcr.io/enricoros/big-agi](https://github.com/enricoros/big-agi/pkgs/container/big-agi)
|
||||
|
||||
1. Clone the big-AGI repository
|
||||
2. Navigate to the project directory
|
||||
3. Build the Docker image using the provided Dockerfile
|
||||
4. Run the Docker container with the built image
|
||||
Run official pre-built containers:
|
||||
```bash
|
||||
docker run -d -p 3000:3000 ghcr.io/enricoros/big-agi
|
||||
```
|
||||
|
||||
Embrace the benefits of Docker for a reliable and efficient big-AGI deployment. 🎉
|
||||
### Run official containers
|
||||
|
||||
In addition, the repository also includes a `docker-compose.yaml` file, configured to run the pre-built
|
||||
'ghcr image'. This file is used to define the `big-agi` service, the ports to expose, and the command to run.
|
||||
|
||||
If you have Docker Compose installed, you can run the Docker container with `docker-compose up`
|
||||
to pull the Docker image (if it hasn't been pulled already) and start a Docker container. If you want to
|
||||
update the image to the latest version, you can run `docker-compose pull` before starting the service.
|
||||
|
||||
```bash
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
Leverage Docker's capabilities for a reliable and efficient big-AGI deployment.
|
||||
@@ -0,0 +1,31 @@
|
||||
# This file is used to run `big-AGI` and `browserless` with Docker Compose.
|
||||
#
|
||||
# The two containers are linked together and `big-AGI` is configured to use `browserless`
|
||||
# as its Puppeteer endpoint (from the containers intranet, it is available browserless:3000).
|
||||
#
|
||||
# From your host, you can access big-AGI on http://127.0.0.1:3000 and browserless on http://127.0.0.1:9222.
|
||||
#
|
||||
# To start the containers, run:
|
||||
# docker-compose -f docs/docker/docker-compose-browserless.yaml up
|
||||
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
big-agi:
|
||||
image: ghcr.io/enricoros/big-agi:main
|
||||
ports:
|
||||
- "3000:3000"
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
- PUPPETEER_WSS_ENDPOINT=ws://browserless:3000
|
||||
command: [ "next", "start", "-p", "3000" ]
|
||||
depends_on:
|
||||
- browserless
|
||||
|
||||
browserless:
|
||||
image: browserless/chrome:latest
|
||||
ports:
|
||||
- "9222:3000" # Map host's port 9222 to container's port 3000
|
||||
environment:
|
||||
- MAX_CONCURRENT_SESSIONS=10
|
||||
@@ -0,0 +1,124 @@
|
||||
# Environment Variables
|
||||
|
||||
This document provides an explanation of the environment variables used in the big-AGI application.
|
||||
|
||||
**All variables are optional**; and _UI options_ take precedence over _backend environment variables_,
|
||||
which take place over _defaults_. This file is kept in sync with [`../src/server/env.mjs`](../src/server/env.mjs).
|
||||
|
||||
### Setting Environment Variables
|
||||
|
||||
Environment variables can be set by creating a `.env` file in the root directory of the project.
|
||||
|
||||
The following is an example `.env` for copy-paste convenience:
|
||||
|
||||
```bash
|
||||
# Database
|
||||
POSTGRES_PRISMA_URL=
|
||||
POSTGRES_URL_NON_POOLING=
|
||||
|
||||
# LLMs
|
||||
OPENAI_API_KEY=
|
||||
OPENAI_API_HOST=
|
||||
OPENAI_API_ORG_ID=
|
||||
AZURE_OPENAI_API_ENDPOINT=
|
||||
AZURE_OPENAI_API_KEY=
|
||||
ANTHROPIC_API_KEY=
|
||||
ANTHROPIC_API_HOST=
|
||||
MISTRAL_API_KEY=
|
||||
OLLAMA_API_HOST=
|
||||
OPENROUTER_API_KEY=
|
||||
|
||||
# Model Observability: Helicone
|
||||
HELICONE_API_KEY=
|
||||
|
||||
# Text-To-Speech
|
||||
ELEVENLABS_API_KEY=
|
||||
ELEVENLABS_API_HOST=
|
||||
ELEVENLABS_VOICE_ID=
|
||||
# Text-To-Image
|
||||
PRODIA_API_KEY=
|
||||
# Google Custom Search
|
||||
GOOGLE_CLOUD_API_KEY=
|
||||
GOOGLE_CSE_ID=
|
||||
# Browse
|
||||
PUPPETEER_WSS_ENDPOINT=
|
||||
|
||||
# Backend Analytics
|
||||
BACKEND_ANALYTICS=
|
||||
|
||||
# Backend HTTP Basic Authentication
|
||||
HTTP_BASIC_AUTH_USERNAME=
|
||||
HTTP_BASIC_AUTH_PASSWORD=
|
||||
```
|
||||
|
||||
## Variables Documentation
|
||||
|
||||
### Database
|
||||
|
||||
To enable features such as Chat Link Shring, you need to connect the backend to a database. We require
|
||||
serverless Postgres, which is available on Vercel, Neon and more.
|
||||
|
||||
Also make sure that you run `npx prisma db:push` to create the initial schema on the database for the
|
||||
first time (or update it on a later stage).
|
||||
|
||||
| Variable | Description |
|
||||
|----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `POSTGRES_PRISMA_URL` | The URL of the Postgres database used by Prisma - example: `postgres://USER:PASS@SOMEHOST.postgres.vercel-storage.com/SOMEDB?pgbouncer=true&connect_timeout=15` |
|
||||
| `POSTGRES_URL_NON_POOLING` | The URL of the Postgres database without pooling |
|
||||
|
||||
### LLMs
|
||||
|
||||
The following variables when set will enable the corresponding LLMs on the server-side, without
|
||||
requiring the user to enter an API key
|
||||
|
||||
| Variable | Description | Required |
|
||||
|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------|
|
||||
| `OPENAI_API_KEY` | API key for OpenAI | Recommended |
|
||||
| `OPENAI_API_HOST` | Changes the backend host for the OpenAI vendor, to enable platforms such as Helicone and CloudFlare AI Gateway | Optional |
|
||||
| `OPENAI_API_ORG_ID` | Sets the "OpenAI-Organization" header field to support organization users | Optional |
|
||||
| `AZURE_OPENAI_API_ENDPOINT` | Azure OpenAI endpoint - host only, without the path | Optional, but if set `AZURE_OPENAI_API_KEY` must also be set |
|
||||
| `AZURE_OPENAI_API_KEY` | Azure OpenAI API key, see [config-azure-openai.md](config-azure-openai.md) | Optional, but if set `AZURE_OPENAI_API_ENDPOINT` must also be set |
|
||||
| `ANTHROPIC_API_KEY` | The API key for Anthropic | Optional |
|
||||
| `ANTHROPIC_API_HOST` | Changes the backend host for the Anthropic vendor, to enable platforms such as [config-aws-bedrock.md](config-aws-bedrock.md) | Optional |
|
||||
| `MISTRAL_API_KEY` | The API key for Mistral | Optional |
|
||||
| `OLLAMA_API_HOST` | Changes the backend host for the Ollama vendor. See [config-ollama.md](config-ollama.md) | |
|
||||
| `OPENROUTER_API_KEY` | The API key for OpenRouter | Optional |
|
||||
|
||||
### Model Observability: Helicone
|
||||
|
||||
Helicone provides observability to your LLM calls. It is a paid service, with a generous free tier.
|
||||
It is currently supported for:
|
||||
|
||||
- **Anthropic**: by setting the Helicone API key, Helicone is automatically activated
|
||||
- **OpenAI**: you also need to set `OPENAI_API_HOST` to `oai.hconeai.com`, to enable routing
|
||||
|
||||
| Variable | Description |
|
||||
|--------------------|--------------------------|
|
||||
| `HELICONE_API_KEY` | The API key for Helicone |
|
||||
|
||||
### Specials
|
||||
|
||||
Enable the app to Talk, Draw, and Google things up.
|
||||
|
||||
| Variable | Description |
|
||||
|:---------------------------|:------------------------------------------------------------------------------------------------------------------------|
|
||||
| **Text-To-Speech** | [ElevenLabs](https://elevenlabs.io/) is a high quality speech synthesis service |
|
||||
| `ELEVENLABS_API_KEY` | ElevenLabs API Key - used for calls, etc. |
|
||||
| `ELEVENLABS_API_HOST` | Custom host for ElevenLabs |
|
||||
| `ELEVENLABS_VOICE_ID` | Default voice ID for ElevenLabs |
|
||||
| **Google Custom Search** | [Google Programmable Search Engine](https://programmablesearchengine.google.com/about/) produces links to pages |
|
||||
| `GOOGLE_CLOUD_API_KEY` | Google Cloud API Key, used with the '/react' command - [Link to GCP](https://console.cloud.google.com/apis/credentials) |
|
||||
| `GOOGLE_CSE_ID` | Google Custom/Programmable Search Engine ID - [Link to PSE](https://programmablesearchengine.google.com/) |
|
||||
| **Text-To-Image** | [Prodia](https://prodia.com/) is a reliable image generation service |
|
||||
| `PRODIA_API_KEY` | Prodia API Key - used with '/imagine ...' |
|
||||
| **Browse** | |
|
||||
| `PUPPETEER_WSS_ENDPOINT` | Puppeteer WebSocket endpoint - used for browsing, etc. |
|
||||
| **Backend** | |
|
||||
| `BACKEND_ANALYTICS` | Semicolon-separated list of analytics flags (see backend.analytics.ts). Flags: `domain` logs the responding domain. |
|
||||
| `HTTP_BASIC_AUTH_USERNAME` | Username for HTTP Basic Authentication. See the [Authentication](deploy-authentication.md) guide. |
|
||||
| `HTTP_BASIC_AUTH_PASSWORD` | Password for HTTP Basic Authentication. |
|
||||
|
||||
---
|
||||
|
||||
|
||||
|
||||
|
Before Width: | Height: | Size: 283 KiB After Width: | Height: | Size: 279 KiB |
|
Before Width: | Height: | Size: 255 KiB After Width: | Height: | Size: 209 KiB |
|
Before Width: | Height: | Size: 626 KiB After Width: | Height: | Size: 618 KiB |
|
After Width: | Height: | Size: 29 KiB |
|
After Width: | Height: | Size: 42 KiB |
|
After Width: | Height: | Size: 32 KiB |
|
After Width: | Height: | Size: 370 KiB |
|
After Width: | Height: | Size: 34 KiB |
|
After Width: | Height: | Size: 37 KiB |
|
After Width: | Height: | Size: 48 KiB |
|
After Width: | Height: | Size: 730 KiB |
|
After Width: | Height: | Size: 38 KiB |
|
After Width: | Height: | Size: 60 KiB |
|
Before Width: | Height: | Size: 8.2 KiB After Width: | Height: | Size: 5.8 KiB |
|
Before Width: | Height: | Size: 3.8 MiB After Width: | Height: | Size: 1.6 MiB |
|
Before Width: | Height: | Size: 20 KiB After Width: | Height: | Size: 13 KiB |
|
Before Width: | Height: | Size: 84 KiB After Width: | Height: | Size: 55 KiB |
|
Before Width: | Height: | Size: 16 KiB After Width: | Height: | Size: 11 KiB |
|
Before Width: | Height: | Size: 13 KiB After Width: | Height: | Size: 12 KiB |
|
Before Width: | Height: | Size: 79 KiB After Width: | Height: | Size: 54 KiB |
|
Before Width: | Height: | Size: 80 KiB After Width: | Height: | Size: 54 KiB |
|
Before Width: | Height: | Size: 3.7 KiB After Width: | Height: | Size: 2.6 KiB |
|
Before Width: | Height: | Size: 48 KiB After Width: | Height: | Size: 32 KiB |
|
Before Width: | Height: | Size: 8.6 KiB After Width: | Height: | Size: 5.6 KiB |
|
Before Width: | Height: | Size: 15 KiB After Width: | Height: | Size: 9.7 KiB |
|
Before Width: | Height: | Size: 94 KiB After Width: | Height: | Size: 60 KiB |
|
Before Width: | Height: | Size: 195 KiB After Width: | Height: | Size: 157 KiB |
|
Before Width: | Height: | Size: 192 KiB After Width: | Height: | Size: 156 KiB |
|
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 10 KiB |
|
Before Width: | Height: | Size: 23 KiB After Width: | Height: | Size: 22 KiB |
|
Before Width: | Height: | Size: 62 KiB After Width: | Height: | Size: 49 KiB |
@@ -0,0 +1,59 @@
|
||||
/**
|
||||
* Middleware to protect `big-AGI` with HTTP Basic Authentication
|
||||
*
|
||||
* For more information on how to deploy with HTTP Basic Authentication, see:
|
||||
* - [deploy-authentication.md](docs/deploy-authentication.md)
|
||||
*/
|
||||
|
||||
import type { NextRequest } from 'next/server';
|
||||
import { NextResponse } from 'next/server';
|
||||
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export function middleware(request: NextRequest) {
|
||||
|
||||
// Validate deployment configuration
|
||||
if (!process.env.HTTP_BASIC_AUTH_USERNAME || !process.env.HTTP_BASIC_AUTH_PASSWORD) {
|
||||
console.warn('HTTP Basic Authentication is enabled but not configured');
|
||||
return new Response('Unauthorized/Unconfigured', unauthResponse);
|
||||
}
|
||||
|
||||
// Request client authentication if no credentials are provided
|
||||
const authHeader = request.headers.get('authorization');
|
||||
if (!authHeader?.startsWith('Basic '))
|
||||
return new Response('Unauthorized', unauthResponse);
|
||||
|
||||
// Request authentication if credentials are invalid
|
||||
const base64Credentials = authHeader.split(' ')[1];
|
||||
const credentials = Buffer.from(base64Credentials, 'base64').toString('ascii');
|
||||
const [username, password] = credentials.split(':');
|
||||
if (
|
||||
!username || !password ||
|
||||
username !== process.env.HTTP_BASIC_AUTH_USERNAME ||
|
||||
password !== process.env.HTTP_BASIC_AUTH_PASSWORD
|
||||
)
|
||||
return new Response('Unauthorized', unauthResponse);
|
||||
|
||||
return NextResponse.next();
|
||||
}
|
||||
|
||||
|
||||
// Response to send when authentication is required
|
||||
const unauthResponse: ResponseInit = {
|
||||
status: 401,
|
||||
headers: {
|
||||
'WWW-Authenticate': 'Basic realm="Secure big-AGI"',
|
||||
},
|
||||
};
|
||||
|
||||
export const config = {
|
||||
matcher: [
|
||||
// Include root
|
||||
'/',
|
||||
// Include pages
|
||||
'/(call|index|news|personas|link)(.*)',
|
||||
// Include API routes
|
||||
'/api(.*)',
|
||||
// Note: this excludes _next, /images etc..
|
||||
],
|
||||
};
|
||||
@@ -1,25 +0,0 @@
|
||||
/** @type {import('next').NextConfig} */
|
||||
const nextConfig = {
|
||||
reactStrictMode: true,
|
||||
env: {
|
||||
// defaults to TRUE, unless API Keys are set at build time; this flag is used by the UI
|
||||
HAS_SERVER_KEY_OPENAI: !!process.env.OPENAI_API_KEY,
|
||||
HAS_SERVER_KEY_ELEVENLABS: !!process.env.ELEVENLABS_API_KEY,
|
||||
HAS_SERVER_KEY_PRODIA: !!process.env.PRODIA_API_KEY,
|
||||
HAS_SERVER_KEYS_GOOGLE_CSE: !!process.env.GOOGLE_CLOUD_API_KEY && !!process.env.GOOGLE_CSE_ID,
|
||||
},
|
||||
webpack(config, { isServer, dev }) {
|
||||
// @mui/joy: anything material gets redirected to Joy
|
||||
config.resolve.alias['@mui/material'] = '@mui/joy';
|
||||
|
||||
// @dqbd/tiktoken: enable asynchronous WebAssembly
|
||||
config.experiments = {
|
||||
asyncWebAssembly: true,
|
||||
layers: true,
|
||||
};
|
||||
|
||||
return config;
|
||||
},
|
||||
};
|
||||
|
||||
module.exports = nextConfig;
|
||||
@@ -0,0 +1,41 @@
|
||||
/** @type {import('next').NextConfig} */
|
||||
let nextConfig = {
|
||||
reactStrictMode: true,
|
||||
|
||||
// Note: disabled to chech whether the project becomes slower with this
|
||||
// modularizeImports: {
|
||||
// '@mui/icons-material': {
|
||||
// transform: '@mui/icons-material/{{member}}',
|
||||
// },
|
||||
// },
|
||||
|
||||
// [puppeteer] https://github.com/puppeteer/puppeteer/issues/11052
|
||||
experimental: {
|
||||
serverComponentsExternalPackages: ['puppeteer-core'],
|
||||
},
|
||||
|
||||
webpack: (config, _options) => {
|
||||
// @mui/joy: anything material gets redirected to Joy
|
||||
config.resolve.alias['@mui/material'] = '@mui/joy';
|
||||
|
||||
// @dqbd/tiktoken: enable asynchronous WebAssembly
|
||||
config.experiments = {
|
||||
asyncWebAssembly: true,
|
||||
layers: true,
|
||||
};
|
||||
|
||||
return config;
|
||||
},
|
||||
};
|
||||
|
||||
// Validate environment variables, if set at build time. Will be actually read and used at runtime.
|
||||
// This is the reason both this file and the servr/env.mjs files have this extension.
|
||||
await import('./src/server/env.mjs');
|
||||
|
||||
// conditionally enable the nextjs bundle analyzer
|
||||
if (process.env.ANALYZE_BUNDLE) {
|
||||
const { default: withBundleAnalyzer } = await import('@next/bundle-analyzer');
|
||||
nextConfig = withBundleAnalyzer({ openAnalyzer: true })(nextConfig);
|
||||
}
|
||||
|
||||
export default nextConfig;
|
||||
@@ -1,45 +1,71 @@
|
||||
{
|
||||
"name": "big-agi",
|
||||
"version": "0.9.1",
|
||||
"version": "1.7.3",
|
||||
"private": true,
|
||||
"engines": {
|
||||
"node": ">=18.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"dev": "next dev",
|
||||
"build": "next build",
|
||||
"start": "next start",
|
||||
"lint": "next lint"
|
||||
"lint": "next lint",
|
||||
"env:pull": "npx vercel env pull .env.development.local",
|
||||
"postinstall": "prisma generate",
|
||||
"db:push": "prisma db push",
|
||||
"db:studio": "prisma studio"
|
||||
},
|
||||
"dependencies": {
|
||||
"@dqbd/tiktoken": "^1.0.7",
|
||||
"@emotion/react": "^11.10.8",
|
||||
"@emotion/server": "^11.10.0",
|
||||
"@emotion/styled": "^11.10.8",
|
||||
"@mui/icons-material": "^5.11.16",
|
||||
"@mui/joy": "^5.0.0-alpha.77",
|
||||
"@tanstack/react-query": "^4.29.5",
|
||||
"@vercel/analytics": "^1.0.0",
|
||||
"eventsource-parser": "^1.0.0",
|
||||
"next": "^13.3.2",
|
||||
"pdfjs-dist": "^3.5.141",
|
||||
"@emotion/cache": "^11.11.0",
|
||||
"@emotion/react": "^11.11.1",
|
||||
"@emotion/server": "^11.11.0",
|
||||
"@emotion/styled": "^11.11.0",
|
||||
"@mui/icons-material": "^5.14.19",
|
||||
"@mui/joy": "^5.0.0-beta.17",
|
||||
"@next/bundle-analyzer": "^14.0.4",
|
||||
"@prisma/client": "^5.7.0",
|
||||
"@sanity/diff-match-patch": "^3.1.1",
|
||||
"@t3-oss/env-nextjs": "^0.7.1",
|
||||
"@tanstack/react-query": "^4.36.1",
|
||||
"@trpc/client": "^10.44.1",
|
||||
"@trpc/next": "^10.44.1",
|
||||
"@trpc/react-query": "^10.44.1",
|
||||
"@trpc/server": "^10.44.1",
|
||||
"@vercel/analytics": "^1.1.1",
|
||||
"browser-fs-access": "^0.35.0",
|
||||
"eventsource-parser": "^1.1.1",
|
||||
"idb-keyval": "^6.2.1",
|
||||
"next": "^14.0.4",
|
||||
"pdfjs-dist": "4.0.269",
|
||||
"plantuml-encoder": "^1.4.0",
|
||||
"prismjs": "^1.29.0",
|
||||
"react": "^18.2.0",
|
||||
"react-dom": "^18.2.0",
|
||||
"react-markdown": "^8.0.7",
|
||||
"remark-gfm": "^3.0.1",
|
||||
"uuid": "^9.0.0",
|
||||
"zustand": "^4.3.7"
|
||||
"react-katex": "^3.0.1",
|
||||
"react-markdown": "^9.0.1",
|
||||
"react-timeago": "^7.2.0",
|
||||
"remark-gfm": "^4.0.0",
|
||||
"superjson": "^2.2.1",
|
||||
"tesseract.js": "^5.0.3",
|
||||
"uuid": "^9.0.1",
|
||||
"zod": "^3.22.4",
|
||||
"zustand": "~4.3.9"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^18.16.3",
|
||||
"@types/prismjs": "^1.26.0",
|
||||
"@types/react": "^18.2.0",
|
||||
"@types/react-dom": "^18.2.1",
|
||||
"@types/uuid": "^9.0.1",
|
||||
"eslint": "^8.39.0",
|
||||
"eslint-config-next": "^13.3.2",
|
||||
"prettier": "^2.8.8",
|
||||
"typescript": "^5.0.4"
|
||||
"@cloudflare/puppeteer": "^0.0.5",
|
||||
"@types/node": "^20.10.4",
|
||||
"@types/plantuml-encoder": "^1.4.2",
|
||||
"@types/prismjs": "^1.26.3",
|
||||
"@types/react": "^18.2.43",
|
||||
"@types/react-dom": "^18.2.17",
|
||||
"@types/react-katex": "^3.0.4",
|
||||
"@types/react-timeago": "^4.1.6",
|
||||
"@types/uuid": "^9.0.7",
|
||||
"eslint": "^8.55.0",
|
||||
"eslint-config-next": "^14.0.4",
|
||||
"prettier": "^3.1.1",
|
||||
"prisma": "^5.7.0",
|
||||
"typescript": "^5.3.3"
|
||||
},
|
||||
"engines": {
|
||||
"node": "^20.0.0 || ^18.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,40 +1,42 @@
|
||||
import * as React from 'react';
|
||||
import Head from 'next/head';
|
||||
import { MyAppProps } from 'next/app';
|
||||
import { Analytics as VercelAnalytics } from '@vercel/analytics/react';
|
||||
import { AppProps } from 'next/app';
|
||||
import { CacheProvider, EmotionCache } from '@emotion/react';
|
||||
import { CssBaseline, CssVarsProvider } from '@mui/joy';
|
||||
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
|
||||
|
||||
import '@/common/styles/GithubMarkdown.css';
|
||||
import { Brand } from '@/common/brand';
|
||||
import { createEmotionCache, theme } from '@/common/theme';
|
||||
import { Brand } from '~/common/app.config';
|
||||
import { apiQuery } from '~/common/util/trpc.client';
|
||||
|
||||
import 'katex/dist/katex.min.css';
|
||||
import '~/common/styles/CodePrism.css';
|
||||
import '~/common/styles/GithubMarkdown.css';
|
||||
|
||||
import { ProviderBackend } from '~/common/state/ProviderBackend';
|
||||
import { ProviderSnacks } from '~/common/state/ProviderSnacks';
|
||||
import { ProviderTRPCQueryClient } from '~/common/state/ProviderTRPCQueryClient';
|
||||
import { ProviderTheming } from '~/common/state/ProviderTheming';
|
||||
|
||||
|
||||
// Client-side cache, shared for the whole session of the user in the browser.
|
||||
const clientSideEmotionCache = createEmotionCache();
|
||||
const MyApp = ({ Component, emotionCache, pageProps }: MyAppProps) =>
|
||||
<>
|
||||
|
||||
export interface MyAppProps extends AppProps {
|
||||
emotionCache?: EmotionCache;
|
||||
}
|
||||
<Head>
|
||||
<title>{Brand.Title.Common}</title>
|
||||
<meta name='viewport' content='minimum-scale=1, initial-scale=1, width=device-width, shrink-to-fit=no' />
|
||||
</Head>
|
||||
|
||||
<ProviderTheming emotionCache={emotionCache}>
|
||||
<ProviderTRPCQueryClient>
|
||||
<ProviderSnacks>
|
||||
<ProviderBackend>
|
||||
<Component {...pageProps} />
|
||||
</ProviderBackend>
|
||||
</ProviderSnacks>
|
||||
</ProviderTRPCQueryClient>
|
||||
</ProviderTheming>
|
||||
|
||||
export default function MyApp({ Component, emotionCache = clientSideEmotionCache, pageProps }: MyAppProps) {
|
||||
const [queryClient] = React.useState(() => new QueryClient());
|
||||
return <>
|
||||
<CacheProvider value={emotionCache}>
|
||||
<Head>
|
||||
<title>{Brand.Title.Common}</title>
|
||||
<meta name='viewport' content='minimum-scale=1, initial-scale=1, width=device-width, shrink-to-fit=no' />
|
||||
</Head>
|
||||
{/* Rect-query provider */}
|
||||
<QueryClientProvider client={queryClient}>
|
||||
<CssVarsProvider defaultMode='light' theme={theme}>
|
||||
{/* CssBaseline kickstart an elegant, consistent, and simple baseline to build upon. */}
|
||||
<CssBaseline />
|
||||
<Component {...pageProps} />
|
||||
</CssVarsProvider>
|
||||
</QueryClientProvider>
|
||||
</CacheProvider>
|
||||
<VercelAnalytics debug={false} />
|
||||
|
||||
</>;
|
||||
}
|
||||
|
||||
// enables the React Query API invocation
|
||||
export default apiQuery.withTRPC(MyApp);
|
||||
@@ -1,16 +1,15 @@
|
||||
import * as React from 'react';
|
||||
import { AppType } from 'next/app';
|
||||
import { AppType, MyAppProps } from 'next/app';
|
||||
import { default as Document, DocumentContext, DocumentProps, Head, Html, Main, NextScript } from 'next/document';
|
||||
import createEmotionServer from '@emotion/server/create-instance';
|
||||
import { getInitColorSchemeScript } from '@mui/joy/styles';
|
||||
|
||||
import { Brand } from '@/common/brand';
|
||||
import { MyAppProps } from './_app';
|
||||
import { bodyFontClassName, createEmotionCache } from '@/common/theme';
|
||||
import { Brand } from '~/common/app.config';
|
||||
import { bodyFontClassName, createEmotionCache } from '~/common/app.theme';
|
||||
|
||||
|
||||
interface MyDocumentProps extends DocumentProps {
|
||||
emotionStyleTags: JSX.Element[];
|
||||
emotionStyleTags: React.JSX.Element[];
|
||||
}
|
||||
|
||||
export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
|
||||
@@ -19,7 +18,6 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
|
||||
<Head>
|
||||
{/* Meta (missing Title, set by the App or Page) */}
|
||||
<meta name='description' content={Brand.Meta.Description} />
|
||||
<meta name='keywords' content={Brand.Meta.Keywords} />
|
||||
<meta name='theme-color' content={Brand.Meta.ThemeColor} />
|
||||
|
||||
{/* Favicons & PWA */}
|
||||
@@ -32,7 +30,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
|
||||
<meta name='apple-mobile-web-app-status-bar-style' content='black' />
|
||||
|
||||
{/* Opengraph */}
|
||||
<meta property='og:title' content={Brand.Meta.Title} />
|
||||
<meta property='og:title' content={Brand.Title.Common} />
|
||||
<meta property='og:description' content={Brand.Meta.Description} />
|
||||
{Brand.URIs.CardImage && <meta property='og:image' content={Brand.URIs.CardImage} />}
|
||||
<meta property='og:url' content={Brand.URIs.Home} />
|
||||
@@ -42,7 +40,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
|
||||
{/* Twitter */}
|
||||
<meta property='twitter:card' content='summary_large_image' />
|
||||
<meta property='twitter:url' content={Brand.URIs.Home} />
|
||||
<meta property='twitter:title' content={Brand.Meta.Title} />
|
||||
<meta property='twitter:title' content={Brand.Title.Common} />
|
||||
<meta property='twitter:description' content={Brand.Meta.Description} />
|
||||
{Brand.URIs.CardImage && <meta property='twitter:image' content={Brand.URIs.CardImage} />}
|
||||
<meta name='twitter:site' content={Brand.Meta.TwitterSite} />
|
||||
|
||||
@@ -1,77 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { ElevenLabs } from '@/modules/elevenlabs/elevenlabs.types';
|
||||
|
||||
|
||||
function parseApiParameters(apiKey?: string) {
|
||||
return {
|
||||
apiHost: (process.env.ELEVENLABS_API_HOST || 'api.elevenlabs.io').trim().replaceAll('https://', ''),
|
||||
apiHeaders: {
|
||||
'Content-Type': 'application/json',
|
||||
'xi-api-key': (apiKey || process.env.ELEVENLABS_API_KEY || '').trim(),
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function rethrowElevenLabsError(response: Response) {
|
||||
if (!response.ok) {
|
||||
let errorPayload: object | null = null;
|
||||
try {
|
||||
errorPayload = await response.json();
|
||||
} catch (e) {
|
||||
// ignore
|
||||
}
|
||||
console.error('Error in ElevenLabs API:', errorPayload);
|
||||
throw new Error('ElevenLabs error: ' + JSON.stringify(errorPayload));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
export async function getFromElevenLabs<TJson extends object>(apiKey: string, apiPath: string): Promise<TJson> {
|
||||
const { apiHost, apiHeaders } = parseApiParameters(apiKey);
|
||||
|
||||
const response = await fetch(`https://${apiHost}${apiPath}`, {
|
||||
method: 'GET',
|
||||
headers: apiHeaders,
|
||||
});
|
||||
|
||||
await rethrowElevenLabsError(response);
|
||||
return await response.json();
|
||||
}
|
||||
|
||||
export async function postToElevenLabs<TBody extends object>(apiKey: string, apiPath: string, body: TBody, signal?: AbortSignal): Promise<Response> {
|
||||
const { apiHost, apiHeaders } = parseApiParameters(apiKey);
|
||||
|
||||
const response = await fetch(`https://${apiHost}${apiPath}`, {
|
||||
method: 'POST',
|
||||
headers: apiHeaders,
|
||||
body: JSON.stringify(body),
|
||||
signal,
|
||||
});
|
||||
|
||||
await rethrowElevenLabsError(response);
|
||||
return response;
|
||||
}
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest) {
|
||||
try {
|
||||
const { apiKey = '', text, voiceId: userVoiceId, nonEnglish } = (await req.json()) as ElevenLabs.API.TextToSpeech.RequestBody;
|
||||
const voiceId = userVoiceId || process.env.ELEVENLABS_VOICE_ID || '21m00Tcm4TlvDq8ikWAM';
|
||||
const requestPayload: ElevenLabs.Wire.TextToSpeech.Request = {
|
||||
text: text,
|
||||
...(nonEnglish ? { model_id: 'eleven_multilingual_v1' } : {}),
|
||||
};
|
||||
const response = await postToElevenLabs<ElevenLabs.Wire.TextToSpeech.Request>(apiKey, `/v1/text-to-speech/${voiceId}`, requestPayload);
|
||||
const audioBuffer: ElevenLabs.API.TextToSpeech.Response = await response.arrayBuffer();
|
||||
return new NextResponse(audioBuffer, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
|
||||
} catch (error) {
|
||||
console.error('Error posting to ElevenLabs', error);
|
||||
return new NextResponse(JSON.stringify(`speechToText error: ${error?.toString() || 'Network issue'}`), { status: 500 });
|
||||
}
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export const config = {
|
||||
runtime: 'edge',
|
||||
};
|
||||
@@ -1,48 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { ElevenLabs } from '@/modules/elevenlabs/elevenlabs.types';
|
||||
import { getFromElevenLabs } from './speech';
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest) {
|
||||
try {
|
||||
const { apiKey = '' } = (await req.json()) as ElevenLabs.API.Voices.RequestBody;
|
||||
|
||||
const voicesList = await getFromElevenLabs<ElevenLabs.Wire.Voices.List>(apiKey, '/v1/voices');
|
||||
|
||||
// bring category != 'premade to the top
|
||||
voicesList.voices.sort((a, b) => {
|
||||
if (a.category === 'premade' && b.category !== 'premade') return 1;
|
||||
if (a.category !== 'premade' && b.category === 'premade') return -1;
|
||||
return 0;
|
||||
});
|
||||
|
||||
// map to our own response format
|
||||
const response: ElevenLabs.API.Voices.Response = {
|
||||
voices: voicesList.voices.map((voice, idx) => ({
|
||||
id: voice.voice_id,
|
||||
name: voice.name,
|
||||
description: voice.description,
|
||||
previewUrl: voice.preview_url,
|
||||
category: voice.category,
|
||||
default: idx === 0,
|
||||
})),
|
||||
};
|
||||
|
||||
return new NextResponse(JSON.stringify(response), { status: 200, headers: { 'Content-Type': 'application/json' } });
|
||||
} catch (error) {
|
||||
console.error('Error fetching voices from ElevenLabs:', error);
|
||||
return new NextResponse(
|
||||
JSON.stringify({
|
||||
type: 'error',
|
||||
error: error?.toString() || error || 'Network issue',
|
||||
}),
|
||||
{ status: 500 },
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export const config = {
|
||||
runtime: 'edge',
|
||||
};
|
||||
@@ -1,27 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { OpenAI } from '@/modules/openai/openai.types';
|
||||
import { openaiPost, toApiChatRequest, toWireCompletionRequest } from '@/modules/openai/openai.server';
|
||||
|
||||
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest) {
|
||||
try {
|
||||
const requestBodyJson = await req.json();
|
||||
const { api, ...rest } = await toApiChatRequest(requestBodyJson);
|
||||
const upstreamRequest: OpenAI.Wire.Chat.CompletionRequest = toWireCompletionRequest(rest, false);
|
||||
const upstreamResponse: OpenAI.Wire.Chat.CompletionResponse = await openaiPost(api, '/v1/chat/completions', upstreamRequest);
|
||||
return new NextResponse(JSON.stringify({
|
||||
message: upstreamResponse.choices[0].message,
|
||||
} satisfies OpenAI.API.Chat.Response));
|
||||
} catch (error: any) {
|
||||
console.error('Fetch request failed:', error);
|
||||
return new NextResponse(`[Issue] ${error}`, { status: 400 });
|
||||
}
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export const config = {
|
||||
runtime: 'edge',
|
||||
};
|
||||
@@ -1,30 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { OpenAI } from '@/modules/openai/openai.types';
|
||||
import { openaiGet, toApiChatRequest } from '@/modules/openai/openai.server';
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest): Promise<NextResponse> {
|
||||
try {
|
||||
// FIXME: this is currently broken, the "extractOpenAIChatInputs" is expecting messages/modelId, which we don't have here
|
||||
// keep working on this
|
||||
const requestBodyJson = await req.json();
|
||||
const { api } = await toApiChatRequest(requestBodyJson);
|
||||
|
||||
const wireModels = await openaiGet<OpenAI.Wire.Models.Response>(api, '/v1/models');
|
||||
|
||||
// flatten IDs (most recent first)
|
||||
return new NextResponse(JSON.stringify({
|
||||
models: wireModels.data.map((model) => ({ id: model.id, created: model.created })),
|
||||
} satisfies OpenAI.API.Models.Response));
|
||||
|
||||
} catch (error: any) {
|
||||
console.error('Fetch request failed:', error);
|
||||
return new NextResponse(`[Issue] ${error}`, { status: 400 });
|
||||
}
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
export const config = {
|
||||
runtime: 'edge',
|
||||
};
|
||||
@@ -1,117 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
import { createParser } from 'eventsource-parser';
|
||||
|
||||
import { OpenAI } from '@/modules/openai/openai.types';
|
||||
import { openaiPostResponse, toApiChatRequest, toWireCompletionRequest } from '@/modules/openai/openai.server';
|
||||
|
||||
|
||||
async function chatStreamRepeater(input: OpenAI.API.Chat.Request, signal: AbortSignal): Promise<ReadableStream> {
|
||||
|
||||
// Handle the abort event when the connection is closed by the client
|
||||
signal.addEventListener('abort', () => {
|
||||
console.log('Client closed the connection.');
|
||||
});
|
||||
|
||||
// begin event streaming from the OpenAI API
|
||||
const encoder = new TextEncoder();
|
||||
|
||||
let upstreamResponse: Response;
|
||||
try {
|
||||
const request: OpenAI.Wire.Chat.CompletionRequest = toWireCompletionRequest(input, true);
|
||||
upstreamResponse = await openaiPostResponse(input.api, '/v1/chat/completions', request, signal);
|
||||
} catch (error: any) {
|
||||
console.log(error);
|
||||
const message = '[OpenAI Issue] ' + (error?.message || typeof error === 'string' ? error : JSON.stringify(error)) + (error?.cause ? ' · ' + error.cause : '');
|
||||
return new ReadableStream({
|
||||
start: controller => {
|
||||
controller.enqueue(encoder.encode(message));
|
||||
controller.close();
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// decoding and re-encoding loop
|
||||
|
||||
const onReadableStreamStart = async (controller: ReadableStreamDefaultController) => {
|
||||
|
||||
let hasBegun = false;
|
||||
|
||||
// stream response (SSE) from OpenAI is split into multiple chunks. this function
|
||||
// will parse the event into a text stream, and re-emit it to the client
|
||||
const upstreamParser = createParser(event => {
|
||||
|
||||
// ignore reconnect interval
|
||||
if (event.type !== 'event')
|
||||
return;
|
||||
|
||||
// https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream
|
||||
if (event.data === '[DONE]') {
|
||||
controller.close();
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const json: OpenAI.Wire.Chat.CompletionResponseChunked = JSON.parse(event.data);
|
||||
|
||||
// ignore any 'role' delta update
|
||||
if (json.choices[0].delta?.role)
|
||||
return;
|
||||
|
||||
// stringify and send the first packet as a JSON object
|
||||
if (!hasBegun) {
|
||||
hasBegun = true;
|
||||
const firstPacket: OpenAI.API.Chat.StreamingFirstResponse = {
|
||||
model: json.model,
|
||||
};
|
||||
controller.enqueue(encoder.encode(JSON.stringify(firstPacket)));
|
||||
}
|
||||
|
||||
// transmit the text stream
|
||||
const text = json.choices[0].delta?.content || '';
|
||||
controller.enqueue(encoder.encode(text));
|
||||
|
||||
} catch (error) {
|
||||
// maybe parse error
|
||||
console.error('Error parsing OpenAI response', error);
|
||||
controller.error(error);
|
||||
}
|
||||
});
|
||||
|
||||
// https://web.dev/streams/#asynchronous-iteration
|
||||
const decoder = new TextDecoder();
|
||||
for await (const upstreamChunk of upstreamResponse.body as any)
|
||||
upstreamParser.feed(decoder.decode(upstreamChunk, { stream: true }));
|
||||
|
||||
};
|
||||
|
||||
return new ReadableStream({
|
||||
start: onReadableStreamStart,
|
||||
cancel: (reason) => console.log('chatStreamRepeater cancelled', reason),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest): Promise<Response> {
|
||||
try {
|
||||
const requestBodyJson = await req.json();
|
||||
const chatRequest: OpenAI.API.Chat.Request = await toApiChatRequest(requestBodyJson);
|
||||
const chatResponseStream: ReadableStream = await chatStreamRepeater(chatRequest, req.signal);
|
||||
return new NextResponse(chatResponseStream);
|
||||
} catch (error: any) {
|
||||
if (error.name === 'AbortError') {
|
||||
console.log('Fetch request aborted in handler');
|
||||
return new Response('Request aborted by the user.', { status: 499 }); // Use 499 status code for client closed request
|
||||
} else if (error.code === 'ECONNRESET') {
|
||||
console.log('Connection reset by the client in handler');
|
||||
return new Response('Connection reset by the client.', { status: 499 }); // Use 499 status code for client closed request
|
||||
} else {
|
||||
console.error('Fetch request failed:', error);
|
||||
return new NextResponse(`[Issue] ${error}`, { status: 400 });
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
//noinspection JSUnusedGlobalSymbols
// Next.js route config: run this API handler on the Edge runtime.
export const config = {
  runtime: 'edge',
};
|
||||
@@ -1,88 +0,0 @@
|
||||
// noinspection ExceptionCaughtLocallyJS
|
||||
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { Prodia } from '@/modules/prodia/prodia.types';
|
||||
|
||||
|
||||
export const prodiaHeaders = (apiKey: string): Record<string, string> => ({
|
||||
'X-Prodia-Key': (apiKey || process.env.PRODIA_API_KEY || '').trim(),
|
||||
});
|
||||
|
||||
|
||||
async function createGenerationJob(apiKey: string, jobRequest: Prodia.Wire.Imagine.JobRequest): Promise<Prodia.Wire.Imagine.JobResponse> {
|
||||
const response = await fetch('https://api.prodia.com/v1/job', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
...prodiaHeaders(apiKey),
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify(jobRequest),
|
||||
});
|
||||
if (response.status !== 200) {
|
||||
console.log('Bad Prodia Response:', await response.text());
|
||||
throw new Error(`Bad Prodia Response: ${response.status}`);
|
||||
}
|
||||
return await response.json();
|
||||
}
|
||||
|
||||
async function getJobStatus(apiKey: string, jobId: string): Promise<Prodia.Wire.Imagine.JobResponse> {
|
||||
const response = await fetch(`https://api.prodia.com/v1/job/${jobId}`, {
|
||||
headers: prodiaHeaders(apiKey),
|
||||
});
|
||||
if (response.status !== 200)
|
||||
throw new Error(`Bad Prodia Response: ${response.status}`);
|
||||
return await response.json();
|
||||
}
|
||||
|
||||
|
||||
/**
 * Generates one image via the Prodia API: submits a job, polls its status
 * until completion or the 15s budget runs out, then returns the image URL.
 *
 * Responds with a Prodia.API.Imagine.Response:
 * - { status: 'success', imageUrl, altText, elapsed } on success
 * - { status: 'error', error, elapsed } with HTTP 500 on any failure/timeout
 */
export default async function handler(req: NextRequest) {
  // timeout, in seconds
  const timeout = 15;
  const tStart = Date.now();

  try {
    const { apiKey = '', prompt, prodiaModelId, negativePrompt, steps, cfgScale, seed } = (await req.json()) as Prodia.API.Imagine.RequestBody;

    // create the job, getting back a job ID; optional parameters are only sent when truthy
    const jobRequest: Prodia.Wire.Imagine.JobRequest = {
      model: prodiaModelId,
      prompt,
      ...(!!cfgScale && { cfg_scale: cfgScale }),
      ...(!!steps && { steps }),
      ...(!!negativePrompt && { negative_prompt: negativePrompt }),
      ...(!!seed && { seed }),
    };
    let job: Prodia.Wire.Imagine.JobResponse = await createGenerationJob(apiKey, jobRequest);

    // poll the job status until it's done: start at 2s between polls and halve
    // the delay down to a 250ms floor; stop on success, failure, or timeout
    let sleepDelay = 2000;
    while (job.status !== 'succeeded' && job.status !== 'failed' && (Date.now() - tStart) < (timeout * 1000)) {
      await new Promise(resolve => setTimeout(resolve, sleepDelay));
      job = await getJobStatus(apiKey, job.job);
      if (sleepDelay > 250)
        sleepDelay /= 2;
    }

    // check for success (elapsed is rounded to 0.1s)
    const elapsed = Math.round((Date.now() - tStart) / 100) / 10;
    if (job.status !== 'succeeded' || !job.imageUrl)
      throw new Error(`Prodia image generation failed within ${elapsed}s`);

    // respond with the image URL
    // NOTE(review): the alt text labels the whole job.params object as 'seed' — looks unintended; confirm
    const altText = `Prodia generated "${jobRequest.prompt}". Options: ${JSON.stringify({ seed: job.params })}.`;
    const response: Prodia.API.Imagine.Response = { status: 'success', imageUrl: job.imageUrl, altText, elapsed };
    return new NextResponse(JSON.stringify(response));

  } catch (error) {
    // any thrown error (bad upstream response, timeout, body parse failure) maps to a 500 payload
    console.error('Handler failed:', error);
    const elapsed = Math.round((Date.now() - tStart) / 100) / 10;
    const response: Prodia.API.Imagine.Response = { status: 'error', error: error?.toString() || 'Network issue', elapsed };
    return new NextResponse(JSON.stringify(response), { status: 500 });
  }
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
// Next.js route config: run this API handler on the Edge runtime.
export const config = {
  runtime: 'edge',
};
|
||||
@@ -1,48 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { Prodia } from '@/modules/prodia/prodia.types';
|
||||
|
||||
|
||||
// for lack of an API: a hand-maintained catalog of Prodia checkpoints.
// 'id' is the exact Prodia model identifier, 'label' is the UI display name,
// and 'priority' (optional) floats an entry towards the top of the list.
const HARDCODED_MODELS: Prodia.API.Models.Response = {
  models: [
    { id: 'sdv1_4.ckpt [7460a6fa]', label: 'Stable Diffusion 1.4', priority: 8 },
    { id: 'v1-5-pruned-emaonly.ckpt [81761151]', label: 'Stable Diffusion 1.5', priority: 9 },
    { id: 'anythingv3_0-pruned.ckpt [2700c435]', label: 'Anything V3.0' },
    { id: 'anything-v4.5-pruned.ckpt [65745d25]', label: 'Anything V4.5' },
    { id: 'analog-diffusion-1.0.ckpt [9ca13f02]', label: 'Analog Diffusion' },
    { id: 'theallys-mix-ii-churned.safetensors [5d9225a4]', label: `TheAlly's Mix II` },
    { id: 'elldreths-vivid-mix.safetensors [342d9d26]', label: `Elldreth's Vivid Mix` },
    { id: 'deliberate_v2.safetensors [10ec4b29]', label: 'Deliberate V2', priority: 5 },
    { id: 'openjourney_V4.ckpt [ca2f377f]', label: 'Openjourney v4' },
    { id: 'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]', label: 'Dreamlike Diffusion' },
    { id: 'dreamlike-diffusion-2.0.safetensors [fdcf65e7]', label: 'Dreamlike Diffusion 2' },
    { id: 'portrait+1.0.safetensors [1400e684]', label: 'Portrait' },
    { id: 'riffusion-model-v1.ckpt [3aafa6fe]', label: 'Riffusion' },
    { id: 'timeless-1.0.ckpt [7c4971d4]', label: 'Timeless' },
    { id: 'dreamshaper_5BakedVae.safetensors [a3fbf318]', label: 'Dreamshaper 5' },
    { id: 'revAnimated_v122.safetensors [3f4fefd9]', label: 'ReV Animated V1.2.2' },
    { id: 'meinamix_meinaV9.safetensors [2ec66ab0]', label: 'MeinaMix Meina V9' },
  ],
};

// sort by priority, descending (once, at module load); missing priorities count as 0
HARDCODED_MODELS.models.sort((a, b) => (b.priority ?? 0) - (a.priority ?? 0));
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest): Promise<NextResponse> {
|
||||
try {
|
||||
// this is ignored for now, as there's not an API - but still we want to be able to use it in the future
|
||||
// noinspection JSUnusedLocalSymbols
|
||||
const { apiKey = '' } = (await req.json()) as Prodia.API.Models.RequestBody;
|
||||
return new NextResponse(JSON.stringify(HARDCODED_MODELS));
|
||||
} catch (error: any) {
|
||||
console.error('Handler failed:', error);
|
||||
return new NextResponse(`[Issue] ${error}`, { status: 400 });
|
||||
}
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
// Next.js route config: run this API handler on the Edge runtime.
export const config = {
  runtime: 'edge',
};
|
||||
@@ -1,50 +0,0 @@
|
||||
// noinspection ExceptionCaughtLocallyJS
|
||||
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { PasteGG } from '@/modules/pastegg/pastegg.types';
|
||||
import { pasteGgPost } from '@/modules/pastegg/pastegg.server';
|
||||
|
||||
|
||||
/**
|
||||
* 'Proxy' that uploads a file to paste.gg.
|
||||
* Called by the UI to avoid CORS issues, as the browser cannot post directly to paste.gg.
|
||||
*/
|
||||
export default async function handler(req: NextRequest) {
|
||||
|
||||
try {
|
||||
|
||||
const { to, title, fileContent, fileName, origin }: PasteGG.API.Publish.RequestBody = await req.json();
|
||||
if (req.method !== 'POST' || to !== 'paste.gg' || !title || !fileContent || !fileName)
|
||||
throw new Error('Invalid options');
|
||||
|
||||
const paste = await pasteGgPost(title, fileName, fileContent, origin);
|
||||
console.log(`Posted to paste.gg`, paste);
|
||||
|
||||
if (paste?.status !== 'success')
|
||||
throw new Error(`${paste?.error || 'Unknown error'}: ${paste?.message || 'Paste.gg Error'}`);
|
||||
|
||||
return new NextResponse(JSON.stringify({
|
||||
type: 'success',
|
||||
url: `https://paste.gg/${paste.result.id}`,
|
||||
expires: paste.result.expires || 'never',
|
||||
deletionKey: paste.result.deletion_key || 'none',
|
||||
created: paste.result.created_at,
|
||||
} satisfies PasteGG.API.Publish.Response));
|
||||
|
||||
} catch (error) {
|
||||
|
||||
console.error('Error posting to paste.gg', error);
|
||||
return new NextResponse(JSON.stringify({
|
||||
type: 'error',
|
||||
error: error?.toString() || 'Network issue',
|
||||
} satisfies PasteGG.API.Publish.Response), { status: 500 });
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
// Next.js route config: run this API handler on the Edge runtime.
export const config = {
  runtime: 'edge',
};
|
||||
@@ -1,47 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server';
|
||||
|
||||
import { Search } from '@/modules/search/search.types';
|
||||
import { objectToQueryString } from '@/modules/search/search.client';
|
||||
|
||||
|
||||
export default async function handler(req: NextRequest): Promise<NextResponse> {
|
||||
const { searchParams } = new URL(req.url);
|
||||
|
||||
const customSearchParams: Search.Wire.RequestParams = {
|
||||
q: searchParams.get('query') || '',
|
||||
cx: searchParams.get('cx') || process.env.GOOGLE_CSE_ID,
|
||||
key: searchParams.get('key') || process.env.GOOGLE_CLOUD_API_KEY,
|
||||
num: 5,
|
||||
};
|
||||
|
||||
try {
|
||||
if (!customSearchParams.key || !customSearchParams.cx) {
|
||||
// noinspection ExceptionCaughtLocallyJS
|
||||
throw new Error('Missing API Key or Custom Search Engine ID');
|
||||
}
|
||||
|
||||
const wireResponse = await fetch(`https://www.googleapis.com/customsearch/v1?${objectToQueryString(customSearchParams)}`);
|
||||
const data: Search.Wire.SearchResponse & { error?: { message?: string } } = await wireResponse.json();
|
||||
|
||||
if (data.error) {
|
||||
// noinspection ExceptionCaughtLocallyJS
|
||||
throw new Error(`Google Custom Search API error: ${data.error?.message}`);
|
||||
}
|
||||
|
||||
const apiResponse: Search.API.Response = data.items?.map((result): Search.API.BriefResult => ({
|
||||
title: result.title,
|
||||
link: result.link,
|
||||
snippet: result.snippet,
|
||||
})) || [];
|
||||
return new NextResponse(JSON.stringify(apiResponse));
|
||||
|
||||
} catch (error: any) {
|
||||
console.error('Handler failed:', error);
|
||||
return new NextResponse(`A search error occurred: ${error}`, { status: 500 });
|
||||
}
|
||||
}
|
||||
|
||||
// noinspection JSUnusedGlobalSymbols
// Next.js route config: run this API handler on the Edge runtime.
export const config = {
  runtime: 'edge',
};
|
||||
@@ -0,0 +1,14 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { AppCall } from '../src/apps/call/AppCall';
|
||||
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
|
||||
|
||||
// Route entry for the voice-call experience: mounts AppCall inside the shared layout.
export default function CallPage() {
  return <AppLayout><AppCall /></AppLayout>;
}
|
||||
@@ -1,53 +1,18 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Container, useTheme } from '@mui/joy';
|
||||
import { AppChat } from '../src/apps/chat/AppChat';
|
||||
import { useShowNewsOnUpdate } from '../src/apps/news/news.hooks';
|
||||
|
||||
import { NoSSR } from '@/common/components/NoSSR';
|
||||
import { isValidOpenAIApiKey } from '@/modules/openai/openai.client';
|
||||
import { useSettingsStore } from '@/common/state/store-settings';
|
||||
|
||||
import { Chat } from '../src/apps/chat/Chat';
|
||||
import { SettingsModal } from '../src/apps/settings/SettingsModal';
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
|
||||
|
||||
export default function Home() {
|
||||
// state
|
||||
const [settingsShown, setSettingsShown] = React.useState(false);
|
||||
|
||||
// external state
|
||||
const theme = useTheme();
|
||||
const apiKey = useSettingsStore(state => state.apiKey);
|
||||
const centerMode = useSettingsStore(state => state.centerMode);
|
||||
|
||||
|
||||
// show the Settings Dialog at startup if the API key is required but not set
|
||||
React.useEffect(() => {
|
||||
if (!process.env.HAS_SERVER_KEY_OPENAI && !isValidOpenAIApiKey(apiKey))
|
||||
setSettingsShown(true);
|
||||
}, [apiKey]);
|
||||
|
||||
export default function ChatPage() {
|
||||
// show the News page on updates
|
||||
useShowNewsOnUpdate();
|
||||
|
||||
return (
|
||||
/**
|
||||
* Note the global NoSSR wrapper
|
||||
* - Even the overall container could have hydration issues when using localStorage and non-default maxWidth
|
||||
*/
|
||||
<NoSSR>
|
||||
|
||||
<Container maxWidth={centerMode === 'full' ? false : centerMode === 'narrow' ? 'md' : 'xl'} disableGutters sx={{
|
||||
boxShadow: {
|
||||
xs: 'none',
|
||||
md: centerMode === 'narrow' ? theme.vars.shadow.md : 'none',
|
||||
xl: centerMode !== 'full' ? theme.vars.shadow.lg : 'none',
|
||||
},
|
||||
}}>
|
||||
|
||||
<Chat onShowSettings={() => setSettingsShown(true)} />
|
||||
|
||||
<SettingsModal open={settingsShown} onClose={() => setSettingsShown(false)} />
|
||||
|
||||
</Container>
|
||||
|
||||
</NoSSR>
|
||||
<AppLayout>
|
||||
<AppChat />
|
||||
</AppLayout>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,98 @@
|
||||
import * as React from 'react';
|
||||
import { useRouter } from 'next/router';
|
||||
|
||||
import { Box, Typography } from '@mui/joy';
|
||||
|
||||
import { useModelsStore } from '~/modules/llms/store-llms';
|
||||
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
import { InlineError } from '~/common/components/InlineError';
|
||||
import { apiQuery } from '~/common/util/trpc.client';
|
||||
import { navigateToIndex } from '~/common/app.routes';
|
||||
import { openLayoutModelsSetup } from '~/common/layout/store-applayout';
|
||||
|
||||
|
||||
/**
 * Exchanges the OpenRouter OAuth 'code' for an API key via the backend,
 * stores the key client-side on success, then redirects to the chat app.
 * Renders loading / error / success feedback while the exchange runs.
 */
function CallbackOpenRouterPage(props: { openRouterCode: string | undefined }) {

  // external state
  // the query stays disabled until a code is present; staleTime Infinity +
  // no refetch on focus means the one-time code is only exchanged once
  const { data, isError, error, isLoading } = apiQuery.backend.exchangeOpenRouterKey.useQuery({ code: props.openRouterCode || '' }, {
    enabled: !!props.openRouterCode,
    refetchOnWindowFocus: false,
    staleTime: Infinity,
  });

  // derived state
  const isErrorInput = !props.openRouterCode;
  const openRouterKey = data?.key ?? undefined;
  const isSuccess = !!openRouterKey;

  // Success: save the key and redirect to the chat app
  React.useEffect(() => {
    if (!isSuccess)
      return;

    // 1. Save the key as the client key
    useModelsStore.getState().setOpenRoutersKey(openRouterKey);

    // 2. Navigate to the chat app
    navigateToIndex(true).then(() => openLayoutModelsSetup());

  }, [isSuccess, openRouterKey]);

  return (
    <Box sx={{
      flexGrow: 1,
      backgroundColor: 'background.level1',
      overflowY: 'auto',
      display: 'flex', justifyContent: 'center',
      p: { xs: 3, md: 6 },
    }}>

      <Box sx={{
        // my: 'auto',
        display: 'flex', flexDirection: 'column', alignItems: 'center',
        gap: 4,
      }}>

        <Typography level='title-lg'>
          Welcome Back
        </Typography>

        {isLoading && <Typography level='body-sm'>Loading...</Typography>}

        {isErrorInput && <InlineError error='There was an issue retrieving the code from OpenRouter.' />}

        {isError && <InlineError error={error} />}

        {data && (
          <Typography level='body-md'>
            Success! You can now close this window.
          </Typography>
        )}

      </Box>

    </Box>
  );
}
|
||||
|
||||
|
||||
/**
 * This page will be invoked by OpenRouter as a Callback
 *
 * Docs: https://openrouter.ai/docs#oauth
 * Example URL: https://localhost:3000/link/callback_openrouter?code=SomeCode
 */
export default function Page() {
  // read the one-time 'code' query parameter handed back by OpenRouter
  const { query } = useRouter();
  const openRouterCode = query.code as (string | undefined);

  return (
    <AppLayout suspendAutoModelsSetup>
      <CallbackOpenRouterPage openRouterCode={openRouterCode} />
    </AppLayout>
  );
}
|
||||
@@ -0,0 +1,18 @@
|
||||
import * as React from 'react';
|
||||
import { useRouter } from 'next/router';
|
||||
|
||||
import { AppChatLink } from '../../../src/apps/link/AppChatLink';
|
||||
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
|
||||
|
||||
// Route entry for shared chat links: pulls the link id from the dynamic route
// segment and renders the shared-chat viewer inside the shared layout.
export default function ChatLinkPage() {
  const { query } = useRouter();
  const chatLinkId = (query?.chatLinkId as string) ?? '';

  return (
    <AppLayout suspendAutoModelsSetup>
      <AppChatLink linkId={chatLinkId} />
    </AppLayout>
  );
}
|
||||
@@ -0,0 +1,141 @@
|
||||
import * as React from 'react';
|
||||
import { useRouter } from 'next/router';
|
||||
|
||||
import { Alert, Box, Button, Typography } from '@mui/joy';
|
||||
import ArrowBackIcon from '@mui/icons-material/ArrowBack';
|
||||
|
||||
import { setComposerStartupText } from '../../src/apps/chat/components/composer/store-composer';
|
||||
|
||||
import { callBrowseFetchPage } from '~/modules/browse/browse.client';
|
||||
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
import { LogoProgress } from '~/common/components/LogoProgress';
|
||||
import { asValidURL } from '~/common/util/urlUtils';
|
||||
import { navigateToIndex } from '~/common/app.routes';
|
||||
|
||||
|
||||
/**
 * This page will be invoked on mobile when sharing Text/URLs/Files from other APPs
 * See the /public/manifest.json for how this is configured. Parameters:
 * - text: the text to share
 * - url: the URL to share
 *   - if the URL is a valid URL, it will be downloaded and the content will be shared
 *   - if the URL is not a valid URL, it will be shared as text
 * - title: the title of the shared content
 */
function AppShareTarget() {
  // state
  const [errorMessage, setErrorMessage] = React.useState<string | null>(null);
  const [intentText, setIntentText] = React.useState<string | null>(null);
  const [intentURL, setIntentURL] = React.useState<string | null>(null);
  const [isDownloading, setIsDownloading] = React.useState(false);

  // external state
  const { query } = useRouter();

  // stash the text in the composer store and jump back to the main chat app
  const queueComposerTextAndLaunchApp = React.useCallback((text: string) => {
    setComposerStartupText(text);
    void navigateToIndex(true);
  }, []);

  // Detect the share Intent from the query
  React.useEffect(() => {
    // skip when query is not parsed yet
    if (!Object.keys(query).length)
      return;

    // single item from the query (a shared 'url' wins over shared 'text')
    let queryTextItem: string[] | string | null = query.url || query.text || null;
    if (Array.isArray(queryTextItem))
      queryTextItem = queryTextItem[0];

    // check if the item is a URL
    const url = asValidURL(queryTextItem);
    if (url)
      setIntentURL(url);
    else if (queryTextItem)
      setIntentText(queryTextItem);
    else
      setErrorMessage('No text or url. Received: ' + JSON.stringify(query));

  }, [query.url, query.text, query]);

  // Text -> Composer
  React.useEffect(() => {
    if (intentText)
      queueComposerTextAndLaunchApp(intentText);
  }, [intentText, queueComposerTextAndLaunchApp]);

  // URL -> download -> Composer
  React.useEffect(() => {
    if (intentURL) {
      setIsDownloading(true);
      callBrowseFetchPage(intentURL)
        .then(page => {
          if (page.stopReason !== 'error')
            queueComposerTextAndLaunchApp('\n\n```' + intentURL + '\n' + page.content + '\n```\n');
          else
            // FIX: parenthesize the ternary — '+' binds tighter than '?:', so the
            // previous expression used ('Could not read any data' + page.error) as
            // the condition (always truthy) and dropped the message prefix
            setErrorMessage('Could not read any data' + (page.error ? ': ' + page.error : ''));
        })
        .catch(error => setErrorMessage(error?.message || error || 'Unknown error'))
        .finally(() => setIsDownloading(false));
    }
  }, [intentURL, queueComposerTextAndLaunchApp]);

  return (

    <Box sx={{
      backgroundColor: 'background.level2',
      display: 'flex', flexDirection: 'column', alignItems: 'center', justifyContent: 'center',
      flexGrow: 1,
    }}>

      {/* Logo with Circular Progress */}
      <LogoProgress showProgress={isDownloading} />

      {/* Title */}
      <Typography level='title-lg' sx={{ mt: 2, mb: 1 }}>
        {isDownloading ? 'Loading...' : errorMessage ? '' : intentURL ? 'Done' : 'Receiving...'}
      </Typography>

      {/* Possible Error */}
      {errorMessage && <>
        <Alert variant='soft' color='danger' sx={{ my: 1 }}>
          <Typography>{errorMessage}</Typography>
        </Alert>
        <Button
          variant='solid' color='danger'
          onClick={() => navigateToIndex()}
          endDecorator={<ArrowBackIcon />}
          sx={{ mt: 2 }}
        >
          Cancel
        </Button>
      </>}

      {/* URL under analysis */}
      <Typography level='body-xs'>
        {intentURL}
      </Typography>
    </Box>

  );

}
|
||||
|
||||
/**
 * This page will be invoked on mobile when sharing Text/URLs/Files from other APPs
 * Example URL: https://localhost:3000/link/share_target?title=This+Title&text=https%3A%2F%2Fexample.com%2Fapp%2Fpath
 */
export default function LaunchPage() {
  return <AppLayout><AppShareTarget /></AppLayout>;
}
|
||||
@@ -0,0 +1,18 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { AppNews } from '../src/apps/news/AppNews';
|
||||
import { useMarkNewsAsSeen } from '../src/apps/news/news.hooks';
|
||||
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
|
||||
|
||||
// Route entry for the News screen; records the news as seen on mount.
export default function NewsPage() {
  // update the last-seen news version so the update badge clears
  useMarkNewsAsSeen();

  return <AppLayout suspendAutoModelsSetup><AppNews /></AppLayout>;
}
|
||||
@@ -0,0 +1,14 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { AppPersonas } from '../src/apps/personas/AppPersonas';
|
||||
|
||||
import { AppLayout } from '~/common/layout/AppLayout';
|
||||
|
||||
|
||||
// Route entry for the Personas screen: mounts AppPersonas inside the shared layout.
export default function PersonasPage() {
  return <AppLayout><AppPersonas /></AppLayout>;
}
|
||||
@@ -0,0 +1,63 @@
|
||||
// Prisma is the ORM for server-side (API) access to the database
|
||||
//
|
||||
// This file defines the schema for the database.
|
||||
// - make sure to run 'prisma generate' after making changes to this file
|
||||
// - make sure to run 'prisma db push' to sync the remote database with the schema
|
||||
//
|
||||
// Database is optional: when the environment variables are not set, the database is not used at all,
|
||||
// and the storage of data in Big-AGI is limited to client-side (browser) storage.
|
||||
//
|
||||
// The database is used for:
|
||||
// - the 'sharing' function, to let users share the chats with each other
|
||||
|
||||
// Client generator: emits the Prisma JS/TS client (run 'prisma generate' after edits).
generator client {
  provider = "prisma-client-js"
}

// Postgres datasource; the two URLs allow pooled queries with a direct
// connection reserved for migrations/schema pushes.
datasource db {
  provider = "postgresql"
  url = env("POSTGRES_PRISMA_URL") // uses connection pooling
  directUrl = env("POSTGRES_URL_NON_POOLING") // uses a direct connection
}

//
// Storage of Linked Data
//
// One row per shared artifact (currently only chats — see LinkStorageDataType).
model LinkStorage {
  id String @id @default(uuid())

  // ownership and audience
  ownerId String
  visibility LinkStorageVisibility

  // the stored payload and its metadata
  dataType LinkStorageDataType
  dataTitle String?
  dataSize Int
  data Json

  // community / usage counters
  upVotes Int @default(0)
  downVotes Int @default(0)
  flagsCount Int @default(0)
  readCount Int @default(0)
  writeCount Int @default(1)

  // time-based expiration
  expiresAt DateTime?

  // manual deletion: soft-delete, authorized by a per-row deletion key
  deletionKey String
  isDeleted Boolean @default(false)
  deletedAt DateTime?

  createdAt DateTime @default(now())
  updatedAt DateTime @updatedAt
}

// who can see a stored link
enum LinkStorageVisibility {
  PUBLIC
  UNLISTED
  PRIVATE
}

// versioned payload formats stored in LinkStorage.data
enum LinkStorageDataType {
  CHAT_V1
}
|
||||
@@ -1,8 +1,8 @@
|
||||
{
|
||||
"name": "big-AGI",
|
||||
"short_name": "AGI",
|
||||
"theme_color": "#434356",
|
||||
"background_color": "#B9B9C6",
|
||||
"short_name": "big-AGI",
|
||||
"theme_color": "#32383E",
|
||||
"background_color": "#9FA6AD",
|
||||
"description": "Personal AGI App",
|
||||
"display": "standalone",
|
||||
"start_url": "/",
|
||||
@@ -23,5 +23,15 @@
|
||||
"sizes": "1024x1024",
|
||||
"type": "image/png"
|
||||
}
|
||||
]
|
||||
],
|
||||
"share_target": {
|
||||
"action": "/link/share_target",
|
||||
"method": "GET",
|
||||
"enctype": "application/x-www-form-urlencoded",
|
||||
"params": {
|
||||
"title": "title",
|
||||
"text": "text",
|
||||
"url": "url"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,48 @@
|
||||
import * as React from 'react';
|
||||
import { useRouter } from 'next/router';
|
||||
|
||||
import { Container, Sheet } from '@mui/joy';
|
||||
|
||||
import { AppCallQueryParams } from '~/common/app.routes';
|
||||
import { InlineError } from '~/common/components/InlineError';
|
||||
|
||||
import { CallUI } from './CallUI';
|
||||
import { CallWizard } from './CallWizard';
|
||||
|
||||
|
||||
/**
 * Voice-call screen container. Validates the conversationId/personaId route
 * query params, then mounts the call wizard + UI inside an inverted Sheet.
 */
export function AppCall() {
  // external state
  const { query } = useRouter();

  // derived state
  const { conversationId, personaId } = query as any as AppCallQueryParams;
  const validInput = !!conversationId && !!personaId;

  return (
    <Sheet variant='solid' color='neutral' invertedColors sx={{
      display: 'flex', flexDirection: 'column', justifyContent: 'center',
      flexGrow: 1,
      overflowY: 'auto',
      minHeight: 96,
    }}>

      <Container maxWidth='sm' sx={{
        display: 'flex', flexDirection: 'column',
        alignItems: 'center',
        minHeight: '80dvh', justifyContent: 'space-evenly',
        gap: { xs: 2, md: 4 },
      }}>

        {/* both params are required; the raw query is shown to aid debugging */}
        {!validInput && <InlineError error={`Something went wrong. ${JSON.stringify(query)}`} />}

        {validInput && (
          <CallWizard conversationId={conversationId}>
            <CallUI conversationId={conversationId} personaId={personaId} />
          </CallWizard>
        )}

      </Container>

    </Sheet>
  );
}
|
||||
@@ -0,0 +1,392 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
import { useRouter } from 'next/router';
|
||||
|
||||
import { Box, Card, ListItemDecorator, MenuItem, Switch, Typography } from '@mui/joy';
|
||||
import ArrowBackIcon from '@mui/icons-material/ArrowBack';
|
||||
import CallEndIcon from '@mui/icons-material/CallEnd';
|
||||
import CallIcon from '@mui/icons-material/Call';
|
||||
import ChatOutlinedIcon from '@mui/icons-material/ChatOutlined';
|
||||
import MicIcon from '@mui/icons-material/Mic';
|
||||
import MicNoneIcon from '@mui/icons-material/MicNone';
|
||||
import RecordVoiceOverIcon from '@mui/icons-material/RecordVoiceOver';
|
||||
|
||||
import { useChatLLMDropdown } from '../chat/components/applayout/useLLMDropdown';
|
||||
|
||||
import { EXPERIMENTAL_speakTextStream } from '~/modules/elevenlabs/elevenlabs.client';
|
||||
import { SystemPurposeId, SystemPurposes } from '../../data';
|
||||
import { VChatMessageIn } from '~/modules/llms/transports/chatGenerate';
|
||||
import { streamChat } from '~/modules/llms/transports/streamChat';
|
||||
import { useElevenLabsVoiceDropdown } from '~/modules/elevenlabs/useElevenLabsVoiceDropdown';
|
||||
|
||||
import { Link } from '~/common/components/Link';
|
||||
import { SpeechResult, useSpeechRecognition } from '~/common/components/useSpeechRecognition';
|
||||
import { conversationTitle, createDMessage, DMessage, useChatStore } from '~/common/state/store-chats';
|
||||
import { playSoundUrl, usePlaySoundUrl } from '~/common/util/audioUtils';
|
||||
import { useLayoutPluggable } from '~/common/layout/store-applayout';
|
||||
|
||||
import { CallAvatar } from './components/CallAvatar';
|
||||
import { CallButton } from './components/CallButton';
|
||||
import { CallMessage } from './components/CallMessage';
|
||||
import { CallStatus } from './components/CallStatus';
|
||||
|
||||
|
||||
// Options menu for an active call: push-to-talk toggle, voice-override
// toggle with its voice picker, and a feedback link.
function CallMenuItems(props: {
  pushToTalk: boolean,
  setPushToTalk: (pushToTalk: boolean) => void,
  override: boolean,
  setOverride: (overridePersonaVoice: boolean) => void,
}) {

  // external state
  // NOTE(review): the second argument appears to disable the dropdown while the
  // persona voice is not overridden — confirm against useElevenLabsVoiceDropdown
  const { voicesDropdown } = useElevenLabsVoiceDropdown(false, !props.override);

  const handlePushToTalkToggle = () => props.setPushToTalk(!props.pushToTalk);

  const handleChangeVoiceToggle = () => props.setOverride(!props.override);

  return <>

    <MenuItem onClick={handlePushToTalkToggle}>
      <ListItemDecorator>{props.pushToTalk ? <MicNoneIcon /> : <MicIcon />}</ListItemDecorator>
      Push to talk
      <Switch checked={props.pushToTalk} onChange={handlePushToTalkToggle} sx={{ ml: 'auto' }} />
    </MenuItem>

    <MenuItem onClick={handleChangeVoiceToggle}>
      <ListItemDecorator><RecordVoiceOverIcon /></ListItemDecorator>
      Change Voice
      <Switch checked={props.override} onChange={handleChangeVoiceToggle} sx={{ ml: 'auto' }} />
    </MenuItem>

    <MenuItem>
      <ListItemDecorator>{' '}</ListItemDecorator>
      {voicesDropdown}
    </MenuItem>

    <MenuItem component={Link} href='https://github.com/enricoros/big-agi/issues/175' target='_blank'>
      <ListItemDecorator><ChatOutlinedIcon /></ListItemDecorator>
      Voice Calls Feedback
    </MenuItem>

  </>;
}
|
||||
|
||||
|
||||
/**
 * Full-screen voice-call UI for a conversation with a persona.
 *
 * Call lifecycle is a simple state machine held in `stage`:
 *   'ring' -> 'connected' (Accept) | 'declined' (Decline) -> 'ended' (Hang up / "Goodbye.")
 *
 * While connected:
 *  - user speech is transcribed (useSpeechRecognition) into `callMessages`,
 *  - each new user message triggers a streaming LLM completion, spoken back
 *    via ElevenLabs (EXPERIMENTAL_speakTextStream),
 *  - a few literal transcripts act as voice commands (Stop/Goodbye/Retry/Restart).
 */
export function CallUI(props: {
  conversationId: string,
  personaId: string,
}) {

  // state
  const [avatarClickCount, setAvatarClickCount] = React.useState<number>(0);
  // const [micMuted, setMicMuted] = React.useState(false);
  const [callElapsedTime, setCallElapsedTime] = React.useState<string>('00:00');
  const [callMessages, setCallMessages] = React.useState<DMessage[]>([]);
  const [overridePersonaVoice, setOverridePersonaVoice] = React.useState<boolean>(false);
  const [personaTextInterim, setPersonaTextInterim] = React.useState<string | null>(null);
  const [pushToTalk, setPushToTalk] = React.useState(true);
  const [stage, setStage] = React.useState<'ring' | 'declined' | 'connected' | 'ended'>('ring');
  // abort handle for the in-flight streaming completion (null when idle)
  const responseAbortController = React.useRef<AbortController | null>(null);

  // external state
  const { push: routerPush } = useRouter();
  const { chatLLMId, chatLLMDropdown } = useChatLLMDropdown();
  const { chatTitle, messages } = useChatStore(state => {
    const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
    return {
      chatTitle: conversation ? conversationTitle(conversation) : 'no conversation',
      messages: conversation ? conversation.messages : [],
    };
  }, shallow);
  const persona = SystemPurposes[props.personaId as SystemPurposeId] ?? undefined;
  const personaCallStarters = persona?.call?.starters ?? undefined;
  // undefined voiceId means "use the default/selected voice" instead of the persona's
  const personaVoiceId = overridePersonaVoice ? undefined : (persona?.voices?.elevenLabs?.voiceId ?? undefined);
  const personaSystemMessage = persona?.systemMessage ?? undefined;

  // hooks and speech
  const [speechInterim, setSpeechInterim] = React.useState<SpeechResult | null>(null);
  const onSpeechResultCallback = React.useCallback((result: SpeechResult) => {
    // interim results are shown live; a final (done) result becomes a user message
    setSpeechInterim(result.done ? null : { ...result });
    if (result.done) {
      const transcribed = result.transcript.trim();
      if (transcribed.length >= 1)
        setCallMessages(messages => [...messages, createDMessage('user', transcribed)]);
    }
  }, []);
  const { isSpeechEnabled, isRecording, isRecordingAudio, isRecordingSpeech, startRecording, stopRecording, toggleRecording } = useSpeechRecognition(onSpeechResultCallback, 1000);

  // derived state
  const isRinging = stage === 'ring';
  const isConnected = stage === 'connected';
  const isDeclined = stage === 'declined';
  const isEnded = stage === 'ended';


  /// Sounds

  // pickup / hangup: any transition away from 'ring' plays a begin/end chime
  React.useEffect(() => {
    !isRinging && playSoundUrl(isConnected ? '/sounds/chat-begin.mp3' : '/sounds/chat-end.mp3');
  }, [isRinging, isConnected]);

  // ringtone (loops while ringing)
  usePlaySoundUrl(isRinging ? '/sounds/chat-ringtone.mp3' : null, 300, 2800 * 2);


  /// CONNECTED

  const handleCallStop = () => {
    stopRecording();
    setStage('ended');
  };

  // [E] pickup -> seed message and call timer
  // FIXME: Overriding the voice will reset the call - not a desired behavior
  // (personaVoiceId is a dependency, so toggling the override re-runs this effect)
  React.useEffect(() => {
    if (!isConnected) return;

    // show the call timer (mm:ss, updated every second)
    setCallElapsedTime('00:00');
    const start = Date.now();
    const interval = setInterval(() => {
      const elapsedSeconds = Math.floor((Date.now() - start) / 1000);
      const minutes = Math.floor(elapsedSeconds / 60);
      const seconds = elapsedSeconds % 60;
      setCallElapsedTime(`${minutes < 10 ? '0' : ''}${minutes}:${seconds < 10 ? '0' : ''}${seconds}`);
    }, 1000);

    // seed the first message: a random persona-specific opener, or a generic one
    const phoneMessages = personaCallStarters || ['Hello?', 'Hey!'];
    const firstMessage = phoneMessages[Math.floor(Math.random() * phoneMessages.length)];

    setCallMessages([createDMessage('assistant', firstMessage)]);
    // fire/forget
    void EXPERIMENTAL_speakTextStream(firstMessage, personaVoiceId);

    return () => clearInterval(interval);
  }, [isConnected, personaCallStarters, personaVoiceId]);

  // [E] persona streaming response - upon new user message
  React.useEffect(() => {
    // only act when we have a new user message
    if (!isConnected || callMessages.length < 1 || callMessages[callMessages.length - 1].role !== 'user')
      return;
    // voice commands: exact transcripts are intercepted before calling the LLM
    switch (callMessages[callMessages.length - 1].text) {
      // do not respond
      case 'Stop.':
        return;
      // command: close the call
      case 'Goodbye.':
        setStage('ended');
        setTimeout(() => {
          void routerPush('/');
        }, 2000);
        return;
      // command: regenerate answer (drops the command and the previous assistant answer)
      case 'Retry.':
      case 'Try again.':
        setCallMessages(messages => messages.slice(0, messages.length - 2));
        return;
      // command: restart chat
      case 'Restart.':
        setCallMessages([]);
        return;
    }

    // bail if no llm selected
    if (!chatLLMId) return;

    // temp fix: when the chat has no messages, only assume a single system message
    const chatMessages: { role: VChatMessageIn['role'], text: string }[] = messages.length > 0
      ? messages
      : personaSystemMessage
        ? [{ role: 'system', text: personaSystemMessage }]
        : [];

    // 'prompt' for a "telephone call": chat history sandwiched between two system framings
    // FIXME: can easily run ouf of tokens - if this gets traction, we'll fix it
    const callPrompt: VChatMessageIn[] = [
      { role: 'system', content: 'You are having a phone call. Your response style is brief and to the point, and according to your personality, defined below.' },
      ...chatMessages.map(message => ({ role: message.role, content: message.text })),
      { role: 'system', content: 'You are now on the phone call related to the chat above. Respect your personality and answer with short, friendly and accurate thoughtful lines.' },
      ...callMessages.map(message => ({ role: message.role, content: message.text })),
    ];

    // perform completion: stream partials into personaTextInterim, then commit one assistant message
    responseAbortController.current = new AbortController();
    let finalText = '';
    let error: any | null = null;
    streamChat(chatLLMId, callPrompt, responseAbortController.current.signal, (updatedMessage: Partial<DMessage>) => {
      const text = updatedMessage.text?.trim();
      if (text) {
        finalText = text;
        setPersonaTextInterim(text);
      }
    }).catch((err: DOMException) => {
      // an abort is expected (user started speaking); anything else is surfaced in the message
      if (err?.name !== 'AbortError')
        error = err;
    }).finally(() => {
      setPersonaTextInterim(null);
      setCallMessages(messages => [...messages, createDMessage('assistant', finalText + (error ? ` (ERROR: ${error.message || error.toString()})` : ''))]);
      // fire/forget
      void EXPERIMENTAL_speakTextStream(finalText, personaVoiceId);
    });

    // cleanup aborts the in-flight stream if deps change or the component unmounts
    return () => {
      responseAbortController.current?.abort();
      responseAbortController.current = null;
    };
  }, [isConnected, callMessages, chatLLMId, messages, personaVoiceId, personaSystemMessage, routerPush]);

  // [E] Message interrupter: user speech barge-in aborts the streaming answer
  const abortTrigger = isConnected && isRecordingSpeech;
  React.useEffect(() => {
    if (abortTrigger && responseAbortController.current) {
      responseAbortController.current.abort();
      responseAbortController.current = null;
    }
    // TODO.. abort current speech
  }, [abortTrigger]);


  // [E] continuous speech recognition (reload): when not in push-to-talk mode,
  // re-arm the microphone whenever it goes idle
  const shouldStartRecording = isConnected && !pushToTalk && speechInterim === null && !isRecordingAudio;
  React.useEffect(() => {
    if (shouldStartRecording)
      startRecording();
  }, [shouldStartRecording, startRecording]);


  // more derived state
  const personaName = persona?.title ?? 'Unknown';
  const isMicEnabled = isSpeechEnabled;
  const isTTSEnabled = true;
  const isEnabled = isMicEnabled && isTTSEnabled;


  // pluggable UI: inject the call menu into the app layout

  const menuItems = React.useMemo(() =>
      <CallMenuItems
        pushToTalk={pushToTalk} setPushToTalk={setPushToTalk}
        override={overridePersonaVoice} setOverride={setOverridePersonaVoice} />
    , [overridePersonaVoice, pushToTalk],
  );

  useLayoutPluggable(chatLLMDropdown, null, menuItems);


  return <>

    <Typography
      level='h1'
      sx={{
        fontSize: { xs: '2.5rem', md: '3rem' },
        textAlign: 'center',
        mx: 2,
      }}
    >
      {isConnected ? personaName : 'Hello'}
    </Typography>

    {/* clicking the avatar repeatedly unlocks the debug card below */}
    <CallAvatar
      symbol={persona?.symbol || '?'}
      imageUrl={persona?.imageUri}
      isRinging={isRinging}
      onClick={() => setAvatarClickCount(avatarClickCount + 1)}
    />

    <CallStatus
      callerName={isConnected ? undefined : personaName}
      statusText={isRinging ? 'is calling you' : isDeclined ? 'call declined' : isEnded ? 'call ended' : callElapsedTime}
      regardingText={chatTitle}
      micError={!isMicEnabled} speakError={!isTTSEnabled}
    />

    {/* Live Transcript, w/ streaming messages, audio indication, etc. */}
    {(isConnected || isEnded) && (
      <Card variant='soft' sx={{
        flexGrow: 1,
        minHeight: '15dvh', maxHeight: '24dvh',
        overflow: 'auto',
        width: '100%',
        borderRadius: 'lg',
        flexDirection: 'column-reverse',
      }}>

        {/* Messages in reverse order, for auto-scroll from the bottom */}
        <Box sx={{ display: 'flex', flexDirection: 'column-reverse', gap: 1 }}>

          {/* Listening... (live user speech: final part + italic interim part) */}
          {isRecording && (
            <CallMessage
              text={<>{speechInterim?.transcript ? speechInterim.transcript + ' ' : ''}<i>{speechInterim?.interimTranscript}</i></>}
              variant={isRecordingSpeech ? 'solid' : 'outlined'}
              role='user'
            />
          )}

          {/* Persona streaming text... */}
          {!!personaTextInterim && (
            <CallMessage
              text={personaTextInterim}
              variant='solid' color='neutral'
              role='assistant'
            />
          )}

          {/* Messages (last 6 messages, in reverse order) */}
          {callMessages.slice(-6).reverse().map((message) =>
            <CallMessage
              key={message.id}
              text={message.text}
              variant={message.role === 'assistant' ? 'solid' : 'soft'} color='neutral'
              role={message.role} />,
          )}
        </Box>
      </Card>
    )}

    {/* Call Buttons */}
    <Box sx={{ width: '100%', display: 'flex', justifyContent: 'space-evenly' }}>

      {/* [ringing] Decline / Accept */}
      {isRinging && <CallButton Icon={CallEndIcon} text='Decline' color='danger' onClick={() => setStage('declined')} />}
      {isRinging && isEnabled && <CallButton Icon={CallIcon} text='Accept' color='success' variant='soft' onClick={() => setStage('connected')} />}

      {/* [Calling] Hang / PTT (mute not enabled yet) */}
      {isConnected && <CallButton Icon={CallEndIcon} text='Hang up' color='danger' onClick={handleCallStop} />}
      {isConnected && (pushToTalk
          ? <CallButton Icon={MicIcon} onClick={toggleRecording}
                        text={isRecordingSpeech ? 'Listening...' : isRecording ? 'Listening' : 'Push To Talk'}
                        variant={isRecordingSpeech ? 'solid' : isRecording ? 'soft' : 'outlined'} />
          : null
        // <CallButton disabled={true} Icon={MicOffIcon} onClick={() => setMicMuted(muted => !muted)}
        //   text={micMuted ? 'Muted' : 'Mute'}
        //   color={micMuted ? 'warning' : undefined} variant={micMuted ? 'solid' : 'outlined'} />
      )}

      {/* [ended] Back / Call Again */}
      {(isEnded || isDeclined) && <Link noLinkStyle href='/'><CallButton Icon={ArrowBackIcon} text='Back' variant='soft' /></Link>}
      {(isEnded || isDeclined) && <CallButton Icon={CallIcon} text='Call Again' color='success' variant='soft' onClick={() => setStage('connected')} />}

    </Box>

    {/* DEBUG state (unlocked by >10 avatar clicks, toggled on even counts) */}
    {avatarClickCount > 10 && (avatarClickCount % 2 === 0) && (
      <Card variant='outlined' sx={{ maxHeight: '25dvh', overflow: 'auto', whiteSpace: 'pre', py: 0, width: '100%' }}>
        Special commands: Stop, Retry, Try Again, Restart, Goodbye.
        {JSON.stringify({ isSpeechEnabled, isRecordingAudio, speechInterim }, null, 2)}
      </Card>
    )}

    {/*{isEnded && <Card variant='solid' size='lg' color='primary'>*/}
    {/*  <CardContent>*/}
    {/*    <Typography>*/}
    {/*      Please rate the call quality, 1 to 5 - Just a Joke*/}
    {/*    </Typography>*/}
    {/*  </CardContent>*/}
    {/*</Card>}*/}

  </>;
}
|
||||
@@ -0,0 +1,211 @@
|
||||
import * as React from 'react';
|
||||
import { keyframes } from '@emotion/react';
|
||||
|
||||
import { Box, Button, Card, CardContent, IconButton, ListItemDecorator, Typography } from '@mui/joy';
|
||||
import ArrowForwardIcon from '@mui/icons-material/ArrowForward';
|
||||
import ChatIcon from '@mui/icons-material/Chat';
|
||||
import CheckIcon from '@mui/icons-material/Check';
|
||||
import CloseIcon from '@mui/icons-material/Close';
|
||||
import MicIcon from '@mui/icons-material/Mic';
|
||||
import RecordVoiceOverIcon from '@mui/icons-material/RecordVoiceOver';
|
||||
import WarningIcon from '@mui/icons-material/Warning';
|
||||
|
||||
import { navigateBack } from '~/common/app.routes';
|
||||
import { openLayoutPreferences } from '~/common/layout/store-applayout';
|
||||
import { useCapabilityBrowserSpeechRecognition, useCapabilityElevenLabs } from '~/common/components/useCapabilities';
|
||||
import { useChatStore } from '~/common/state/store-chats';
|
||||
import { useUICounter } from '~/common/state/store-ui';
|
||||
|
||||
|
||||
// Slow cycle of the background through muted rainbow hues; used below to
// animate the highlight behind the 'your first call' text.
const cssRainbowBackgroundKeyframes = keyframes`
  100%, 0% {
    background-color: rgb(128, 0, 0);
  }
  8% {
    background-color: rgb(102, 51, 0);
  }
  16% {
    background-color: rgb(64, 64, 0);
  }
  25% {
    background-color: rgb(38, 76, 0);
  }
  33% {
    background-color: rgb(0, 89, 0);
  }
  41% {
    background-color: rgb(0, 76, 41);
  }
  50% {
    background-color: rgb(0, 64, 64);
  }
  58% {
    background-color: rgb(0, 51, 102);
  }
  66% {
    background-color: rgb(0, 0, 128);
  }
  75% {
    background-color: rgb(63, 0, 128);
  }
  83% {
    background-color: rgb(76, 0, 76);
  }
  91% {
    background-color: rgb(102, 0, 51);
  }`;
|
||||
|
||||
function StatusCard(props: { icon: React.JSX.Element, hasIssue: boolean, text: string, button?: React.JSX.Element }) {
|
||||
return (
|
||||
<Card sx={{ width: '100%' }}>
|
||||
<CardContent sx={{ flexDirection: 'row' }}>
|
||||
<ListItemDecorator>
|
||||
{props.icon}
|
||||
</ListItemDecorator>
|
||||
<Typography level='title-md' color={props.hasIssue ? 'warning' : undefined} sx={{ flexGrow: 1 }}>
|
||||
{props.text}
|
||||
{props.button}
|
||||
</Typography>
|
||||
<ListItemDecorator>
|
||||
{props.hasIssue ? <WarningIcon color='warning' /> : <CheckIcon color='success' />}
|
||||
</ListItemDecorator>
|
||||
</CardContent>
|
||||
</Card>
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
/**
 * Pre-call checklist gate. Renders `props.children` (the actual call UI) only
 * once the essentials work; otherwise shows a first-run wizard with three
 * StatusCards: chat-not-empty, browser speech recognition, and ElevenLabs
 * voice synthesis. The first two can be overridden by the user.
 *
 * NOTE(review): the `strict` prop is declared but never read in this body —
 * confirm whether it is still needed.
 */
export function CallWizard(props: { strict?: boolean, conversationId: string, children: React.ReactNode }) {
  // state: user overrides for the two non-fatal checks
  const [chatEmptyOverride, setChatEmptyOverride] = React.useState(false);
  const [recognitionOverride, setRecognitionOverride] = React.useState(false);

  // external state
  const recognition = useCapabilityBrowserSpeechRecognition();
  const synthesis = useCapabilityElevenLabs();
  const chatIsEmpty = useChatStore(state => {
    const conversation = state.conversations.find(conversation => conversation.id === props.conversationId);
    return !(conversation?.messages?.length);
  });
  // novel: true until the wizard has been completed once (touch() marks it done)
  const { novel, touch } = useUICounter('call-wizard');

  // derived state
  const overriddenEmptyChat = chatEmptyOverride || !chatIsEmpty;
  const overriddenRecognition = recognitionOverride || recognition.mayWork;
  const allGood = overriddenEmptyChat && overriddenRecognition && synthesis.mayWork;
  // fatalGood: only speech in/out are hard requirements; an empty chat is not
  const fatalGood = overriddenRecognition && synthesis.mayWork;

  // skip the wizard for returning users whose hard requirements are met
  // (all hooks above have already run, so this early return is hook-safe)
  if (!novel && fatalGood)
    return props.children;

  const handleOverrideChatEmpty = () => setChatEmptyOverride(true);

  const handleOverrideRecognition = () => setRecognitionOverride(true);

  const handleConfigureElevenLabs = () => {
    // jumps to the preferences tab with the ElevenLabs settings
    openLayoutPreferences(3);
  };

  const handleFinishButton = () => {
    // with unresolved issues the only action is to leave; otherwise mark done
    if (!allGood)
      return navigateBack();
    touch();
  };


  return <>

    <Box sx={{ flexGrow: 0.5 }} />

    <Typography level='title-lg' sx={{ fontSize: '3rem', fontWeight: 200, lineHeight: '1.5em', textAlign: 'center' }}>
      Welcome to<br />
      <Typography
        component='span'
        sx={{
          backgroundColor: 'primary.solidActiveBg', mx: -0.5, px: 0.5,
          animation: `${cssRainbowBackgroundKeyframes} 15s linear infinite`,
        }}>
        your first call
      </Typography>
    </Typography>

    <Box sx={{ flexGrow: 0.5 }} />

    <Typography level='body-lg'>
      {/*Before you receive your first call, */}
      Let's get you all set up.
    </Typography>

    {/* Chat Empty status */}
    <StatusCard
      icon={<ChatIcon />}
      hasIssue={!overriddenEmptyChat}
      text={overriddenEmptyChat ? 'Great! Your chat has messages.' : 'The chat is empty. Calls are effective when the caller has context.'}
      button={overriddenEmptyChat ? undefined : (
        <Button variant='outlined' onClick={handleOverrideChatEmpty} sx={{ mx: 1 }}>
          Ignore
        </Button>
      )}
    />

    {/* Add the speech to text feature status */}
    <StatusCard
      icon={<MicIcon />}
      text={
        ((overriddenRecognition && !recognition.warnings.length) ? 'Speech recognition should be good to go.' : 'There might be a speech recognition issue.')
        + (recognition.isApiAvailable ? '' : ' Your browser does not support the speech recognition API.')
        + (recognition.isDeviceNotSupported ? ' Your device does not provide this feature.' : '')
        + (recognition.warnings.length ? ' ⚠️ ' + recognition.warnings.join(' · ') : '')
      }
      button={overriddenRecognition ? undefined : (
        <Button variant='outlined' onClick={handleOverrideRecognition} sx={{ mx: 1 }}>
          Ignore
        </Button>
      )}
      hasIssue={!overriddenRecognition}
    />

    {/* Text to Speech status */}
    <StatusCard
      icon={<RecordVoiceOverIcon />}
      text={
        (synthesis.mayWork ? 'Voice synthesis should be ready.' : 'There might be an issue with ElevenLabs voice synthesis.')
        + (synthesis.isConfiguredServerSide ? '' : (synthesis.isConfiguredClientSide ? '' : ' Please add your API key in the settings.'))
      }
      button={synthesis.mayWork ? undefined : (
        <Button variant='outlined' onClick={handleConfigureElevenLabs} sx={{ mx: 1 }}>
          Configure
        </Button>
      )}
      hasIssue={!synthesis.mayWork}
    />

    {/*<Typography>*/}
    {/*  1. To start a call, click the "Accept" button when you receive an incoming call.*/}
    {/*  2. If your mic is enabled, you'll see a "Push to Talk" button. Press and hold it to speak, then release it to stop speaking.*/}
    {/*  3. If your mic is disabled, you can still type your messages in the chat and the assistant will respond.*/}
    {/*  4. During the call, you can control the voice synthesis settings from the menu in the top right corner.*/}
    {/*  5. To end the call, click the "Hang up" button.*/}
    {/*</Typography>*/}

    <Box sx={{ flexGrow: 2 }} />

    {/* bottom: text & button */}
    <Box sx={{ display: 'flex', justifyContent: 'space-around', alignItems: 'center', width: '100%', gap: 2, px: 0.5 }}>

      <Typography level='body-lg'>
        {allGood ? 'Ready, Set, Call' : 'Please resolve the issues above before proceeding with the call'}
      </Typography>

      <IconButton
        size='lg' variant={allGood ? 'soft' : 'solid'} color={allGood ? 'success' : 'danger'}
        onClick={handleFinishButton} sx={{ borderRadius: '50px' }}
      >
        {allGood ? <ArrowForwardIcon sx={{ fontSize: '1.5em' }} /> : <CloseIcon sx={{ fontSize: '1.5em' }} />}
      </IconButton>
    </Box>

    <Box sx={{ flexGrow: 0.5 }} />

  </>;
}
|
||||
@@ -0,0 +1,48 @@
|
||||
import * as React from 'react';
|
||||
import { keyframes } from '@emotion/react';
|
||||
|
||||
import { Avatar, Box } from '@mui/joy';
|
||||
|
||||
|
||||
// Gentle 1 -> 1.2 -> 1 scale pulse; applied to the persona symbol while ringing.
const cssScaleKeyframes = keyframes`
  0% {
    transform: scale(1);
  }
  50% {
    transform: scale(1.2);
  }
  100% {
    transform: scale(1);
  }`;
|
||||
|
||||
|
||||
export function CallAvatar(props: { symbol: string, imageUrl?: string, isRinging: boolean, onClick: () => void }) {
|
||||
return (
|
||||
<Avatar
|
||||
variant='soft' color='neutral'
|
||||
onClick={props.onClick}
|
||||
src={props.imageUrl}
|
||||
sx={{
|
||||
'--Avatar-size': { xs: '160px', md: '200px' },
|
||||
'--variant-borderWidth': '4px',
|
||||
boxShadow: !props.imageUrl ? 'md' : null,
|
||||
fontSize: { xs: '100px', md: '120px' },
|
||||
}}
|
||||
>
|
||||
|
||||
{/* As fallback, show the large Persona Symbol */}
|
||||
{!props.imageUrl && (
|
||||
<Box
|
||||
sx={{
|
||||
...(props.isRinging
|
||||
? { animation: `${cssScaleKeyframes} 1.4s ease-in-out infinite` }
|
||||
: {}),
|
||||
}}
|
||||
>
|
||||
{props.symbol}
|
||||
</Box>
|
||||
)}
|
||||
|
||||
</Avatar>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,43 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, ColorPaletteProp, IconButton, Typography, VariantProp } from '@mui/joy';
|
||||
|
||||
|
||||
/**
|
||||
* Large button to operate the call, e.g.
|
||||
* --------
|
||||
* | 🎤 |
|
||||
* | Mute |
|
||||
* --------
|
||||
*/
|
||||
export function CallButton(props: {
|
||||
Icon: React.FC, text: string,
|
||||
variant?: VariantProp, color?: ColorPaletteProp, disabled?: boolean,
|
||||
onClick?: () => void,
|
||||
}) {
|
||||
return (
|
||||
<Box
|
||||
onClick={() => !props.disabled && props.onClick?.()}
|
||||
sx={{
|
||||
display: 'flex', flexDirection: 'column', alignItems: 'center',
|
||||
gap: { xs: 1, md: 2 },
|
||||
}}
|
||||
>
|
||||
|
||||
<IconButton
|
||||
disabled={props.disabled} variant={props.variant || 'solid'} color={props.color}
|
||||
sx={{
|
||||
'--IconButton-size': { xs: '4.2rem', md: '5rem' },
|
||||
borderRadius: '50%',
|
||||
// boxShadow: 'lg',
|
||||
}}>
|
||||
<props.Icon />
|
||||
</IconButton>
|
||||
|
||||
<Typography level='title-md' variant={props.disabled ? 'soft' : undefined}>
|
||||
{props.text}
|
||||
</Typography>
|
||||
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,33 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Chip, ColorPaletteProp, VariantProp } from '@mui/joy';
|
||||
import { SxProps } from '@mui/joy/styles/types';
|
||||
|
||||
import { VChatMessageIn } from '~/modules/llms/transports/chatGenerate';
|
||||
|
||||
|
||||
export function CallMessage(props: {
|
||||
text?: string | React.JSX.Element,
|
||||
variant?: VariantProp, color?: ColorPaletteProp,
|
||||
role: VChatMessageIn['role'],
|
||||
sx?: SxProps,
|
||||
}) {
|
||||
return (
|
||||
<Chip
|
||||
color={props.color} variant={props.variant}
|
||||
sx={{
|
||||
alignSelf: props.role === 'user' ? 'end' : 'start',
|
||||
whiteSpace: 'break-spaces',
|
||||
borderRadius: 'lg',
|
||||
mt: 'auto',
|
||||
// boxShadow: 'md',
|
||||
py: 1,
|
||||
...(props.sx || {}),
|
||||
}}
|
||||
>
|
||||
|
||||
{props.text}
|
||||
|
||||
</Chip>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,47 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box, Typography } from '@mui/joy';
|
||||
|
||||
import { InlineError } from '~/common/components/InlineError';
|
||||
|
||||
|
||||
/**
 * Vertical status readout for the call, stacked and centered:
 *
 *   $Name                (optional, bold)
 *   "Connecting..." or "Call ended"
 *   re: $Regarding       (optional)
 *
 * followed by inline errors when the mic or text-to-speech are unavailable.
 */
export function CallStatus(props: {
  callerName?: string,
  statusText: string,
  regardingText?: string,
  micError: boolean, speakError: boolean,
}) {
  const { callerName, regardingText } = props;

  return (
    <Box sx={{ display: 'flex', flexDirection: 'column' }}>

      {!!callerName && (
        <Typography level='h3' sx={{ textAlign: 'center' }}>
          <b>{callerName}</b>
        </Typography>
      )}

      <Typography level='body-md' sx={{ textAlign: 'center' }}>
        {props.statusText}
      </Typography>

      {!!regardingText && (
        <Typography level='body-md' sx={{ textAlign: 'center', mt: 0 }}>
          re: {regardingText}
        </Typography>
      )}

      {props.micError && (
        <InlineError
          severity='danger' error='But this browser does not support speech recognition... 🤦♀️ - Try Chrome on Windows?' />
      )}

      {props.speakError && (
        <InlineError
          severity='danger' error='And text-to-speech is not configured... 🤦♀️ - Configure it in Settings?' />
      )}

    </Box>
  );
}
|
||||
@@ -0,0 +1,477 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Box } from '@mui/joy';
|
||||
import ForkRightIcon from '@mui/icons-material/ForkRight';
|
||||
|
||||
import { CmdRunBrowse } from '~/modules/browse/browse.client';
|
||||
import { CmdRunProdia } from '~/modules/prodia/prodia.client';
|
||||
import { CmdRunReact } from '~/modules/aifn/react/react';
|
||||
import { DiagramConfig, DiagramsModal } from '~/modules/aifn/digrams/DiagramsModal';
|
||||
import { FlattenerModal } from '~/modules/aifn/flatten/FlattenerModal';
|
||||
import { TradeConfig, TradeModal } from '~/modules/trade/TradeModal';
|
||||
import { imaginePromptFromText } from '~/modules/aifn/imagine/imaginePromptFromText';
|
||||
import { speakText } from '~/modules/elevenlabs/elevenlabs.client';
|
||||
import { useBrowseStore } from '~/modules/browse/store-module-browsing';
|
||||
import { useChatLLM, useModelsStore } from '~/modules/llms/store-llms';
|
||||
|
||||
import { ConfirmationModal } from '~/common/components/ConfirmationModal';
|
||||
import { GlobalShortcutItem, ShortcutKeyName, useGlobalShortcuts } from '~/common/components/useGlobalShortcut';
|
||||
import { addSnackbar, removeSnackbar } from '~/common/components/useSnackbarsStore';
|
||||
import { createDMessage, DConversationId, DMessage, getConversation, useConversation } from '~/common/state/store-chats';
|
||||
import { openLayoutLLMOptions, useLayoutPluggable } from '~/common/layout/store-applayout';
|
||||
import { useUXLabsStore } from '~/common/state/store-ux-labs';
|
||||
|
||||
import type { ComposerOutputMultiPart } from './components/composer/composer.types';
|
||||
import { ChatDrawerItemsMemo } from './components/applayout/ChatDrawerItems';
|
||||
import { ChatDropdowns } from './components/applayout/ChatDropdowns';
|
||||
import { ChatMenuItems } from './components/applayout/ChatMenuItems';
|
||||
import { ChatMessageList } from './components/ChatMessageList';
|
||||
import { CmdAddRoleMessage, CmdHelp, createCommandsHelpMessage, extractCommands } from './editors/commands';
|
||||
import { Composer } from './components/composer/Composer';
|
||||
import { Ephemerals } from './components/Ephemerals';
|
||||
import { usePanesManager } from './components/usePanesManager';
|
||||
|
||||
import { runAssistantUpdatingState } from './editors/chat-stream';
|
||||
import { runBrowseUpdatingState } from './editors/browse-load';
|
||||
import { runImageGenerationUpdatingState } from './editors/image-generate';
|
||||
import { runReActUpdatingState } from './editors/react-tangent';
|
||||
|
||||
|
||||
/**
 * Mode: how to treat the input from the Composer
 *  - 'immediate': run the assistant on the updated history right away
 *  - 'write-user': just append the message, no assistant response
 *  - 'react': run the ReAct agent on the last message
 *  - 'draw-imagine': generate an image from the last message text
 *  - 'draw-imagine-plus': like draw-imagine, but first expands the text into an image prompt
 */
export type ChatModeId = 'immediate' | 'write-user' | 'react' | 'draw-imagine' | 'draw-imagine-plus';
|
||||
|
||||
|
||||
// Sentinel conversation id meaning "wipe ALL chats" — presumably checked by the
// delete-confirmation flow (see `wipeAllConversations`); usage site is further down. TODO confirm.
const SPECIAL_ID_WIPE_ALL: DConversationId = 'wipe-chats';
|
||||
|
||||
export function AppChat() {
|
||||
|
||||
// state
|
||||
const [isMessageSelectionMode, setIsMessageSelectionMode] = React.useState(false);
|
||||
const [diagramConfig, setDiagramConfig] = React.useState<DiagramConfig | null>(null);
|
||||
const [tradeConfig, setTradeConfig] = React.useState<TradeConfig | null>(null);
|
||||
const [clearConversationId, setClearConversationId] = React.useState<DConversationId | null>(null);
|
||||
const [deleteConversationId, setDeleteConversationId] = React.useState<DConversationId | null>(null);
|
||||
const [flattenConversationId, setFlattenConversationId] = React.useState<DConversationId | null>(null);
|
||||
const showNextTitle = React.useRef(false);
|
||||
const composerTextAreaRef = React.useRef<HTMLTextAreaElement>(null);
|
||||
|
||||
// external state
|
||||
const { chatLLM } = useChatLLM();
|
||||
|
||||
const {
|
||||
chatPanes,
|
||||
focusedConversationId,
|
||||
navigateHistoryInFocusedPane,
|
||||
openConversationInFocusedPane,
|
||||
openConversationInSplitPane,
|
||||
setFocusedPaneIndex,
|
||||
} = usePanesManager();
|
||||
|
||||
const {
|
||||
title: focusedChatTitle,
|
||||
chatIdx: focusedChatNumber,
|
||||
isChatEmpty: isFocusedChatEmpty,
|
||||
areChatsEmpty,
|
||||
newConversationId,
|
||||
_remove_systemPurposeId: focusedSystemPurposeId,
|
||||
prependNewConversation,
|
||||
branchConversation,
|
||||
deleteConversation,
|
||||
wipeAllConversations,
|
||||
setMessages,
|
||||
} = useConversation(focusedConversationId);
|
||||
|
||||
|
||||
// Window actions
|
||||
|
||||
const chatPaneIDs = chatPanes.length > 0 ? chatPanes.map(pane => pane.conversationId) : [null];
|
||||
|
||||
const setActivePaneIndex = React.useCallback((idx: number) => {
|
||||
setFocusedPaneIndex(idx);
|
||||
}, [setFocusedPaneIndex]);
|
||||
|
||||
const setFocusedConversationId = React.useCallback((conversationId: DConversationId | null) => {
|
||||
conversationId && openConversationInFocusedPane(conversationId);
|
||||
}, [openConversationInFocusedPane]);
|
||||
|
||||
const openSplitConversationId = React.useCallback((conversationId: DConversationId | null) => {
|
||||
conversationId && openConversationInSplitPane(conversationId);
|
||||
}, [openConversationInSplitPane]);
|
||||
|
||||
const handleNavigateHistory = React.useCallback((direction: 'back' | 'forward') => {
|
||||
if (navigateHistoryInFocusedPane(direction))
|
||||
showNextTitle.current = true;
|
||||
}, [navigateHistoryInFocusedPane]);
|
||||
|
||||
React.useEffect(() => {
|
||||
if (showNextTitle.current) {
|
||||
showNextTitle.current = false;
|
||||
const title = (focusedChatNumber >= 0 ? `#${focusedChatNumber + 1} · ` : '') + (focusedChatTitle || 'New Chat');
|
||||
const id = addSnackbar({ key: 'focused-title', message: title, type: 'title' });
|
||||
return () => removeSnackbar(id);
|
||||
}
|
||||
}, [focusedChatNumber, focusedChatTitle]);
|
||||
|
||||
|
||||
// Execution
|
||||
|
||||
const _handleExecute = React.useCallback(async (chatModeId: ChatModeId, conversationId: DConversationId, history: DMessage[]) => {
|
||||
const { chatLLMId } = useModelsStore.getState();
|
||||
if (!chatModeId || !conversationId || !chatLLMId) return;
|
||||
|
||||
// "/command ...": overrides the chat mode
|
||||
const lastMessage = history.length > 0 ? history[history.length - 1] : null;
|
||||
if (lastMessage?.role === 'user') {
|
||||
const pieces = extractCommands(lastMessage.text);
|
||||
if (pieces.length == 2 && pieces[0].type === 'cmd' && pieces[1].type === 'text') {
|
||||
const [command, prompt] = [pieces[0].value, pieces[1].value];
|
||||
if (CmdRunProdia.includes(command)) {
|
||||
setMessages(conversationId, history);
|
||||
return await runImageGenerationUpdatingState(conversationId, prompt);
|
||||
}
|
||||
if (CmdRunReact.includes(command) && chatLLMId) {
|
||||
setMessages(conversationId, history);
|
||||
return await runReActUpdatingState(conversationId, prompt, chatLLMId);
|
||||
}
|
||||
if (CmdRunBrowse.includes(command) && prompt?.trim() && useBrowseStore.getState().enableCommandBrowse) {
|
||||
setMessages(conversationId, history);
|
||||
return await runBrowseUpdatingState(conversationId, prompt);
|
||||
}
|
||||
if (CmdAddRoleMessage.includes(command)) {
|
||||
lastMessage.role = command.startsWith('/s') ? 'system' : command.startsWith('/a') ? 'assistant' : 'user';
|
||||
lastMessage.sender = 'Bot';
|
||||
lastMessage.text = prompt;
|
||||
return setMessages(conversationId, history);
|
||||
}
|
||||
if (CmdHelp.includes(command)) {
|
||||
return setMessages(conversationId, [...history, createCommandsHelpMessage()]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// synchronous long-duration tasks, which update the state as they go
|
||||
if (chatLLMId && focusedSystemPurposeId) {
|
||||
switch (chatModeId) {
|
||||
case 'immediate':
|
||||
return await runAssistantUpdatingState(conversationId, history, chatLLMId, focusedSystemPurposeId);
|
||||
case 'write-user':
|
||||
return setMessages(conversationId, history);
|
||||
case 'react':
|
||||
if (!lastMessage?.text)
|
||||
break;
|
||||
setMessages(conversationId, history);
|
||||
return await runReActUpdatingState(conversationId, lastMessage.text, chatLLMId);
|
||||
case 'draw-imagine':
|
||||
case 'draw-imagine-plus':
|
||||
if (!lastMessage?.text)
|
||||
break;
|
||||
const imagePrompt = chatModeId == 'draw-imagine-plus'
|
||||
? await imaginePromptFromText(lastMessage.text) || 'An error sign.'
|
||||
: lastMessage.text;
|
||||
setMessages(conversationId, history.map(message => message.id !== lastMessage.id ? message : {
|
||||
...message,
|
||||
text: `${CmdRunProdia[0]} ${imagePrompt}`,
|
||||
}));
|
||||
return await runImageGenerationUpdatingState(conversationId, imagePrompt);
|
||||
}
|
||||
}
|
||||
|
||||
// ISSUE: if we're here, it means we couldn't do the job, at least sync the history
|
||||
console.log('handleExecuteConversation: issue running', chatModeId, conversationId, lastMessage);
|
||||
setMessages(conversationId, history);
|
||||
}, [focusedSystemPurposeId, setMessages]);
|
||||
|
||||
const handleComposerAction = (chatModeId: ChatModeId, conversationId: DConversationId, multiPartMessage: ComposerOutputMultiPart): boolean => {
|
||||
|
||||
// validate inputs
|
||||
if (multiPartMessage.length !== 1 || multiPartMessage[0].type !== 'text-block') {
|
||||
addSnackbar({
|
||||
key: 'chat-composer-action-invalid',
|
||||
message: 'Only a single text part is supported for now.',
|
||||
type: 'issue',
|
||||
overrides: {
|
||||
autoHideDuration: 2000,
|
||||
},
|
||||
});
|
||||
return false;
|
||||
}
|
||||
const userText = multiPartMessage[0].text;
|
||||
|
||||
// find conversation
|
||||
const conversation = getConversation(conversationId);
|
||||
if (!conversation)
|
||||
return false;
|
||||
|
||||
// start execution (async)
|
||||
void _handleExecute(chatModeId, conversationId, [
|
||||
...conversation.messages,
|
||||
createDMessage('user', userText),
|
||||
]);
|
||||
return true;
|
||||
};
|
||||
|
||||
const handleConversationExecuteHistory = async (conversationId: DConversationId, history: DMessage[]) =>
|
||||
await _handleExecute('immediate', conversationId, history);
|
||||
|
||||
const handleMessageRegenerateLast = React.useCallback(async () => {
|
||||
const focusedConversation = getConversation(focusedConversationId);
|
||||
if (focusedConversation?.messages?.length) {
|
||||
const lastMessage = focusedConversation.messages[focusedConversation.messages.length - 1];
|
||||
return await _handleExecute('immediate', focusedConversation.id, lastMessage.role === 'assistant'
|
||||
? focusedConversation.messages.slice(0, -1)
|
||||
: [...focusedConversation.messages],
|
||||
);
|
||||
}
|
||||
}, [focusedConversationId, _handleExecute]);
|
||||
|
||||
const handleTextDiagram = async (diagramConfig: DiagramConfig | null) => setDiagramConfig(diagramConfig);
|
||||
|
||||
const handleTextImaginePlus = async (conversationId: DConversationId, messageText: string) => {
|
||||
const conversation = getConversation(conversationId);
|
||||
if (conversation)
|
||||
return await _handleExecute('draw-imagine-plus', conversationId, [
|
||||
...conversation.messages,
|
||||
createDMessage('user', messageText),
|
||||
]);
|
||||
};
|
||||
|
||||
const handleTextSpeak = async (text: string) => {
|
||||
await speakText(text);
|
||||
};
|
||||
|
||||
|
||||
// Chat actions
|
||||
|
||||
const handleConversationNew = React.useCallback(() => {
|
||||
// activate an existing new conversation if present, or create another
|
||||
setFocusedConversationId(newConversationId
|
||||
? newConversationId
|
||||
: prependNewConversation(focusedSystemPurposeId ?? undefined),
|
||||
);
|
||||
composerTextAreaRef.current?.focus();
|
||||
}, [focusedSystemPurposeId, newConversationId, prependNewConversation, setFocusedConversationId]);
|
||||
|
||||
const handleConversationImportDialog = () => setTradeConfig({ dir: 'import' });
|
||||
|
||||
const handleConversationExport = (conversationId: DConversationId | null) => setTradeConfig({ dir: 'export', conversationId });
|
||||
|
||||
/**
 * Branches a conversation at the given message (or at the tip, when messageId is null),
 * notifies via snackbar, and opens the branch in either a split pane (UX labs option)
 * or the focused pane. Returns the new conversation id.
 */
const handleConversationBranch = React.useCallback((conversationId: DConversationId, messageId: string | null): DConversationId | null => {
  // arm the one-shot title snackbar for the newly focused branch
  showNextTitle.current = true;
  const branchedConversationId = branchConversation(conversationId, messageId);
  addSnackbar({
    key: 'branch-conversation',
    message: 'Branch started.',
    type: 'success',
    overrides: {
      autoHideDuration: 3000,
      startDecorator: <ForkRightIcon />,
    },
  });
  if (useUXLabsStore.getState().labsSplitBranching)
    openSplitConversationId(branchedConversationId);
  else
    setFocusedConversationId(branchedConversationId);
  return branchedConversationId;
}, [branchConversation, openSplitConversationId, setFocusedConversationId]);

// Open the Flatten modal on a conversation.
const handleConversationFlatten = (conversationId: DConversationId) => setFlattenConversationId(conversationId);
|
||||
|
||||
|
||||
const handleConfirmedClearConversation = React.useCallback(() => {
|
||||
if (clearConversationId) {
|
||||
setMessages(clearConversationId, []);
|
||||
setClearConversationId(null);
|
||||
}
|
||||
}, [clearConversationId, setMessages]);
|
||||
|
||||
const handleConversationClear = (conversationId: DConversationId) => setClearConversationId(conversationId);
|
||||
|
||||
|
||||
const handleConfirmedDeleteConversation = () => {
|
||||
if (deleteConversationId) {
|
||||
let nextConversationId: DConversationId | null;
|
||||
if (deleteConversationId === SPECIAL_ID_WIPE_ALL)
|
||||
nextConversationId = wipeAllConversations(focusedSystemPurposeId ?? undefined);
|
||||
else
|
||||
nextConversationId = deleteConversation(deleteConversationId);
|
||||
setFocusedConversationId(nextConversationId);
|
||||
setDeleteConversationId(null);
|
||||
}
|
||||
};
|
||||
|
||||
const handleConversationsDeleteAll = () => setDeleteConversationId(SPECIAL_ID_WIPE_ALL);
|
||||
|
||||
const handleConversationDelete = React.useCallback((conversationId: DConversationId, bypassConfirmation: boolean) => {
|
||||
if (bypassConfirmation)
|
||||
setFocusedConversationId(deleteConversation(conversationId));
|
||||
else
|
||||
setDeleteConversationId(conversationId);
|
||||
}, [deleteConversation, setFocusedConversationId]);
|
||||
|
||||
|
||||
// Shortcuts

// Open the options panel of the current chat LLM (no-op when none is selected).
const handleOpenChatLlmOptions = React.useCallback(() => {
  const { chatLLMId } = useModelsStore.getState();
  if (!chatLLMId) return;
  openLayoutLLMOptions(chatLLMId);
  // NOTE(review): openLayoutLLMOptions is not in the deps array — presumably a stable
  // module-level function; confirm.
}, []);

// Global keyboard shortcuts. Tuple shape per GlobalShortcutItem: key + three modifier
// flags + action — TODO confirm which flag is ctrl/shift/alt against the type's definition.
const shortcuts = React.useMemo((): GlobalShortcutItem[] => [
  ['o', true, true, false, handleOpenChatLlmOptions],
  ['r', true, true, false, handleMessageRegenerateLast],
  ['n', true, false, true, handleConversationNew],
  // branch/clear short-circuit to a no-op when the focused chat is empty
  ['b', true, false, true, () => isFocusedChatEmpty || focusedConversationId && handleConversationBranch(focusedConversationId, null)],
  ['x', true, false, true, () => isFocusedChatEmpty || focusedConversationId && handleConversationClear(focusedConversationId)],
  ['d', true, false, true, () => focusedConversationId && handleConversationDelete(focusedConversationId, false)],
  [ShortcutKeyName.Left, true, false, true, () => handleNavigateHistory('back')],
  [ShortcutKeyName.Right, true, false, true, () => handleNavigateHistory('forward')],
], [focusedConversationId, handleConversationBranch, handleConversationDelete, handleConversationNew, handleMessageRegenerateLast, handleNavigateHistory, handleOpenChatLlmOptions, isFocusedChatEmpty]);
useGlobalShortcuts(shortcuts);
|
||||
|
||||
|
||||
// Pluggable ApplicationBar components

// Center of the app bar: the dropdowns for the focused conversation.
const centerItems = React.useMemo(() =>
    <ChatDropdowns conversationId={focusedConversationId} />,
  [focusedConversationId],
);

// Drawer: the conversations list and its actions.
const drawerItems = React.useMemo(() =>
    <ChatDrawerItemsMemo
      activeConversationId={focusedConversationId}
      disableNewButton={isFocusedChatEmpty}
      onConversationActivate={setFocusedConversationId}
      onConversationDelete={handleConversationDelete}
      onConversationImportDialog={handleConversationImportDialog}
      onConversationNew={handleConversationNew}
      onConversationsDeleteAll={handleConversationsDeleteAll}
    />,
  // NOTE(review): handleConversationImportDialog / handleConversationsDeleteAll are
  // recreated each render and omitted from the deps — presumably intentional since
  // they close over nothing that changes; confirm.
  [focusedConversationId, handleConversationDelete, handleConversationNew, isFocusedChatEmpty, setFocusedConversationId],
);

// App bar menu: per-conversation operations.
const menuItems = React.useMemo(() =>
    <ChatMenuItems
      conversationId={focusedConversationId}
      hasConversations={!areChatsEmpty}
      isConversationEmpty={isFocusedChatEmpty}
      isMessageSelectionMode={isMessageSelectionMode}
      setIsMessageSelectionMode={setIsMessageSelectionMode}
      onConversationBranch={handleConversationBranch}
      onConversationClear={handleConversationClear}
      onConversationExport={handleConversationExport}
      onConversationFlatten={handleConversationFlatten}
    />,
  [areChatsEmpty, focusedConversationId, handleConversationBranch, isFocusedChatEmpty, isMessageSelectionMode],
);

// Mount the three pluggable areas into the shared application layout.
useLayoutPluggable(centerItems, drawerItems, menuItems);
|
||||
|
||||
return <>

  {/* Chat panes: one per open pane; side-by-side on desktop, stacked on mobile */}
  <Box sx={{
    flexGrow: 1,
    display: 'flex', flexDirection: { xs: 'column', md: 'row' },
    overflow: 'clip',
  }}>

    {chatPaneIDs.map((_conversationId, idx) => (
      <Box key={'chat-pane-' + idx} onClick={() => setActivePaneIndex(idx)} sx={{
        flexGrow: 1, flexBasis: 1,
        display: 'flex', flexDirection: 'column',
        overflow: 'clip',
      }}>

        <ChatMessageList
          conversationId={_conversationId}
          chatLLMContextTokens={chatLLM?.contextTokens}
          isMessageSelectionMode={isMessageSelectionMode}
          setIsMessageSelectionMode={setIsMessageSelectionMode}
          onConversationBranch={handleConversationBranch}
          onConversationExecuteHistory={handleConversationExecuteHistory}
          onTextDiagram={handleTextDiagram}
          onTextImagine={handleTextImaginePlus}
          onTextSpeak={handleTextSpeak}
          sx={{
            flexGrow: 1,
            backgroundColor: 'background.level1',
            overflowY: 'auto',
            minHeight: 96,
            // outline the current focused pane
            ...(chatPaneIDs.length < 2 ? {}
              : (_conversationId === focusedConversationId)
                ? {
                  border: '2px solid',
                  borderColor: 'primary.solidBg',
                } : {
                  padding: '2px',
                }),
          }}
        />

        {/* per-pane ephemerals area (collapsible live state) */}
        <Ephemerals
          conversationId={_conversationId}
          sx={{
            // flexGrow: 0.1,
            flexShrink: 0.5,
            overflowY: 'auto',
            minHeight: 64,
          }} />

      </Box>
    ))}
  </Box>

  {/* The composer always acts on the focused conversation */}
  <Composer
    chatLLM={chatLLM}
    composerTextAreaRef={composerTextAreaRef}
    conversationId={focusedConversationId}
    isDeveloperMode={focusedSystemPurposeId === 'Developer'}
    onAction={handleComposerAction}
    sx={{
      zIndex: 21, // position: 'sticky', bottom: 0,
      backgroundColor: 'background.surface',
      borderTop: `1px solid`,
      borderTopColor: 'divider',
      p: { xs: 1, md: 2 },
    }} />


  {/* Diagrams */}
  {!!diagramConfig && <DiagramsModal config={diagramConfig} onClose={() => setDiagramConfig(null)} />}

  {/* Flatten */}
  {!!flattenConversationId && (
    <FlattenerModal
      conversationId={flattenConversationId}
      onConversationBranch={handleConversationBranch}
      onClose={() => setFlattenConversationId(null)}
    />
  )}

  {/* Import / Export */}
  {!!tradeConfig && <TradeModal config={tradeConfig} onConversationActivate={setFocusedConversationId} onClose={() => setTradeConfig(null)} />}


  {/* [confirmation] Reset Conversation */}
  {!!clearConversationId && <ConfirmationModal
    open onClose={() => setClearConversationId(null)} onPositive={handleConfirmedClearConversation}
    confirmationText={'Are you sure you want to discard all messages?'} positiveActionText={'Clear conversation'}
  />}

  {/* [confirmation] Delete All */}
  {!!deleteConversationId && <ConfirmationModal
    open onClose={() => setDeleteConversationId(null)} onPositive={handleConfirmedDeleteConversation}
    confirmationText={deleteConversationId === SPECIAL_ID_WIPE_ALL
      ? 'Are you absolutely sure you want to delete ALL conversations? This action cannot be undone.'
      : 'Are you sure you want to delete this conversation?'}
    positiveActionText={deleteConversationId === SPECIAL_ID_WIPE_ALL
      ? 'Yes, delete all'
      : 'Delete conversation'}
  />}

</>;
}
|
||||
@@ -1,193 +0,0 @@
|
||||
import * as React from 'react';
|
||||
import { shallow } from 'zustand/shallow';
|
||||
|
||||
import { Box, useTheme } from '@mui/joy';
|
||||
import { SxProps } from '@mui/joy/styles/types';
|
||||
|
||||
import { CmdRunProdia } from '@/modules/prodia/prodia.client';
|
||||
import { CmdRunReact } from '@/modules/search/search.client';
|
||||
import { PasteGG } from '@/modules/pastegg/pastegg.types';
|
||||
import { PublishedModal } from '@/modules/pastegg/PublishedModal';
|
||||
import { callPublish } from '@/modules/pastegg/pastegg.client';
|
||||
|
||||
import { ConfirmationModal } from '@/common/components/ConfirmationModal';
|
||||
import { Link } from '@/common/components/Link';
|
||||
import { conversationToMarkdown } from '@/common/util/conversationToMarkdown';
|
||||
import { createDMessage, DMessage, useChatStore } from '@/common/state/store-chats';
|
||||
import { extractCommands } from '@/common/util/extractCommands';
|
||||
import { useComposerStore } from '@/common/state/store-composer';
|
||||
import { useSettingsStore } from '@/common/state/store-settings';
|
||||
|
||||
import { ApplicationBar } from './components/appbar/ApplicationBar';
|
||||
import { ChatMessageList } from './components/ChatMessageList';
|
||||
import { Composer } from './components/composer/Composer';
|
||||
import { Ephemerals } from './components/ephemerals/Ephemerals';
|
||||
import { imaginePromptFromText } from './util/ai-functions';
|
||||
import { runAssistantUpdatingState } from './util/agi-immediate';
|
||||
import { runImageGenerationUpdatingState } from './util/imagine';
|
||||
import { runReActUpdatingState } from './util/agi-react';
|
||||
|
||||
|
||||
/**
 * Chat page (legacy single-pane implementation, deleted by this diff):
 * app bar + message list + ephemerals + composer, plus the paste.gg publish flow.
 */
export function Chat(props: { onShowSettings: () => void, sx?: SxProps }) {
  // state
  const [isMessageSelectionMode, setIsMessageSelectionMode] = React.useState(false);
  // conversation awaiting publish confirmation (null = dialog closed)
  const [publishConversationId, setPublishConversationId] = React.useState<string | null>(null);
  // response of the last paste.gg publish, shown in a modal
  const [publishResponse, setPublishResponse] = React.useState<PasteGG.API.Publish.Response | null>(null);

  // external state
  const theme = useTheme();
  const { sendModeId } = useComposerStore(state => ({ sendModeId: state.sendModeId }), shallow);
  const { activeConversationId, setMessages, chatModelId, systemPurposeId } = useChatStore(state => {
    const conversation = state.conversations.find(conversation => conversation.id === state.activeConversationId);
    return {
      activeConversationId: state.activeConversationId,
      setMessages: state.setMessages,
      chatModelId: conversation?.chatModelId ?? null,
      systemPurposeId: conversation?.systemPurposeId ?? null,
    };
  }, shallow);


  // Executes a history on a conversation: a "/command" in the last user message
  // takes precedence, then the composer's send mode; falls back to syncing history.
  const handleExecuteConversation = async (conversationId: string, history: DMessage[]) => {
    if (!conversationId) return;

    // Command - last user message is a cmd
    const lastMessage = history.length > 0 ? history[history.length - 1] : null;
    if (lastMessage?.role === 'user') {
      const pieces = extractCommands(lastMessage.text);
      if (pieces.length == 2 && pieces[0].type === 'cmd' && pieces[1].type === 'text') {
        const command = pieces[0].value;
        const prompt = pieces[1].value;
        if (CmdRunProdia.includes(command)) {
          setMessages(conversationId, history);
          return await runImageGenerationUpdatingState(conversationId, prompt);
        }
        if (CmdRunReact.includes(command) && chatModelId) {
          setMessages(conversationId, history);
          return await runReActUpdatingState(conversationId, prompt, chatModelId);
        }
        // if (CmdRunSearch.includes(command))
        //   return await run...
      }
    }

    // synchronous long-duration tasks, which update the state as they go
    if (sendModeId && chatModelId && systemPurposeId) {
      switch (sendModeId) {
        case 'immediate':
          return await runAssistantUpdatingState(conversationId, history, chatModelId, systemPurposeId);
        case 'react':
          if (lastMessage?.text) {
            setMessages(conversationId, history);
            return await runReActUpdatingState(conversationId, lastMessage.text, chatModelId);
          }
      }
    }

    // ISSUE: if we're here, it means we couldn't do the job, at least sync the history
    setMessages(conversationId, history);
  };

  // Look up a conversation by id in the chat store (null when missing or id is falsy).
  const _findConversation = (conversationId: string) =>
    conversationId ? useChatStore.getState().conversations.find(c => c.id === conversationId) ?? null : null;

  // Append the user's text as a message and execute the conversation.
  const handleSendUserMessage = async (conversationId: string, userText: string) => {
    const conversation = _findConversation(conversationId);
    if (conversation)
      return await handleExecuteConversation(conversationId, [...conversation.messages, createDMessage('user', userText)]);
  };

  // Turn free text into an image prompt via the LLM, then execute it as a draw command.
  const handleImagineFromText = async (conversationId: string, messageText: string) => {
    const conversation = _findConversation(conversationId);
    if (conversation && chatModelId) {
      const prompt = await imaginePromptFromText(messageText, chatModelId);
      if (prompt)
        return await handleExecuteConversation(conversationId, [...conversation.messages, createDMessage('user', `${CmdRunProdia[0]} ${prompt}`)]);
    }
  };


  // Open the publish confirmation dialog for a conversation.
  const handlePublishConversation = (conversationId: string) => setPublishConversationId(conversationId);

  // After confirmation: render the conversation to markdown and upload it to paste.gg.
  const handleConfirmedPublishConversation = async () => {
    if (publishConversationId) {
      const conversation = _findConversation(publishConversationId);
      setPublishConversationId(null);
      if (conversation) {
        const markdownContent = conversationToMarkdown(conversation, !useSettingsStore.getState().showSystemMessages);
        const publishResponse = await callPublish('paste.gg', markdownContent);
        setPublishResponse(publishResponse);
      }
    }
  };


  return (

    <Box
      sx={{
        display: 'flex', flexDirection: 'column', height: '100vh',
        ...(props.sx || {}),
      }}>

      <ApplicationBar
        conversationId={activeConversationId}
        isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
        onPublishConversation={handlePublishConversation}
        onShowSettings={props.onShowSettings}
        sx={{
          zIndex: 20, // position: 'sticky', top: 0,
          // ...(process.env.NODE_ENV === 'development' ? { background: theme.vars.palette.danger.solidBg } : {}),
        }} />

      <ChatMessageList
        conversationId={activeConversationId}
        isMessageSelectionMode={isMessageSelectionMode} setIsMessageSelectionMode={setIsMessageSelectionMode}
        onExecuteConversation={handleExecuteConversation}
        onImagineFromText={handleImagineFromText}
        sx={{
          flexGrow: 1,
          background: theme.vars.palette.background.level2,
          overflowY: 'auto', // overflowY: 'hidden'
          minHeight: 96,
        }} />

      <Ephemerals
        conversationId={activeConversationId}
        sx={{
          // flexGrow: 0.1,
          flexShrink: 0.5,
          overflowY: 'auto',
          minHeight: 64,
        }} />

      <Composer
        conversationId={activeConversationId} messageId={null}
        isDeveloperMode={systemPurposeId === 'Developer'}
        onSendMessage={handleSendUserMessage}
        sx={{
          zIndex: 21, // position: 'sticky', bottom: 0,
          background: theme.vars.palette.background.surface,
          borderTop: `1px solid ${theme.vars.palette.divider}`,
          p: { xs: 1, md: 2 },
        }} />

      {/* Confirmation for Publishing */}
      <ConfirmationModal
        open={!!publishConversationId} onClose={() => setPublishConversationId(null)} onPositive={handleConfirmedPublishConversation}
        confirmationText={<>
          Share your conversation anonymously on <Link href='https://paste.gg' target='_blank'>paste.gg</Link>?
          It will be unlisted and available to share and read for 30 days. Keep in mind, deletion may not be possible.
          Are you sure you want to proceed?
        </>} positiveActionText={'Understood, upload to paste.gg'}
      />

      {/* Show the Published details */}
      {!!publishResponse && (
        <PublishedModal open onClose={() => setPublishResponse(null)} response={publishResponse} />
      )}

    </Box>

  );
}
|
||||