mirror of
https://github.com/enricoros/big-AGI.git
synced 2026-05-10 21:50:14 -07:00
Compare commits
1214 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| e900695f8b | |||
| aacb4349e9 | |||
| 55bde68a4d | |||
| 26ae3545a7 | |||
| 0001f7392b | |||
| d7e83e578b | |||
| 901d93b5f0 | |||
| 6858b0b94a | |||
| 9d88bf9b82 | |||
| 1bf1b744b9 | |||
| ee2d7114c7 | |||
| 3b1b54b3a3 | |||
| 524029a882 | |||
| 69161d29a7 | |||
| 8a542c1af4 | |||
| fe16970624 | |||
| e21abdef45 | |||
| acdbb2fbaf | |||
| 14be134ef2 | |||
| f56f6eb3cd | |||
| d3a7b75d1c | |||
| d5d7cf5a21 | |||
| 13b928d68b | |||
| 31948a62f9 | |||
| bf2d00a936 | |||
| ed4edd7c0b | |||
| e5de61d682 | |||
| ac69c62020 | |||
| a43b6a2cf5 | |||
| e8e3366fe2 | |||
| d813810a28 | |||
| c400aa7543 | |||
| 9fc0b39730 | |||
| 194bfe23a1 | |||
| 35110480ef | |||
| 959595e33a | |||
| a960424dfb | |||
| 0df6c7d08b | |||
| 65c841e7a7 | |||
| b21b8cc982 | |||
| aa2c4f06b7 | |||
| b8d7b4ec10 | |||
| c48520255a | |||
| 0790da989d | |||
| 506d24d2fd | |||
| 1348dbf493 | |||
| ce677f3cd9 | |||
| 39203d78e3 | |||
| 2ef7daf369 | |||
| cff3d90613 | |||
| 9f89243d7f | |||
| 784ee9a4da | |||
| 678e6b8ba1 | |||
| 30e301c496 | |||
| b22904f6bb | |||
| 3f0de7ddca | |||
| 9a6f0f9202 | |||
| 4f0bae5657 | |||
| 2101f06195 | |||
| 6d54b5594c | |||
| 36b8e5b1df | |||
| 8252d671c7 | |||
| 30d97c94aa | |||
| 82654a00d4 | |||
| 9595f14ddc | |||
| 8c496074b2 | |||
| 4d097d7136 | |||
| 178619d275 | |||
| 59c8b2538d | |||
| 443b72c52a | |||
| ae13abef45 | |||
| 83ae02ef9b | |||
| 9bb178413b | |||
| d85f0ebfc4 | |||
| 8f84dc2f24 | |||
| c8b4301bcd | |||
| bd8eaf0b9f | |||
| a4148cf694 | |||
| 4cb0b493dc | |||
| e6354e9089 | |||
| 08506abaee | |||
| 078c80d572 | |||
| b1c9f6be45 | |||
| fc497e9beb | |||
| 6ad01fd981 | |||
| 44ed8664c8 | |||
| 4cb16ee715 | |||
| 2dc9b87cda | |||
| 0e587c4889 | |||
| 41d42d82fb | |||
| f703c8a8c9 | |||
| bf753eab55 | |||
| 698b67af06 | |||
| 377d61056a | |||
| 94b32c8fe3 | |||
| 1e70a59ad6 | |||
| 44d05181f4 | |||
| 996998a5cc | |||
| 98474b2721 | |||
| 198dc0e23f | |||
| 079731c573 | |||
| 492c89650a | |||
| 5b5bbb7649 | |||
| 27d1f081ab | |||
| 76183fd840 | |||
| 345165eabf | |||
| c186732b3b | |||
| 04916b700e | |||
| 013dab185c | |||
| 5ab93faccf | |||
| fa301e3675 | |||
| fa6e7dd9c5 | |||
| 01736ad5da | |||
| ce682b1f85 | |||
| 96d801f40a | |||
| 8985868f63 | |||
| 8febdcd0c0 | |||
| 4d21d5134a | |||
| 09d44a4314 | |||
| 40066e975a | |||
| 202382c80a | |||
| 6ffbb32c57 | |||
| 9b8a3ca503 | |||
| cdd7892077 | |||
| 974aa12137 | |||
| d8f8999333 | |||
| 0efd87b522 | |||
| ec76e1c5cf | |||
| 1e04efe748 | |||
| 69c135ae78 | |||
| 205fb1bb5b | |||
| c8e7315de3 | |||
| 725f3b0fd7 | |||
| 7ee3701607 | |||
| 9537ce59e8 | |||
| 6c0a60e0d1 | |||
| 436a858cb0 | |||
| 6ea6c55f65 | |||
| c477fa86ce | |||
| 08cd5ed5b6 | |||
| b5f2cd35f2 | |||
| 4cb0f6d67e | |||
| 5260ec68cc | |||
| 72ce4d2884 | |||
| ed65f989d9 | |||
| 588ebf4993 | |||
| 22969033a7 | |||
| 8b5e00480b | |||
| aaf752fa9c | |||
| 82d3b36048 | |||
| 588c81f9ad | |||
| 4013a3f997 | |||
| 5823e18904 | |||
| 31ea6863aa | |||
| f3f58f26ae | |||
| 67132f285e | |||
| 20a638a8c9 | |||
| c9174e995f | |||
| 656c507c94 | |||
| a1fb744eb1 | |||
| 28367547fd | |||
| 6610211eac | |||
| b66e3e2afa | |||
| 4bf965953a | |||
| 1bd6513d59 | |||
| 6ce457913e | |||
| ef84ca5a04 | |||
| f76524c650 | |||
| 0be676229f | |||
| 40a0ca7235 | |||
| 1563c3a9dc | |||
| 80f32be80d | |||
| eea53714cc | |||
| 148f1ec22c | |||
| b5a2a70e73 | |||
| e7667e4b7d | |||
| 9250eb9aff | |||
| 92883caaab | |||
| 6d57450efc | |||
| 5dd4c600ea | |||
| 392a3b7949 | |||
| e22c40c7e4 | |||
| c7abee6969 | |||
| 4772e63fdb | |||
| f3d7abefec | |||
| ac76b156cf | |||
| 97e65efc31 | |||
| 13dcaa0a57 | |||
| 1f42b0ae66 | |||
| 003a50f181 | |||
| 32c5849a50 | |||
| 44a8ee0593 | |||
| 1ad70c7b1b | |||
| 7413983159 | |||
| 6c3e8c6a8f | |||
| 7e3e9854ac | |||
| 41fc93345c | |||
| b9275177e3 | |||
| 5ea95e4095 | |||
| 0ea041ed5b | |||
| 037e3b62d8 | |||
| 517c18c902 | |||
| 685b5c5130 | |||
| cfdab2f900 | |||
| 1a743ff264 | |||
| 85463fafb1 | |||
| 0641b0df97 | |||
| 98825081a9 | |||
| f549c13465 | |||
| 8bf7fd7106 | |||
| d8d889c706 | |||
| 90665ed84a | |||
| dd3d10a391 | |||
| 19ebd399a8 | |||
| f21a2973e9 | |||
| 04bb8f9c12 | |||
| 5ea63c8734 | |||
| f4f4ad9373 | |||
| ba06d70c05 | |||
| 62ddd17715 | |||
| f76db1d19e | |||
| f0901dbc03 | |||
| c65a2ce387 | |||
| eaee372938 | |||
| d8836534cb | |||
| 7d2e64b458 | |||
| bc942c5581 | |||
| 4ca24f8314 | |||
| b299dec68e | |||
| b9f07d011b | |||
| 9259be8dbb | |||
| 4b0b7c4493 | |||
| 73f0760809 | |||
| db6c2b1620 | |||
| 1233e846db | |||
| 27312537a7 | |||
| 1dfd4d8395 | |||
| ccd9f0980f | |||
| 5cc48d24ec | |||
| 7929d4eb30 | |||
| 14c5c83f91 | |||
| 263412c422 | |||
| d395fa817d | |||
| 9cfc8c513b | |||
| c92a1cfcb1 | |||
| f45e45ca8f | |||
| e44d4b8b01 | |||
| c342f553db | |||
| 2fab208ccf | |||
| eab3eee19f | |||
| fcb3903b5f | |||
| 90ccb64bd0 | |||
| 1772db5e98 | |||
| a04ee4de95 | |||
| 73b6a54f9e | |||
| 52b08b407c | |||
| 269a3a9991 | |||
| 1b2050cd96 | |||
| a71dd5e3aa | |||
| 8d91ea0413 | |||
| 81b39c7f9c | |||
| a3200e1aab | |||
| 4c8fa8e477 | |||
| f64aae10c5 | |||
| bd8f484cd2 | |||
| 4c3151e3be | |||
| 4e3377f1df | |||
| f95b643a5c | |||
| 85083f323d | |||
| b884386143 | |||
| 01a8d858cf | |||
| 08fed36a61 | |||
| f8b110e108 | |||
| b78b0f1323 | |||
| 148c0b1d77 | |||
| fe501831b2 | |||
| 1862b72ba5 | |||
| a609071966 | |||
| dc2d162e6e | |||
| 07f2cd291e | |||
| a6e040e3e5 | |||
| 3e6cfc9775 | |||
| 0e2abd2615 | |||
| 394e79510e | |||
| 848977820e | |||
| c893f1969c | |||
| bb9a8b81d1 | |||
| 188b338bdc | |||
| 463ef406a7 | |||
| a916ff46dc | |||
| db3a5c0b1b | |||
| b760250da1 | |||
| b5829ac541 | |||
| fa4f2b8fcd | |||
| 333c318a62 | |||
| 5f6f7086d0 | |||
| a7495bd4cf | |||
| 76c4919e9c | |||
| 5530a0253e | |||
| 86aaa65d10 | |||
| 65bf147e04 | |||
| f76ad186f0 | |||
| e5e333db70 | |||
| ddee08c2da | |||
| 93b7686f18 | |||
| e61e9626e2 | |||
| 3c6bfe0152 | |||
| e4fc44bc9c | |||
| 51e23ad3a4 | |||
| 5ebbe45a63 | |||
| 6df276d51d | |||
| f811500b60 | |||
| 2b51605c18 | |||
| 513b840b47 | |||
| d94c8c8a3b | |||
| 3dd641a398 | |||
| 8e545f1738 | |||
| 2a12597567 | |||
| e003683040 | |||
| 0338b3d2e9 | |||
| 5d5bc403c4 | |||
| b646149980 | |||
| 1e7e8ac632 | |||
| 309786e01e | |||
| 08e3caf8c2 | |||
| 21b68d7660 | |||
| 4986c61b2a | |||
| 801479cb5c | |||
| 1d18e21018 | |||
| 4c329a8f51 | |||
| 1eb4eeea42 | |||
| 5ca094111c | |||
| 4ce4202750 | |||
| 4873c0c390 | |||
| 351a28f34f | |||
| a2e99ed84f | |||
| 7d2a26ab66 | |||
| 94268187f1 | |||
| 5aafa98f1c | |||
| c42c34acb4 | |||
| f052963da3 | |||
| 07fa93609d | |||
| cbef9e5a57 | |||
| 0b342339d4 | |||
| 9de3d5a26f | |||
| 78878076c2 | |||
| 65cca958a6 | |||
| 19263f8494 | |||
| 5f71cbed47 | |||
| fe93a66d3b | |||
| aa3b451e00 | |||
| ca245bf8b8 | |||
| 9868068cd6 | |||
| 5fd27629d0 | |||
| 4bfc7636c9 | |||
| 305a7784ee | |||
| 87ecc11661 | |||
| 0faf5d5957 | |||
| 55d7ebd804 | |||
| 842b5b96c2 | |||
| b07fc759c2 | |||
| 0afa70aaab | |||
| c2cf93bf1a | |||
| 88639b8b57 | |||
| bfecc63d0d | |||
| 20bea327e4 | |||
| 1e5c26b490 | |||
| d9183c9658 | |||
| 3ecbbc3b70 | |||
| 1c1d21eed7 | |||
| 6129971bb2 | |||
| 8a3d75f077 | |||
| 9c249b513f | |||
| 04d3fe6e99 | |||
| ea7283b96e | |||
| 295fc111c4 | |||
| 58d73d5d81 | |||
| fd8ce2e99a | |||
| c8a33a06fa | |||
| 874be92a56 | |||
| 6bdb01e3c5 | |||
| ba03ab3aa8 | |||
| 3d554e513d | |||
| e516b9dae9 | |||
| 281d5a611e | |||
| 03eec23efe | |||
| e3d01f6615 | |||
| 99e15333cb | |||
| 5efd16c060 | |||
| b4a6c80d8c | |||
| 7991920f08 | |||
| a113b8223b | |||
| 7bb720a903 | |||
| 515de2679e | |||
| 38caacf816 | |||
| 676b0537e6 | |||
| a24341cda6 | |||
| d937bc246a | |||
| 5d2543131a | |||
| ca5d6872b5 | |||
| a97ce26072 | |||
| c698f78f92 | |||
| 77782a63eb | |||
| 41e1e44ef0 | |||
| 7b1fc56320 | |||
| c0ed41a529 | |||
| ba47fe1cfe | |||
| f1356d8fdc | |||
| 7a899c538f | |||
| 3daac973b1 | |||
| b0ec5f7459 | |||
| 71d6868512 | |||
| 605bb83eb3 | |||
| 3092e02ce9 | |||
| 5d82374975 | |||
| ab4d63e596 | |||
| f800bb8dae | |||
| 18862c0ff4 | |||
| 3765e8c69e | |||
| 70d54a9aa3 | |||
| 50c6ee69af | |||
| dd2532e269 | |||
| 16a54b3452 | |||
| 8373c1c785 | |||
| 39beda5519 | |||
| c7d1eae327 | |||
| ec81e2ff5b | |||
| 697090b695 | |||
| 8680fcc3db | |||
| 233037edd2 | |||
| 81c3251c6e | |||
| dc0fe7f4ca | |||
| 2c9c0f2e0b | |||
| 9c3fb9aadb | |||
| de37ac2c51 | |||
| d6b57702bd | |||
| d94642c29f | |||
| 75378ea88f | |||
| d539c1369b | |||
| 555ee6f333 | |||
| ad989d8a0b | |||
| aae7af4713 | |||
| df0a204767 | |||
| 5cdefc7b5e | |||
| c1bdb1fc61 | |||
| dde22a080b | |||
| 7f5ff30f97 | |||
| 38e1708e91 | |||
| fe4e755304 | |||
| 67f1c87d3a | |||
| eef88ffae2 | |||
| 319965c55c | |||
| 1f309b5c81 | |||
| 5273352ae9 | |||
| 5a48256d77 | |||
| 1d41294c1d | |||
| ff76229706 | |||
| b0f4b30ebe | |||
| 7be8f6c6a7 | |||
| b003993961 | |||
| 4878f361b5 | |||
| a82a3899c5 | |||
| ff0685e6e8 | |||
| a597489526 | |||
| 32e8890f62 | |||
| 211a43eab4 | |||
| 8c28df77cc | |||
| 4e82a12899 | |||
| 8d0e0dea89 | |||
| 5703f23b99 | |||
| 196d08b4fd | |||
| 2f9738f6fb | |||
| d4db225d1e | |||
| efff785713 | |||
| 234accad3f | |||
| 588b4b2c64 | |||
| 7de34d8478 | |||
| 741980adfc | |||
| 2690380bfd | |||
| b482b07335 | |||
| 03b4c6f941 | |||
| b7fd1b13de | |||
| 10a6f2d3c7 | |||
| ba149d3b43 | |||
| f175d071c4 | |||
| 874d0bca05 | |||
| 81ad0328b7 | |||
| 5198fa66cf | |||
| a807bdd6b6 | |||
| 2b209bb679 | |||
| 2f018dce9f | |||
| 2eb77f532a | |||
| 69063bb544 | |||
| 7fad2f8790 | |||
| 620275a1f5 | |||
| ba583fc448 | |||
| 0b96870644 | |||
| eb2b682eb5 | |||
| 577b52120a | |||
| b69ae3edae | |||
| 624b177996 | |||
| bbf01b49c0 | |||
| 86b2d8ae71 | |||
| d18af42d43 | |||
| 4f6e110bf9 | |||
| 62cf334e2f | |||
| 8bd6fd40fd | |||
| f21fe41188 | |||
| cfff23164c | |||
| a8d9233dc4 | |||
| 9c973efbbf | |||
| e2c4255920 | |||
| e01b9ff6a9 | |||
| 0084a635f1 | |||
| 0cd20b8d48 | |||
| 7c4094b4c2 | |||
| acd8430d51 | |||
| 6ae2195d10 | |||
| 6bcc0dd177 | |||
| 2de42c2010 | |||
| a231ccb492 | |||
| 35875d5837 | |||
| c36ff1edfa | |||
| ed35d5b541 | |||
| 2b2a2d84a9 | |||
| a645a4066c | |||
| 508a3beff7 | |||
| df0c133056 | |||
| 2da3942ce2 | |||
| 26547dec0d | |||
| aa4804bdd5 | |||
| eafa1f02cb | |||
| 836533a8c2 | |||
| cfeb134c20 | |||
| 35798b5568 | |||
| 7a250f0848 | |||
| 0a4e6d5142 | |||
| f4254a5ffb | |||
| 7b7718e578 | |||
| c261b2b156 | |||
| 237065553e | |||
| 6116af42df | |||
| 08b28cfde8 | |||
| b019655518 | |||
| 1264a2ebaf | |||
| 1960b4f618 | |||
| c75fbd89e6 | |||
| 3e67201665 | |||
| b60e2bae65 | |||
| 19c7fa4285 | |||
| f450dd3eac | |||
| d366cdd542 | |||
| c1ba83fddb | |||
| 617d6038b1 | |||
| 0abee15c30 | |||
| 1aa2e68e4a | |||
| cd692218ce | |||
| a5b7191185 | |||
| 56baba4cae | |||
| b696447be4 | |||
| e1ef2e72d7 | |||
| e85905e63c | |||
| c6208a2900 | |||
| 01299e4f19 | |||
| 1771575641 | |||
| 88a796fd87 | |||
| e403467d6d | |||
| 1914a2a8a3 | |||
| 683892afef | |||
| 470f8aab70 | |||
| 7a561d6b42 | |||
| affff0df4a | |||
| f5a81bdc94 | |||
| 818ed53b53 | |||
| 12c875f4e3 | |||
| 6ff715c0f0 | |||
| c4a89822d8 | |||
| a8a917f786 | |||
| 3aa9a71a4b | |||
| 3758612ed6 | |||
| b71a4265f8 | |||
| 870cdb67cf | |||
| 902c9dc3f4 | |||
| 0d1db0a360 | |||
| ddd784f041 | |||
| 830d45c06d | |||
| 6e27a31013 | |||
| ed87595e17 | |||
| da01b59ae3 | |||
| 79046b808b | |||
| 5a71153390 | |||
| 94056cdf4b | |||
| 41cb35c6b9 | |||
| e133fc81f6 | |||
| 418c2e496c | |||
| 3690202b38 | |||
| f069c2e5ab | |||
| 97bf6ca276 | |||
| a1390b152f | |||
| 4e8c7d46f6 | |||
| 02944d2015 | |||
| 58726f0425 | |||
| 85f796fb1d | |||
| 311a9c2bf2 | |||
| 6768917d44 | |||
| 7beb412738 | |||
| cf724625cc | |||
| f60b2410dd | |||
| bbdc16b06a | |||
| 0fa2d06725 | |||
| 36cdc4b55f | |||
| c2b4a50bfa | |||
| 73f88d4715 | |||
| af919be2ac | |||
| facffbc6c8 | |||
| dd5b7cb8c2 | |||
| 3dc61109d7 | |||
| 9ef84260b0 | |||
| cf2df7d7f9 | |||
| 16a883526b | |||
| 7b66b1a2eb | |||
| a4adce5c79 | |||
| 9e4174df53 | |||
| b5975713a3 | |||
| 0cd04266b7 | |||
| 5cbd162454 | |||
| bea1600358 | |||
| 6a2e201cf5 | |||
| 960551933e | |||
| 8b38b6416d | |||
| fac4c39f48 | |||
| 4c930efbf0 | |||
| 5a2a47cb87 | |||
| 4912a03250 | |||
| 3b13580613 | |||
| 95905113ac | |||
| c6b34bb252 | |||
| e5387c2323 | |||
| d3b4447669 | |||
| d5c5eac9ec | |||
| 49b61495d0 | |||
| e8298e9d30 | |||
| b29681e1f7 | |||
| 1e0b9a2f0c | |||
| 442b8e95b1 | |||
| 27090d9e28 | |||
| c37b4fa076 | |||
| 83161bbe98 | |||
| 4b166120e6 | |||
| 04494ac752 | |||
| 979809ddb1 | |||
| 5d797c3339 | |||
| 2ff74f6b80 | |||
| 06b1195f9a | |||
| c337b70a42 | |||
| 5047354892 | |||
| ce4e405fc6 | |||
| 30c8d66cd1 | |||
| fb5c8aad29 | |||
| 08d221d00f | |||
| af918178f6 | |||
| ed19896e3c | |||
| 47ad135e4b | |||
| 0eff7825c8 | |||
| 5c8baee390 | |||
| 3f71facb49 | |||
| eba42cc8f2 | |||
| 53092cee51 | |||
| 4bf621f128 | |||
| 33505dbb8e | |||
| c81e1f144f | |||
| ee788b967b | |||
| 38ac8733f6 | |||
| 737a20ee06 | |||
| 19f48b8001 | |||
| 3471d6b4f5 | |||
| 2dc7ba72b3 | |||
| e12279dab0 | |||
| 2e0c79cb64 | |||
| aa697edb8c | |||
| c72e3c58dd | |||
| 1de30c8bd5 | |||
| 3a8eea6fb7 | |||
| b7fd0bdba7 | |||
| 58457cac50 | |||
| 0fbacee7dc | |||
| a498f28d14 | |||
| 5b9c6a2d0e | |||
| 4c7f50ab98 | |||
| ef03d33bbf | |||
| 22c9fc56c0 | |||
| c952fd734f | |||
| 310e99af23 | |||
| e78446904a | |||
| 760e9d8279 | |||
| 61a60c5b9f | |||
| 3054e1b88d | |||
| 6f4fabf147 | |||
| b0c791a055 | |||
| 748991249a | |||
| 1aea7122cc | |||
| 9a83b428f1 | |||
| 2cd38bc02b | |||
| e586142190 | |||
| a10d0dcf5d | |||
| 6fdff488a9 | |||
| 8af0d78127 | |||
| 177686a7fc | |||
| 09b6e47036 | |||
| 704187ba3e | |||
| 4ea8a06503 | |||
| 80fcc7d3e3 | |||
| a04c62da6f | |||
| fcb518a050 | |||
| a222626933 | |||
| a3ceade738 | |||
| 51d58223b4 | |||
| d37a603db2 | |||
| ea984f3ddf | |||
| a9d3e3dead | |||
| 5499e57205 | |||
| 6f8ee0247f | |||
| 05ee5cc3d1 | |||
| cb6b569330 | |||
| 53073ff109 | |||
| 26d362d7a6 | |||
| 91d99e1a63 | |||
| a20917c971 | |||
| af9bf9e5b3 | |||
| 46b473b8a0 | |||
| e2b4028223 | |||
| bac2a31782 | |||
| 3d20e6bf91 | |||
| 9337216092 | |||
| cd35d0ca55 | |||
| 6d591b98b8 | |||
| 486381ab9d | |||
| c619b4debb | |||
| 383a3085ec | |||
| 5a3bb3d817 | |||
| d1ba758887 | |||
| 6fef149997 | |||
| aad3b16ff2 | |||
| 819ba14523 | |||
| d3c25ca16a | |||
| 99a65f72ac | |||
| be9080d392 | |||
| f32d991413 | |||
| 94b68ebefa | |||
| 0450eaaceb | |||
| 408c5ce088 | |||
| d936629ead | |||
| 9bd1a66208 | |||
| 1a0c029ee8 | |||
| e7be228703 | |||
| 0ab4dc972f | |||
| 5f1ca8954f | |||
| 3ec1b033ce | |||
| 0caf27af9b | |||
| bd67e14fa4 | |||
| 494c3b542c | |||
| 8e0884eb64 | |||
| 73c4dc4ac8 | |||
| d77274058d | |||
| 0c8460419b | |||
| eabb589390 | |||
| 62f860ae93 | |||
| 605aae873c | |||
| 62e9ee5b05 | |||
| d686f5d143 | |||
| 3922f232ae | |||
| 6735b438d3 | |||
| fb1e30ab32 | |||
| 0ec06edb57 | |||
| 2a52673c56 | |||
| cc20d00d8a | |||
| 3d9201f7dc | |||
| 176732a6c0 | |||
| 39815b3af3 | |||
| bcce517089 | |||
| a4b50d0d97 | |||
| 2a124e7588 | |||
| a85556ab5b | |||
| cef93d6084 | |||
| 207e257778 | |||
| 12203daa22 | |||
| 27f8e9248d | |||
| 51384dc984 | |||
| bc76cbb5ad | |||
| 5a1ca83f6d | |||
| c9f585f808 | |||
| 9f559e1dbf | |||
| e458bca1a7 | |||
| 43d2226019 | |||
| 122bc34701 | |||
| e01358e268 | |||
| 847c84c3e6 | |||
| b11cac4328 | |||
| f617b06109 | |||
| 345ccf3369 | |||
| d111b8af62 | |||
| 8f964c5c49 | |||
| b6f3f4538f | |||
| f6dd30d5d8 | |||
| af8b79f849 | |||
| 0cfccc423b | |||
| f9a5d582d4 | |||
| 684e00d594 | |||
| 3cd2df0b50 | |||
| 02197f4ee6 | |||
| f9049a3fea | |||
| 462bddc271 | |||
| f79000cf39 | |||
| 1d95273f4d | |||
| 6c4579f434 | |||
| 4ef56ade21 | |||
| 7c1369d6e9 | |||
| 533d54b106 | |||
| cce0ca6560 | |||
| e87ce2593c | |||
| 431dc8b667 | |||
| 5caf614bf7 | |||
| ecf9703570 | |||
| e7641393a0 | |||
| 2201f6ff5a | |||
| 557e1ce293 | |||
| cbe9a6b9a5 | |||
| 9bbcb038d4 | |||
| 3602204420 | |||
| 6f485e5589 | |||
| 2f46a3dfaf | |||
| 267845bba3 | |||
| 6f33a8eebf | |||
| b0d2b09a2e | |||
| c699b6b16b | |||
| 1789bac28d | |||
| 60c05f615f | |||
| bd84523671 | |||
| eb21b9c770 | |||
| ff3ac11afb | |||
| 1ef8c3d02b | |||
| 2ebaf6279b | |||
| a5ee40e184 | |||
| b17a97eac7 | |||
| 63908bfaf6 | |||
| 3f9a419a19 | |||
| bae691e33e | |||
| 91539346ee | |||
| 4842ca81b3 | |||
| 9c77a1a4ab | |||
| 4af284be42 | |||
| 6aec68bb3c | |||
| d4e2b0834f | |||
| 24c2702f96 | |||
| 4691fc9bad | |||
| 8c6c60b6f1 | |||
| bc482407fe | |||
| ff05593db8 | |||
| 3d304d9374 | |||
| 1734f0c2f1 | |||
| 1b25e5df85 | |||
| ea8eb32b0b | |||
| 614a1f95de | |||
| d36bc28914 | |||
| deec48d7c1 | |||
| b318ec8d39 | |||
| b4b0e2befc | |||
| 51d3fe13da | |||
| 58220216d3 | |||
| cac75cca42 | |||
| 47f247907f | |||
| 81e04b7322 | |||
| 56a964b700 | |||
| 458341d79f | |||
| d1d212b075 | |||
| 59c9996489 | |||
| bf8221a2f1 | |||
| 787a11a040 | |||
| 05d114be2f | |||
| 3c04a7dbac | |||
| 1673e1148d | |||
| de416b035d | |||
| 08aaf2989d | |||
| a50964060c | |||
| 54b6108719 | |||
| 585e5c254a | |||
| 477808c9bb | |||
| 6c58a2b688 | |||
| c9854bf30f | |||
| cfed4bbd41 | |||
| 2dd6485b0e | |||
| bf1dd5b860 | |||
| 765c373f7d | |||
| 32d752e82b | |||
| 4623e438fa | |||
| 8a44ff396f | |||
| 086d7ecae4 | |||
| d6adebb711 | |||
| 8325fe7b3c | |||
| 7cf83f878b | |||
| 597ba26424 | |||
| 7bccea47f5 | |||
| 5770116779 | |||
| 0679144f69 | |||
| c9fd288b52 | |||
| 9ae449fcfd | |||
| 249f67f796 | |||
| e91c0bb554 | |||
| 5e306d9598 | |||
| 42ebc81cbb | |||
| f624c37db5 | |||
| 22b6f42936 | |||
| 760c66cac8 | |||
| 1d91e9da03 | |||
| 7eac409ec6 | |||
| 128558420c | |||
| ca3e664690 | |||
| 7eb37462d7 | |||
| 31e02c2d39 | |||
| 003a68b9b8 | |||
| f418708389 | |||
| d23a564035 | |||
| 7fe586244c | |||
| f1a597cdc6 | |||
| 9b68c8f58c | |||
| be5b57ea71 | |||
| 425c82f26d | |||
| 942421c1fb | |||
| b1184f6928 | |||
| ffeb6d1b98 | |||
| b2718b56b7 | |||
| 455f834957 | |||
| 8a14c80ff8 | |||
| e268e733c7 | |||
| 8933a8dfb3 | |||
| 9796cc525c | |||
| cdbf9a9190 | |||
| c26792292d | |||
| 4698e0ee03 | |||
| 68afcb2f4b | |||
| e8f61e46e3 | |||
| 317bb2b7c8 | |||
| d1b3c6b468 | |||
| b35eccc984 | |||
| a780c92047 | |||
| 5fc65698ba | |||
| c923b5ec4c | |||
| 609b2b9a7b | |||
| a257278004 | |||
| 273daed634 | |||
| a6862d8c58 | |||
| 323e5b4ea7 | |||
| 89217a5308 | |||
| a45e995d2f | |||
| 8700b4c8ca | |||
| 1f7f5fb488 | |||
| afde8ee864 | |||
| 3884c26b15 | |||
| 24dce7eae9 | |||
| 1db4e9b771 | |||
| b2ed7eae00 | |||
| 3169fd67e8 | |||
| 773ceb1396 | |||
| 8c62ee1720 | |||
| 5fa1f52922 | |||
| d2180c010c | |||
| b73df7b2ce | |||
| 971f737846 | |||
| a393353907 | |||
| 751f609554 | |||
| e8cd5c6552 | |||
| 86e387b270 | |||
| 32f15aa621 | |||
| bfc889a9e5 | |||
| bd907625a8 | |||
| 60004926d7 | |||
| ac751dfd1a | |||
| 6828eee17f | |||
| 19c97f397b | |||
| 0167a8bdd8 | |||
| 93e5044603 | |||
| 024d930677 | |||
| 98873446a8 | |||
| 5318b7a406 | |||
| 4a6c3cbcd2 | |||
| ac0a39c202 | |||
| 88d39345a5 | |||
| 7aa9cb07b2 | |||
| ef30c8d28d | |||
| 2727f690b4 | |||
| 5945c24301 | |||
| 7b6aff1f95 | |||
| cb0fe3aadd | |||
| 4f9d69f9c2 | |||
| c18aeabe06 | |||
| 550742323a | |||
| c71f789a08 | |||
| a9b4b195bf | |||
| 52e8177f42 | |||
| b0743efc48 | |||
| 6dfd652dac | |||
| 3f93cb2e6d | |||
| 8f7b9b7f19 | |||
| abff89ab6b | |||
| d4f03f743a | |||
| c3714f6651 | |||
| 9b4d0ddf2f | |||
| 2c9ac2f549 | |||
| c1292de2a0 | |||
| 21d5e4cd29 | |||
| a9495a3e15 | |||
| bff5b3d765 | |||
| a4ff37eecc | |||
| 460209f486 | |||
| 96c68c86a4 | |||
| 8b152fdff8 | |||
| 25c9a52873 | |||
| 44302d903c | |||
| c7b8668609 | |||
| 7d60df6266 | |||
| b7f898a5e5 | |||
| 04c4dbe4b8 | |||
| 8d04c494df | |||
| a6aadf76f3 | |||
| a685ef97bf | |||
| d46c29689f | |||
| 65ce07395b | |||
| cc1542fe95 | |||
| b70d57d878 | |||
| 5aa857362b | |||
| c92fc34051 | |||
| b01e66f12a | |||
| a88d20784a | |||
| 63486ed6cf | |||
| 3ceec773f2 | |||
| 817fa56ec4 | |||
| 088fb21a90 | |||
| 79c755a469 | |||
| a091d3f011 | |||
| c7c01a5d7c | |||
| cdc0f48973 | |||
| e884f6b962 | |||
| 485a9bea71 | |||
| f3c3b667ca | |||
| 3b0c4f31b6 | |||
| 5e54600766 | |||
| c3e54f69b7 | |||
| c4022d1c9b | |||
| 6e13a78a24 | |||
| c7cacd9727 | |||
| a77110f704 | |||
| 83a6069de5 | |||
| e9a1890e54 | |||
| bf928aa06e | |||
| b2dc50590c | |||
| 229e53ac32 | |||
| 51e8a47615 | |||
| e80b58a412 | |||
| 48ced8b079 | |||
| c07e2aea1e | |||
| f3194aa30e | |||
| cb3e4cd951 | |||
| f5d8d029ea | |||
| 7c946c4126 | |||
| ded4ea0d69 | |||
| c180c549fe | |||
| 1f30f1168f | |||
| 9446f15922 | |||
| e13b2c9cd9 | |||
| e9e14e0292 | |||
| added19656 | |||
| 4fa3c4d479 | |||
| 690738de9a | |||
| cb31d27e68 | |||
| e6658df123 | |||
| 0b7154a14c | |||
| 02c1838de5 | |||
| fc455fceb8 | |||
| 8d40cdd234 | |||
| 40145c669a | |||
| 34d2fc233f | |||
| 670ec0381a | |||
| 2128f255fe | |||
| b717bd9a9a | |||
| 8aab9311f5 | |||
| ff3e16ea67 | |||
| 1de039c315 | |||
| d05e1786d7 | |||
| e34b5a7372 | |||
| a1b3d1b508 | |||
| 1ebccdf420 | |||
| e5f674509c | |||
| 197a4ae5c0 | |||
| 64d2dcf39c | |||
| caf54c736b | |||
| 423c2cce28 | |||
| a1af51efcb | |||
| ffc1bf9c58 | |||
| a54bfdb342 | |||
| 03861d2dbd | |||
| 8c080da6bf | |||
| a8c98056b6 | |||
| 78e663f955 | |||
| 70546a5039 | |||
| 30f78b33cb | |||
| 712e8c1f16 | |||
| 933dfdfb53 | |||
| 9ce86b029f | |||
| 13580cc69d | |||
| a7dee0002d | |||
| c84b2df3fa | |||
| d9471a8684 | |||
| ef630c2272 | |||
| e188c71652 | |||
| 910260c2c8 | |||
| 22752abc38 | |||
| 92bc3a5d64 | |||
| 1383752cc1 | |||
| 66af16fb81 | |||
| fc019d7b46 | |||
| ac4f0fcb12 | |||
| a6c2bc663d | |||
| e62ffa02e9 | |||
| a003600839 | |||
| ea73feb06d | |||
| 3bdf69e1b7 | |||
| 590fe78bd1 | |||
| 76187ba0e7 | |||
| 5eba375f4d | |||
| 8fa6a8251f | |||
| 75fa046f30 | |||
| 08a8cd1430 | |||
| 3afbb78a39 | |||
| fca6ccd816 | |||
| 8d351822c1 | |||
| 7d274a31fe | |||
| e36dde0d25 | |||
| 51cc6e5ae5 | |||
| 28d911c617 | |||
| b1e9fe58fb | |||
| 16ba014ade | |||
| e9d5a20c1a | |||
| 6e0036f9c4 | |||
| d7e189aa1c | |||
| ea2b444fb2 | |||
| cd1efaf26e | |||
| e47f0e5d43 | |||
| 5284d37984 | |||
| 1bf6fa0e4d | |||
| fc294c82f1 | |||
| 7b1dc49dda | |||
| d15ddeea24 | |||
| eaac213859 | |||
| 02c1460351 | |||
| 2fff35b7d9 | |||
| c5b9072bde | |||
| 8a570e912a | |||
| 1dcc40afb8 | |||
| c2092f8035 | |||
| 886c4b411e | |||
| 8888fd40cd | |||
| 31cd01bccf | |||
| c59b221004 | |||
| cb3cc3e74c | |||
| 9e90015fcc | |||
| 95e0517056 | |||
| 2b2f47915f | |||
| 9acd178ce1 | |||
| f381f80184 | |||
| c83be61343 | |||
| f6e49d31ec | |||
| cc0429a362 | |||
| b35901d94c | |||
| c0df1a23f4 | |||
| 495619af2c | |||
| 72dfadf106 | |||
| 5825909e45 | |||
| d3f6d87ee0 | |||
| c4f4c5ddad | |||
| 2921d7ca27 | |||
| 2021cbc988 | |||
| e9e29861b2 | |||
| 8e6da36059 | |||
| 5e1469e12e | |||
| bd7465f8b1 | |||
| 570397a616 | |||
| b3b5f1daef | |||
| 25ec3ae47c | |||
| 5ba5e3da58 | |||
| 9296c14ca0 | |||
| 310b5d3422 | |||
| 1c5967112e | |||
| 49a3d8ee71 | |||
| cf8b61e8d9 | |||
| 967ae5723e | |||
| 03421acf2f | |||
| d43896cc5a | |||
| b283124a2f | |||
| 8c39be01f8 | |||
| fb2bd4ccd8 | |||
| 5b826ffc45 | |||
| 0b2ab365d3 | |||
| 93fc54992c | |||
| 60b7326deb | |||
| d6e6139244 | |||
| 0892911ddc | |||
| 30267ac50c | |||
| ffef0ef31d | |||
| fc047087ce | |||
| 81d4966535 | |||
| 004d63fda1 | |||
| 23e2dbb354 | |||
| 28e9899b97 | |||
| 7441d41550 |
@@ -0,0 +1 @@
|
||||
commands/code/apply-issue-main.md
|
||||
@@ -46,4 +46,4 @@ Focus on discrepancies and gaps:
|
||||
Report differences in wire types, adapter logic, parser handling, or dialect-specific quirks.
|
||||
Prioritize new capabilities that improve user experience (reasoning visibility, better tool use, etc.).
|
||||
|
||||
When making changes, add comments with date: `// [OpenRouter, 2025-MM-DD]: explanation`
|
||||
When making changes, add comments with date: `// [OpenRouter, 2026-MM-DD]: explanation`
|
||||
|
||||
@@ -0,0 +1,56 @@
|
||||
---
|
||||
description: Sync xAI Responses API implementation with latest upstream documentation
|
||||
argument-hint: specific feature to check
|
||||
---
|
||||
|
||||
Review the xAI Responses API implementation:
|
||||
- xAI wire types: `src/modules/aix/server/dispatch/wiretypes/xai.wiretypes.ts` (xAI-specific request schema, tools)
|
||||
- Request adapter: `src/modules/aix/server/dispatch/chatGenerate/adapters/xai.responsesCreate.ts` (AIX → xAI Responses API)
|
||||
- Response parser: `src/modules/aix/server/dispatch/chatGenerate/parsers/openai.responses.parser.ts` (shared with OpenAI Responses)
|
||||
- Dispatch routing: `src/modules/aix/server/dispatch/chatGenerate/chatGenerate.dispatch.ts` (dialect='xai' routing)
|
||||
- OpenAI shared types: `src/modules/aix/server/dispatch/wiretypes/openai.wiretypes.ts` (InputItem/OutputItem schemas reused by xAI)
|
||||
|
||||
IMPORTANT context:
|
||||
- We use ONLY the xAI Responses API (`POST /v1/responses`). We do NOT use the Chat Completions API (`/v1/chat/completions`) for xAI anymore.
|
||||
- xAI's Responses API is similar to OpenAI's but has key differences - the skill should find what changed since our last sync.
|
||||
- Response streaming/parsing reuses the OpenAI Responses parser since the format is compatible.
|
||||
- We do NOT implement: Files API, Collections Search, Remote MCP tools, Voice Agent API, Image/Video generation, Batch API, or Deferred Completions.
|
||||
|
||||
Then take a look at the newest API information available. Try these sources, and be creative if some are blocked:
|
||||
|
||||
**Primary Sources (guide pages work well with WebFetch despite being JS-rendered):**
|
||||
- Responses API Guide: https://docs.x.ai/docs/guides/chat
|
||||
- Stateful Responses: https://docs.x.ai/docs/guides/responses-api
|
||||
- Tools Overview: https://docs.x.ai/docs/guides/tools/overview
|
||||
- Search Tools (web_search, x_search): https://docs.x.ai/docs/guides/tools/search-tools
|
||||
- Code Execution Tool: https://docs.x.ai/docs/guides/tools/code-execution-tool
|
||||
- Function Calling: https://docs.x.ai/docs/guides/function-calling
|
||||
- Streaming: https://docs.x.ai/docs/guides/streaming-response
|
||||
- Reasoning: https://docs.x.ai/docs/guides/reasoning
|
||||
- Structured Outputs: https://docs.x.ai/docs/guides/structured-outputs
|
||||
- Models & Pricing: https://docs.x.ai/developers/models
|
||||
- Release Notes: https://docs.x.ai/developers/release-notes
|
||||
- API Reference: https://docs.x.ai/developers/api-reference#create-new-response
|
||||
|
||||
**Alternative Sources if primary blocked:**
|
||||
- xAI Python SDK: https://github.com/xai-org/xai-sdk-python
|
||||
- Web Search for "xai grok api changelog 2026" or "xai responses api new features"
|
||||
|
||||
**If all blocked:** Explain what you attempted and ask user to provide documentation manually.
|
||||
|
||||
$ARGUMENTS
|
||||
Check carefully for discrepancies between our implementation and the current API docs:
|
||||
|
||||
1. **Request fields**: Compare `XAIWire_API_Responses.Request_schema` against current docs - any new, changed, or deprecated parameters?
|
||||
2. **Tool definitions**: Compare `XAIWire_Responses_Tools` - any new parameters on web_search/x_search/code_interpreter? Any new hosted tool types?
|
||||
3. **Input/Output item types**: Any xAI-specific output items not handled by the shared OpenAI parser (e.g., x_search_call, web_search_call, code_interpreter_call)?
|
||||
4. **Streaming events**: Any xAI-specific SSE event types beyond what the OpenAI Responses parser handles?
|
||||
5. **Response shape**: Usage reporting differences, new fields in the response object?
|
||||
6. **Adapter logic**: Message role mapping, content type handling, system message approach - still correct?
|
||||
7. **Include options**: Any new values for the `include` array?
|
||||
8. **Reasoning config**: Which models support it and with what values?
|
||||
|
||||
Prioritize breaking changes and new capabilities that would improve the user experience.
|
||||
When making changes, add comments with date: `// [xAI, 2026-MM-DD]: explanation`
|
||||
|
||||
**Self-update this skill**: After completing the sync, if your research reveals that assumptions in THIS skill file (`.claude/commands/aix/sync-xai-api.md`) are wrong or outdated - e.g., new APIs we now implement, new tool types added, URLs moved, file paths changed - update this skill file to stay accurate for next time.
|
||||
@@ -0,0 +1,63 @@
|
||||
---
|
||||
description: Search git history for commits that introduce or remove an exact string, within a commit range
|
||||
argument-hint: "[search-string] [ancestor-commit]"
|
||||
allowed-tools: Bash(git *)
|
||||
---
|
||||
|
||||
Search git history using `git log -S` (pickaxe) to find commits that add or remove an exact string.
|
||||
This repo has 7000+ commits, so pickaxe searches can take 30-60+ seconds - this is expected.
|
||||
|
||||
## Parameters
|
||||
|
||||
- `$0` - The exact string to search for in file contents (not commit messages). Examples: `getLabsSUDO`, `EXPERIMENT_ON_SUDO`, `myFunctionName`
|
||||
- `$1` - A commit hash or unique commit message substring to identify the start of the range. Examples: `5af80b96a8`, `"Sudo Mode": 10-click`
|
||||
|
||||
## Example
|
||||
|
||||
```
|
||||
/code:grep-history EXPERIMENT_ON_SUDO "Sudo Mode": 10-click
|
||||
```
|
||||
|
||||
This searches all commits between the `"Sudo Mode": 10-click` commit and HEAD for any that add or remove the string `EXPERIMENT_ON_SUDO` in file contents.
|
||||
|
||||
## Procedure
|
||||
|
||||
### Step 1: Resolve the ancestor commit
|
||||
|
||||
If `$1` looks like a commit hash (hex string), use it directly.
|
||||
Otherwise, search for it by message, restricting to ancestors of HEAD:
|
||||
|
||||
```bash
|
||||
git log --oneline --grep='$1' HEAD | head -5
|
||||
```
|
||||
|
||||
This only walks commits reachable from HEAD, so every result is a guaranteed ancestor - no verification loop needed.
|
||||
|
||||
If multiple results, pick the oldest (last listed) since it represents the earliest matching commit.
|
||||
If none, report the error and stop.
|
||||
|
||||
### Step 2: Run pickaxe search
|
||||
|
||||
```bash
|
||||
git log -S "$0" --oneline <resolved_ancestor>..HEAD
|
||||
```
|
||||
|
||||
This finds commits where the count of `$0` in the codebase changes (i.e., it was added or removed).
|
||||
This can be slow on 7000+ commits - wait for it.
|
||||
|
||||
### Step 3: Check endpoints
|
||||
|
||||
Also check whether the string exists at HEAD and at the ancestor commit:
|
||||
|
||||
```bash
|
||||
git grep -l "$0" HEAD 2>/dev/null || echo "(not found at HEAD)"
|
||||
git grep -l "$0" <resolved_ancestor> 2>/dev/null || echo "(not found at ancestor)"
|
||||
```
|
||||
|
||||
### Step 4: Report
|
||||
|
||||
Present results concisely:
|
||||
- Number of commits found (or "none")
|
||||
- List of matching commits (hash + subject line)
|
||||
- Whether the string exists at HEAD and/or at the ancestor
|
||||
- If found, suggest next steps (e.g., `git show <hash>` to inspect specific commits)
|
||||
@@ -0,0 +1,34 @@
|
||||
---
|
||||
description: Review in-flight changes for coherence, completeness, and quality
|
||||
---
|
||||
|
||||
Review the current in-flight changes in the big-agi-private repository (dev branch, continuously rebased ~1800 commits on top of main).
|
||||
|
||||
**Step 1: Scope and read**
|
||||
|
||||
`git diff --stat` + `git status` for breadth. Then full `git diff` (if empty: `git diff --cached`, then `git diff HEAD~1`).
|
||||
For every file in the diff, read surrounding context in the actual source file - the diff alone hides bugs in adjacent untouched code.
|
||||
|
||||
**Step 2: Reverse-engineer the intent**
|
||||
|
||||
From the diff, determine the **what**, **how**, and **why**. Present this concisely so the author can confirm or correct,
|
||||
but don't stop here - continue to the full review in the same response.
|
||||
|
||||
**Step 3: Validate**
|
||||
|
||||
Run `tsc --noEmit --pretty` and `npm run lint` (in parallel). Report any errors with the review.
|
||||
If the diff removes/renames identifiers, grep the codebase for stale references to the OLD names. This catches broken guards, stale imports, and incomplete migrations.
|
||||
|
||||
**Step 4: Deep review**
|
||||
|
||||
Evaluate every file in the diff.
|
||||
Leave no stone unturned - correctness, coherence, completeness, excess, generalization, maintenance burden,
|
||||
codebase consistency, etc.
|
||||
|
||||
**Step 5: Prioritized next steps**
|
||||
|
||||
Think about what happens when the next developer touches this code.
|
||||
Rank findings by severity (bug > correctness > cleanup > cosmetic). Be specific about what to change and where.
|
||||
|
||||
Remember the design values for this codebase: orthogonal features, features that generalize well, modularized and reusable code,
|
||||
type-discriminated data, optimized code, zero maintenance burden. Minimize future pain, etc.
|
||||
@@ -0,0 +1,57 @@
|
||||
---
|
||||
description: Show a hierarchical progress tree of the current conversation thread
|
||||
---
|
||||
|
||||
Analyze this conversation thread and produce a **hierarchical progress tree** - a vertical breadcrumb of the chat and actions from the very start to now.
|
||||
|
||||
**Format:**
|
||||
|
||||
A tree, where every rabbithole that was taken adds a level.
|
||||
|
||||
```
|
||||
[ ] Brief initial phase/ask/goal description
|
||||
[x] Specific thing done or decided - "user quote if relevant"
|
||||
[x] Another step
|
||||
[ ] Sub-phase/rabbithole/etc
|
||||
[x] Done step (if important)
|
||||
[ ] Sub-sub-phase
|
||||
[ ] Current step doing <-- HERE
|
||||
[ ] Next step since this sub-sub-phase was broken out
|
||||
|
||||
[ ] Remaining step
|
||||
[ ] ...
|
||||
|
||||
[ ] Missing, back to the main goal
|
||||
[ ] ...
|
||||
|
||||
### What do we rewind the rabbithole to (once the current level is complete)?
|
||||
...
|
||||
|
||||
### What's up (towards user value) and down (towards deeper code levels) the rabbithole?
|
||||
...
|
||||
|
||||
### What's a good hyphenated title for this chat?
|
||||
...
|
||||
|
||||
```
|
||||
|
||||
**Rules:**
|
||||
- `[x]` done, `[ ]` not done. Parent is done only when ALL children on the next level are `[x]`
|
||||
- Each node: a few words, specific. Quote the user briefly when it captures the intent
|
||||
- Group by logical phases or rabbitholes (when descending to a deeper level of implementation or going off for a temporary tangent or sub-quest), not by messages
|
||||
- Earlier levels that are fully completed don't need to be expanded in subtasks
|
||||
- Root nodes/completed nodes need to show what was "wanted" from them, not being checked because they are shown as earlier phases (i.e. upper hierarchy contains more)
|
||||
- Some earlier sub-phases or even levels of rabbitholes can be marked as done as indented [x] below each other (do not add non-major bullets on already completed nodes)
|
||||
- Insert newlines in between large groups of items
|
||||
- Decisions: state what was chosen, not the alternatives
|
||||
- If a former phase produced no code change or decision, omit
|
||||
- Very important to insert incomplete `[ ]` items for things that were mentioned and are likely useful but were raised at higher levels of the rabbithole, so they must come after, when unwinding the stack
|
||||
- Keep it short, tight (min 0, max item count below *ONE QUARTER of the user messages*). This is a navigation aid, not a transcript
|
||||
|
||||
It's important for this to represent a high-level sequence of important actions and turns and pivots and rabbitholes, all focused on trying to solve something.
|
||||
|
||||
First think through it looking at all the chat from the back to the front, then front to back, user requests, and understand the main storybeats. This is useful especially to remove already done leaves that don't add much if shown.
|
||||
So think about the full list, so you have it all in front of you when you do the last pass to show it to me.
|
||||
It's important to see the progress of what we were doing (e.g. see that we set out to do something at the beginning, but a few items of those are still incomplete, also because we took 2 detours to fix more things in the meantime...).
|
||||
|
||||
At the end answer the questions in the Format, with brief bullet points.
|
||||
@@ -0,0 +1,63 @@
|
||||
---
|
||||
description: Sync LLM parameter options between full model dialog and chat side panel
|
||||
---
|
||||
|
||||
Audit and sync LLM parameter configurations between the two UI editors. Goal: identical `value` fields in option arrays + equivalent onChange logic. Labels/descriptions can differ for UI space.
|
||||
|
||||
**Files to Compare:**
|
||||
1. **Full Model Dialog**: `src/modules/llms/models-modal/LLMParametersEditor.tsx` (main branch)
|
||||
2. **Chat Side Panel**: `src/apps/chat/components/layout-panel/ChatPanelModelParameters.tsx` (main derived branches only)
|
||||
|
||||
**Reference Documentation:**
|
||||
- Parameter system: `kb/systems/LLM-parameters-system.md`
|
||||
- Parameter registry: `src/common/stores/llms/llms.parameters.ts`
|
||||
|
||||
**Task: Perform a comprehensive audit**
|
||||
|
||||
1. **Read both files** and extract all option arrays (e.g., `_reasoningEffortOptions`, `_antEffortOptions`, `_geminiThinkingLevelOptions`, etc.)
|
||||
|
||||
2. **Check for missing parameters:**
|
||||
- Parameters handled in `LLMParametersEditor.tsx` but NOT in `ChatPanelModelParameters.tsx`
|
||||
- Parameters in `ChatPanelModelParameters.tsx`'s `_interestingParameters` array but missing UI controls
|
||||
- Note: The side panel intentionally shows only "interesting" parameters - focus on those listed in `_interestingParameters`
|
||||
|
||||
3. **Check for value mismatches** between corresponding option arrays:
|
||||
- Different number of options (e.g., 3 vs 4 options)
|
||||
- Same label but different `value` (this causes the bug in issue #926)
|
||||
- Different labels for the same `value`
|
||||
- Missing `_UNSPECIFIED`/Default option in one but not the other
|
||||
|
||||
4. **Check onChange handler consistency:**
|
||||
- Both should remove parameter on `_UNSPECIFIED` selection
|
||||
- Both should set explicit values the same way
|
||||
- Watch for conditions like `value === 'high'` that may differ
|
||||
|
||||
**Output Format:**
|
||||
|
||||
```
|
||||
## Parameter Sync Audit Report
|
||||
|
||||
### Missing Parameters
|
||||
- [ ] `llmVndXyz` - In full dialog, missing from side panel
|
||||
|
||||
### Value Mismatches
|
||||
- [ ] `_xyzOptions`:
|
||||
- Full dialog: [values...]
|
||||
- Side panel: [values...]
|
||||
- Issue: [description]
|
||||
|
||||
### Handler Inconsistencies
|
||||
- [ ] `llmVndXyz` onChange differs: [explanation]
|
||||
|
||||
### Recommended Fixes
|
||||
1. [Specific fix with code snippet if needed]
|
||||
```
|
||||
|
||||
**Fix Direction:** Full dialog is source of truth. Update side panel to match its values when mismatched.
|
||||
|
||||
**Notes:**
|
||||
- Side panel uses shorter descriptions (space-constrained) - that's fine
|
||||
- Variable names may differ (e.g., `_anthropicEffortOptions` vs `_antEffortOptions`) - that's fine, but same is better
|
||||
- `value` fields must be identical sets
|
||||
- `_UNSPECIFIED` must mean the same thing in both
|
||||
- onChange: remove on `_UNSPECIFIED`, set explicit value otherwise
|
||||
@@ -4,17 +4,46 @@ description: Update Anthropic model definitions with latest pricing and capabili
|
||||
|
||||
Update `src/modules/llms/server/anthropic/anthropic.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
Reference files (for context only, do not modify):
|
||||
- `src/modules/llms/server/llm.server.types.ts`
|
||||
- `src/modules/llms/server/models.mappings.ts`
|
||||
- `src/common/stores/llms/llms.parameters.ts`
|
||||
|
||||
**Primary Sources:**
|
||||
- Models: https://docs.claude.com/en/docs/about-claude/models/overview
|
||||
- Pricing: https://claude.com/pricing#api
|
||||
- Deprecations: https://docs.claude.com/en/docs/about-claude/model-deprecations
|
||||
**Workflow: Start with recent changes, then verify the full model list.**
|
||||
|
||||
**Fallbacks if blocked:** Check Anthropic TypeScript SDK at https://github.com/anthropics/anthropic-sdk-typescript, search "anthropic models latest pricing", "anthropic latest models", or search GitHub for latest model prices and context windows
|
||||
**Primary Sources (append `.md` to any path for clean markdown):**
|
||||
1. Recent changes: https://platform.claude.com/docs/en/release-notes/overview.md
|
||||
2. Models & IDs: https://platform.claude.com/docs/en/about-claude/models/overview.md
|
||||
3. Pricing (base, cache, batch, long context): https://platform.claude.com/docs/en/about-claude/pricing.md
|
||||
4. Deprecations & retirement dates: https://platform.claude.com/docs/en/about-claude/model-deprecations.md
|
||||
|
||||
**Discovering feature docs:** The release notes and models overview markdown
|
||||
contain inline links to feature-specific pages (thinking modes, effort,
|
||||
context windows, what's-new pages, etc.). When a new capability is
|
||||
referenced, follow those links - append `.md` to get markdown. Examples of
|
||||
pages you might discover this way:
|
||||
- `about-claude/models/whats-new-claude-*` - per-generation changes
|
||||
- `build-with-claude/extended-thinking` - thinking budget configuration
|
||||
- `build-with-claude/effort` - effort parameter levels
|
||||
- `build-with-claude/adaptive-thinking` - adaptive thinking mode
|
||||
|
||||
**Fallback web pages** (crawl if `.md` paths break or structure changes):
|
||||
- https://platform.claude.com/docs/en/about-claude/models/overview
|
||||
- https://platform.claude.com/docs/en/about-claude/pricing
|
||||
- https://platform.claude.com/docs/en/release-notes/overview
|
||||
- https://claude.com/pricing
|
||||
|
||||
**Fallbacks if blocked:** Check the Anthropic TypeScript SDK at
|
||||
https://github.com/anthropics/anthropic-sdk-typescript, or web-search
|
||||
for "anthropic models latest pricing" / "anthropic latest models".
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- For new models: check which `parameterSpecs` are needed (thinking mode,
|
||||
effort levels, 1M context, skills, web tools) by reading the linked
|
||||
feature docs and comparing with existing model entries
|
||||
- When thinking/effort semantics change between generations
|
||||
(e.g. adaptive vs manual thinking), document in comments
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -0,0 +1,91 @@
|
||||
---
|
||||
description: Update/validate dynamic vendor model parsers (OpenRouter, TogetherAI, Alibaba, Azure, Novita, ChutesAI, FireworksAI, TLUS, LM Studio, LocalAI, FastAPI)
|
||||
---
|
||||
|
||||
Validate that the dynamic (API-fetched) vendor model parsers are up to date and not silently broken.
|
||||
|
||||
These vendors do NOT have hardcoded model lists - they fetch models from APIs at runtime. But their parsers, filters, heuristic detection, and capability mapping can break if upstream APIs change. This skill covers all dynamic vendors NOT covered by the other `llms:update-models-{vendor}` skills.
|
||||
|
||||
## Vendors to Validate
|
||||
|
||||
### High Risk
|
||||
|
||||
**OpenRouter** - `src/modules/llms/server/openai/models/openrouter.models.ts`
|
||||
- Most complex parser. Vendor-specific parameter inheritance (Anthropic thinking variants, Gemini thinking/image, OpenAI reasoning effort, xAI/DeepSeek reasoning).
|
||||
- Hardcoded family ordering list (lines ~24-37) - check if new leading vendors are missing.
|
||||
- Hardcoded old/deprecated model hiding list (lines ~39-49) - check if stale.
|
||||
- Cache pricing detection (Anthropic-style vs OpenAI-style) - verify format still valid.
|
||||
- Variant injection for Anthropic thinking/non-thinking - verify still correct.
|
||||
- Reference: https://openrouter.ai/docs/models
|
||||
|
||||
### Medium Risk
|
||||
|
||||
**Novita** - `src/modules/llms/server/openai/models/novita.models.ts`
|
||||
- Features array mapping (`function-calling`, `reasoning`, `structured-outputs`) and input modalities parsing.
|
||||
- Pricing unit conversion (hundredths of cent per million → dollars per 1K).
|
||||
- Hostname heuristic: `novita.ai`.
|
||||
|
||||
**ChutesAI** - `src/modules/llms/server/openai/models/chutesai.models.ts`
|
||||
- Custom `max_model_len` field for context window.
|
||||
- Assumes all models support Vision + Functions (aggressive).
|
||||
- Hostname heuristic: `.chutes.ai`.
|
||||
|
||||
**FireworksAI** - `src/modules/llms/server/openai/models/fireworksai.models.ts`
|
||||
- Relies on provider capability flags: `supports_chat`, `supports_image_input`, `supports_tools`.
|
||||
- Hostname heuristic: `fireworks.ai/`.
|
||||
|
||||
**TogetherAI** - `src/modules/llms/server/openai/models/together.models.ts`
|
||||
- Type allow-list (`type: 'chat'`), vision detection by string match.
|
||||
- Custom wire schema with pricing conversion.
|
||||
|
||||
**TLUS** - `src/modules/llms/server/openai/models/tlusapi.models.ts`
|
||||
- Detected by response structure (`total_models`, `free_models`, `pro_models` fields).
|
||||
- Capability enum mapping (`text`, `vision`, `audio`, `tool-calling`, `reasoning`, `websearch`).
|
||||
- Tier-based pricing (`free` vs paid).
|
||||
|
||||
**Alibaba** - `src/modules/llms/server/openai/models/alibaba.models.ts`
|
||||
- Model list was cleared (dynamic-only). Exclusion patterns for non-chat models.
|
||||
- Assumes 128K context and Vision+Functions for all models (overly permissive).
|
||||
- Check if hardcoded data should be restored now that naming has stabilized.
|
||||
|
||||
### Low Risk (local/generic - validate only if issues reported)
|
||||
|
||||
**Azure** - `src/modules/llms/server/openai/models/azure.models.ts`
|
||||
- Custom deployments API, not `/v1/models`. User-specific. Deployment name fallback logic.
|
||||
|
||||
**LM Studio** - `src/modules/llms/server/openai/models/lmstudio.models.ts`
|
||||
- Local service, native API (`/api/v1/models`). GGUF metadata parsing, capability flags.
|
||||
|
||||
**LocalAI** - `src/modules/llms/server/openai/models/localai.models.ts`
|
||||
- Local service. String-based hide list, vision/reasoning detection by name pattern.
|
||||
|
||||
**FastAPI** - `src/modules/llms/server/openai/models/fastapi.models.ts`
|
||||
- Generic passthrough. Detected by `owned_by === 'fastchat'`. Minimal parsing.
|
||||
|
||||
## Validation Checklist
|
||||
|
||||
For each vendor (prioritize High > Medium > Low):
|
||||
|
||||
1. **Read the parser file** and check for:
|
||||
- Deny/allow lists that may be stale (new model families missing)
|
||||
- Capability assumptions that may be wrong (e.g. "all models support vision")
|
||||
- Field names that may have changed upstream
|
||||
- Pricing conversion math that may use wrong units
|
||||
|
||||
2. **Check upstream docs** (where available) for:
|
||||
- API response schema changes
|
||||
- New model types or capability fields
|
||||
- Deprecated fields
|
||||
|
||||
3. **Cross-reference with OpenRouter** (aggregator):
|
||||
- OpenRouter surfaces models from many of these vendors
|
||||
- If OpenRouter shows capabilities that a vendor's parser misses, the parser is stale
|
||||
|
||||
4. **Fix issues found** - update parsers, filters, deny lists as needed.
|
||||
|
||||
5. Run `tsc --noEmit` after changes.
|
||||
|
||||
**Important:**
|
||||
- Do NOT convert dynamic vendors to hardcoded lists - the dynamic approach is intentional
|
||||
- Focus on parser correctness, not model coverage
|
||||
- Flag any vendor whose API response format seems to have changed substantially
|
||||
@@ -6,11 +6,11 @@ Update `src/modules/llms/server/openai/models/groq.models.ts` with latest model
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models: https://console.groq.com/docs/models
|
||||
**Primary Source:**
|
||||
- Fetch https://console.groq.com/docs/models.md directly (markdown format, no search needed)
|
||||
- Pricing: https://groq.com/pricing/
|
||||
|
||||
**Fallbacks if blocked:** Search "groq models latest pricing", "groq latest models", "groq api models", or search GitHub for latest model prices and context windows
|
||||
**Do NOT use web search.** The `.md` endpoint provides structured markdown content directly.
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
|
||||
@@ -6,11 +6,11 @@ Update `src/modules/llms/server/openai/models/moonshot.models.ts` with latest mo
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
**Primary Sources (fetch directly, no search needed):**
|
||||
- Pricing: https://platform.moonshot.ai/docs/pricing/chat
|
||||
- API Reference: https://platform.moonshot.ai/docs/api/chat
|
||||
|
||||
**Fallbacks if blocked:** Search "moonshot kimi models latest pricing", "kimi k2 models", "moonshot api models", or search GitHub for latest model prices and context windows
|
||||
**Do NOT use web search.** Fetch the URLs directly, or, if they are inaccessible, ask the user to provide the data.
|
||||
|
||||
**Important:**
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
---
|
||||
description: Update MiniMax model definitions with latest pricing and capabilities
|
||||
---
|
||||
|
||||
Update `src/modules/llms/server/openai/models/minimax.models.ts` with latest model definitions.
|
||||
|
||||
Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/server/models.mappings.ts` for context only. Focus on the model file, do not descend into other code.
|
||||
|
||||
**Primary Sources:**
|
||||
- Models & Changelog: https://platform.minimax.io/docs/release-notes/models.md
|
||||
- Pricing: https://platform.minimax.io/docs/guides/pricing-paygo.md
|
||||
- Pricing Overview: https://platform.minimax.io/docs/pricing/overview.md
|
||||
- Text Generation API: https://platform.minimax.io/docs/guides/text-generation.md
|
||||
|
||||
**Note:** MiniMax is a hardcoded-only vendor (no `/v1/models` API yet). All model IDs, context windows, and pricing must be manually maintained from the docs. Pay attention to new model releases (M-series), highspeed variants, and deprecated models.
|
||||
|
||||
**Fallbacks if blocked:** Search "minimax api models pricing", "minimax m2 m3 models", "minimax api changelog" or check https://openrouter.ai models list for MiniMax entries.
|
||||
|
||||
**Important:**
|
||||
- Models are `ModelDescriptionSchema[]` objects (not ManualMappings) - match existing pattern in the file
|
||||
- Review the full model list for additions, removals, and price changes
|
||||
- Check for new `-highspeed` variants and new model families
|
||||
- Verify context window sizes and max completion tokens against docs
|
||||
- Minimize whitespace/comment changes, focus on content
|
||||
- Preserve comments to make diffs easy to review
|
||||
- Flag broken links or unexpected content
|
||||
@@ -8,29 +8,29 @@ Reference `src/modules/llms/server/llm.server.types.ts` and `src/modules/llms/se
|
||||
|
||||
**Automated Workflow:**
|
||||
```bash
|
||||
# 1. Fetch the HTML
|
||||
curl -s "https://ollama.com/library?sort=featured" -o /tmp/ollama-featured.html
|
||||
# 1. Fetch the HTML to a cross-platform temp path (sorted by newest for stable ordering)
|
||||
curl -s "https://ollama.com/library?sort=newest" -o "$(node -p "require('os').tmpdir()")/ollama-newest.html"
|
||||
|
||||
# 2. Parse it with the script
|
||||
node .claude/scripts/parse-ollama-models.js > /tmp/ollama-parsed.txt 2>&1
|
||||
|
||||
# 3. Review the parsed output
|
||||
cat /tmp/ollama-parsed.txt
|
||||
# 2. Parse it with the script (auto-finds the file in os.tmpdir())
|
||||
node .claude/scripts/parse-ollama-models.js 2>&1
|
||||
```
|
||||
|
||||
The parser outputs: `modelName|pulls|capabilities|sizes`
|
||||
- Example: `deepseek-r1|66200000|tools,thinking|1.5b,7b,8b,14b,32b,70b,671b`
|
||||
|
||||
**Primary Sources:**
|
||||
- Model Library: https://ollama.com/library?sort=featured
|
||||
- Model Library: https://ollama.com/library?sort=newest
|
||||
- Parser script: `.claude/scripts/parse-ollama-models.js`
|
||||
|
||||
**Fallbacks if blocked:** Check https://github.com/ollama/ollama, search "ollama featured models", "ollama latest models", or search GitHub for latest model info
|
||||
|
||||
**Important:**
|
||||
- Skip models below 50,000 pulls (parser does this automatically)
|
||||
- Skip embedding models (parser does not do this automatically)
|
||||
- Sort them in the EXACT same order as the source (featured models)
|
||||
- Parser filtering rules:
|
||||
- Top 30 newest models are always included (regardless of pull count)
|
||||
- After top 30, only models with 50K+ pulls are included
|
||||
- Models with 'cloud' capability are automatically excluded
|
||||
- Models with 'embedding' capability are automatically excluded
|
||||
- Sort them in the EXACT same order as the source (newest first, for stable ordering)
|
||||
- Extract tags: 'tools' → hasTools, 'vision' → hasVision, 'embedding' → isEmbeddings (note the 's'), 'thinking' → tags only
|
||||
- Extract 'b' tags (1.5b, 7b, 32b) to tags field
|
||||
- Set today's date (YYYYMMDD format) for newly added models only
|
||||
|
||||
@@ -0,0 +1,66 @@
|
||||
---
|
||||
description: Verify model parameterSpecs match API-validated sweep data
|
||||
argument-hint: openai | anthropic | gemini | xai (or empty for all)
|
||||
---
|
||||
|
||||
# Verify LLM Parameters
|
||||
|
||||
Compare model `parameterSpecs` in definition files against API-validated sweep data.
|
||||
|
||||
If `$ARGUMENTS` provided, verify only that dialect, which includes reading the pair of sweep results and model definitions. Otherwise verify all four, and read the pairs in sequence.
|
||||
|
||||
## Files
|
||||
|
||||
**Sweep results** (source of truth for select parameters):
|
||||
- `tools/develop/llm-parameter-sweep/llm-{dialect}-parameters-sweep.json`
|
||||
By the time you see these files, the repo owner has already updated them via `tools/develop/llm-parameter-sweep/sweep.sh` (very long running, 15 min per vendor).
|
||||
|
||||
**Model definitions (source of truth for model definitions for the user and application, including constants, interfaces, supported parameters and sometimes allowed parameter values)**:
|
||||
- OpenAI: `src/modules/llms/server/openai/models/openai.models.ts`
|
||||
- Anthropic: `src/modules/llms/server/anthropic/anthropic.models.ts`
|
||||
- Gemini: `src/modules/llms/server/gemini/gemini.models.ts`
|
||||
- xAI: `src/modules/llms/server/openai/models/xai.models.ts`
|
||||
|
||||
## Task
|
||||
|
||||
The sweep data is the source of truth for allowed model parameter values or value ranges, and for the `fn` function-calling capability probe.
|
||||
|
||||
For each model in the sweep, verify the model definition exposes exactly those capabilities - no more, no less. This includes:
|
||||
- The parameter is present in parameterSpecs
|
||||
- The paramId variant covers exactly the values from the sweep, if applicable
|
||||
- `LLM_IF_OAI_Fn` in `interfaces` matches `"roundtrip"` in the sweep's `fn` array (see below)
|
||||
- etc.
|
||||
|
||||
Report models where the definition doesn't match the sweep.
|
||||
|
||||
## Parameter Mapping
|
||||
|
||||
Example parameter mapping. Note that new parameters may have been added to both the definition, and the sweep.
|
||||
The objective of the sweep is to hint at model definition values, but the model definitions are what matters for Big-AGI,
|
||||
and need to be carefully updated, otherwise thousands of clients may break.
|
||||
|
||||
| Dialect | Sweep Key | Model paramId |
|
||||
|-----------|--------------------------|------------------------------|
|
||||
| OpenAI | `oai-reasoning-effort` | `llmVndOaiEffort` |
|
||||
| OpenAI | `oai-verbosity` | `llmVndOaiVerbosity` |
|
||||
| OpenAI | `oai-image-generation` | `llmVndOaiImageGeneration` |
|
||||
| OpenAI | `oai-web-search` | `llmVndOaiWebSearchContext` |
|
||||
| Anthropic | `ant-effort` | `llmVndAntEffort` |
|
||||
| Anthropic | `ant-thinking-budget` | `llmVndAntThinkingBudget` |
|
||||
| Gemini | `gemini-thinking-level` | `llmVndGemEffort` |
|
||||
| Gemini | `gemini-thinking-budget` | `llmVndGeminiThinkingBudget` |
|
||||
| xAI | `xai-web-search` | `llmVndXaiWebSearch` |
|
||||
|
||||
## Function-Calling Capability (`fn`)
|
||||
|
||||
The sweep `fn` array is a capability probe (not a paramId). `"roundtrip"` is the authoritative signal - full tool-call -> response -> coherent follow-up. `LLM_IF_OAI_Fn` in the model's `interfaces` must track `"roundtrip"`: present iff present.
|
||||
|
||||
Flag:
|
||||
- `"roundtrip"` in sweep but `LLM_IF_OAI_Fn` missing (or vice versa)
|
||||
- `fn` contains `"auto"`/`"required"` without `"roundtrip"` - partial capability, call it out
|
||||
|
||||
## Output
|
||||
|
||||
Report first for every model the expected values from the sweep, then the actual values from the definition, then the mismatches.
|
||||
|
||||
Finally make one table for each dialect listing all models with mismatches and the specific issues.
|
||||
@@ -0,0 +1,56 @@
|
||||
---
|
||||
description: Generate changelog bullets for big-agi.com/changes
|
||||
argument-hint: date like "2026-01-10" or empty for auto-detect
|
||||
---
|
||||
|
||||
Generate changelog bullets for a single entry in https://big-agi.com/changes
|
||||
|
||||
**Step 1: Find the starting date**
|
||||
|
||||
IMPORTANT: This repo rebases frequently, so commits are INTERLEAVED throughout history.
|
||||
New commits can appear at line 10, 500, or 1800. Use AUTHOR DATE (`%ad`) to filter - it's preserved during rebases.
|
||||
|
||||
If `$ARGUMENTS` provided, use it as the cutoff date.
|
||||
|
||||
If NO argument:
|
||||
1. Fetch https://big-agi.com/changes to get the most recent changelog date
|
||||
2. Use that date as the cutoff
|
||||
|
||||
**Step 2: Get commits by author date**
|
||||
|
||||
Filter commits by author date to catch ALL new commits regardless of position in history:
|
||||
|
||||
```bash
|
||||
# For commits after Jan 10, 2026 (adjust date pattern as needed)
|
||||
git log --oneline --no-merges --format="%h %ad %s" --date=short | grep "2026-01-1[1-9]\|2026-01-2\|2026-02"
|
||||
|
||||
# Verify interleaving by checking line numbers
|
||||
git log --oneline --no-merges --format="%h %ad %s" --date=short | grep -n "2026-01-1[1-9]"
|
||||
```
|
||||
|
||||
The line numbers prove commits are scattered (e.g., lines 14, 638, 1156, 1803 = interleaved).
|
||||
|
||||
**Step 3: Write bullets**
|
||||
|
||||
Real examples from big-agi.com/changes:
|
||||
- "Gemini 3 Flash support with 4-level thinking: high, medium, low, minimal"
|
||||
- "Cloud Sync launched! - long awaited and top requested"
|
||||
- "Deepseek V3.2 Speciale comes with almost Gemini 3 Pro performance but 20 times cheaper"
|
||||
- "Anthropic Opus 4.5 with controls for effort (speed tradeoff), thinking budget, search"
|
||||
- "Login with email, via magic link"
|
||||
- "Mobile UX fixes for popups drag/interaction"
|
||||
|
||||
**Rules:**
|
||||
|
||||
1. **Order by importance** - most significant changes first, minor fixes last
|
||||
2. **Feature-first, no verb prefixes** - "Gemini 3 support" not "Add Gemini 3 support"
|
||||
3. **Model names lead** when it's about LLMs
|
||||
4. **Specific details** - "4-level thinking: high, medium, low, minimal" not "multiple thinking levels"
|
||||
5. **One-liners** - short, no fluff
|
||||
6. **Consolidate commits** - 10 persona editor commits = 1 bullet
|
||||
7. **No corporate speak** - no "enhanced", "streamlined", "robust", "leverage"
|
||||
|
||||
**Skip:** WIP, internal refactors, KB docs, automation, review cleanups, trivial fixes, deps bumps, CI changes.
|
||||
|
||||
**Output:** Just bullets, ready to paste. 2-5 bullets but adapt depending on scope, especially
|
||||
in relation to the usual https://big-agi.com/changes entries.
|
||||
@@ -0,0 +1,149 @@
|
||||
---
|
||||
description: Execute the Big-AGI release process
|
||||
argument-hint: version like "2.0.4" or empty to auto-increment patch
|
||||
---
|
||||
|
||||
Execute the release process for Big-AGI. Go step-by-step, waiting for user approval between major steps.
|
||||
|
||||
## Step 1: Determine Version
|
||||
|
||||
If `$ARGUMENTS` provided, use it. Otherwise, read `package.json` and increment patch version.
|
||||
|
||||
## Step 2: Gather Context
|
||||
|
||||
Before drafting, gather what changed:
|
||||
1. `git log --oneline` since last release tag to see all commits
|
||||
2. Fetch https://big-agi.com/changes to see what daily entries already covered
|
||||
3. `gh issue list --state closed --search "closed:>LAST_RELEASE_DATE"` to find closed issues
|
||||
4. Check auto-generated release notes (`gh release create --generate-notes --draft`) for community PRs and new contributors
|
||||
|
||||
## Step 3: Update Files
|
||||
|
||||
1. **package.json** - Update `version` field
|
||||
2. **src/common/app.release.ts** - Increment `Monotonics.NewsVersion` (e.g., 203 → 204)
|
||||
3. **src/apps/news/news.data.tsx** - Add new entry at top of `NewsItems` array
|
||||
|
||||
For the news entry, ask user for release name and key highlights.
|
||||
|
||||
**News entry style** - Draft is a starting point, user will refine:
|
||||
- Models lead when model-heavy, grouped together
|
||||
- Callout features get own bullet with colon explanation
|
||||
- UX items grouped, minimal bold
|
||||
- Fixes last, brief
|
||||
- Release name stays subtle - don't oversell the theme
|
||||
- Apply the draft, then let the user edit manually and re-read after - don't over-iterate
|
||||
|
||||
Use `<B>`, `<B issue={N}>`, `<B href='url'>`. Re-read file after user edits.
|
||||
|
||||
4. User runs `npm i` to update lockfile
|
||||
|
||||
## Step 4: README
|
||||
|
||||
Update `README.md`:
|
||||
- Line ~46: Update model examples if new flagship models
|
||||
- Line ~147: Add release bullet above previous version
|
||||
|
||||
**Style:** `- Open X.Y.Z: **Name** feature1, feature2, feature3`
|
||||
|
||||
## Step 5: Git Operations
|
||||
|
||||
User commits changes, then:
|
||||
```bash
|
||||
git tag vX.Y.Z
|
||||
git push opensource vX.Y.Z
|
||||
```
|
||||
|
||||
## Step 6: GitHub Release
|
||||
|
||||
Create release with `gh release create` using `--notes` (not `--body`).
|
||||
|
||||
**Structure** - discursive intro paragraph, then themed sections, not a generic "What's New" header:
|
||||
|
||||
```
|
||||
# Big-AGI X.Y.Z - Name
|
||||
|
||||
### Theme tagline.
|
||||
|
||||
1-2 sentence discursive paragraph setting the release theme - what it means, not a feature list.
|
||||
|
||||
### Section Name (e.g., Models & Parameters)
|
||||
- Bullet points for specifics
|
||||
- Group by theme, not by commit order
|
||||
|
||||
### Vendor/Platform Section (when enough substance)
|
||||
- Give a vendor its own section if 3+ related changes (e.g., Anthropic, AWS Bedrock)
|
||||
|
||||
### Also New
|
||||
- Remaining features, scannable
|
||||
|
||||
## New Contributors
|
||||
* @user made their first contribution (brief description) in PR_URL
|
||||
|
||||
**Full Changelog**: https://github.com/enricoros/big-AGI/compare/vPREV...vNEW
|
||||
|
||||
## Get Started
|
||||
Available now at [big-agi.com](https://big-agi.com), via Docker, or self-host from source.
|
||||
```
|
||||
|
||||
## Step 7: Changelog (big-agi.com/changes)
|
||||
|
||||
The Open release entry on big-agi.com/changes is lightweight - just 1-2 bullets announcing the stable release, since daily entries already covered the individual features. Use `/rel:changelog` to generate.
|
||||
|
||||
**Style:** `- Open X.Y.Z Name stable release on GitHub and Docker`
|
||||
followed by 1 bullet summarizing what landed in the final days since the last daily entry.
|
||||
|
||||
## Step 8: Announcements
|
||||
|
||||
Draft for user to post:
|
||||
|
||||
**Twitter** - Thematic, not feature dumps. Talk about what it means, not what it lists:
|
||||
```
|
||||
Big-AGI Open X.Y.Z is out!
|
||||
|
||||
[Theme - e.g., "Lots of love to models: native support, latest protocols, total configuration - puts you in control."]
|
||||
|
||||
[One more angle, natural prose]
|
||||
|
||||
[Optional link]
|
||||
```
|
||||
|
||||
**Discord** - Structured with bold headers:
|
||||
```
|
||||
## :partyblob: Big-AGI **Open** X.Y.Z
|
||||
|
||||
**Category:** Items
|
||||
**Category:** Items
|
||||
**More:** Count of commits/fixes
|
||||
```
|
||||
|
||||
## Step 9: Cover Image Prompts
|
||||
|
||||
Offer cover image prompt alternatives for the release. Read past prompts from `news.data.tsx` comments (lines ~24-37) for the pattern.
|
||||
|
||||
**Pattern:** Always a capybara sculpture made of crystal glass, wearing rayban-like oversized black sunglasses. Each release has a unique theme/activity that symbolizes the release.
|
||||
|
||||
**Shared prefix:** `High-key white scene, very clean, hero framing. A close-up photo of a capybara sculpture made of crystal glass. The capybara wears rayban-like oversized black sunglasses.`
|
||||
|
||||
**Also offer future release concepts** tied to vision vectors from `kb/vision-inlined.md` (e.g., agency, inhabitation, sculpting, safe exploration).
|
||||
|
||||
## Tone Guide
|
||||
|
||||
**Good:**
|
||||
- "Lots of love to models: native support, latest protocols, total configuration"
|
||||
- "UX quality of life improvements, from Google Drive to message reorder"
|
||||
- "Gemini 3 Flash support with 4-level thinking: high, medium, low, minimal"
|
||||
|
||||
**Bad:**
|
||||
- "Rolling out the red carpet for top models!" (too salesy)
|
||||
- "Enhanced and streamlined the robust model experience" (corporate speak)
|
||||
- "Added support for Gemini 3 Flash model with multiple thinking levels" (verb prefix, vague)
|
||||
|
||||
## Reference
|
||||
|
||||
Find previous copy at:
|
||||
- **GitHub releases:** https://github.com/enricoros/big-AGI/releases
|
||||
- **News entries:** `src/apps/news/news.data.tsx`
|
||||
- **README:** `README.md` release notes section
|
||||
- **Changelog:** https://big-agi.com/changes
|
||||
|
||||
Match the existing tone - professional but human, specific not generic, features not marketing.
|
||||
@@ -1,23 +1,38 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Parse Ollama featured models from HTML
|
||||
* Parse Ollama models from HTML (sorted by newest for stable ordering)
|
||||
*
|
||||
* Usage:
|
||||
* 1. Fetch HTML: curl -s "https://ollama.com/library?sort=featured" -o /tmp/ollama-featured.html
|
||||
* 1. Fetch HTML: curl -s "https://ollama.com/library?sort=newest" -o /tmp/ollama-newest.html
|
||||
* 2. Parse: node .claude/scripts/parse-ollama-models.js
|
||||
*
|
||||
* Outputs: pipe-delimited format: modelName|pulls|capabilities|sizes
|
||||
* Example: deepseek-r1|66200000|tools,thinking|1.5b,7b,8b,14b,32b,70b,671b
|
||||
*
|
||||
* Filtering rules:
|
||||
* - Top 30 newest models are always included (regardless of pull count)
|
||||
* - After top 30, only models with 50K+ pulls are included
|
||||
* - Models with 'cloud' capability are always excluded
|
||||
* - Models with 'embedding' capability are always excluded
|
||||
*
|
||||
* Pull counts are rounded to significant figures for stable diffs:
|
||||
* - >=10M: round to 100K (e.g., 109,123,456 -> 109,100,000)
|
||||
* - >=1M: round to 10K (e.g., 5,432,100 -> 5,430,000)
|
||||
* - <1M: round to 1K (e.g., 88,700 -> 89,000)
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
|
||||
const htmlPath = process.argv[2] || '/tmp/ollama-featured.html';
|
||||
const htmlPath = process.argv[2] || path.join(os.tmpdir(), 'ollama-newest.html');
|
||||
const TOP_N_ALWAYS_INCLUDE = 30;
|
||||
const MIN_PULLS_THRESHOLD = 50000;
|
||||
|
||||
if (!fs.existsSync(htmlPath)) {
|
||||
console.error(`Error: HTML file not found at ${htmlPath}`);
|
||||
console.error('Please fetch it first with:');
|
||||
console.error(' curl -s "https://ollama.com/library?sort=featured" -o /tmp/ollama-featured.html');
|
||||
console.error(' curl -s "https://ollama.com/library?sort=newest" -o /tmp/ollama-newest.html');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
@@ -25,7 +40,7 @@ const html = fs.readFileSync(htmlPath, 'utf8');
|
||||
|
||||
// Split into model sections - each starts with <a href="/library/
|
||||
const modelSections = html.split(/<a href="\/library\//);
|
||||
const models = [];
|
||||
const allParsedModels = [];
|
||||
|
||||
for (let i = 1; i < modelSections.length; i++) {
|
||||
const section = modelSections[i].substring(0, 5000); // Large enough window to capture all data
|
||||
@@ -65,10 +80,27 @@ for (let i = 1; i < modelSections.length; i++) {
|
||||
sizes.push(sizeMatch[1].trim());
|
||||
}
|
||||
|
||||
// Only include models with 50K+ pulls
|
||||
if (pulls >= 50000) {
|
||||
models.push({ name, pulls, capabilities, sizes });
|
||||
// Skip models with 'cloud' or 'embedding' capability
|
||||
if (capabilities.includes('cloud') || capabilities.includes('embedding')) {
|
||||
continue;
|
||||
}
|
||||
|
||||
allParsedModels.push({ name, pulls: roundPulls(pulls), capabilities, sizes });
|
||||
}
|
||||
|
||||
// Apply filtering: top 30 always included, rest need 50K+ pulls
|
||||
const models = allParsedModels.filter((model, index) => {
|
||||
return index < TOP_N_ALWAYS_INCLUDE || model.pulls >= MIN_PULLS_THRESHOLD;
|
||||
});
|
||||
|
||||
/**
|
||||
* Round pulls to significant figures for stable output.
|
||||
* This reduces churn from daily fluctuations while preserving magnitude.
|
||||
*/
|
||||
function roundPulls(pulls) {
|
||||
if (pulls >= 10000000) return Math.round(pulls / 100000) * 100000; // >=10M: round to 100K
|
||||
if (pulls >= 1000000) return Math.round(pulls / 10000) * 10000; // >=1M: round to 10K
|
||||
return Math.round(pulls / 1000) * 1000; // <1M: round to 1K
|
||||
}
|
||||
|
||||
// Output in pipe-delimited format (in the order they appear on the page)
|
||||
@@ -78,4 +110,6 @@ models.forEach(m => {
|
||||
console.log(`${m.name}|${m.pulls}|${caps}|${tags}`);
|
||||
});
|
||||
|
||||
console.error(`\nTotal models with 50K+ pulls: ${models.length}`);
|
||||
const topNCount = Math.min(TOP_N_ALWAYS_INCLUDE, allParsedModels.length);
|
||||
const thresholdCount = models.length - topNCount;
|
||||
console.error(`\nTotal models: ${models.length} (top ${topNCount} newest + ${thresholdCount} with ${MIN_PULLS_THRESHOLD / 1000}K+ pulls)`);
|
||||
|
||||
@@ -4,14 +4,20 @@
|
||||
"Bash(cat:*)",
|
||||
"Bash(cp:*)",
|
||||
"Bash(curl:*)",
|
||||
"Bash(eslint:*)",
|
||||
"Bash(find:*)",
|
||||
"Bash(gh issue list:*)",
|
||||
"Bash(gh issue view:*)",
|
||||
"Bash(git branch:*)",
|
||||
"Bash(git cherry-pick:*)",
|
||||
"Bash(git describe:*)",
|
||||
"Bash(git grep:*)",
|
||||
"Bash(git log:*)",
|
||||
"Bash(git log:*)",
|
||||
"Bash(git ls-tree:*)",
|
||||
"Bash(git mv:*)",
|
||||
"Bash(git show:*)",
|
||||
"Bash(grep:*)",
|
||||
"Bash(head:*)",
|
||||
"Bash(ls:*)",
|
||||
"Bash(mkdir:*)",
|
||||
"Bash(node:*)",
|
||||
@@ -23,8 +29,11 @@
|
||||
"Bash(rg:*)",
|
||||
"Bash(rm:*)",
|
||||
"Bash(sed:*)",
|
||||
"Bash(tail:*)",
|
||||
"Bash(tree:*)",
|
||||
"Bash(tsc:*)",
|
||||
"Read(//tmp/**)",
|
||||
"Skill(llms:update-models*)",
|
||||
"WebFetch",
|
||||
"WebFetch(domain:big-agi.com)",
|
||||
"WebSearch",
|
||||
|
||||
+15
-40
@@ -1,43 +1,18 @@
|
||||
# big-AGI non-code files
|
||||
/docs/
|
||||
/dist/
|
||||
README.md
|
||||
*
|
||||
|
||||
# Ignore build and log files
|
||||
Dockerfile
|
||||
/.dockerignore
|
||||
!app/
|
||||
!kb/
|
||||
!pages/
|
||||
!public/
|
||||
!src/
|
||||
!tools/
|
||||
|
||||
# Node build artifacts
|
||||
/node_modules
|
||||
/.pnp
|
||||
.pnp.js
|
||||
!*.mjs
|
||||
!middleware_BASIC_AUTH.ts
|
||||
!middleware.ts
|
||||
!next.config.ts
|
||||
!package*.json
|
||||
!tsconfig.json
|
||||
|
||||
# next.js
|
||||
/.next/
|
||||
/out/
|
||||
|
||||
# production
|
||||
/build
|
||||
|
||||
# versioning
|
||||
.git/
|
||||
.github/
|
||||
|
||||
# IDEs
|
||||
.idea/
|
||||
|
||||
# debug
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
.pnpm-debug.log*
|
||||
|
||||
# local env files
|
||||
.env*.local
|
||||
|
||||
# vercel
|
||||
.vercel
|
||||
|
||||
# typescript
|
||||
*.tsbuildinfo
|
||||
next-env.d.ts
|
||||
!LICENSE
|
||||
!README.md
|
||||
|
||||
@@ -0,0 +1,69 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: docker
|
||||
directory: /
|
||||
schedule:
|
||||
interval: weekly
|
||||
commit-message:
|
||||
prefix: "chore(deps)"
|
||||
ignore:
|
||||
- dependency-name: "node"
|
||||
versions: [">=25", "<26"] # Node 25 breaks the build because of a dummy localStorage object
|
||||
|
||||
- package-ecosystem: github-actions
|
||||
directory: /
|
||||
schedule:
|
||||
interval: weekly
|
||||
commit-message:
|
||||
prefix: "chore(deps)"
|
||||
|
||||
# Disabled npm updates for now - will need precise package pinning, as some packages changed behavior upstream
|
||||
# - package-ecosystem: npm
|
||||
# directory: /
|
||||
# schedule:
|
||||
# interval: weekly
|
||||
# commit-message:
|
||||
# prefix: "chore(deps)"
|
||||
# cooldown:
|
||||
# semver-patch: 3
|
||||
# semver-minor: 7
|
||||
# semver-major: 14
|
||||
# # Ignore packages intentionally pinned due to upstream issues
|
||||
# ignore:
|
||||
# # Issue #857: v11.6+ breaks streaming; tried 11.4.4/11.6/11.7, only 11.5.1 works
|
||||
# - dependency-name: "@trpc/*"
|
||||
# versions: [">=11.5.1", "<12"]
|
||||
# # Pinned during tRPC #857 debugging - may be safe to unpin, test first
|
||||
# - dependency-name: "@tanstack/react-query"
|
||||
# versions: [">=5.90.10", "<6"]
|
||||
# # Pinned because 5.0.8 changes signatures so return set({ .. }) != void;
|
||||
# - dependency-name: "zustand"
|
||||
# versions: [">=5.0.7", "<6"]
|
||||
# groups:
|
||||
# next:
|
||||
# patterns:
|
||||
# - "@next/*"
|
||||
# - "eslint-config-next"
|
||||
# - "next"
|
||||
# react:
|
||||
# patterns:
|
||||
# - "react"
|
||||
# - "react-dom"
|
||||
# - "@types/react"
|
||||
# - "@types/react-dom"
|
||||
# emotion:
|
||||
# patterns:
|
||||
# - "@emotion/*"
|
||||
# mui:
|
||||
# patterns:
|
||||
# - "@mui/*"
|
||||
# dnd-kit:
|
||||
# patterns:
|
||||
# - "@dnd-kit/*"
|
||||
# prisma:
|
||||
# patterns:
|
||||
# - "@prisma/*"
|
||||
# - "prisma"
|
||||
# vercel:
|
||||
# patterns:
|
||||
# - "@vercel/*"
|
||||
@@ -12,27 +12,30 @@ on:
|
||||
|
||||
jobs:
|
||||
claude-dm:
|
||||
# Only allow repository owner to trigger DMs with @claude (blocks other users and bots)
|
||||
if: |
|
||||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude'))) ||
|
||||
github.actor == 'enricoros' &&
|
||||
github.triggering_actor == 'enricoros' &&
|
||||
((github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude'))) ||
|
||||
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude'))
|
||||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')))
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
contents: write # Required for code creation and commits
|
||||
issues: write
|
||||
id-token: write
|
||||
pull-requests: write
|
||||
actions: read # Required for Claude to read CI results on PRs
|
||||
id-token: write # required to use OIDC to authenticate to Claude Code API
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 1
|
||||
fetch-depth: 0 # 1 -> 0: full history helps with git blame, etc.
|
||||
|
||||
- name: Run Claude Code DM Response
|
||||
id: claude
|
||||
@@ -41,6 +44,7 @@ jobs:
|
||||
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
|
||||
|
||||
# Security: Only users with write access can trigger (DMs allow code execution)
|
||||
# Note: contents:write permission enables code creation and commits
|
||||
|
||||
# This is an optional setting that allows Claude to read CI results on PRs
|
||||
additional_permissions: |
|
||||
@@ -49,9 +53,7 @@ jobs:
|
||||
# Optional: Add claude_args to customize behavior and configuration
|
||||
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
|
||||
# or https://docs.claude.com/en/docs/claude-code/cli-reference for available options
|
||||
# claude_args: '--allowed-tools Bash(gh pr:*)'
|
||||
# disabling opus for now claude-opus-4-1-20250805
|
||||
claude_args: |
|
||||
--model claude-sonnet-4-5-20250929
|
||||
--model claude-opus-4-6
|
||||
--max-turns 100
|
||||
--allowedTools "Edit,Read,Write,WebFetch,WebSearch,Bash(cat:*),Bash(cp:*),Bash(find:*),Bash(git branch:*),Bash(grep:*),Bash(ls:*),Bash(mkdir:*),Bash(npm install),Bash(npm install:*),Bash(npm run:*),Bash(gh issue:*),Bash(gh search:*),Bash(gh label:*),Bash(gh pr:*),mcp__chrome-devtools,SlashCommand"
|
||||
--allowedTools "Edit,Read,Write,WebFetch,WebSearch,Bash(cat:*),Bash(cp:*),Bash(find:*),Bash(git branch:*),Bash(grep:*),Bash(ls:*),Bash(mkdir:*),Bash(npm run:*),Bash(gh issue:*),Bash(gh search:*),Bash(gh label:*),Bash(gh pr:*),SlashCommand"
|
||||
|
||||
@@ -2,7 +2,7 @@ name: Claude Code Auto-Triage Issues
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [ opened, assigned ]
|
||||
types: [ opened ]
|
||||
|
||||
jobs:
|
||||
claude-issue-triage:
|
||||
@@ -17,15 +17,15 @@ jobs:
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
pull-requests: write
|
||||
id-token: write
|
||||
pull-requests: read # was write, but we're not altering PRs here
|
||||
actions: read
|
||||
id-token: write # required to use OIDC to authenticate to Claude Code API
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 1
|
||||
fetch-depth: 0 # 1 -> 0: full history helps with git blame, etc.
|
||||
|
||||
- name: Analyze issue and provide help
|
||||
uses: anthropics/claude-code-action@v1
|
||||
@@ -35,6 +35,7 @@ jobs:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
allowed_non_write_users: '*'
|
||||
# track_progress: true # Enables tracking comments
|
||||
show_full_output: ${{ github.event.repository.private }} # security: do not log verbosely in private repo
|
||||
|
||||
# This is an optional setting that allows Claude to read CI results on PRs
|
||||
additional_permissions: |
|
||||
@@ -54,9 +55,11 @@ jobs:
|
||||
**Use web search**: When potentially outside Big-AGI (e.g. user configuration), search the web for similar errors or related issues
|
||||
**Provide a solution**:
|
||||
- Provide multiple solutions if uncertain, and say so
|
||||
- If you can fix it in code, propose the fix
|
||||
- If possible also suggest fixes or workarounds for immediate relief
|
||||
- Analyze the code and suggest specific fixes with code examples
|
||||
- If possible also suggest fixes or workarounds for immediate relief
|
||||
- Reference specific files and line numbers
|
||||
- Suggest workarounds for immediate relief if applicable
|
||||
- Use web search to find similar issues and solutions
|
||||
- Test selectively and even npm install and run build if needed to verify the solution
|
||||
2. Always add the 'claude-triage' issue label to indicate this issue was triaged by Claude
|
||||
3. Comment with:
|
||||
@@ -65,13 +68,16 @@ jobs:
|
||||
- Next steps or clarification needed
|
||||
- Link duplicates if found
|
||||
|
||||
Remember: design values for this codebase: orthogonal features, features that generalize well, modularized and reusable code,
|
||||
type-discriminated data, optimized code, zero maintenance burden. Minimize future pain, etc.
|
||||
|
||||
IMPORTANT: You are in READ-ONLY triage mode. Analyze and suggest solutions in your comment, but do NOT attempt to push code changes.
|
||||
If you're uncertain, say so and suggest next steps.
|
||||
If you write any code make sure that it compiles and that you push it.
|
||||
Be welcoming, helpful, professional, solution-focused and no-BS.
|
||||
|
||||
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
|
||||
# or https://docs.claude.com/en/docs/claude-code/cli-reference for available options
|
||||
claude_args: |
|
||||
--model claude-sonnet-4-5-20250929
|
||||
--model claude-opus-4-6
|
||||
--max-turns 75
|
||||
--allowedTools "Edit,Read,Write,WebFetch,WebSearch,Bash(cat:*),Bash(cp:*),Bash(find:*),Bash(git branch:*),Bash(grep:*),Bash(ls:*),Bash(mkdir:*),Bash(npm install),Bash(npm install:*),Bash(npm run:*),Bash(gh issue:*),Bash(gh search:*),Bash(gh label:*),Bash(gh pr:*),mcp__chrome-devtools,SlashCommand"
|
||||
--allowedTools "Edit,Read,Write,WebFetch,WebSearch,Bash(cat:*),Bash(cp:*),Bash(find:*),Bash(git branch:*),Bash(grep:*),Bash(ls:*),Bash(mkdir:*),Bash(npm run:*),Bash(gh issue:*),Bash(gh search:*),Bash(gh label:*),Bash(gh pr:*),SlashCommand"
|
||||
|
||||
@@ -1,77 +0,0 @@
|
||||
name: Claude Code PR Review
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [ opened, synchronize, ready_for_review ]
|
||||
|
||||
# Limit branches
|
||||
branches: [ main, dev, v1 ]
|
||||
|
||||
# Optional: Only run on specific file changes
|
||||
# paths:
|
||||
# - "src/**/*.ts"
|
||||
# - "src/**/*.tsx"
|
||||
|
||||
jobs:
|
||||
claude-pr-review:
|
||||
# Skip draft PRs
|
||||
# Optional: filter authors: github.event.pull_request.user.login != 'enricoros'
|
||||
if: |
|
||||
github.event.pull_request.draft == false
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
issues: read
|
||||
id-token: write
|
||||
actions: read # Required for Claude to read CI results on PRs
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Run PR Review
|
||||
uses: anthropics/claude-code-action@v1
|
||||
with:
|
||||
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
|
||||
# Security: Allow any user to trigger reviews (read-only PR analysis is safe)
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
allowed_non_write_users: '*'
|
||||
# track_progress: true # Enables tracking comments
|
||||
|
||||
# This setting allows Claude to read CI results on PRs
|
||||
additional_permissions: |
|
||||
actions: read
|
||||
|
||||
prompt: |
|
||||
REPO: ${{ github.repository }}
|
||||
PR NUMBER: ${{ github.event.pull_request.number }}
|
||||
|
||||
Please review this pull request and provide feedback on:
|
||||
- Potential bugs or issues
|
||||
- Adherence to Big-AGI architecture and design patterns
|
||||
- Code quality and best practices, including TypeScript types, error handling, and edge cases
|
||||
- Performance considerations: bundle size, React patterns, streaming efficiency
|
||||
- Security concerns if applicable
|
||||
|
||||
Use the repository's CLAUDE.md for guidance on style and conventions.
|
||||
|
||||
Use `gh pr comment` with your Bash tool to leave your review as a comment on the PR.
|
||||
Use `gh pr review comment` for inline suggestions on specific lines.
|
||||
|
||||
IMPORTANT: After completing your review, always add the 'claude-review' label to the PR to indicate it was reviewed by Claude:
|
||||
gh pr edit ${{ github.event.pull_request.number }} --add-label "claude-review"
|
||||
|
||||
Be constructive, helpful, no-BS, and specific with file:line references.
|
||||
|
||||
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
|
||||
# or https://docs.claude.com/en/docs/claude-code/cli-reference for available options
|
||||
claude_args: |
|
||||
--model claude-sonnet-4-5-20250929
|
||||
--max-turns 100
|
||||
--allowedTools "Edit,Read,Write,WebFetch,WebSearch,Bash(cat:*),Bash(cp:*),Bash(find:*),Bash(git branch:*),Bash(grep:*),Bash(ls:*),Bash(mkdir:*),Bash(npm install),Bash(npm install:*),Bash(npm run:*),Bash(gh issue:*),Bash(gh search:*),Bash(gh label:*),Bash(gh pr:*),mcp__chrome-devtools"
|
||||
@@ -20,29 +20,122 @@ env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build-and-push-image:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60 # Max 1 hour (expected: ~25min)
|
||||
# Build job: runs on native runners for each platform (no QEMU emulation)
|
||||
build:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- platform: linux/amd64
|
||||
runner: ubuntu-latest
|
||||
- platform: linux/arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
|
||||
runs-on: ${{ matrix.runner }}
|
||||
name: Build ${{ matrix.platform }}
|
||||
timeout-minutes: 30
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
security-events: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Prepare
|
||||
run: |
|
||||
platform=${{ matrix.platform }}
|
||||
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
||||
echo "IMAGE_NAME_LC=${IMAGE_NAME,,}" >> $GITHUB_ENV
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
|
||||
|
||||
- name: Log in to the Container registry
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6.0.0
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
labels: |
|
||||
org.opencontainers.image.title=Big-AGI Open
|
||||
org.opencontainers.image.description=Big-AGI Open - Multi-model AI workspace for experts who need to think broader, decide smarter, and build with confidence.
|
||||
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
|
||||
org.opencontainers.image.documentation=https://big-agi.com
|
||||
|
||||
- name: Build and push by digest
|
||||
id: build
|
||||
uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
platforms: ${{ matrix.platform }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LC }}
|
||||
build-args: |
|
||||
NEXT_PUBLIC_GA4_MEASUREMENT_ID=${{ secrets.GA4_MEASUREMENT_ID }}
|
||||
NEXT_PUBLIC_BUILD_HASH=${{ github.sha }}
|
||||
NEXT_PUBLIC_BUILD_REF_NAME=${{ github.ref_name }}
|
||||
outputs: type=image,push-by-digest=true,name-canonical=true,push=true,oci-mediatypes=true
|
||||
provenance: false
|
||||
cache-from: type=gha,scope=${{ github.repository }}-${{ matrix.platform }}
|
||||
cache-to: type=gha,scope=${{ github.repository }}-${{ matrix.platform }},mode=max
|
||||
|
||||
- name: Export digest
|
||||
run: |
|
||||
mkdir -p ${{ runner.temp }}/digests
|
||||
digest="${{ steps.build.outputs.digest }}"
|
||||
touch "${{ runner.temp }}/digests/${digest#sha256:}"
|
||||
|
||||
- name: Upload digest
|
||||
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
|
||||
with:
|
||||
name: digests-${{ env.PLATFORM_PAIR }}
|
||||
path: ${{ runner.temp }}/digests/*
|
||||
if-no-files-found: error
|
||||
retention-days: 1
|
||||
|
||||
# Merge job: combines platform-specific images into a unified multi-arch manifest
|
||||
merge:
|
||||
name: Merge manifests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
needs: build
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Prepare
|
||||
run: echo "IMAGE_NAME_LC=${IMAGE_NAME,,}" >> $GITHUB_ENV
|
||||
|
||||
- name: Download digests
|
||||
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
|
||||
with:
|
||||
path: ${{ runner.temp }}/digests
|
||||
pattern: digests-*
|
||||
merge-multiple: true
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
|
||||
|
||||
- name: Log in to the Container registry
|
||||
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
@@ -50,7 +143,7 @@ jobs:
|
||||
|
||||
- name: Extract metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6.0.0
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
tags: |
|
||||
@@ -66,28 +159,18 @@ jobs:
|
||||
# Version tags (v2.0.0, 2.0.0)
|
||||
type=ref,event=tag
|
||||
type=semver,pattern={{version}}
|
||||
labels: |
|
||||
org.opencontainers.image.title=Big-AGI Open
|
||||
org.opencontainers.image.description=Big-AGI Open - Multi-model AI workspace for experts who need to think broader, decide smarter, and build with confidence.
|
||||
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
|
||||
org.opencontainers.image.documentation=https://big-agi.com
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
build-args: |
|
||||
NEXT_PUBLIC_GA4_MEASUREMENT_ID=${{ secrets.GA4_MEASUREMENT_ID }}
|
||||
NEXT_PUBLIC_BUILD_HASH=${{ github.sha }}
|
||||
NEXT_PUBLIC_BUILD_REF_NAME=${{ github.ref_name }}
|
||||
# Enable build cache (future)
|
||||
#cache-from: type=gha
|
||||
#cache-to: type=gha,mode=max
|
||||
# Enable provenance and SBOM (future)
|
||||
#provenance: true
|
||||
#sbom: true
|
||||
- name: Create manifest list and push
|
||||
working-directory: ${{ runner.temp }}/digests
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
|
||||
--annotation='index:org.opencontainers.image.title=Big-AGI Open' \
|
||||
--annotation='index:org.opencontainers.image.description=Big-AGI Open - Multi-model AI workspace for experts who need to think broader, decide smarter, and build with confidence.' \
|
||||
--annotation='index:org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}' \
|
||||
--annotation='index:org.opencontainers.image.documentation=https://big-agi.com' \
|
||||
$(printf '${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LC }}@sha256:%s ' *)
|
||||
|
||||
- name: Inspect image
|
||||
run: |
|
||||
docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LC }}:${{ steps.meta.outputs.version }}
|
||||
@@ -53,3 +53,6 @@ next-env.d.ts
|
||||
.env*.local
|
||||
/.run/dev (ENV).run.xml
|
||||
/src/modules/3rdparty/aider/scratch*
|
||||
|
||||
# Ignore temporary CC files
|
||||
/tmpclaude*
|
||||
@@ -1,22 +1,54 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
Guidance to Claude Code when working with code in this repository.
|
||||
|
||||
## Development Commands
|
||||
|
||||
```bash
|
||||
# Targeted Code Quality (safe while dev server runs)
|
||||
npx tsc --noEmit # Type check without building
|
||||
npx eslint src/path/to/file.ts # Lint specific file
|
||||
npm run lint # Lint entire project
|
||||
```
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
Big-AGI is a Next.js 15 application with a modular architecture built for advanced AI interactions. The codebase follows a three-layer structure with distinct separation of concerns.
|
||||
Big-AGI is a Next.js 15 application with a sophisticated modular architecture built for professional AI interactions.
|
||||
|
||||
### Development Commands
|
||||
|
||||
Dev servers may be already running on ports 3000, 3001, 3002, or 3003 (not always this app - other projects may occupy these ports). Never start or stop dev servers, let the user do it.
|
||||
|
||||
```bash
|
||||
# Validate (~5s, safe while dev server runs, do NOT use `next build` ~45s for same checks)
|
||||
tsc --noEmit --pretty && npm run lint # Type check (~3.5s) + ESLint (~2s)
|
||||
eslint src/path/to/file.ts # Lint specific file
|
||||
|
||||
# Full build (~60s+, only when suspecting runtime/bundle issues)
|
||||
npm run build # next build runs compile+lint+types but stops at first type-error file; tsc shows all at once
|
||||
|
||||
# Database & External Services
|
||||
# npm run supabase:local-update-types # Generate TypeScript types
|
||||
# npm run stripe:listen # Listen for Stripe webhooks
|
||||
```
|
||||
|
||||
### Git/GitHub remotes
|
||||
|
||||
The `gh` command is available to interact with GitHub from the terminal, but **NEVER PUSH TO ANY BRANCH**. The user manages all 'write' git operations.
|
||||
- `opensource` -> `enricoros/big-AGI` (public, default branch: `main`, MIT) - community issues/PRs/releases
|
||||
- `private` -> `big-agi/big-agi-private` (private, default branch: `dev`) - main dev repo with `dev`->`staging`->`prod` pipeline
|
||||
- **Always use `git mv` instead of `mv`** when renaming or moving files - preserves git history tracking
|
||||
- **NEVER run `git stash`** - it causes work loss
|
||||
|
||||
**Branch contents:**
|
||||
- `main` is the open-source build: local-first, BYO-keys, full AIX and provider coverage
|
||||
- `dev` extends `main` with the hosted/cloud layer: auth, Zync sync, Cloud Fabric, Stripe, multi-tenant, admin pages, it's the way to go for users, the best user experience of any multi-model chat application
|
||||
- Cloud/auth/sync code stays on `dev`; non-cloud improvements (UX, AIX, model support, bug fixes) can land on either branch
|
||||
|
||||
**Branch workflow:**
|
||||
- `dev` is rebased on top of `main` (never merged) - `main` changes flow into `dev` on the next rebase, no manual forward-port needed
|
||||
- Never `git merge` between the two branches - breaks the linear topology
|
||||
- Backporting `dev` -> `main` is a re-implementation, never a cherry-pick - keep `main`-side edits minimal/additive so the existing `dev` version lands cleanly on rebase; split into small commits when natural
|
||||
|
||||
### Core Directory Structure
|
||||
|
||||
You are started from the root of the repository (i.e. where the git folder is or scripts should be run from).
|
||||
**ISSUE ALL COMMANDS FROM THE ROOT, OMITTING 'cd' COMMANDS. DO NOT CHAIN CD AND OTHER COMMANDS**
|
||||
**NEVER RUN COMPOUND `cd` COMMANDS LIKE `cd some-folder && command` - ONLY RUN `command` FROM THE ROOT, ALWAYS.**
|
||||
The directory structure is as follows:
|
||||
|
||||
```
|
||||
/app/api/ # Next.js App Router (API routes only, mostly -> /src/server/)
|
||||
/pages/ # Next.js Pages Router (file-based, mostly -> /src/apps/)
|
||||
@@ -31,11 +63,11 @@ Big-AGI is a Next.js 15 application with a modular architecture built for advanc
|
||||
### Key Technologies
|
||||
|
||||
- **Frontend**: Next.js 15, React 18, Material-UI Joy, Emotion (CSS-in-JS)
|
||||
- **State Management**: Zustand with localStorge/IndexedDB (single cell) persistence
|
||||
- **API Layer**: tRPC with React Query for type-safe communication
|
||||
- **State Management**: Zustand with localStorage/IndexedDB (single cell) persistence
|
||||
- **API Layer**: tRPC with TanStack React Query for type-safe communication
|
||||
- **Runtime**: Edge Runtime for AI operations, Node.js for data processing
|
||||
|
||||
### Apps Architecture Pattern
|
||||
### "Apps" Architecture Pattern
|
||||
|
||||
Each app in `/src/apps/` is a self-contained feature module:
|
||||
- Main component (`App*.tsx`)
|
||||
@@ -51,20 +83,20 @@ Modules in `/src/modules/` provide reusable business logic:
|
||||
- **`aix/`** - AI communication framework for real-time streaming
|
||||
- **`beam/`** - Multi-model AI reasoning system (scatter/gather pattern)
|
||||
- **`blocks/`** - Content rendering (markdown, code, images, etc.)
|
||||
- **`llms/`** - Language model abstraction supporting 16 vendors
|
||||
- **`llms/`** - Language model abstraction supporting 20+ vendors
|
||||
|
||||
### Key Subsystems & Their Patterns
|
||||
|
||||
#### 1. AIX - Real-time AI Communication
|
||||
#### AIX - Real-time AI Communication
|
||||
**Location**: `/src/modules/aix/`
|
||||
**Pattern**: Client-server streaming architecture with provider abstraction
|
||||
|
||||
- **Client** → tRPC → **Server** → **AI Providers**
|
||||
- **Client** -> tRPC -> **Server** -> **AI Providers**
|
||||
- Handles streaming/non-streaming responses with batching and error recovery
|
||||
- Particle-based streaming: `AixWire_Particles` → `ContentReassembler` → `DMessage`
|
||||
- Particle-based streaming: `AixWire_Particles` -> `ContentReassembler` -> `DMessage`
|
||||
- Provider-agnostic through adapter pattern (OpenAI, Anthropic, Gemini protocols)
|
||||
|
||||
#### 3. Beam - Multi-Model Reasoning
|
||||
#### Beam - Multi-Model Reasoning
|
||||
**Location**: `/src/modules/beam/`
|
||||
**Pattern**: Scatter/Gather for parallel AI processing
|
||||
|
||||
@@ -73,15 +105,24 @@ Modules in `/src/modules/` provide reusable business logic:
|
||||
- Real-time UI updates via vanilla Zustand stores
|
||||
- BeamStore per conversation via ConversationHandler
|
||||
|
||||
#### 4. Conversation Management
|
||||
#### Conversation Management
|
||||
**Location**: `/src/common/stores/chat/` and `/src/common/chat-overlay/`
|
||||
**Pattern**: Overlay architecture with handler per conversation
|
||||
|
||||
- `ConversationHandler` orchestrates chat, beam, ephemerals
|
||||
- Per-chat stores: `PerChatOverlayStore` + `BeamStore`
|
||||
- Message structure: `DMessage` → `DMessageFragment[]`
|
||||
- Message structure: `DMessage` -> `DMessageFragment[]`
|
||||
- Supports multi-pane with independent conversation states
|
||||
|
||||
#### Layout System ("Optima")
|
||||
|
||||
The Optima layout system provides:
|
||||
- **Responsive design** adapting desktop/mobile
|
||||
- **Drawer(left)/Toolbar/Panel(right)** composition
|
||||
- **Portal-based rendering** for flexible component placement
|
||||
|
||||
Located in `/src/common/layout/optima/`
|
||||
|
||||
### Storage System
|
||||
|
||||
Big-AGI uses a local-first architecture with Zustand + IndexedDB:
|
||||
@@ -89,7 +130,6 @@ Big-AGI uses a local-first architecture with Zustand + IndexedDB:
|
||||
- **localStorage** for persistent settings/all storage (via Zustand persist middleware)
|
||||
- **IndexedDB** for persistent chat-only storage (via Zustand persist middleware) on a single key-val cell
|
||||
- **Local-first** architecture with offline capability
|
||||
- **Migration system** for upgrading data structures across versions
|
||||
|
||||
Key storage patterns:
|
||||
- Stores use `createIDBPersistStorage()` for IndexedDB persistence
|
||||
@@ -101,16 +141,6 @@ Located in `/src/common/stores/` with stores like:
|
||||
- `chat/store-chats.ts`: Conversations and messages
|
||||
- `llms/store-llms.ts`: Model configurations
|
||||
|
||||
### Layout System ("Optima")
|
||||
|
||||
The Optima layout system provides:
|
||||
- **Responsive design** adapting desktop/mobile
|
||||
- **Drawer/Panel/Toolbar** composition
|
||||
- **Split-pane support** for multi-conversation views
|
||||
- **Portal-based rendering** for flexible component placement
|
||||
|
||||
Located in `/src/common/layout/optima/`
|
||||
|
||||
### State Management Patterns
|
||||
|
||||
1. **Global Stores** (Zustand with IndexedDB persistence)
|
||||
@@ -122,6 +152,7 @@ Located in `/src/common/layout/optima/`
|
||||
2. **Per-Instance Stores** (Vanilla Zustand)
|
||||
- `store-beam_vanilla`: Beam scatter/gather state
|
||||
- `store-perchat_vanilla`: Chat overlay state
|
||||
- `store-attachment-drafts_vanilla`: Attachment drafts
|
||||
- High-performance, no React integration
|
||||
|
||||
3. **Module Stores**
|
||||
@@ -131,94 +162,60 @@ Located in `/src/common/layout/optima/`
|
||||
### User Flows & Interdependencies
|
||||
|
||||
#### Chat Message Flow
|
||||
1. User input → `Composer` → `DMessage` creation
|
||||
2. `ConversationHandler.messageAppend()` → Store update
|
||||
3. `_handleExecute()` / `ConversationHandler.executeChatMessages()` → AIX client request
|
||||
4. AIX streaming → `ContentReassembler` → UI updates
|
||||
5. Zustand auto-persistence → IndexedDB
|
||||
1. User input -> `Composer` -> `DMessage` creation
|
||||
2. `ConversationHandler.messageAppend()` -> Store update
|
||||
3. `_handleExecute()` / `ConversationHandler.executeChatMessages()` -> AIX client request
|
||||
4. AIX streaming -> `ContentReassembler` -> UI updates
|
||||
5. Zustand auto-persistence -> IndexedDB
|
||||
|
||||
#### Beam Multi-Model Flow
|
||||
1. User triggers Beam → `BeamStore.open()` state update
|
||||
1. User triggers Beam -> `BeamStore.open()` state update
|
||||
2. Scatter: Parallel `aixChatGenerateContent()` to N models
|
||||
3. Real-time ray updates → UI progress
|
||||
4. Gather: User selects fusion → Combined output
|
||||
5. Result → New message in conversation
|
||||
3. Real-time ray updates -> UI progress
|
||||
4. Gather: User selects fusion -> Combined output
|
||||
5. Result -> New message in conversation
|
||||
|
||||
### Development Patterns
|
||||
|
||||
#### TypeScript & Code Quality
|
||||
- Type-safe through strict TypeScript interfaces
|
||||
- Clear interface-first approach for modules and components
|
||||
- Use latest TypeScript 5.9+ features
|
||||
- Use forward-looking patterns to minimize future refactors (e.g., discriminated unions, `satisfies` operator, as const assertions)
|
||||
- Type guards and exhaustive checks for robustness
|
||||
- Type inference where possible
|
||||
- Runtime validation with Zod schemas for API inputs/outputs (usually server-side, with the client importing as types the inferred types)
|
||||
|
||||
#### Module Integration
|
||||
- Each module exports its functionality through index files
|
||||
- Modules register with central registries (e.g., `vendors.registry.ts`)
|
||||
- Configuration objects define module behavior
|
||||
- Type-safe integration through strict TypeScript interfaces
|
||||
|
||||
#### Component Patterns
|
||||
- **Controlled components** with clear prop interfaces
|
||||
- **Hook-based logic** extraction for reusability
|
||||
- **Portal rendering** for overlays and modals
|
||||
- **Suspense boundaries** for async operations
|
||||
|
||||
#### API Patterns
|
||||
- **tRPC routers** for type-safe API endpoints
|
||||
- **Zod schemas** for runtime validation
|
||||
- **Middleware** for request/response processing
|
||||
- **Edge functions** for performance-critical AI operations
|
||||
- **tRPC procedures middleware** for authorization and logging (authorization is on a httpOnly cookie)
|
||||
- **Edge functions** for performance-critical operations
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- API keys stored client-side in localStorage (user-provided)
|
||||
- Server-side API keys in environment variables only
|
||||
#### Security Considerations
|
||||
- API keys in environment variables only (server-side); on the client they're in localStorage for now, but we want to move away from this
|
||||
- XSS protection through proper content escaping
|
||||
- No credential transmission to third parties
|
||||
|
||||
## Knowledge Base
|
||||
#### Writing Style
|
||||
- **Never use emdashes (—).** Use normal dashes (-) instead, in all generated text, code comments, and documentation.
|
||||
|
||||
Architecture and system documentation is available in the `/kb/` knowledge base:
|
||||
|
||||
@kb/KB.md
|
||||
|
||||
## Common Development Tasks
|
||||
|
||||
### Testing & Quality
|
||||
- Run `npm run lint` before committing
|
||||
- Type-check with `npx tsc --noEmit`
|
||||
- Type-check with `tsc --noEmit`
|
||||
- Test critical user flows manually
|
||||
|
||||
### Adding a New LLM Vendor
|
||||
1. Create vendor in `/src/modules/llms/vendors/[vendor]/`
|
||||
2. Implement `IModelVendor` interface
|
||||
3. Register in `vendors.registry.ts`
|
||||
4. Add environment variables to `env.ts` (if server-side keys needed)
|
||||
|
||||
### Debugging Storage Issues
|
||||
- Check IndexedDB: DevTools → Application → IndexedDB → `app-chats`
|
||||
- Check IndexedDB: DevTools -> Application -> IndexedDB -> `app-chats`
|
||||
- Monitor Zustand state: Use Zustand DevTools
|
||||
- Check migration logs in console during rehydration
|
||||
|
||||
## Code Examples
|
||||
|
||||
### AIX Streaming Pattern
|
||||
```typescript
|
||||
// Efficient streaming with decimation
|
||||
aixChatGenerateContent_DMessage(
|
||||
llmId,
|
||||
request,
|
||||
{ abortSignal, throttleParallelThreads: 1 },
|
||||
async (update, isDone) => {
|
||||
// Real-time UI updates
|
||||
}
|
||||
);
|
||||
```
|
||||
|
||||
### Model Registry Pattern
|
||||
```typescript
|
||||
// Registry pattern for extensibility
|
||||
const MODEL_VENDOR_REGISTRY: Record<ModelVendorId, IModelVendor> = {
|
||||
openai: ModelVendorOpenAI,
|
||||
anthropic: ModelVendorAnthropic,
|
||||
  // ... plus the remaining registered vendors
|
||||
};
|
||||
```
|
||||
|
||||
## Server Architecture
|
||||
|
||||
@@ -226,9 +223,13 @@ The server uses a split architecture with two tRPC routers:
|
||||
|
||||
### Edge Network (`trpc.router-edge`)
|
||||
Distributed edge runtime for low-latency AI operations:
|
||||
- **AIX** - AI streaming and communication
|
||||
- **LLM Routers** - Direct vendor integrations (OpenAI, Anthropic, Gemini, Ollama)
|
||||
- **External Services** - ElevenLabs (TTS), Google Search, YouTube transcripts
|
||||
- **AIX** [1] - AI streaming and communication
|
||||
- **LLM Routers** [1] - Vendor-specific operations such as list models (OpenAI, Anthropic, Gemini, Ollama)
|
||||
- **Speex** [1] - Unified TTS router (ElevenLabs, Inworld, and other TTS vendors)
|
||||
- **External Services** - Google Search, YouTube transcripts
|
||||
|
||||
[1]: also supports client-side fetch (CSF) via client-side inclusion (rebundling with stubs),
|
||||
for direct browser-to-API communication when possible (CORS), to reduce latency and network barriers
|
||||
|
||||
Located at `/src/server/trpc/trpc.router-edge.ts`
|
||||
|
||||
@@ -240,3 +241,9 @@ Centralized server for data processing operations:
|
||||
Located at `/src/server/trpc/trpc.router-cloud.ts`
|
||||
|
||||
**Key Pattern**: Edge runtime for AI (fast, distributed), Cloud runtime for data ops (centralized, Node.js)
|
||||
|
||||
@kb/KB.md
|
||||
|
||||
@kb/vision-inlined.md
|
||||
|
||||
As a side note, the product tiers (independent, non-VC-funded) are: **Open** (self-host, MIT) · **Free** (big-agi.com) · **Pro** (paid, includes Sync + backup). All tiers use the user's own API keys.
|
||||
|
||||
+19
-10
@@ -1,5 +1,8 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
# check=skip=CopyIgnoredFile
|
||||
|
||||
# Base
|
||||
FROM node:22-alpine AS base
|
||||
FROM node:24-alpine AS base
|
||||
ENV NEXT_TELEMETRY_DISABLED=1
|
||||
|
||||
# Dependencies
|
||||
@@ -39,19 +42,20 @@ ENV NEXT_PUBLIC_GA4_MEASUREMENT_ID=${NEXT_PUBLIC_GA4_MEASUREMENT_ID}
|
||||
ARG NEXT_PUBLIC_POSTHOG_KEY
|
||||
ENV NEXT_PUBLIC_POSTHOG_KEY=${NEXT_PUBLIC_POSTHOG_KEY}
|
||||
|
||||
# Optional argument to configure Google Drive Picker at build time (can reuse AUTH_GOOGLE_ID value)
|
||||
ARG NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID
|
||||
ENV NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID=${NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID}
|
||||
|
||||
# Copy development deps and source
|
||||
COPY --from=deps /app/node_modules ./node_modules
|
||||
COPY . .
|
||||
|
||||
# link ssl3 for latest Alpine
|
||||
RUN sh -c '[ ! -e /lib/libssl.so.3 ] && ln -s /usr/lib/libssl.so.3 /lib/libssl.so.3 || echo "Link already exists"'
|
||||
|
||||
# Build the application
|
||||
ENV NODE_ENV=production
|
||||
RUN npm run build
|
||||
|
||||
# Reduce installed packages to production-only
|
||||
RUN npm prune --production
|
||||
RUN npm prune --omit=dev
|
||||
|
||||
|
||||
# Runner
|
||||
@@ -59,18 +63,23 @@ FROM base AS runner
|
||||
WORKDIR /app
|
||||
|
||||
# As user
|
||||
RUN addgroup --system --gid 1001 nodejs
|
||||
RUN adduser --system --uid 1001 nextjs
|
||||
RUN addgroup --system --gid 1001 nodejs \
|
||||
&& adduser --system --uid 1001 nextjs \
|
||||
&& apk add --no-cache openssl
|
||||
|
||||
# Copy Built app
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/public ./public
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next ./.next
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/node_modules ./node_modules
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/src/server/prisma ./src/server/prisma
|
||||
# Instead of `COPY --from=builder --chown=nextjs:nodejs /app/.next ./.next`, we only extract some parts, excluding .next/cache which is build time only:
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/BUILD_ID ./.next/
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/server ./.next/server
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/types ./.next/types
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/*.json ./.next/
|
||||
|
||||
# Minimal ENV for production
|
||||
ENV NODE_ENV=production
|
||||
ENV PATH=$PATH:/app/node_modules/.bin
|
||||
|
||||
# Run as non-root user
|
||||
USER nextjs
|
||||
@@ -79,4 +88,4 @@ USER nextjs
|
||||
EXPOSE 3000
|
||||
|
||||
# Start the application
|
||||
CMD ["next", "start"]
|
||||
CMD ["/app/node_modules/.bin/next", "start"]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2023-2025 Enrico Ros
|
||||
Copyright (c) 2023-2026 Enrico Ros
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
[](https://discord.gg/MkH4qj2Jp9)
|
||||
<br/>
|
||||
[](https://github.com/enricoros/big-agi/commits)
|
||||
[](https://github.com/enricoros/big-AGI/pkgs/container/big-agi)
|
||||
[](https://github.com/enricoros/big-AGI/pkgs/container/big-agi)
|
||||
[](https://github.com/enricoros/big-AGI/graphs/contributors)
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
<br/>
|
||||
@@ -37,12 +37,13 @@ You need to think broader, decide faster, and build with confidence, then you ne
|
||||
It comes packed with **world-class features** like Beam, and is praised for its **best-in-class AI chat UX**.
|
||||
**As an independent, non-VC-funded project, Pro subscriptions at $10.99/mo fund development for everyone, including the free and open-source tiers.**
|
||||
|
||||

|
||||

|
||||
[](https://big-agi.com/beam)
|
||||
[](https://big-agi.com/inspector)
|
||||
|
||||
### What makes Big-AGI different:
|
||||
**Intelligence**: with [Beam & Merge](https://big-agi.com/beam) for multi-model de-hallucination, native search, and bleeding-edge AI models like Nano Banana, Kimi K2 Thinking or GPT 5.1 -
|
||||
|
||||
**Intelligence**: with [Beam & Merge](https://big-agi.com/beam) for multi-model de-hallucination, native search, and bleeding-edge AI models like Opus 4.7, Nano Banana Pro, Kimi K2.6 or GPT 5.4 -
|
||||
**Control**: with personas, data ownership, requests inspection, unlimited usage with API keys, and *no vendor lock-in* -
|
||||
and **Speed**: with a local-first, over-powered, zero-latency, madly optimized web app.
|
||||
|
||||
@@ -73,7 +74,7 @@ Purest AI outputs
|
||||
</td>
|
||||
<td align="center" valign="top">
|
||||
Flow-state interface<br/>
|
||||
Highly customizable<br/>
|
||||
Highly customizable<br/>
|
||||
Best-in-class UX
|
||||
</td>
|
||||
<td align="center" valign="top">
|
||||
@@ -138,9 +139,16 @@ so you **are not vendor locked-in**, and obsessed over a powerful UI that works,
|
||||
NOTE: this is a powerful tool - if you need a toy UI or clone, this ain't it.
|
||||
|
||||
|
||||
## What's New in 2.0 · Oct 31, 2025 · Open
|
||||
---
|
||||
|
||||
👉 **[See the full changelog](https://big-agi.com/changes)**
|
||||
## Release Notes
|
||||
|
||||
👉 **[See the Live Release Notes](https://big-agi.com/changes)**
|
||||
- Open 2.0.4: **Hyper Params** **Opus 4.6**, **GPT-5.4**, **Gemini 3.1 Pro**, AWS Bedrock, parameter accuracy, Anthropic continuation/Fast mode
|
||||
- Open 2.0.3: **Red Carpet** **Kimi K2.5**, **Gemini 3 Flash**, **GPT 5.2**, Google Drive, Inworld, Novita.ai, Speech/UX improvements
|
||||
- Open 2.0.2: **Speex** multi-vendor speech synthesis, **Opus 4.5**, **Gemini 3 Pro**, **Nano Banana Pro**, **Grok 4.1**, **GPT-5.1**, **Kimi K2** + 280 fixes
|
||||
|
||||
### What's New in 2.0 · Oct 31, 2025 · Open
|
||||
|
||||
- **Big-AGI Open** is ready and more productive and faster than ever, with:
|
||||
- **Beam 2**: multi-modal, program-based, follow-ups, save presets
|
||||
@@ -153,7 +161,7 @@ NOTE: this is a powerful tool - if you need a toy UI or clone, this ain't it.
|
||||
|
||||
<img width="830" height="385" alt="image" src="https://github.com/user-attachments/assets/ad52761d-7e3f-44d8-b41e-947ce8b4faa1" />
|
||||
|
||||
### Open links: 👉 [changelog](https://big-agi.com/changes) 👉 [installation](docs/installation.md) 👉 [roadmap](https://github.com/users/enricoros/projects/4/views/2) 👉 [documentation](docs/README.md)
|
||||
#### **Open** links: 👉 [changelog](https://big-agi.com/changes) 👉 [installation](docs/installation.md) 👉 [roadmap](https://github.com/users/enricoros/projects/4/views/2) 👉 [documentation](docs/README.md)
|
||||
|
||||
**For teams and institutions:** Need shared prompts, SSO, or managed deployments? Reach out at enrico@big-agi.com. We're actively collecting requirements from research groups and IT departments.
|
||||
|
||||
@@ -175,8 +183,11 @@ The new architecture is solid and the speed improvements are real.
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>What's New in 1.16.1...1.16.10 · 2024-2025 (patch releases)</summary>
|
||||
<summary>What's New in 1.16.1...1.16.13 · (patch releases)</summary>
|
||||
|
||||
- 1.16.13: Docker fix ([#840](https://github.com/enricoros/big-AGI/issues/840))
|
||||
- 1.16.12: Dockerfile update ([#840](https://github.com/enricoros/big-AGI/issues/840))
|
||||
- 1.16.11: v1 final release, documentation updates
|
||||
- 1.16.10: OpenRouter models support
|
||||
- 1.16.9: Docker Gemini fix, R1 models support
|
||||
- 1.16.8: OpenAI ChatGPT-4o Latest, o1 models support
|
||||
@@ -238,7 +249,7 @@ The new architecture is solid and the speed improvements are real.
|
||||
- New **[Perplexity](https://www.perplexity.ai/)** and **[Groq](https://groq.com/)** integration (thanks @Penagwin). [#407](https://github.com/enricoros/big-AGI/issues/407), [#427](https://github.com/enricoros/big-AGI/issues/427)
|
||||
- **[LocalAI](https://localai.io/models/)** deep integration, including support for [model galleries](https://github.com/enricoros/big-AGI/issues/411)
|
||||
- **Mistral** Large and Google **Gemini 1.5** support
|
||||
- Performance optimizations: runs [much faster](https://twitter.com/enricoros/status/1756553038293303434?utm_source=localhost:3000&utm_medium=big-agi), saves lots of power, reduces memory usage
|
||||
- Performance optimizations: runs [much faster](https://x.com/enricoros/status/1756553038293303434?utm_source=localhost:3000&utm_medium=big-agi), saves lots of power, reduces memory usage
|
||||
- Enhanced UX with auto-sizing charts, refined search and folder functionalities, perfected scaling
|
||||
- And with more UI improvements, documentation, bug fixes (20 tickets), and developer enhancements
|
||||
|
||||
@@ -306,7 +317,7 @@ For full details and former releases, check out the [archived versions changelog
|
||||
## 👉 Supported Models & Integrations
|
||||
|
||||
Delightful UX with the latest models and exclusive features like Beam for **multi-model AI validation**.
|
||||
> 
|
||||
> 
|
||||
> [](https://big-agi.com/beam)
|
||||
|
||||
|  |  |  |  |  |
|
||||
@@ -317,16 +328,17 @@ Delightful UX with latest models exclusive features like Beam for **multi-model
|
||||
|
||||
### AI Models & Vendors
|
||||
|
||||
Configure 100s of AI models from 18+ providers:
|
||||
Configure 100s of AI models from 20+ providers:
|
||||
|
||||
| **AI models** | _supported vendors_ |
|
||||
|:--------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Opensource Servers | [LocalAI](https://localai.io/) · [Ollama](https://ollama.com/) |
|
||||
| Local Servers | [LM Studio](https://lmstudio.ai/) (non-open) |
|
||||
| Multimodal services | [Azure](https://azure.microsoft.com/en-us/products/ai-services/openai-service) · [Anthropic](https://anthropic.com) · [Google Gemini](https://ai.google.dev/) · [OpenAI](https://platform.openai.com/docs/overview) |
|
||||
| LLM services | [Alibaba](https://www.alibabacloud.com/en/product/modelstudio) · [DeepSeek](https://deepseek.com) · [Groq](https://wow.groq.com/) · [Mistral](https://mistral.ai/) · [Moonshot](https://www.moonshot.cn/) · [OpenPipe](https://openpipe.ai/) · [OpenRouter](https://openrouter.ai/) · [Perplexity](https://www.perplexity.ai/) · [Together AI](https://www.together.ai/) · [xAI](https://x.ai/) |
|
||||
| Image services | OpenAI · Google Gemini |
|
||||
| Speech services | [ElevenLabs](https://elevenlabs.io) (Voice synthesis / cloning) |
|
||||
| **AI models** | _supported vendors_ |
|
||||
|:--------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Opensource Servers | [LocalAI](https://localai.io/) · [Ollama](https://ollama.com/) |
|
||||
| Local Servers | [LM Studio](https://lmstudio.ai/) (non-open) |
|
||||
| Multimodal services | [Anthropic](https://anthropic.com) · [AWS Bedrock](https://aws.amazon.com/bedrock/) · [Azure](https://azure.microsoft.com/en-us/products/ai-services/openai-service) · [Google Gemini](https://ai.google.dev/) · [OpenAI](https://platform.openai.com/docs/overview) |
|
||||
| LLM services | [Alibaba](https://www.alibabacloud.com/en/product/modelstudio) · [DeepSeek](https://deepseek.com) · [Groq](https://wow.groq.com/) · [Mistral](https://mistral.ai/) · [Moonshot](https://www.moonshot.cn/) · [OpenPipe](https://openpipe.ai/) · [OpenRouter](https://openrouter.ai/) · [Perplexity](https://www.perplexity.ai/) · [Together AI](https://www.together.ai/) · [xAI](https://x.ai/) · [Z.ai](https://z.ai/) |
|
||||
| OpenAI-compatible | Any OpenAI-compatible endpoint - models, pricing, and capabilities are auto-detected |
|
||||
| Image services | OpenAI · Google Gemini (Nano Banana) · LocalAI |
|
||||
| Speech services | [ElevenLabs](https://elevenlabs.io) · [Inworld](https://inworld.ai) · [OpenAI TTS](https://platform.openai.com/docs/guides/text-to-speech) · LocalAI · Browser (Web Speech API) |
|
||||
|
||||
### Additional Integrations
|
||||
|
||||
@@ -334,7 +346,6 @@ Configure 100s of AI models from 18+ providers:
|
||||
|:--------------|:---------------------------------------------------------------------------------------------------------------|
|
||||
| Web Browse | [Browserless](https://www.browserless.io/) · [Puppeteer](https://pptr.dev/)-based |
|
||||
| Web Search | [Google CSE](https://programmablesearchengine.google.com/) |
|
||||
| Code Editors | [CodePen](https://codepen.io/pen/) · [StackBlitz](https://stackblitz.com/) · [JSFiddle](https://jsfiddle.net/) |
|
||||
| Observability | [Helicone](https://www.helicone.ai) |
|
||||
|
||||
---
|
||||
@@ -382,4 +393,4 @@ When you open an issue, our custom AI triage system (powered by [Claude Code](ht
|
||||
|
||||
MIT License · [Third-Party Notices](src/modules/3rdparty/THIRD_PARTY_NOTICES.md)
|
||||
|
||||
**2023-2025** · Enrico Ros × [Big-AGI](https://big-agi.com)
|
||||
**2023-2026** · [Enrico Ros](https://www.enricoros.com) × [Token Fabrics](https://www.tokenfabrics.com)
|
||||
|
||||
@@ -33,7 +33,7 @@ const handlerNodeRoutes = (req: Request) => fetchRequestHandler({
|
||||
|
||||
// NOTE: the following statement breaks the build on non-pro deployments, and conditionals don't work either
|
||||
// so we resorted to raising the timeout from 10s to 60s in the vercel.json file instead
|
||||
export const maxDuration = 60;
|
||||
// export const maxDuration = 60;
|
||||
export const runtime = 'nodejs';
|
||||
export const dynamic = 'force-dynamic';
|
||||
export { handlerNodeRoutes as GET, handlerNodeRoutes as POST };
|
||||
@@ -14,5 +14,7 @@ const handlerEdgeRoutes = (req: Request) => fetchRequestHandler({
|
||||
: undefined,
|
||||
});
|
||||
|
||||
// NOTE: we don't set maxDuration explicitly here - however we set it in the Vercel project settings, raising to the limit of 300s
|
||||
// export const maxDuration = 60;
|
||||
export const runtime = 'edge';
|
||||
export { handlerEdgeRoutes as GET, handlerEdgeRoutes as POST };
|
||||
@@ -2,8 +2,6 @@
|
||||
#
|
||||
# For more examples, such as running big-AGI alongside a web browsing service, see the `docs/docker` folder.
|
||||
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
big-agi:
|
||||
image: ghcr.io/enricoros/big-agi:latest
|
||||
@@ -11,4 +9,3 @@ services:
|
||||
- "3000:3000"
|
||||
env_file:
|
||||
- .env
|
||||
command: [ "next", "start", "-p", "3000" ]
|
||||
@@ -1,3 +1,7 @@
|
||||
---
|
||||
unlisted: true
|
||||
---
|
||||
|
||||
# AIX dispatch server - API features comparison
|
||||
|
||||
This is updated as of 2024-07-09, and includes the latest features and capabilities of the three major AI APIs: Anthropic, Gemini, and OpenAI.
|
||||
|
||||
+13
-5
@@ -10,6 +10,8 @@ Essential guides:
|
||||
|
||||
- **[FAQ](help-faq.md)**: Common questions and answers
|
||||
- **[Enabling Microphone](help-feature-microphone.md)**: Configure speech recognition in your browser
|
||||
- **[Data Ownership](help-data-ownership.md)**: How your data is stored and managed
|
||||
- **[Live File](help-feature-livefile.md)**: Live file attachment feature
|
||||
|
||||
## AI Services
|
||||
|
||||
@@ -21,18 +23,21 @@ How to set up AI models and features in big-AGI.
|
||||
- Easy API key configuration:
|
||||
[Alibaba](https://bailian.console.alibabacloud.com/?apiKey=1#/api-key),
|
||||
[Anthropic](https://console.anthropic.com/settings/keys),
|
||||
[AWS Bedrock](https://console.aws.amazon.com/bedrock/),
|
||||
[Deepseek](https://platform.deepseek.com/api_keys),
|
||||
[Google Gemini](https://aistudio.google.com/app/apikey),
|
||||
[Groq](https://console.groq.com/keys),
|
||||
[Mistral](https://console.mistral.ai/api-keys/),
|
||||
[Moonshot](https://platform.moonshot.cn/console/api-keys),
|
||||
[OpenAI](https://platform.openai.com/api-keys),
|
||||
[OpenPipe](https://app.openpipe.ai/settings),
|
||||
[Perplexity](https://www.perplexity.ai/settings/api),
|
||||
[TogetherAI](https://api.together.xyz/settings/api-keys),
|
||||
[xAI](https://x.ai/api)
|
||||
[xAI](https://x.ai/api),
|
||||
[Z.ai](https://z.ai/)
|
||||
- **[Azure OpenAI](config-azure-openai.md)** guide
|
||||
- **FireworksAI** ([API keys](https://fireworks.ai/account/api-keys), via custom OpenAI endpoint: https://api.fireworks.ai/inference)
|
||||
- **[OpenRouter](config-openrouter.md)** guide
|
||||
- **OpenAI-compatible endpoints**: Any provider with an OpenAI-compatible API works out of the box - models, pricing, and capabilities are auto-detected
|
||||
|
||||
|
||||
- **Local AI Integrations**:
|
||||
@@ -42,8 +47,9 @@ How to set up AI models and features in big-AGI.
|
||||
- **Enhanced AI Features**:
|
||||
- **[Web Browsing](config-feature-browse.md)**: Enable web page download through third-party services or your own cloud
|
||||
- **Web Search**: Google Search API (see '[Environment Variables](environment-variables.md)')
|
||||
- **Image Generation**: GPT Image (gpt-image-1), DALL·E 3 and 2
|
||||
- **Voice Synthesis**: ElevenLabs API for voice generation
|
||||
- **Image Generation**: GPT Image (gpt-image-1), Nano Banana, DALL·E 3 and 2
|
||||
- **Voice Synthesis**: ElevenLabs, Inworld, OpenAI TTS, LocalAI, or browser Web Speech API
|
||||
- **[Google Drive](config-feature-google-drive.md)**: Attach files from Google Drive
|
||||
|
||||
## Deployment & Customization
|
||||
|
||||
@@ -60,8 +66,10 @@ For deploying a custom big-AGI instance:
|
||||
- **Advanced Setup**:
|
||||
- **[Source Code Customization](customizations.md)**: Modify the source code
|
||||
- **[Access Control](deploy-authentication.md)**: Optional, add basic user authentication
|
||||
- **[Database Setup](deploy-database.md)**: Optional, enables "Chat Link Sharing"
|
||||
- **[Reverse Proxy](deploy-reverse-proxy.md)**: Optional, enables custom domains and SSL
|
||||
- **[Docker Deployment](deploy-docker.md)**: Deploy with Docker containers
|
||||
- **[Kubernetes](deploy-k8s.md)**: Deploy on Kubernetes clusters
|
||||
- **[Analytics](deploy-analytics.md)**: Set up usage analytics
|
||||
- **[Environment Variables](environment-variables.md)**: Pre-configures models and services
|
||||
|
||||
## Community & Support
|
||||
|
||||
+5
-3
@@ -20,8 +20,11 @@ by release.
|
||||
- And all of the [Big-AGI 2 changes](https://github.com/enricoros/big-AGI/issues/567#issuecomment-2262187617) and more
|
||||
- Built for the future, madly optimized
|
||||
|
||||
### What's New in 1.16.1...1.16.9 · Jan 21, 2025 (patch releases)
|
||||
### What's New in 1.16.1...1.16.13 · (patch releases)
|
||||
|
||||
- 1.16.13: Docker fix (#840)
|
||||
- 1.16.12: Dockerfile update (#840)
|
||||
- 1.16.11: v1 final release, documentation updates
|
||||
- 1.16.10: OpenRouter models support
|
||||
- 1.16.9: Docker Gemini fix, R1 models support
|
||||
- 1.16.8: OpenAI ChatGPT-4o Latest, o1 models support
|
||||
@@ -70,7 +73,7 @@ by release.
|
||||
- New **[Perplexity](https://www.perplexity.ai/)** and **[Groq](https://groq.com/)** integration (thanks @Penagwin). [#407](https://github.com/enricoros/big-AGI/issues/407), [#427](https://github.com/enricoros/big-AGI/issues/427)
|
||||
- **[LocalAI](https://localai.io/models/)** deep integration, including support for [model galleries](https://github.com/enricoros/big-AGI/issues/411)
|
||||
- **Mistral** Large and Google **Gemini 1.5** support
|
||||
- Performance optimizations: runs [much faster](https://twitter.com/enricoros/status/1756553038293303434?utm_source=localhost:3000&utm_medium=big-agi), saves lots of power, reduces memory usage
|
||||
- Performance optimizations: runs [much faster](https://x.com/enricoros/status/1756553038293303434?utm_source=localhost:3000&utm_medium=big-agi), saves lots of power, reduces memory usage
|
||||
- Enhanced UX with auto-sizing charts, refined search and folder functionalities, perfected scaling
|
||||
- And with more UI improvements, documentation, bug fixes (20 tickets), and developer enhancements
|
||||
- [Release notes](https://github.com/enricoros/big-AGI/releases/tag/v1.14.0), and changes [v1.13.1...v1.14.0](https://github.com/enricoros/big-AGI/compare/v1.13.1...v1.14.0) (233 commits, 8,000+ lines changed)
|
||||
@@ -228,7 +231,6 @@ For Developers:
|
||||
- **[Install Mobile APP](../docs/pixels/feature_pwa.png)** 📲 looks like native (@harlanlewis)
|
||||
- **[UI language](../docs/pixels/feature_language.png)** with auto-detect, and future app language! (@tbodyston)
|
||||
- **PDF Summarization** 🧩🤯 - ask questions to a PDF! (@fredliubojin)
|
||||
- **Code Execution: [Codepen](https://codepen.io/)** 💻 (@harlanlewis)
|
||||
- **[SVG Drawing](../docs/pixels/feature_svg_drawing.png)** - draw with AI 🎨
|
||||
- Chats: multiple chats, AI titles, Import/Export, Selection mode
|
||||
- Rendering: Markdown, SVG, improved Code blocks
|
||||
|
||||
@@ -0,0 +1,55 @@
|
||||
# Google Drive Integration
|
||||
|
||||
Attach files from Google Drive directly in the chat composer.
|
||||
|
||||
## Setup
|
||||
|
||||
### 1. Enable APIs
|
||||
|
||||
In [Google Cloud Console](https://console.cloud.google.com/):
|
||||
|
||||
1. Go to **APIs & Services > Library**
|
||||
2. Enable **Google Drive API** and **Google Picker API**
|
||||
|
||||
### 2. Configure OAuth
|
||||
|
||||
1. Go to **APIs & Services > OAuth consent screen**
|
||||
2. Create consent screen (External or Internal)
|
||||
3. Add scope: `https://www.googleapis.com/auth/drive.file`
|
||||
4. Add test users if in testing mode
|
||||
|
||||
### 3. Create Credentials
|
||||
|
||||
1. Go to **APIs & Services > Credentials**
|
||||
2. Create **OAuth client ID** (Web application)
|
||||
3. Add JavaScript origins:
|
||||
- `http://localhost:3000` (dev)
|
||||
- `https://your-domain.com` (prod)
|
||||
|
||||
### 4. Set Environment Variable
|
||||
|
||||
```bash
|
||||
NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID=your-client-id.apps.googleusercontent.com
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
- Click **Drive** button in attachment menu
|
||||
|
||||
## Supported Files
|
||||
|
||||
| Type | Export Format |
|
||||
|-----------------|---------------------|
|
||||
| Regular files | Downloaded directly |
|
||||
| Google Docs | Markdown (.md) |
|
||||
| Google Sheets | CSV (.csv) |
|
||||
| Google Slides | PDF (.pdf) |
|
||||
| Google Drawings | SVG (.svg) |
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**Picker won't open**: Check `NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID` is set and APIs are enabled.
|
||||
|
||||
**OAuth errors**: Verify your domain is in authorized JavaScript origins. Add yourself as test user if app is in testing mode.
|
||||
|
||||
**Download fails**: Check file permissions and that Drive API is enabled.
|
||||
@@ -41,6 +41,8 @@ In addition to using the UI, configuration can also be done using
|
||||
|
||||
### Integration: Models Gallery
|
||||
|
||||
> Note: The Gallery Admin feature described below may have been removed or renamed in recent versions of big-AGI.
|
||||
|
||||
If the running LocalAI instance is configured with a [Model Gallery](https://localai.io/models/):
|
||||
|
||||
- Go to Models > LocalAI
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
# OpenRouter Configuration
|
||||
|
||||
[OpenRouter](https://openrouter.ai) is a standalone, premium service
|
||||
that provides access to <Link href='https://openrouter.ai/docs#models' target='_blank'>exclusive AI models</Link>
|
||||
such as GPT-4 32k, Claude, and more. These models are typically not available to the public.
|
||||
that provides access to a wide range of AI models from multiple providers through a single API.
|
||||
This document details the process of integrating OpenRouter with big-AGI.
|
||||
|
||||
### 1. OpenRouter Account Setup and API Key Generation
|
||||
@@ -20,7 +19,7 @@ This document details the process of integrating OpenRouter with big-AGI.
|
||||

|
||||
3. Input the API key into the **OpenRouter API Key** field, and load the Models.
|
||||

|
||||
4. OpenAI GPT4-32k and other models will now be accessible and selectable in the application.
|
||||
4. Models from all supported providers will now be accessible and selectable in the application.
|
||||
|
||||
In addition to using the UI, configuration can also be done using
|
||||
[environment variables](environment-variables.md).
|
||||
@@ -30,5 +29,5 @@ In addition to using the UI, configuration can also be done using
|
||||
OpenRouter independently manages its service and pricing and is not affiliated with big-AGI.
|
||||
For more detailed information, please visit [this page](https://openrouter.ai/docs#models).
|
||||
|
||||
Please note that running large models such as GPT-4 32k can be costly and may rapidly consume
|
||||
credits - a single prompt may cost $1 or more, at the time of writing.
|
||||
Please note that running large models can be costly and may rapidly consume credits.
|
||||
Check model pricing on the OpenRouter website before use.
|
||||
@@ -49,8 +49,8 @@ Edit the `src/data.ts` file to customize personas. This file houses the default
|
||||
Adapt the UI to match your project's aesthetic, incorporate new features, or exclude unnecessary ones.
|
||||
|
||||
- [ ] Adjust `src/common/app.theme.ts` for theme changes: colors, spacing, button appearance, animations, etc
|
||||
- [ ] Modify `src/common/app.config.tsx` to alter the application's name
|
||||
- [ ] Update `src/common/app.nav.tsx` to revise the navigation bar
|
||||
- [ ] Modify `src/common/app.release.ts` to alter the application's name
|
||||
- [ ] Update `src/common/app.nav.ts` to revise the navigation bar
|
||||
|
||||
### Add a Message of the Day
|
||||
|
||||
@@ -71,7 +71,7 @@ Example: `NEXT_PUBLIC_MOTD=🚀 New features available in {{app_build_pkgver}}!
|
||||
|
||||
Test your application thoroughly using local development (refer to README.md for local build instructions). Deploy using your preferred hosting service. big-AGI supports deployment on platforms like Vercel, Docker, or any Node.js-compatible service, especially those supporting NextJS's "Edge Runtime."
|
||||
|
||||
- [deploy-cloudflare.md](deploy-cloudflare.md): for Cloudflare Workers deployment
|
||||
- [deploy-cloudflare.md](deploy-cloudflare.md): for Cloudflare Pages deployment (limited support)
|
||||
- [deploy-docker.md](deploy-docker.md): for Docker deployment instructions and examples
|
||||
- [deploy-k8s.md](deploy-k8s.md): for Kubernetes deployment instructions and examples
|
||||
|
||||
|
||||
@@ -51,13 +51,13 @@ Vercel Analytics and Speed Insights are local API endpoints deployed to your dom
|
||||
domain. Furthermore, the Vercel Analytics service is privacy-friendly, and does not track individual users.
|
||||
|
||||
This service is available to system administrators and is automatically enabled when deploying to Vercel.
|
||||
The code that activates Vercel Analytics is located in the `src/pages/_app.tsx` file:
|
||||
The code that activates Vercel Analytics is located in the `pages/_app.tsx` file:
|
||||
|
||||
```tsx
|
||||
const MyApp = ({ Component, emotionCache, pageProps }: MyAppProps) => <>
|
||||
...
|
||||
{isVercelFromFrontend && <VercelAnalytics debug={false} />}
|
||||
{isVercelFromFrontend && <VercelSpeedInsights debug={false} sampleRate={1 / 2} />}
|
||||
{Is.Deployment.VercelFromFrontend && <VercelAnalytics debug={false} />}
|
||||
{Is.Deployment.VercelFromFrontend && <VercelSpeedInsights debug={false} sampleRate={1 / 2} />}
|
||||
...
|
||||
</>;
|
||||
```
|
||||
|
||||
@@ -1,18 +1,20 @@
|
||||
---
|
||||
unlisted: true
|
||||
---
|
||||
|
||||
# Deploying a Next.js App on Cloudflare Pages
|
||||
|
||||
> WARNING: Cloudflare Pages does not support traditional NodeJS runtimes, but only Edge Runtime functions.
|
||||
> WARNING: Cloudflare Pages only supports Edge Runtime functions, not the full Node.js runtime.
|
||||
>
|
||||
> In this project we use Prisma connected to serverless Postgres, which at the moment cannot run on
|
||||
> edge functions, so we cannot deploy this project on Cloudflare Pages.
|
||||
> The cloud router in this project requires a Node.js runtime for Supabase SDK, authentication,
|
||||
> sync, and other server-side features that cannot run on Cloudflare's edge runtime.
|
||||
>
|
||||
> Workaround: Step 3.4. has been added below, to DELETE the NodeJS traditional runtime - which means that some
|
||||
> Workaround: Step 3.4. has been added below, to DELETE the Node.js cloud router - which means that some
|
||||
> parts of this application will not work.
|
||||
> - [Side effects](https://github.com/enricoros/big-agi/blob/main/src/apps/chat/trade/server/trade.router.ts#L19):
|
||||
> Sharing functionality to DB, and import from ChatGPT share, and post to Paste.GG will not work
|
||||
> - [Side effects](https://github.com/enricoros/big-agi/blob/main/src/modules/trade/server/trade.router.ts):
|
||||
> Sharing functionality, import from ChatGPT share, and post to Paste.GG will not work
|
||||
> - Cloud features (sync, auth, payments) will not be available
|
||||
> - See [Issue 174](https://github.com/enricoros/big-agi/issues/174).
|
||||
>
|
||||
> Longer term: follow [prisma/prisma: Support Edge Function deployments](https://github.com/prisma/prisma/issues/21394)
|
||||
> and convert the Node runtime to Edge runtime once Prisma supports it.
|
||||
|
||||
This guide provides steps to deploy your Next.js app on Cloudflare Pages.
|
||||
It is based on the [official Cloudflare developer documentation](https://developers.cloudflare.com/pages/framework-guides/deploy-a-nextjs-site/),
|
||||
|
||||
@@ -19,7 +19,6 @@ services:
|
||||
- .env
|
||||
environment:
|
||||
- PUPPETEER_WSS_ENDPOINT=ws://browserless:3000
|
||||
command: [ "next", "start", "-p", "3000" ]
|
||||
depends_on:
|
||||
- browserless
|
||||
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
# Why big-AGI?
|
||||
Placeholder for a document that demonstrates the productivity and unique features of Big-AGI.
|
||||
|
||||
## Exclusive features
|
||||
- [x] Call AGI
|
||||
- [x] Continuous Voice mode
|
||||
- [x] Diagram generation
|
||||
- [ ] ...
|
||||
|
||||
## Productivity Features
|
||||
- [x] Multi-window to never wait
|
||||
- [x] Multi-Chat to explore different solutions
|
||||
- [x] Rendering of graphs, charts, mindmaps
|
||||
- [ ] ...
|
||||
@@ -3,7 +3,7 @@
|
||||
This document provides an explanation of the environment variables used in the big-AGI application.
|
||||
|
||||
**All variables are optional**; and _UI options_ take precedence over _backend environment variables_,
|
||||
which take precedence over _defaults_. This file is kept in sync with [`../src/server/env.ts`](../src/server/env.ts).
|
||||
which take precedence over _defaults_. This file is kept in sync with [`../src/server/env.server.ts`](../src/server/env.server.ts).
|
||||
|
||||
### Setting Environment Variables
|
||||
|
||||
@@ -29,6 +29,11 @@ AZURE_OPENAI_API_ENDPOINT=
|
||||
AZURE_OPENAI_API_KEY=
|
||||
ANTHROPIC_API_KEY=
|
||||
ANTHROPIC_API_HOST=
|
||||
BEDROCK_BEARER_TOKEN=
|
||||
BEDROCK_ACCESS_KEY_ID=
|
||||
BEDROCK_SECRET_ACCESS_KEY=
|
||||
BEDROCK_SESSION_TOKEN=
|
||||
BEDROCK_REGION=
|
||||
DEEPSEEK_API_KEY=
|
||||
GEMINI_API_KEY=
|
||||
GROQ_API_KEY=
|
||||
@@ -66,8 +71,9 @@ HTTP_BASIC_AUTH_PASSWORD=
|
||||
# Frontend variables
|
||||
NEXT_PUBLIC_MOTD=
|
||||
NEXT_PUBLIC_GA4_MEASUREMENT_ID=
|
||||
NEXT_PUBLIC_POSTHOG_KEY=
|
||||
NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID=
|
||||
NEXT_PUBLIC_PLANTUML_SERVER_URL=
|
||||
NEXT_PUBLIC_POSTHOG_KEY=
|
||||
```
|
||||
|
||||
## Backend Variables
|
||||
@@ -99,7 +105,12 @@ requiring the user to enter an API key
|
||||
| `AZURE_OPENAI_API_VERSION` | API version for traditional deployment-based endpoints | Optional, defaults to '2025-04-01-preview' |
|
||||
| `AZURE_DEPLOYMENTS_API_VERSION` | API version for the deployments listing endpoint | Optional, defaults to '2023-03-15-preview' |
|
||||
| `ANTHROPIC_API_KEY` | The API key for Anthropic | Optional |
|
||||
| `ANTHROPIC_API_HOST` | Changes the backend host for the Anthropic vendor, to enable platforms such as AWS Bedrock | Optional |
|
||||
| `ANTHROPIC_API_HOST` | Changes the backend host for the Anthropic vendor, for proxies or custom endpoints | Optional |
|
||||
| `BEDROCK_BEARER_TOKEN` | Bedrock long-term API key (`ABSK...`). Takes priority over IAM credentials. Short-term keys only work for runtime, not model listing | Optional |
|
||||
| `BEDROCK_ACCESS_KEY_ID` | AWS IAM Access Key ID for Bedrock (Claude models via AWS) | Optional, but if set `BEDROCK_SECRET_ACCESS_KEY` must also be set |
|
||||
| `BEDROCK_SECRET_ACCESS_KEY` | AWS IAM Secret Access Key for Bedrock | Optional, but if set `BEDROCK_ACCESS_KEY_ID` must also be set |
|
||||
| `BEDROCK_SESSION_TOKEN` | AWS Session Token for temporary/STS credentials | Optional |
|
||||
| `BEDROCK_REGION` | AWS region for Bedrock (e.g., `us-east-1`, `us-west-2`, `eu-west-1`) | Optional, defaults to `us-east-1` |
|
||||
| `DEEPSEEK_API_KEY` | The API key for Deepseek AI | Optional |
|
||||
| `GEMINI_API_KEY` | The API key for Google AI's Gemini | Optional |
|
||||
| `GROQ_API_KEY` | The API key for Groq Cloud | Optional |
|
||||
@@ -132,10 +143,11 @@ Enable the app to Talk, Draw, and Google things up.
|
||||
|
||||
| Variable | Description |
|
||||
|:---------------------------|:------------------------------------------------------------------------------------------------------------------------|
|
||||
| **Text-To-Speech** | [ElevenLabs](https://elevenlabs.io/) is a high quality speech synthesis service |
|
||||
| **Text-To-Speech** | ElevenLabs, Inworld, OpenAI TTS, LocalAI, and browser Web Speech API are supported |
|
||||
| `ELEVENLABS_API_KEY` | ElevenLabs API Key - used for calls, etc. |
|
||||
| `ELEVENLABS_API_HOST` | Custom host for ElevenLabs |
|
||||
| `ELEVENLABS_VOICE_ID` | Default voice ID for ElevenLabs |
|
||||
| | *Note: OpenAI TTS and LocalAI TTS reuse credentials from your configured LLM services (no separate env vars needed)* |
|
||||
| **Google Custom Search** | [Google Programmable Search Engine](https://programmablesearchengine.google.com/about/) produces links to pages |
|
||||
| `GOOGLE_CLOUD_API_KEY` | Google Cloud API Key, used with the '/react' command - [Link to GCP](https://console.cloud.google.com/apis/credentials) |
|
||||
| `GOOGLE_CSE_ID` | Google Custom/Programmable Search Engine ID - [Link to PSE](https://programmablesearchengine.google.com/) |
|
||||
@@ -154,8 +166,9 @@ The value of these variables are passed to the frontend (Web UI) - make sure the
|
||||
| `NEXT_PUBLIC_DEBUG_BREAKS` | (optional, development) When set to 'true', enables automatic debugger breaks on DEV/error/critical logs in development builds |
|
||||
| `NEXT_PUBLIC_MOTD` | Message of the Day - displays a dismissible banner at the top of the app (see [customizations](customizations.md) for the template variables). Example: 🔔 Welcome to our deployment! Version {{app_build_pkgver}} built on {{app_build_time}}. |
|
||||
| `NEXT_PUBLIC_GA4_MEASUREMENT_ID` | (optional) The measurement ID for Google Analytics 4. (see [deploy-analytics](deploy-analytics.md)) |
|
||||
| `NEXT_PUBLIC_POSTHOG_KEY` | (optional) Key for PostHog analytics. (see [deploy-analytics](deploy-analytics.md)) |
|
||||
| `NEXT_PUBLIC_GOOGLE_DRIVE_CLIENT_ID` | (optional) Google OAuth Client ID for Drive Picker. Can reuse `AUTH_GOOGLE_ID`. See [Google Drive](config-feature-google-drive.md) |
|
||||
| `NEXT_PUBLIC_PLANTUML_SERVER_URL` | The URL of the PlantUML server, used for rendering UML diagrams. Allows using custom local servers. |
|
||||
| `NEXT_PUBLIC_POSTHOG_KEY` | (optional) Key for PostHog analytics. (see [deploy-analytics](deploy-analytics.md)) |
|
||||
|
||||
> Important: these variables must be set at build time, which is required by Next.js to pass them to the frontend.
|
||||
> This is in contrast to the backend variables, which can be set when starting the local server/container.
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
---
|
||||
unlisted: true
|
||||
---
|
||||
|
||||
# Big-AGI Advanced Tips & Tricks
|
||||
|
||||
> 🚨 This file is not meant for publication, and it's just been created as a handbook with tips
|
||||
|
||||
@@ -30,6 +30,12 @@ You can see your data in your browser's local storage and IndexedDB - try it you
|
||||
|
||||

|
||||
|
||||
### Sync for Authenticated Users
|
||||
|
||||
Users with accounts on big-agi.com who opt into Sync (a Pro feature) have their entity data - such as conversations and personas - replicated to the server for multi-device access.
|
||||
Server-side data is isolated per-user using Row Level Security (RLS), ensuring that no other user can access your synced data.
|
||||
Sync is entirely optional; without it, all data remains local to your browser.
|
||||
|
||||
### What This Means For You
|
||||
|
||||
Storing data in your browser means:
|
||||
@@ -43,7 +49,7 @@ Storing data in your browser means:
|
||||
|
||||
Big-AGI generates a _device identifier_ that combines timestamp and random components, stored only on your device. This identifier:
|
||||
|
||||
- Is used only for the **optional sync functionality** between your devices (not yet ready)
|
||||
- Is used only for the **optional sync functionality** between your devices
|
||||
- Helps maintain data consistency when using Big-AGI across multiple devices
|
||||
- Remains completely local unless you explicitly enable sync
|
||||
- Is not used for tracking, analytics, or telemetry
|
||||
and then are sent to the upstream AI services.
|
||||
|
||||

|
||||
|
||||
### Direct Connection (Browser → AI Service)
|
||||
|
||||
Most AI services offer a **Direct Connection** toggle (under a service's Advanced settings). When enabled, the browser calls the AI provider's API directly, skipping the Big-AGI server entirely.
|
||||
|
||||
Benefits:
|
||||
|
||||
- **No 4.5 MB upload limit** - the Vercel body-size cap does not apply, so larger attachments and long prompts go through.
|
||||
- **No 300-second timeout** - the Vercel function timeout does not apply, so long-running generations keep streaming.
|
||||
- **More privacy** - connection metadata (IP, timestamp, edge region, Vercel telemetry) is not observable by the Big-AGI edge server.
|
||||
|
||||
Tradeoff:
|
||||
|
||||
- **Slightly more downlink bandwidth**: when traffic passes through the Big-AGI edge, repetitive streaming frames are compacted; direct streams arrive verbatim from the provider.
|
||||
|
||||
Availability requires both:
|
||||
|
||||
1. The API key is set in your browser (client-side), not via server environment variables. Server-key deployments cannot use Direct Connection because the browser has no credential to send.
|
||||
2. The AI service allows CORS (browser-origin requests). Most major providers do; Big-AGI sets any extra headers they require.
|
||||
|
||||
Direct Connection is a net win on speed, limits, and privacy whenever the provider permits it.
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
**Basic Security**:
|
||||
|
||||
@@ -2,6 +2,26 @@
|
||||
|
||||
Quick answers to common questions about Big-AGI. For detailed documentation, see our [Website Docs](https://big-agi.com/docs).
|
||||
|
||||
### Connectivity
|
||||
|
||||
<details open>
|
||||
<summary><b>What is "Direct Connection" and should I enable it?</b></summary>
|
||||
|
||||
Direct Connection lets the browser call the AI provider's API directly, skipping the Big-AGI edge server. It appears as a toggle in each AI service's Advanced settings when your API key is set client-side.
|
||||
|
||||
**When available, it is a net win**: faster, fewer restrictions, more privacy.
|
||||
|
||||
- **No 4.5 MB upload limit** (Vercel body-size cap does not apply).
|
||||
- **No 300-second timeout** (Vercel function timeout does not apply; call length is bound only by the AI service).
|
||||
- **More privacy** - connection metadata (IP, timestamp, edge region, Vercel telemetry) is not observable by the Big-AGI edge server.
|
||||
- **Slightly more downlink bandwidth** - when passing through the edge, Big-AGI sheds repetitive streaming frames; direct streams arrive verbatim.
|
||||
|
||||
**When it is unavailable**:
|
||||
|
||||
1. **Server-side keys** - if the deployment stores API keys in server environment variables, the browser has no credential to send directly.
|
||||
2. **Provider does not allow CORS** - browsers cannot call APIs that block cross-origin requests. Most major providers permit it; Big-AGI sets any required headers.
|
||||
</details>
|
||||
|
||||
### Versions
|
||||
|
||||
<details open>
|
||||
|
||||
+4
-10
@@ -7,7 +7,7 @@ process for your own instance of big-AGI and related products.
|
||||
|
||||
**Try big-AGI** - You don't need to install anything if you want to play with big-AGI
|
||||
and have your API keys to various model services. You can access our free instance on [big-AGI.com](https://big-agi.com).
|
||||
The free instance runs the latest `main-stable` branch from this repository.
|
||||
The free instance runs the latest `main` branch from this repository.
|
||||
|
||||
## 🧩 Build-your-own
|
||||
|
||||
@@ -72,9 +72,8 @@ Create your GitHub fork, create a Vercel project over that fork, and deploy it.
|
||||
|
||||
### Deploy on Cloudflare
|
||||
|
||||
Deploy on Cloudflare's global network by installing big-AGI on
|
||||
Cloudflare Pages. Check out the [Cloudflare Installation Guide](deploy-cloudflare.md)
|
||||
for step-by-step instructions.
|
||||
> Note: Cloudflare Pages deployment has limitations due to Edge Runtime constraints.
|
||||
> See the [Cloudflare guide](deploy-cloudflare.md) for details and known issues.
|
||||
|
||||
### Docker Deployments
|
||||
|
||||
@@ -136,11 +135,6 @@ Deploy big-AGI on a Kubernetes cluster for enhanced scalability and management.
|
||||
|
||||
For more detailed instructions on Kubernetes deployment, including updating and troubleshooting, refer to our [Kubernetes Deployment Guide](deploy-k8s.md).
|
||||
|
||||
### Midori AI Subsystem for Docker Deployment
|
||||
|
||||
Follow the instructions found on [Midori AI Subsystem Site](https://io.midori-ai.xyz/subsystem/manager/)
|
||||
for your host OS. After completing the setup process, install the Big-AGI docker backend to the Midori AI Subsystem.
|
||||
|
||||
## Enterprise-Grade Installation
|
||||
|
||||
For businesses seeking a fully-managed, scalable solution, consider our managed installations.
|
||||
@@ -151,6 +145,6 @@ Enjoy all the features of big-AGI without the hassle of infrastructure managemen
|
||||
Join our vibrant community of developers, researchers, and AI enthusiasts. Share your projects, get help, and collaborate with others.
|
||||
|
||||
- [Discord Community](https://discord.gg/MkH4qj2Jp9)
|
||||
- [Twitter](https://twitter.com/enricoros)
|
||||
- [X (Twitter)](https://x.com/enricoros)
|
||||
|
||||
For any questions or inquiries, please don't hesitate to [reach out to our team](mailto:hello@big-agi.com).
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
---
|
||||
unlisted: true
|
||||
---
|
||||
|
||||
# ReAct: question answering with Reasoning and Actions
|
||||
|
||||
## What is ReAct?
|
||||
|
||||
@@ -14,4 +14,10 @@ const compat = new FlatCompat({
|
||||
|
||||
export default defineConfig([{
|
||||
extends: compat.extends("next/core-web-vitals"),
|
||||
rules: {
|
||||
//
|
||||
"react-hooks/exhaustive-deps": ["warn", {
|
||||
additionalHooks: "(useMemoShallowStable)",
|
||||
}],
|
||||
},
|
||||
}]);
|
||||
@@ -1,35 +1,46 @@
|
||||
# Knowledge Base
|
||||
## Knowledge Base
|
||||
|
||||
Internal documentation for Big-AGI architecture and systems, for use by AI agents and developers.
|
||||
Architecture and system documentation is available in the `/kb/` knowledge base, for use by AI agents and developers.
|
||||
|
||||
**Structure:**
|
||||
- `/kb/KB.md` - Already in context: this text
|
||||
- `/kb/vision-inlined.md` - Already in context (next section): long-term vision and north stars
|
||||
- `/kb/modules/` - Core business logic (e.g. AIX)
|
||||
- `/kb/systems/` - Infrastructure (routing, startup)
|
||||
|
||||
## Index
|
||||
|
||||
### Modules Documentation
|
||||
|
||||
#### AIX - AI Communication Framework
|
||||
- **[AIX.md](modules/AIX.md)** - AIX streaming architecture documentation
|
||||
- **[AIX-callers-analysis.md](modules/AIX-callers-analysis.md)** - Analysis of AIX entry points, call chains, common and different rendering, error handling, etc.
|
||||
|
||||
#### CSF - Client-Side Fetch
|
||||
- **[CSF.md](systems/client-side-fetch.md)** - Direct browser-to-API communication for LLM requests
|
||||
|
||||
#### LLM - Language Model Metadata
|
||||
- **[LLM-editorial-control.md](modules/LLM-editorial-pubdate.md)** - Where we have editorial control over per-model metadata vs dynamic discovery; `pubDate` field semantics, propagation chain, resolution rules, per-vendor matrix
|
||||
- **[LLM-models-catalog-pipeline.md](modules/LLM-models-catalog-pipeline.md)** - Forward-looking pipeline: extraction script, snapshot artifact, website consumption, future schema extensions
|
||||
|
||||
#### LLM - Vendor APIs
|
||||
- **[LLM-gemini-interactions.md](modules/LLM-gemini-interactions.md)** - Gemini Interactions API (Deep Research): endpoints, status taxonomy, two retrieval paths (SSE replay vs JSON GET), known failure modes (10-min cuts, zombies), UI surface
|
||||
|
||||
### Systems Documentation
|
||||
|
||||
#### Core Platform Systems
|
||||
- **[app-routing.md](systems/app-routing.md)** - Next.js routing, provider stack, and display state hierarchy
|
||||
- **[LLM-parameters-system.md](systems/LLM-parameters-system.md)** - Language model parameter flow across the system
|
||||
- **[LLM-vendor-integration.md](modules/LLM-vendor-integration.md)** - Adding new LLM providers
|
||||
|
||||
## Guidelines
|
||||
### KB Guidelines
|
||||
|
||||
### Writing Style
|
||||
#### Writing Style
|
||||
|
||||
- **Direct and factual** - No marketing language
|
||||
- **Present tense** - "AIX handles streaming" not "AIX will handle"
|
||||
- **Active voice** - "The system processes" not "Processing is done by"
|
||||
- **Concrete examples** - Show actual code/config when helpful, briefly
|
||||
|
||||
### Maintenance
|
||||
#### Maintenance
|
||||
|
||||
- Remove outdated information when detected!
|
||||
- Remove outdated knowledge base information when detected
|
||||
- Keep cross-references current when files move
|
||||
|
||||
@@ -7,8 +7,8 @@ This document analyzes all AIX function callers and their patterns for message r
|
||||
### Three-Tier Call Hierarchy
|
||||
|
||||
**Core AIX Functions** (Direct tRPC API callers):
|
||||
- `aixChatGenerateContent_DMessage_FromConversation` - 8 callers (conversation streaming)
|
||||
- `aixChatGenerateContent_DMessage` - 6 callers (direct request/response)
|
||||
- `aixChatGenerateContent_DMessage_FromConversation` - 9 callers (conversation streaming)
|
||||
- `aixChatGenerateContent_DMessage_orThrow` - 6 callers (direct request/response)
|
||||
- `aixChatGenerateText_Simple` - 12 callers (text-only utilities)
|
||||
|
||||
**Utility Layer** (Hooks & Functions):
|
||||
@@ -24,6 +24,7 @@ This document analyzes all AIX function callers and their patterns for message r
|
||||
| **Caller** | **Context** | **Message Removal** | **Placeholder** | **Error Handling** |
|
||||
|------------|-------------|-------------------|----------------|-------------------|
|
||||
| **Chat Persona** | `'conversation'` | `messageWasInterruptedAtStart()` → `removeMessage()` | None | Error fragments |
|
||||
| **XE Chat Generate** | `'conversation'` | `messageWasInterruptedAtStart()` → `removeMessage()` | `'...'` placeholder | Error fragments via messageEditor |
|
||||
| **Beam Scatter** | `'beam-scatter'` | `messageWasInterruptedAtStart()` → empty message | `SCATTER_PLACEHOLDER` | Ray status update |
|
||||
| **Beam Gather** | `'beam-gather'` | `messageWasInterruptedAtStart()` → clear fragments | `GATHER_PLACEHOLDER` | Re-throw errors |
|
||||
| **Beam Follow-up** | `'beam-followup'` | `messageWasInterruptedAtStart()` → remove message | `FOLLOWUP_PLACEHOLDER` | Status updates |
|
||||
|
||||
+5
-4
@@ -37,6 +37,7 @@ Built with tRPC, it manages the lifecycle of AI-generated content from request t
|
||||
| Perplexity | ✅ | ❌ (rejected) | | ✅ | Yes + 📦 | |
|
||||
| TogetherAI | ✅ | ✅ | | ✅ | Yes + 📦 | |
|
||||
| xAI | | | | | | |
|
||||
| Z.ai | ✅ | ✅ | Img: ✅ | ✅ | Yes + 📦 | Thinking mode |
|
||||
| Ollama (2) | ❌ (broken) | ? | | | | |
|
||||
|
||||
Notes:
|
||||
@@ -91,12 +92,12 @@ AIX is organized into the following files and folders:
|
||||
|
||||
- Dispatch (`/server/dispatch/`) - Server to AI Provider communication:
|
||||
- `/server/dispatch/chatGenerate/`: Content Generation with chat-style inputs:
|
||||
- `./adapters/`: Adapters for creating API requests for different AI protocols (Anthropic, Gemini, OpenAI).
|
||||
- `./parsers/`: Parsers for parsing streaming/non-streamin responses from different AI protocols (same 3).
|
||||
- `./adapters/`: Adapters for creating API requests for different AI protocols (Anthropic, Bedrock, Gemini, OpenAI Chat Completions, OpenAI Responses, xAI Responses).
|
||||
- `./parsers/`: Parsers for parsing streaming/non-streaming responses from different AI protocols (Anthropic, Bedrock Converse, Gemini, OpenAI, OpenAI Responses).
|
||||
- `chatGenerate.dispatch.ts`: Creates a pipeline to execute Chat Generation to a specific provider.
|
||||
- `ChatGenerateTransmitter.ts`: Used to serialize and transmit AixWire_Particles to the client.
|
||||
- `/server/dispatch/wiretypes/`: AI provider Wire Types:
|
||||
- Type definitions for different AI providers/protocols (Anthropic, Gemini, OpenAI).
|
||||
- Type definitions for different AI providers/protocols (Anthropic, Bedrock Converse, Gemini, OpenAI, xAI).
|
||||
- `stream.demuxers.ts`: Handles demuxing of different stream formats.
|
||||
|
||||
## 3. Architecture Diagram
|
||||
@@ -159,7 +160,7 @@ sequenceDiagram
|
||||
AIX Client ->> AIX Client: Display error message
|
||||
else DMessageDocPart
|
||||
AIX Client ->> AIX Client: Process and display document
|
||||
else DMetaPlaceholderPart
|
||||
else DVoidPlaceholderPart
|
||||
AIX Client ->> AIX Client: Handle placeholder (non-submitted)
|
||||
end
|
||||
end
|
||||
|
||||
@@ -0,0 +1,106 @@
|
||||
# LLM Editorial Control Surface
|
||||
|
||||
This document maps where Big-AGI has editorial control over per-model metadata (and therefore can guarantee fields like `pubDate`, curated `description`, `chatPrice`, `benchmark`, `parameterSpecs`, etc.) versus where it must rely on the vendor API's dynamic discovery (and therefore cannot guarantee them).
|
||||
|
||||
For the forward-looking pipeline (extraction script, snapshot, website consumption, future schema extensions), see [LLM-models-catalog-pipeline.md](LLM-models-catalog-pipeline.md).
|
||||
|
||||
|
||||
## The `pubDate` field
|
||||
|
||||
`pubDate?: string` (validated as `/^\d{8}$/`, e.g. `'20250929'`) is **optional** in the wire schema and on `DLLM`. It was added to:
|
||||
|
||||
- `ModelDescription_schema` in `src/modules/llms/server/llm.server.types.ts` - the canonical wire type
|
||||
- `OrtVendorLookupResult` in the same file - so OpenRouter inherits it via `llmOrt*Lookup`
|
||||
- `DLLM` in `src/common/stores/llms/llms.types.ts` - the persisted client model
|
||||
|
||||
### Where `pubDate` is guaranteed (always emitted)
|
||||
|
||||
- **Editorial entries** in 12 hybrid/editorial vendors (282 models). Hand-curated, externally corroborated. Future entries in these arrays are expected to include `pubDate`.
|
||||
- **Anthropic 0-day placeholder** (`llmsAntCreatePlaceholderModel`): when the API surfaces an Anthropic model not in the editorial list, the placeholder uses the API's `created_at` ISO date, falling back to today via `formatPubDate()`.
|
||||
- **Gemini 0-day fallback** (`geminiModelToModelDescription`): when the API returns a Gemini model not in `_knownGeminiModels`, the converter falls back to today via `formatPubDate()` (Gemini API does not expose a creation timestamp).
|
||||
|
||||
### Where `pubDate` is omitted (optional)
|
||||
|
||||
- **Symlink entries** (`KnownLink`) - inherit the target's `pubDate` via the merge logic in `fromManualMapping`.
|
||||
- **Unknown variants resolved through `super`/`fallback`** in `fromManualMapping` for non-Anthropic/non-Gemini vendors - the field is left undefined rather than fabricated.
|
||||
- **Dynamic-only vendors** (OpenRouter, TogetherAI, Novita, ChutesAI, FireworksAI, TLUS, Azure, LM Studio, LocalAI, FastAPI, ArceeAI, LLMAPI) - no editorial knob; pubDate flows in only when the underlying lookup or upstream API populates it.
|
||||
|
||||
The rationale: today's date is a defensible 0-day proxy only when we know we're seeing a brand-new model the vendor just announced (Anthropic and Gemini's "discovery via official model list" paths). For arbitrary dynamic vendors, fabricating today would mark old/well-known models as new - misleading. Better to omit.
|
||||
|
||||
### Propagation chain
|
||||
|
||||
- `fromManualMapping()` in `src/modules/llms/server/models.mappings.ts` - copies the field for OAI-style vendors when present
|
||||
- `geminiModelToModelDescription()` in `src/modules/llms/server/gemini/gemini.models.ts` - copies for Gemini, falls back to today for unknowns
|
||||
- `llmsAntCreatePlaceholderModel()` in `src/modules/llms/server/anthropic/anthropic.models.ts` - emits from API `created_at` (or today)
|
||||
- `_mergeLookup()` in `src/modules/llms/server/openai/models/openrouter.models.ts` - merges for OpenRouter cross-vendor inheritance
|
||||
- `_createDLLMFromModelDescription()` in `src/modules/llms/llm.client.ts` - copies onto the persisted DLLM when present
|
||||
- `formatPubDate()` helper in `src/modules/llms/server/models.mappings.ts` - shared `'YYYYMMDD'` formatter for the 0-day-fillable paths
|
||||
|
||||
### Semantics
|
||||
|
||||
`pubDate` is the **earliest public availability** of the model - the date on which the vendor first made this specific model usable by external users via any channel (consumer app, web, console, API, partner, open-weights upload).
|
||||
|
||||
It is **not**:
|
||||
|
||||
- The date Big-AGI added the entry to its catalog (Ollama uses `added` for that)
|
||||
- The training-data cutoff (proposed but not implemented; see `src/common/stores/llms/llms.types.next.ts:217`)
|
||||
- The date the model snapshot was built (suffixes like `-1212` may refer to build dates, but `pubDate` tracks public availability)
|
||||
|
||||
### Resolution rules (when sources conflict)
|
||||
|
||||
1. **Date-suffixed model IDs**: when the suffix matches a documented announcement, the suffix is canonical (vendor convention). xAI, OpenAI, and Mistral all use suffixes that closely track release dates.
|
||||
2. **Anthropic exception**: Anthropic's date suffixes are typically the **snapshot/training-cutoff date, not the public release date**. For example, `claude-3-7-sonnet-20250219` was released on 2025-02-24, `claude-opus-4-20250514` was released 2025-05-22, and `claude-haiku-4-5-20251001` was released 2025-10-15. Always corroborate against Anthropic's blog/press for the actual release date. Only `claude-sonnet-4-5-20250929` and `claude-opus-4-1-20250805` have suffixes that match.
|
||||
3. **Closed beta -> public beta -> GA**: use the first date *external* users could access the specific variant.
|
||||
4. **Family-headline IDs and dated snapshots** (e.g., `claude-opus-4-1` and `claude-opus-4-1-20250805`): typically share a release date.
|
||||
5. **Hosted on a third party** (Groq hosting Llama, OpenPipe mirroring others, OpenRouter aggregating): use the *underlying* model's original release date by its creator, not when the host added it.
|
||||
6. **Symlinks** (entries with `symLink:`): inherit the target's date.
|
||||
7. **Partial dates** (only month known): use the 1st of the month and tag as MEDIUM confidence in the editor's note.
|
||||
|
||||
|
||||
## Editorial control matrix
|
||||
|
||||
Three categories:
|
||||
|
||||
- **Editorial** - the vendor file contains hand-curated entries; we control descriptions, pricing, benchmarks, interfaces, parameter specs, and `pubDate`.
|
||||
- **Hybrid** - the API returns the live model list, and editorial entries (keyed by id/idPrefix) merge over the API data via `fromManualMapping`. We control everything except *which models exist*.
|
||||
- **Dynamic** - the API is the only source of model identity and metadata. Big-AGI cannot reliably populate `pubDate` here (no editorial knob).
|
||||
|
||||
| Vendor | Category | File | Array | Entries | `pubDate` populated |
|
||||
|---|---|---|---|---|---|
|
||||
| Anthropic | Hybrid | `anthropic/anthropic.models.ts` | `hardcodedAnthropicModels` | 12 | 12/12 HIGH |
|
||||
| Gemini | Hybrid | `gemini/gemini.models.ts` | `_knownGeminiModels` | 33 | 33/33 HIGH |
|
||||
| OpenAI | Hybrid | `openai/models/openai.models.ts` | `_knownOpenAIChatModels` | 96 | 95/96 HIGH/MED (`osb-120b` skipped, speculative) |
|
||||
| xAI | Hybrid | `openai/models/xai.models.ts` | `_knownXAIChatModels` | 13 | 13/13 HIGH (pilot) |
|
||||
| Mistral | Hybrid | `openai/models/mistral.models.ts` | `_knownMistralModelDetails` | 41 | 41/41 (40 HIGH, 1 MED for legacy `mistral-medium`) |
|
||||
| Moonshot (Kimi) | Hybrid | `openai/models/moonshot.models.ts` | `_knownMoonshotModels` | 13 | 13/13 (10 HIGH, 3 MED for v1 base models) |
|
||||
| Perplexity | Editorial | `openai/models/perplexity.models.ts` | `_knownPerplexityChatModels` | 4 | 4/4 HIGH |
|
||||
| MiniMax | Editorial | `openai/models/minimax.models.ts` | `_knownMiniMaxModels` | 10 | 10/10 HIGH |
|
||||
| DeepSeek | Hybrid | `openai/models/deepseek.models.ts` | `_knownDeepseekChatModels` | 4 | 4/4 HIGH |
|
||||
| Groq | Hybrid (host) | `openai/models/groq.models.ts` | `_knownGroqModels` | 11 | 11/11 HIGH (underlying-model date) |
|
||||
| Z.AI / GLM | Hybrid | `openai/models/zai.models.ts` | `_knownZAIModels` | 17 | 16/17 (`glm-5-code` UNCONFIRMED) |
|
||||
| OpenPipe | Editorial (mirror) | `openai/models/openpipe.models.ts` | `_knownOpenPipeChatModels` | 30 | 30/30 HIGH (all upstream-mirror, no OpenPipe originals) |
|
||||
| Bedrock | Reuses Anthropic | `bedrock/bedrock.models.ts` | -> `hardcodedAnthropicModels` | (12) | inherited |
|
||||
| Ollama | Editorial (catalog) | `ollama/ollama.models.ts` | `OLLAMA_BASE_MODELS` | 209 | **deferred** - see notes |
|
||||
| Arcee AI | Dynamic | `openai/models/arceeai.models.ts` | `_arceeKnownModels` | 0 | n/a (empty) |
|
||||
| LLMAPI | Dynamic | `openai/models/llmapi.models.ts` | `_llmapiKnownModels` | 0 | n/a (empty) |
|
||||
| Alibaba | Dynamic | `openai/models/alibaba.models.ts` | `_knownAlibabaChatModels` | 0 | n/a (empty) |
|
||||
| OpenRouter | Dynamic + delegated lookup | `openai/models/openrouter.models.ts` | (parser) | -- | inherited via `llmOrt*Lookup` |
|
||||
| TogetherAI | Dynamic | `openai/models/together.models.ts` | (parser) | -- | no |
|
||||
| FireworksAI | Dynamic | `openai/models/fireworksai.models.ts` | (parser) | -- | no |
|
||||
| Novita | Dynamic | `openai/models/novita.models.ts` | (parser) | -- | no |
|
||||
| ChutesAI | Dynamic | `openai/models/chutesai.models.ts` | (parser) | -- | no |
|
||||
| TLUS | Dynamic | `openai/models/tlusapi.models.ts` | (parser) | -- | no |
|
||||
| Azure | Dynamic | `openai/models/azure.models.ts` | (parser) | -- | no |
|
||||
| LM Studio | Dynamic | `openai/models/lmstudio.models.ts` | (parser) | -- | no |
|
||||
| LocalAI | Dynamic | `openai/models/localai.models.ts` | (parser) | -- | no |
|
||||
| FastAPI | Dynamic | `openai/models/fastapi.models.ts` | (parser) | -- | no |
|
||||
|
||||
**Totals**: 284 editorial entries across 12 vendors, of which **282** have corroborated `pubDate` and **2** are intentional gaps (`osb-120b` speculative, `glm-5-code` not yet announced). All 12 vendor files type-check clean.
|
||||
|
||||
### Notes
|
||||
|
||||
- **Hybrid** vendors are still effectively editorial for the models we know about: when an API id matches a hardcoded `idPrefix` (or `id`), `fromManualMapping` injects all the editorial fields. Unknown ids fall through to a default-shaped placeholder where `pubDate` is undefined.
|
||||
- **OpenRouter** delegates back to Anthropic / Gemini / OpenAI editorial lookups via `llmOrtAntLookup_ThinkingVariants`, `llmOrtGemLookup`, `llmOrtOaiLookup`. `pubDate` flows through these lookups, so OpenRouter-served Claude/Gemini/GPT models get `pubDate` automatically once the underlying editorial entry has it.
|
||||
- **Bedrock** finds Anthropic editorial via `llmBedrockFindAnthropicModel` and strips unsupported interfaces - `pubDate` inherits from Anthropic.
|
||||
- **Ollama** is deferred: 209 entries keyed by upstream model family (e.g. `qwen3.6`, `kimi-k2`, `glm-4.6`). Each entry's `pubDate` would need to be the upstream creator's release date (Meta, Alibaba, Moonshot, Z.AI, etc.). This is large-scale upstream research; better handled in a follow-up pass once cross-vendor `pubDate` data is consolidated and reusable.
|
||||
- **Dynamic-only** vendors get nothing automatic. To add `pubDate` for them we'd have to seed editorial entries (which is what `fromManualMapping`'s mapping mechanism was built for); this is a per-vendor decision and out of scope for the initial rollout.
|
||||
@@ -0,0 +1,88 @@
|
||||
# Gemini Interactions API
|
||||
|
||||
The Interactions API powers Gemini's agent runs (Deep Research today, more agent types planned). This doc is the source of truth for protocol shape, failure modes, and the recovery model — code comments link here instead of repeating the rationale.
|
||||
|
||||
## References
|
||||
|
||||
- **GH [#1088](https://github.com/enricoros/big-AGI/issues/1088)** — Auto-resume for Deep Research; Recover button
|
||||
- **GH [#1095](https://github.com/enricoros/big-AGI/issues/1095)** — Visualizations toggle (`agent_config.visualization`)
|
||||
- **Google forum [143098](https://discuss.ai.google.dev/t/interactions-api-connection-breaks-at-the-10-minutes-mark/143098)** — 10-min SSE cut
|
||||
- **Google forum [143099](https://discuss.ai.google.dev/t/streaming-resume-broken-on-interactions-api-deep-research-often-cannot-resume/143099)** — Streaming resume re-cuts
|
||||
- **Upstream specs** — `_upstream/gemini.interactions.spec.md`, `gemini.interactions.guide.md`, `gemini.deep-research.guide.md`
|
||||
|
||||
## Endpoints
|
||||
|
||||
| Verb | Path | Purpose |
|
||||
|--------|-------------------------------------------|-------------------------------------------------------------------|
|
||||
| POST | `/v1beta/interactions` | Start a run. We always send `stream:true, background:true, store:true` |
|
||||
| GET | `/v1beta/interactions/{id}?stream=true` | Reattach via SSE replay (full event sequence from start) |
|
||||
| GET | `/v1beta/interactions/{id}` | Fetch the resource as JSON (one-shot) |
|
||||
| POST | `/v1beta/interactions/{id}/cancel` | Stop a background run |
|
||||
| DELETE | `/v1beta/interactions/{id}` | Remove the stored record (does NOT cancel an in-flight run) |
|
||||
|
||||
Retention: 1 day free, 55 days paid.
|
||||
|
||||
## Status taxonomy
|
||||
|
||||
| Status | Meaning | Handling |
|
||||
|-------------------|-----------------------------------------------|-------------------------------------------------------|
|
||||
| `in_progress` | Live run **or** zombie (see C) | Surface diagnostics; offer Resume/Recover/Stop |
|
||||
| `completed` | Done with content in `outputs[]` | Emit fragments, `tokenStopReason='ok'` |
|
||||
| `failed` | Server-side failure | Terminating issue |
|
||||
| `cancelled` | We or another client cancelled | Close as `cg-issue` |
|
||||
| `incomplete` | Stopped early (token limit) — partial outputs | Note + `tokenStopReason='out-of-tokens'` |
|
||||
| `requires_action` | Not expected for Deep Research | Fail loudly so we notice |
|
||||
|
||||
## Two retrieval paths
|
||||
|
||||
| Path | Endpoint | Parser | Use case |
|
||||
|-----------------------|-----------------------------------|-------------------------------------------|-----------------------------------|
|
||||
| SSE replay | `GET ?stream=true` | `createGeminiInteractionsParserSSE` | Canonical resume; live deltas |
|
||||
| JSON GET (recovery) | `GET` (no `stream`) | `createGeminiInteractionsParserNS` | Recover when SSE is broken |
|
||||
|
||||
Both replay from the start — `ContentReassembler` REPLACES content on reattach, so partial replay (`last_event_id`) is intentionally NOT used. The NS parser walks `outputs[]` (thoughts, text, images, audio) and emits the same particles the SSE parser would, in one batch.
|
||||
|
||||
## Failure modes
|
||||
|
||||
### A. 10-minute SSE cut (forum 143098)
|
||||
|
||||
The SSE connection gets cut at exactly 600 s, regardless of activity. The cut is malformed (JSON error array instead of clean SSE close) and we treat it as stream-closed-early. The run typically **continues** server-side and reaches `completed`. **Recover (JSON GET)** retrieves the full report.
|
||||
|
||||
### B. Streaming resume re-cuts (forum 143099)
|
||||
|
||||
A fresh SSE replay can re-cut at the same 10-minute boundary on long runs, so Resume alone never reaches `interaction.complete`. **Recover** is the fallback.
|
||||
|
||||
### C. Zombie interactions (#1088)
|
||||
|
||||
Resource sits in `status: in_progress` for **days** with `outputs: []` — the generator crashed but the status never transitioned. **Not recoverable** (no data was ever produced). The NS parser surfaces `created`, `updated`, output count, and a "stuck for over an hour" hint so the user can decide to delete and retry.
|
||||
|
||||
### D. Connection drop mid-run
|
||||
|
||||
Network blip; resource is fine. **Resume (SSE replay)** picks up cleanly.
|
||||
|
||||
## UI
|
||||
|
||||
`BlockOpUpstreamResume` renders up to three buttons:
|
||||
|
||||
| Button | Action | Shown when |
|
||||
|----------|-----------------------------------|---------------------------------------------------------|
|
||||
| Resume | SSE replay | `onResume` provided |
|
||||
| Recover | JSON GET (one-shot) | `upstreamHandle.uht` ∈ `_NS_RECOVER_UHTS` |
|
||||
| Stop | Cancel + delete upstream resource | `onDelete` provided |
|
||||
|
||||
The Recover gate is an inline `uht === 'vnd.gem.interactions'` check in `BlockOpUpstreamResume.tsx` — extend when another vendor needs the same fallback. Stop is intentionally NOT gated by Resume/Recover busy state — it's the escape hatch for hung resumes.
|
||||
|
||||
## Visualization control (#1095)
|
||||
|
||||
Deep Research accepts `agent_config.visualization: 'auto' | 'off'`. Exposed as `llmVndGeminiAgentViz` (label "Visualizations"). Forwarded only when explicitly `'off'` so the upstream `'auto'` default stays untouched. Useful when merging multiple reports — image fragments break Beam fusion.
|
||||
|
||||
## Code map
|
||||
|
||||
| File | Role |
|
||||
|--------------------------------------------------------------------------------------|-------------------------------------------------------|
|
||||
| `aix/server/dispatch/wiretypes/gemini.interactions.wiretypes.ts` | Zod schemas (RequestBody, Interaction, StreamEvent) |
|
||||
| `aix/server/dispatch/chatGenerate/adapters/gemini.interactionsCreate.ts` | POST body (input + agent_config) |
|
||||
| `aix/server/dispatch/chatGenerate/parsers/gemini.interactions.parser.ts` | SSE parser + NS parser |
|
||||
| `aix/server/dispatch/chatGenerate/chatGenerate.dispatch.ts` (`gemini` case) | Resume dispatch: SSE vs JSON branch |
|
||||
| `apps/chat/components/message/BlockOpUpstreamResume.tsx` | Resume / Recover / Stop UI |
|
||||
| `apps/chat/components/ChatMessageList.tsx` (`handleMessageUpstreamResume`) | Wires click handler to `aixReattachContent_DMessage_orThrow` |
|
||||
@@ -0,0 +1,78 @@
|
||||
# LLM Models Catalog Pipeline (forward-looking)
|
||||
|
||||
Status: **proposal / partially implemented**. Companion to [LLM-editorial-pubdate.md](LLM-editorial-pubdate.md) which describes the durable reference (`pubDate` semantics, editorial-vs-dynamic matrix, propagation chain).
|
||||
|
||||
This document captures the forward-looking pipeline that turns Big-AGI's editorial model metadata into website value-add (plots, decision helpers, comparison tools at big-agi.com).
|
||||
|
||||
|
||||
## Goal
|
||||
|
||||
Stand up a database/datastore that the website (`~/dev/website`) can query for plots, decision helpers, and comparison tools - without requiring the website to call our authenticated tRPC endpoints.
|
||||
|
||||
|
||||
## Stages
|
||||
|
||||
### Stage 1: source of truth (in this repo) — DONE
|
||||
|
||||
Editorial files in `src/modules/llms/server/` remain the canonical source for:
|
||||
|
||||
- Identity: id, label, vendor
|
||||
- Capabilities: `interfaces`, `parameterSpecs`, `contextWindow`, `maxCompletionTokens`
|
||||
- Pricing: `chatPrice` (input / output / cache tiers)
|
||||
- Benchmarks: `benchmark.cbaElo` (Chat Bot Arena ELO)
|
||||
- Lifecycle: `pubDate`, `isLegacy`, `isPreview`, `hidden`, deprecation comments
|
||||
|
||||
Well-typed, version-controlled, reviewed - every model edit is a code change with diff history. 282 entries currently carry `pubDate` (see editorial-control matrix).
|
||||
|
||||
### Stage 2: extraction script — IN PROGRESS
|
||||
|
||||
A build-time script (e.g. `scripts/llms/export-models.ts`) that:
|
||||
|
||||
1. Loads every editorial vendor's model array.
|
||||
2. Normalizes per-vendor shapes (array vs Record, `id` vs `idPrefix`, `KnownLink` symlinks) to a single row format.
|
||||
3. Resolves symlinks (target's `pubDate` flows through).
|
||||
4. Writes a single JSON snapshot: `data/models-catalog.json` (one row per model, with vendor + the editorial fields above).
|
||||
|
||||
Open question: do we want this committed (gives the website a stable artifact / public URL) or built on-demand in CI? **Recommend committed snapshot** under `data/` so consumers get a stable URL.
|
||||
|
||||
### Stage 3: enrichment — NOT STARTED
|
||||
|
||||
The exported snapshot gets enriched with data we don't currently track in editorial files:
|
||||
|
||||
- **Knowledge cutoff** (proposed in `llms.types.next.ts:217` but never implemented; should be added to `ModelDescription_schema` as a follow-up).
|
||||
- **MMLU / HumanEval / SWE-bench / GPQA / MATH** scores (currently only `cbaElo`; richer benchmarks belong in a separate block).
|
||||
- **Throughput / latency** numbers (per-vendor, possibly per-region).
|
||||
- **Modalities matrix** (input image, input audio, input video, input PDF, output image, output audio).
|
||||
- **Weights availability** (closed / open / restricted), license.
|
||||
|
||||
Sources for enrichment: HuggingFace cards, vendor docs, Artificial Analysis, LLM-Stats, official benchmarks. Some can be scraped on a cadence; some need editorial review.
|
||||
|
||||
### Stage 4: website consumption — NOT STARTED
|
||||
|
||||
The website (`~/dev/website`) consumes the snapshot to render:
|
||||
|
||||
- **Timeline plot**: `pubDate` (x-axis) vs `cbaElo` (y-axis), grouped by vendor - shows the frontier and rate of progress.
|
||||
- **Cost-per-quality plot**: `chatPrice.output` vs `cbaElo` - "best model per dollar".
|
||||
- **Decision helpers**: filter by capability (`interfaces`), context window, pricing tier, vendor.
|
||||
- **Comparison cards**: side-by-side specs.
|
||||
- **Lifecycle alerts**: deprecation warnings for retiring models.
|
||||
|
||||
|
||||
## Open questions
|
||||
|
||||
1. **Where does enrichment data live?** A separate `data/models-enrichment.json` (joined by id at build time) keeps editorial files clean but introduces a join surface. Alternative: extend `ModelDescription_schema` with optional enrichment fields and treat editorial files as the only source. Recommend the separate file approach - editorial files stay focused on vendor-API integration; enrichment evolves on a different cadence.
|
||||
2. **How fresh does the website need to be?** If daily, build the snapshot in CI on push and publish to a static URL. If real-time, consume tRPC directly - more work but fewer freshness gaps.
|
||||
3. **Do we expose `pubDate` and other editorial metadata via tRPC publicly, or only via the snapshot?** The current tRPC routes require auth; the website should consume the snapshot, not live tRPC.
|
||||
4. **Schema versioning** - if `ModelDescription_schema` evolves, the snapshot consumers need to be tolerant. Include a `schemaVersion` field in the snapshot envelope.
|
||||
|
||||
|
||||
## Future extensions to `ModelDescription_schema`
|
||||
|
||||
Beyond `pubDate`, the natural follow-ups (in priority order):
|
||||
|
||||
1. **`knowledgeCutoff?: string`** (`'YYYY-MM'` or `'YYYY-MM-DD'`) - already proposed in `llms.types.next.ts`. Useful for the timeline plot and for context-aware prompts.
|
||||
2. **`deprecationDate?: string`** - currently exists informally as `deprecated?: string` on `_knownGeminiModels`; should be promoted to the schema.
|
||||
3. **`license?: string`** - especially important for open-weights models (apache-2.0, mit, llama-community, custom).
|
||||
4. **`weights?: 'closed' | 'open' | 'restricted'`** - quick filter for "can I run this myself?".
|
||||
5. **`benchmarks?: { mmlu?: number, humaneval?: number, gpqa?: number, ... }`** - richer than the current `cbaElo`-only block.
|
||||
6. **`modalities?: { in: string[], out: string[] }`** - more precise than `interfaces` for input/output capability matrices.
|
||||
@@ -0,0 +1,126 @@
|
||||
# LLM Vendor Integration Guide
|
||||
|
||||
How to add support for new LLM providers in Big-AGI. There are two integration paths, and
|
||||
the dynamic backend path is strongly preferred for new vendors.
|
||||
|
||||
## Integration Paths
|
||||
|
||||
### Path 1: Dynamic Backend (preferred)
|
||||
|
||||
For any provider with an **OpenAI-compatible API** (which is nearly all new providers).
|
||||
|
||||
**Surface area**: 1-2 files, no UI changes, no registry changes.
|
||||
|
||||
A dynamic backend provides:
|
||||
- Hostname-based auto-detection when the user adds the provider's API URL
|
||||
- Automatic model list parsing with vendor-specific metadata (pricing, context windows, capabilities)
|
||||
- Zero UI code - uses the existing "Custom OpenAI-compatible" service setup
|
||||
|
||||
**Files touched**:
|
||||
- `src/modules/llms/server/openai/models/{vendor}.models.ts` (required) - model definitions + hostname heuristic
|
||||
- `src/modules/llms/server/openai/wiretypes/{vendor}.wiretypes.ts` (optional) - Zod schemas for vendor-specific wire format
|
||||
- `src/modules/llms/server/listModels.dispatch.ts` - add heuristic to the detection chain (2 lines)
|
||||
|
||||
**What the model file must export**:
|
||||
```typescript
|
||||
// 1. Hostname heuristic - returns true when the user's API URL matches this vendor
|
||||
export function vendorHeuristic(hostname: string): boolean {
|
||||
return hostname.includes('.vendor-domain.com');
|
||||
}
|
||||
|
||||
// 2. Model converter - transforms vendor's /v1/models response to ModelDescriptionSchema[]
|
||||
export function vendorModelsToModelDescriptions(wireModels: unknown): ModelDescriptionSchema[] {
|
||||
// Parse wire format, map to ModelDescriptionSchema with:
|
||||
// - id, label, description
|
||||
// - contextWindow, maxCompletionTokens
|
||||
// - interfaces (Chat, Vision, Fn, Reasoning, etc.)
|
||||
// - chatPrice (input/output per token)
|
||||
// - parameterSpecs (temperature, etc.)
|
||||
}
|
||||
```
|
||||
|
||||
**Existing examples**: `novita.models.ts`, `chutesai.models.ts`, `fireworksai.models.ts`
|
||||
|
||||
MUST also provide the updated vendor icon like other icons in `src/common/components/icons/vendors/`.
|
||||
Make sure all the information is available if in the future we want to promote those to full registered vendors.
|
||||
|
||||
### Path 2: Registered Vendor (heavyweight, discouraged for new providers)
|
||||
|
||||
Full first-class integration with dedicated UI, own dialect, and registry entry. Reserved for
|
||||
providers with **non-OpenAI protocols** (Anthropic, Gemini, Ollama) or providers with enough
|
||||
user demand to warrant a dedicated setup flow.
|
||||
|
||||
**Surface area**: 5+ files across 3 directories.
|
||||
|
||||
**Files touched**:
|
||||
- `src/modules/llms/vendors/{vendor}/{vendor}.vendor.ts` - IModelVendor implementation
|
||||
- `src/modules/llms/vendors/{vendor}/{VendorName}ServiceSetup.tsx` - React UI setup component
|
||||
- `src/modules/llms/vendors/vendors.registry.ts` - registry entry + ModelVendorId union
|
||||
- `src/modules/llms/server/openai/models/{vendor}.models.ts` - model definitions
|
||||
- `src/modules/llms/server/listModels.dispatch.ts` - dispatch case
|
||||
- Possibly server protocol adapter if not OpenAI-compatible
|
||||
- Possibly more files, e.g. wires, etc.
|
||||
- See existing providers and commits that added them for full scope
|
||||
|
||||
**When to use this path**: Only when the provider has a meaningfully different API protocol
|
||||
(not OpenAI-compatible), or when there is significant user demand AND the provider offers
|
||||
unique capabilities that benefit from dedicated UI (e.g., Ollama's local model management).
|
||||
|
||||
When using this path, please add links to upstream documentation. Make sure all constants
|
||||
are correctly handled everywhere, especially for provider-based switches.
|
||||
|
||||
## Decision Criteria
|
||||
|
||||
| Question | Dynamic | Registered |
|
||||
|----------|---------|------------|
|
||||
| OpenAI-compatible API? | Yes - use dynamic | Only if not OAI-compatible |
|
||||
| Needs custom auth UI? | No - uses generic fields | Yes - custom setup form |
|
||||
| Unique protocol? | No | Yes (Anthropic, Gemini, Ollama) |
|
||||
| User demand level | Any | High + sustained |
|
||||
| Maintenance burden | Minimal | Significant (5+ files) |
|
||||
|
||||
## For External Contributors / Vendor Requests
|
||||
|
||||
When vendors or community members request integration via GitHub issues:
|
||||
|
||||
1. **Point them to the dynamic backend path** - it's faster to implement, review, and maintain
|
||||
2. **Requirements for a dynamic backend PR**:
|
||||
- Model file with heuristic + converter exporting `ModelDescriptionSchema[]`
|
||||
- Wire types if the vendor's `/v1/models` response has non-standard fields
|
||||
- Vendor icon (SVG preferred) in `src/common/components/icons/vendors/`
|
||||
- Two-line addition to the heuristic chain in `listModels.dispatch.ts`
|
||||
3. **Do not accept**: New registered vendors for OpenAI-compatible providers. The maintenance
|
||||
cost of a full vendor (UI component, registry entry, dispatch case) is not justified when
|
||||
dynamic detection achieves the same result with a fraction of the code.
|
||||
|
||||
## Architecture Notes
|
||||
|
||||
### How Dynamic Detection Works
|
||||
|
||||
In `listModels.dispatch.ts`, the `case 'openai':` handler:
|
||||
1. Fetches `/v1/models` from the user-provided API host
|
||||
2. Runs the hostname through a chain of heuristics (in order)
|
||||
3. First matching heuristic's converter is used to parse models
|
||||
4. Falls back to stock OpenAI parsing if no heuristic matches
|
||||
|
||||
### Hostname Security
|
||||
|
||||
Hostname matching uses `llmsHostnameMatches()` from `openai.access.ts` which parses the
|
||||
URL properly to prevent DNS spoofing. Always use `.includes()` on the parsed hostname,
|
||||
never on the raw URL string.
|
||||
|
||||
### Key Types
|
||||
|
||||
- `ModelDescriptionSchema` (`llm.server.types.ts`) - output type for all model converters
|
||||
- `DModelInterfaceV1` (`llms.types.ts`) - capability flags (Chat, Vision, Fn, Reasoning, etc.)
|
||||
- `IModelVendor` (`vendors/IModelVendor.ts`) - interface for registered vendors only
|
||||
- `ManualMappings` / `KnownModel` (`models.mappings.ts`) - server-side model patches
|
||||
|
||||
### File Locations
|
||||
|
||||
- Dynamic backends: `src/modules/llms/server/openai/models/`
|
||||
- Wire types: `src/modules/llms/server/openai/wiretypes/`
|
||||
- Dispatch: `src/modules/llms/server/listModels.dispatch.ts`
|
||||
- Registered vendors: `src/modules/llms/vendors/*/`
|
||||
- Vendor icons: `src/common/components/icons/vendors/`
|
||||
- Type definitions: `src/modules/llms/server/llm.server.types.ts`
|
||||
@@ -13,12 +13,9 @@ The LLM parameters system operates across five layers that transform parameters
|
||||
|
||||
The `DModelParameterRegistry` defines all available parameters with their constraints and metadata. Each parameter includes type information, validation rules, and default behavior.
|
||||
|
||||
**Example**: `llmVndOaiReasoningEffort4` defines a 4-value enum with 'medium' as the required fallback.
|
||||
|
||||
**Default Value System**: The registry supports multiple default mechanisms:
|
||||
- `initialValue` - Parameter's base default (e.g., `llmVndOaiRestoreMarkdown: true`)
|
||||
- `requiredFallback` - Fallback for required parameters (e.g., `llmTemperature: 0.5`)
|
||||
- `nullable` - Parameters that can be explicitly null to skip API transmission
|
||||
- `initialValue` - Parameter's base default (e.g., `llmVndOaiRestoreMarkdown: true`)
|
||||
|
||||
### Layer 2: Model Specifications
|
||||
**File**: `src/modules/llms/server/llm.server.types.ts`
|
||||
@@ -27,7 +24,6 @@ Models declare which parameters they support through `parameterSpecs` arrays. Ea
|
||||
|
||||
```typescript
|
||||
parameterSpecs: [
|
||||
{ paramId: 'llmVndOaiReasoningEffort4' },
|
||||
{ paramId: 'llmVndAntThinkingBudget', initialValue: 1024 }, // Override default
|
||||
{ paramId: 'llmVndGeminiThinkingBudget', rangeOverride: [0, 8192] }, // Custom range
|
||||
]
|
||||
@@ -51,20 +47,14 @@ Shows only parameters that are:
|
||||
- Not marked as `hidden`
|
||||
|
||||
**Value Resolution**: Both UIs use `getAllModelParameterValues()` to merge:
|
||||
1. **Fallback values** - Required parameters get their `requiredFallback` values
|
||||
1. **Fallback values** - Implicit parameters get their `LLMImplicitParametersRuntimeFallback` values
|
||||
2. **Initial values** - Model's `initialParameters` (populated during model creation)
|
||||
3. **User values** - User's `userParameters` (highest priority)
|
||||
|
||||
### Layer 4: AIX Translation
|
||||
**File**: `src/modules/aix/client/aix.client.ts`
|
||||
|
||||
The AIX client transforms DLLM parameters to wire protocol format. This layer handles parameter precedence rules and name transformations:
|
||||
|
||||
```
|
||||
// Parameter precedence: newer 4-value version takes priority over 3-value
|
||||
...((llmVndOaiReasoningEffort4 || llmVndOaiReasoningEffort) ?
|
||||
{ vndOaiReasoningEffort: llmVndOaiReasoningEffort4 || llmVndOaiReasoningEffort } : {})
|
||||
```
|
||||
The AIX client transforms DLLM parameters to wire protocol format. This layer handles parameter precedence rules and name transformations.
|
||||
|
||||
**Client Options**: The system supports parameter overrides through `llmOptionsOverride` and complete replacement via `llmUserParametersReplacement`.
|
||||
|
||||
@@ -73,7 +63,7 @@ The AIX client transforms DLLM parameters to wire protocol format. This layer ha
|
||||
|
||||
Server-side adapters translate AIX parameters to vendor APIs. Each vendor may interpret parameters differently:
|
||||
|
||||
- **OpenAI**: `vndOaiReasoningEffort` → `reasoning_effort`
|
||||
- **OpenAI**: `vndEffort` -> `reasoning_effort`
|
||||
- **Perplexity**: Reuses OpenAI parameter format
|
||||
- **OpenAI Responses API**: Maps to structured reasoning config with additional logic
|
||||
|
||||
@@ -81,8 +71,8 @@ Server-side adapters translate AIX parameters to vendor APIs. Each vendor may in
|
||||
|
||||
When a model is loaded:
|
||||
|
||||
1. **Model Creation**: `modelDescriptionToDLLM()` creates the DLLM with empty `initialParameters`
|
||||
2. **Initial Value Application**: `applyModelParameterInitialValues()` populates initial values from:
|
||||
1. **Model Creation**: `_createDLLMFromModelDescription()` creates the DLLM with empty `initialParameters`
|
||||
2. **Initial Value Application**: `applyModelParameterSpecsInitialValues()` populates initial values from:
|
||||
- Model spec `initialValue` (highest priority)
|
||||
- Registry `initialValue` (fallback)
|
||||
3. **Runtime Resolution**: `getAllModelParameterValues()` creates final parameter set:
|
||||
@@ -105,7 +95,7 @@ When a model is loaded:
|
||||
The system maintains type safety through:
|
||||
- `DModelParameterId` union from registry keys
|
||||
- `DModelParameterValue<T>` conditional types for values
|
||||
- `DModelParameterSpec<T>` interfaces for specifications
|
||||
- `DModelParameterSpecAny` interfaces for specifications
|
||||
- Runtime validation via Zod schemas at API boundaries
|
||||
|
||||
## Model Variant Pattern
|
||||
@@ -117,7 +107,6 @@ Some vendors use model variants to enable features, for instance:
|
||||
## Migration and Compatibility
|
||||
|
||||
The architecture supports parameter evolution:
|
||||
- **Version Coexistence**: Both `llmVndOaiReasoningEffort` and `llmVndOaiReasoningEffort4` exist simultaneously
|
||||
- **Precedence Rules**: Newer parameters take priority during AIX translation
|
||||
- **Graceful Degradation**: Unknown parameters log warnings but don't break functionality
|
||||
|
||||
@@ -128,4 +117,4 @@ The architecture supports parameter evolution:
|
||||
- **UI Controls**: `src/modules/llms/models-modal/LLMParametersEditor.tsx`
|
||||
- **AIX Translation**: `src/modules/aix/client/aix.client.ts`
|
||||
- **Wire Types**: `src/modules/aix/server/api/aix.wiretypes.ts`
|
||||
- **Vendor Adapters**: `src/modules/aix/server/dispatch/chatGenerate/adapters/*.ts`
|
||||
- **Vendor Adapters**: `src/modules/aix/server/dispatch/chatGenerate/adapters/*.ts`
|
||||
|
||||
@@ -0,0 +1,29 @@
|
||||
# CSF - Client-Side Fetch
|
||||
|
||||
Client-Side Fetch (CSF), surfaced to users as **"Direct Connection"**, enables direct browser-to-API communication, bypassing the server for LLM requests. When enabled, the browser makes requests directly to vendor APIs (e.g., `api.openai.com`, `api.groq.com`) instead of routing through the Next.js server. This reduces latency, decreases server load, and is particularly useful for local models where the browser can communicate directly with Ollama or LM Studio.
|
||||
|
||||
## User-facing tradeoffs (Direct Connection vs via-server)
|
||||
|
||||
Wins when Direct Connection is on:
|
||||
- **No 4.5MB upload limit** (Vercel body-size cap does not apply to direct browser-to-API requests).
|
||||
- **No 300s function timeout** (Vercel serverless/edge timeout does not apply; call duration is bound only by the AI service).
|
||||
- **More privacy**: connection metadata (IP, timestamp, edge region, Vercel telemetry) is not observable by the Big-AGI edge server.
|
||||
|
||||
Costs:
|
||||
- **Slightly more downlink bandwidth**: when traffic passes through the Big-AGI server, repetitive streaming frames are shed/compacted; direct streams arrive verbatim.
|
||||
|
||||
Availability requires both:
|
||||
1. The API key is on the **client** (localStorage), not a server-side env var. Server-key deployments cannot use CSF because the browser has no credential to send.
|
||||
2. The AI service **allows CORS** from browsers. Most major providers do; some require specific headers which Big-AGI sets.
|
||||
|
||||
Net: Direct Connection is a win on speed, limits, and privacy whenever the provider permits it. It is unavailable when keys are server-side or the provider blocks browser-origin requests.
|
||||
|
||||
## Implementation
|
||||
|
||||
CSF is implemented as an opt-in setting stored as `csf: boolean` in each vendor's service settings. The vendor interface exposes `csfAvailable?: (setup) => boolean` to determine if CSF can be enabled (typically checking if an API key or host is configured). The actual execution happens in `aix.client.direct-chatGenerate.ts` which dynamically imports when CSF is active, making direct fetch calls using the same wire protocols as the server.
|
||||
|
||||
All 20+ supported vendors (OpenAI, Anthropic, Gemini, Ollama, LocalAI, Deepseek, Groq, Mistral, xAI, OpenRouter, Perplexity, Together AI, Alibaba, Moonshot, OpenPipe, LM Studio, Z.ai, Azure, Bedrock) support CSF. Cloud vendors require CORS support from the API provider (all tested vendors return `access-control-allow-origin: *`). Local vendors (Ollama, LocalAI, LM Studio) require CORS to be enabled on the local server.
|
||||
|
||||
## UI
|
||||
|
||||
The CSF toggle appears in each vendor's setup panel under "Advanced" settings, labeled "Direct Connection". It becomes visible when the prerequisites are met (API key present for cloud vendors, host configured for local vendors). The setting is managed through `useModelServiceClientSideFetch` hook which provides `csfAvailable`, `csfActive`, `csfToggle`, and `csfReset` for UI consumption.
|
||||
@@ -0,0 +1,3 @@
|
||||
## Strategic Vision
|
||||
|
||||
If provided, the following influences the long-term vision, product and architectural goals/north stars for Big-AGI.
|
||||
+3
-3
@@ -18,7 +18,7 @@ process.env.NEXT_PUBLIC_BUILD_HASH = (buildHash || '').slice(0, 10);
|
||||
process.env.NEXT_PUBLIC_BUILD_PKGVER = JSON.parse('' + readFileSync(new URL('./package.json', import.meta.url))).version;
|
||||
process.env.NEXT_PUBLIC_BUILD_TIMESTAMP = new Date().toISOString();
|
||||
process.env.NEXT_PUBLIC_DEPLOYMENT_TYPE = process.env.NEXT_PUBLIC_DEPLOYMENT_TYPE || (process.env.VERCEL_ENV ? `vercel-${process.env.VERCEL_ENV}` : 'local'); // Docker or custom, Vercel
|
||||
console.log(` 🧠 \x1b[1mbig-AGI\x1b[0m v${process.env.NEXT_PUBLIC_BUILD_PKGVER} (@${process.env.NEXT_PUBLIC_BUILD_HASH})`);
|
||||
console.log(` 🧠 \x1b[1mbig-AGI\x1b[0m v${process.env.NEXT_PUBLIC_BUILD_PKGVER} (@${process.env.NEXT_PUBLIC_BUILD_HASH}${process.env.VERCEL_ENV ? `, \x1b[2mV:\x1b[0m${process.env.VERCEL_ENV}` : ''}, \x1b[2mN:\x1b[0m${process.env.NODE_ENV})`);
|
||||
|
||||
// Non-default build types
|
||||
const buildType =
|
||||
@@ -30,7 +30,7 @@ buildType && console.log(` 🧠 big-AGI: building for ${buildType}...\n`);
|
||||
|
||||
/** @type {import('next').NextConfig} */
|
||||
let nextConfig: NextConfig = {
|
||||
reactStrictMode: true,
|
||||
reactStrictMode: !process.env.NO_STRICT_MODE, // default: enabled
|
||||
|
||||
// [exports] https://nextjs.org/docs/advanced-features/static-html-export
|
||||
...(buildType && {
|
||||
@@ -141,7 +141,7 @@ if (process.env.POSTHOG_API_KEY && process.env.POSTHOG_ENV_ID) {
|
||||
personalApiKey: process.env.POSTHOG_API_KEY,
|
||||
envId: process.env.POSTHOG_ENV_ID,
|
||||
host: 'https://us.i.posthog.com', // backtrace upload host
|
||||
verbose: false,
|
||||
logLevel: 'error', // lowered, too noisy
|
||||
sourcemaps: {
|
||||
enabled: process.env.NODE_ENV === 'production',
|
||||
project: 'big-agi',
|
||||
|
||||
Generated
+2702
-1299
File diff suppressed because it is too large
Load Diff
+37
-30
@@ -1,8 +1,9 @@
|
||||
{
|
||||
"name": "big-agi",
|
||||
"version": "2.0.0",
|
||||
"version": "2.0.4",
|
||||
"private": true,
|
||||
"author": "Enrico Ros <enrico.ros@gmail.com>",
|
||||
"author": "Enrico Ros <enrico@big-agi.com> (https://www.enricoros.com)",
|
||||
"homepage": "https://big-agi.com",
|
||||
"repository": "https://github.com/enricoros/big-agi",
|
||||
"scripts": {
|
||||
"dev": "next dev --turbopack",
|
||||
@@ -11,10 +12,13 @@
|
||||
"build": "next build",
|
||||
"start": "next start",
|
||||
"lint": "next lint",
|
||||
"tsclint": "tsc --noEmit --pretty",
|
||||
"postinstall": "prisma generate --no-hints",
|
||||
"gen:icon-sprites": "node tools/develop/gen-icon-sprites/generate-llm-sprites.ts",
|
||||
"db:push": "prisma db push",
|
||||
"db:studio": "prisma studio",
|
||||
"vercel:env:pull": "npx vercel env pull .env.development.local"
|
||||
"vercel:env:pull": "npx vercel env pull .env.development.local",
|
||||
"sharp:win32_x64": "npm install --os=win32 --cpu=x64 sharp"
|
||||
},
|
||||
"prisma": {
|
||||
"schema": "src/server/prisma/schema.prisma"
|
||||
@@ -28,38 +32,40 @@
|
||||
"@emotion/react": "^11.14.0",
|
||||
"@emotion/server": "^11.11.0",
|
||||
"@emotion/styled": "^11.14.1",
|
||||
"@googleworkspace/drive-picker-react": "^0.2.0",
|
||||
"@mui/icons-material": "^5.18.0",
|
||||
"@mui/joy": "^5.0.0-beta.52",
|
||||
"@next/bundle-analyzer": "~15.1.8",
|
||||
"@next/bundle-analyzer": "~15.1.12",
|
||||
"@prisma/client": "~5.22.0",
|
||||
"@tanstack/react-query": "5.90.3",
|
||||
"@tanstack/react-virtual": "^3.13.12",
|
||||
"@tanstack/react-query": "5.90.21",
|
||||
"@tanstack/react-virtual": "^3.13.22",
|
||||
"@trpc/client": "11.5.1",
|
||||
"@trpc/next": "11.5.1",
|
||||
"@trpc/react-query": "11.5.1",
|
||||
"@trpc/server": "11.5.1",
|
||||
"@vercel/analytics": "^1.5.0",
|
||||
"@vercel/speed-insights": "^1.2.0",
|
||||
"@vercel/analytics": "^1.6.1",
|
||||
"@vercel/speed-insights": "^1.3.1",
|
||||
"aws4fetch": "^1.0.20",
|
||||
"browser-fs-access": "^0.38.0",
|
||||
"cheerio": "^1.1.2",
|
||||
"csv-stringify": "^6.6.0",
|
||||
"dexie": "^4.0.11",
|
||||
"dexie-react-hooks": "^1.1.7",
|
||||
"diff": "^8.0.2",
|
||||
"eventemitter3": "^5.0.1",
|
||||
"dexie": "~4.0.11",
|
||||
"dexie-react-hooks": "~1.1.7",
|
||||
"diff": "^8.0.3",
|
||||
"eventemitter3": "^5.0.4",
|
||||
"idb-keyval": "^6.2.2",
|
||||
"mammoth": "^1.11.0",
|
||||
"nanoid": "^5.1.6",
|
||||
"next": "~15.1.8",
|
||||
"next": "~15.1.12",
|
||||
"nprogress": "^0.2.0",
|
||||
"pdfjs-dist": "5.4.54",
|
||||
"posthog-js": "^1.297.0",
|
||||
"posthog-node": "^5.13.0",
|
||||
"posthog-js": "^1.369.0",
|
||||
"posthog-node": "^5.29.2",
|
||||
"prismjs": "^1.30.0",
|
||||
"puppeteer-core": "^24.30.0",
|
||||
"puppeteer-core": "^24.40.0",
|
||||
"react": "^18.3.1",
|
||||
"react-dom": "^18.3.1",
|
||||
"react-hook-form": "^7.66.1",
|
||||
"react-hook-form": "^7.71.2",
|
||||
"react-markdown": "^10.1.0",
|
||||
"react-player": "^3.4.0",
|
||||
"react-resizable-panels": "^3.0.6",
|
||||
@@ -68,31 +74,32 @@
|
||||
"remark-gfm": "^4.0.1",
|
||||
"remark-mark-highlight": "^0.1.1",
|
||||
"remark-math": "^6.0.0",
|
||||
"sharp": "^0.33.5",
|
||||
"superjson": "^2.2.5",
|
||||
"tesseract.js": "^6.0.1",
|
||||
"sharp": "^0.34.5",
|
||||
"superjson": "^2.2.6",
|
||||
"tesseract.js": "^7.0.0",
|
||||
"tiktoken": "^1.0.22",
|
||||
"turndown": "^7.2.2",
|
||||
"zod": "^4.1.12",
|
||||
"zod": "^4.3.6",
|
||||
"zustand": "5.0.7"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@posthog/nextjs-config": "1.3.2",
|
||||
"@types/node": "^24.10.1",
|
||||
"@posthog/nextjs-config": "~1.6.4",
|
||||
"@types/node": "^25.6.0",
|
||||
"@types/nprogress": "^0.2.3",
|
||||
"@types/prismjs": "^1.26.5",
|
||||
"@types/react": "^19.2.6",
|
||||
"@types/prismjs": "^1.26.6",
|
||||
"@types/react": "^19.2.14",
|
||||
"@types/react-csv": "^1.1.10",
|
||||
"@types/react-dom": "^19.2.3",
|
||||
"@types/turndown": "^5.0.6",
|
||||
"cross-env": "^10.1.0",
|
||||
"eslint": "^9.39.1",
|
||||
"eslint-config-next": "~15.1.8",
|
||||
"prettier": "^3.6.2",
|
||||
"eslint": "^9.39.4",
|
||||
"eslint-config-next": "~15.1.12",
|
||||
"prettier": "^3.8.2",
|
||||
"prisma": "~5.22.0",
|
||||
"typescript": "^5.9.3"
|
||||
"tsx": "^4.21.0",
|
||||
"typescript": "^6.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": "^26.0.0 || ^24.0.0 || ^22.0.0 || ^20.0.0"
|
||||
"node": "^24.0.0 || ^22.0.0 || ^20.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
+20
-3
@@ -37,14 +37,31 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
|
||||
<meta property='og:site_name' content={Brand.Meta.SiteName} />
|
||||
<meta property='og:type' content='website' />
|
||||
|
||||
{/* Twitter */}
|
||||
<meta property='twitter:card' content='summary_large_image' />
|
||||
{/* Twitter / X */}
|
||||
<meta name='twitter:card' content='summary_large_image' />
|
||||
<meta property='twitter:url' content={Brand.URIs.Home} />
|
||||
<meta property='twitter:title' content={Brand.Title.Common} />
|
||||
<meta property='twitter:description' content={Brand.Meta.Description} />
|
||||
{Brand.URIs.CardImage && <meta property='twitter:image' content={Brand.URIs.CardImage} />}
|
||||
<meta name='twitter:site' content={Brand.Meta.TwitterSite} />
|
||||
<meta name='twitter:card' content='summary_large_image' />
|
||||
<meta name='twitter:creator' content='@enricoros' />
|
||||
<link rel='canonical' href={Brand.URIs.Home} />
|
||||
|
||||
{/* Author & Structured Data */}
|
||||
<meta name='author' content='Enrico Ros' />
|
||||
<link rel='author' href='https://www.enricoros.com' />
|
||||
<script type='application/ld+json' dangerouslySetInnerHTML={{ __html: JSON.stringify({
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'SoftwareApplication',
|
||||
'name': 'Big-AGI',
|
||||
'url': 'https://big-agi.com',
|
||||
'applicationCategory': 'ProductivityApplication',
|
||||
'operatingSystem': 'All, Web',
|
||||
'description': Brand.Meta.Description,
|
||||
'sameAs': ['https://github.com/enricoros/big-agi', 'https://discord.gg/MkH4qj2Jp9',],
|
||||
'author': { '@type': 'Person', 'name': 'Enrico Ros', 'url': 'https://www.enricoros.com' },
|
||||
'publisher': { '@type': 'Organization', 'name': 'Token Fabrics LLC', 'url': 'https://www.tokenfabrics.com' },
|
||||
}) }} />
|
||||
|
||||
{/* Style Sheets (injected and server-side) */}
|
||||
<meta name='emotion-insertion-point' content='' />
|
||||
|
||||
@@ -18,7 +18,7 @@ import { ROUTE_APP_CHAT, ROUTE_INDEX } from '~/common/app.routes';
|
||||
import { Release } from '~/common/app.release';
|
||||
|
||||
// capabilities access
|
||||
import { useCapabilityBrowserSpeechRecognition, useCapabilityElevenLabs, useCapabilityTextToImage } from '~/common/components/useCapabilities';
|
||||
import { useCapabilityBrowserSpeechRecognition, useCapabilityTextToImage } from '~/common/components/useCapabilities';
|
||||
|
||||
// stores access
|
||||
import { getLLMsDebugInfo } from '~/common/stores/llms/store-llms';
|
||||
@@ -95,7 +95,6 @@ function AppDebug() {
|
||||
const cProduct = {
|
||||
capabilities: {
|
||||
mic: useCapabilityBrowserSpeechRecognition(),
|
||||
elevenLabs: useCapabilityElevenLabs(),
|
||||
textToImage: useCapabilityTextToImage(),
|
||||
},
|
||||
models: getLLMsDebugInfo(),
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
"short_name": "big-AGI",
|
||||
"theme_color": "#32383E",
|
||||
"background_color": "#9FA6AD",
|
||||
"description": "Your Generative AI Suite",
|
||||
"description": "Open-source AI workspace. Multi-model reasoning and personas for maximum control.",
|
||||
"categories": [
|
||||
"productivity",
|
||||
"AI",
|
||||
|
||||
@@ -6,13 +6,15 @@ import ChatIcon from '@mui/icons-material/Chat';
|
||||
import CheckRoundedIcon from '@mui/icons-material/CheckRounded';
|
||||
import CloseRoundedIcon from '@mui/icons-material/CloseRounded';
|
||||
import MicIcon from '@mui/icons-material/Mic';
|
||||
import RecordVoiceOverTwoToneIcon from '@mui/icons-material/RecordVoiceOverTwoTone';
|
||||
import WarningRoundedIcon from '@mui/icons-material/WarningRounded';
|
||||
|
||||
import { useSpeexGlobalEngine } from '~/modules/speex/store-module-speex';
|
||||
|
||||
import { PhVoice } from '~/common/components/icons/phosphor/PhVoice';
|
||||
import { animationColorRainbow } from '~/common/util/animUtils';
|
||||
import { navigateBack } from '~/common/app.routes';
|
||||
import { optimaOpenPreferences } from '~/common/layout/optima/useOptima';
|
||||
import { useCapabilityBrowserSpeechRecognition, useCapabilityElevenLabs } from '~/common/components/useCapabilities';
|
||||
import { useCapabilityBrowserSpeechRecognition } from '~/common/components/useCapabilities';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { useUICounter } from '~/common/stores/store-ui';
|
||||
|
||||
@@ -45,7 +47,7 @@ export function CallWizard(props: { strict?: boolean, conversationId: string | n
|
||||
|
||||
// external state
|
||||
const recognition = useCapabilityBrowserSpeechRecognition();
|
||||
const synthesis = useCapabilityElevenLabs();
|
||||
const speexGlobalEngine = useSpeexGlobalEngine();
|
||||
const chatIsEmpty = useChatStore(state => {
|
||||
if (!props.conversationId)
|
||||
return false;
|
||||
@@ -56,17 +58,18 @@ export function CallWizard(props: { strict?: boolean, conversationId: string | n
|
||||
|
||||
// derived state
|
||||
const outOfTheBlue = !props.conversationId;
|
||||
const overriddenEmptyChat = chatEmptyOverride || !chatIsEmpty;
|
||||
const overriddenEmptyChat = outOfTheBlue || chatEmptyOverride || !chatIsEmpty;
|
||||
const overriddenRecognition = recognitionOverride || recognition.mayWork;
|
||||
const allGood = overriddenEmptyChat && overriddenRecognition && synthesis.mayWork;
|
||||
const fatalGood = overriddenRecognition && synthesis.mayWork;
|
||||
const synthesisShallWork = !!speexGlobalEngine;
|
||||
const allGood = overriddenEmptyChat && overriddenRecognition && synthesisShallWork;
|
||||
const fatalGood = overriddenRecognition && synthesisShallWork;
|
||||
|
||||
|
||||
const handleOverrideChatEmpty = React.useCallback(() => setChatEmptyOverride(true), []);
|
||||
|
||||
const handleOverrideRecognition = React.useCallback(() => setRecognitionOverride(true), []);
|
||||
|
||||
const handleConfigureElevenLabs = React.useCallback(() => optimaOpenPreferences('voice'), []);
|
||||
const handleConfigureVoice = React.useCallback(() => optimaOpenPreferences('voice'), []);
|
||||
|
||||
const handleFinishButton = React.useCallback(() => {
|
||||
if (!allGood)
|
||||
@@ -128,17 +131,17 @@ export function CallWizard(props: { strict?: boolean, conversationId: string | n
|
||||
|
||||
{/* Text to Speech status */}
|
||||
<StatusCard
|
||||
icon={<RecordVoiceOverTwoToneIcon />}
|
||||
icon={<PhVoice />}
|
||||
text={
|
||||
(synthesis.mayWork ? 'Voice synthesis should be ready.' : 'There might be an issue with ElevenLabs voice synthesis.')
|
||||
+ (synthesis.isConfiguredServerSide ? '' : (synthesis.isConfiguredClientSide ? '' : ' Please add your API key in the settings.'))
|
||||
(synthesisShallWork ? 'Voice synthesis should be ready.' : 'There might be an issue with voice synthesis.')
|
||||
// + (synthesis.isConfiguredServerSide ? '' : (synthesis.isConfiguredClientSide ? '' : ' Please add your API key in the settings.'))
|
||||
}
|
||||
button={synthesis.mayWork ? undefined : (
|
||||
<Button variant='outlined' onClick={handleConfigureElevenLabs} sx={{ mx: 1 }}>
|
||||
button={synthesisShallWork ? undefined : (
|
||||
<Button variant='outlined' onClick={handleConfigureVoice} sx={{ mx: 1 }}>
|
||||
Configure
|
||||
</Button>
|
||||
)}
|
||||
hasIssue={!synthesis.mayWork}
|
||||
hasIssue={!synthesisShallWork}
|
||||
/>
|
||||
|
||||
{/*<Typography>*/}
|
||||
|
||||
@@ -317,7 +317,7 @@ export function Contacts(props: { setCallIntent: (intent: AppCallIntent) => void
|
||||
issue={354}
|
||||
text='Call App: Support thread and compatibility matrix'
|
||||
note={<>
|
||||
Voice input uses the HTML Web Speech API, and speech output requires an ElevenLabs API Key.
|
||||
Voice input uses the HTML Web Speech API.
|
||||
</>}
|
||||
// note2='Please report any issues you encounter'
|
||||
sx={{
|
||||
|
||||
+23
-36
@@ -7,16 +7,15 @@ import CallEndIcon from '@mui/icons-material/CallEnd';
|
||||
import CallIcon from '@mui/icons-material/Call';
|
||||
import MicIcon from '@mui/icons-material/Mic';
|
||||
import MicNoneIcon from '@mui/icons-material/MicNone';
|
||||
import RecordVoiceOverTwoToneIcon from '@mui/icons-material/RecordVoiceOverTwoTone';
|
||||
|
||||
import { ScrollToBottom } from '~/common/scroll-to-bottom/ScrollToBottom';
|
||||
import { ScrollToBottomButton } from '~/common/scroll-to-bottom/ScrollToBottomButton';
|
||||
import { useChatLLMDropdown } from '../chat/components/layout-bar/useLLMDropdown';
|
||||
|
||||
import { SystemPurposeId, SystemPurposes } from '../../data';
|
||||
import { elevenLabsSpeakText } from '~/modules/elevenlabs/elevenlabs.client';
|
||||
import { AixChatGenerateContent_DMessageGuts, aixChatGenerateContent_DMessage_FromConversation } from '~/modules/aix/client/aix.client';
|
||||
import { useElevenLabsVoiceDropdown } from '~/modules/elevenlabs/useElevenLabsVoiceDropdown';
|
||||
|
||||
import { aixChatGenerateContent_DMessage_FromConversation, AixChatGenerateContent_DMessageGuts } from '~/modules/aix/client/aix.client';
|
||||
import { speakText } from '~/modules/speex/speex.client';
|
||||
|
||||
import type { OptimaBarControlMethods } from '~/common/layout/optima/bar/OptimaBarDropdown';
|
||||
import { AudioPlayer } from '~/common/util/audio/AudioPlayer';
|
||||
@@ -24,13 +23,14 @@ import { Link } from '~/common/components/Link';
|
||||
import { OptimaPanelGroupedList } from '~/common/layout/optima/panel/OptimaPanelGroupedList';
|
||||
import { OptimaPanelIn, OptimaToolbarIn } from '~/common/layout/optima/portals/OptimaPortalsIn';
|
||||
import { SpeechResult, useSpeechRecognition } from '~/common/components/speechrecognition/useSpeechRecognition';
|
||||
import { clipboardInterceptCtrlCForCleanup } from '~/common/util/clipboardUtils';
|
||||
import { conversationTitle, remapMessagesSysToUsr } from '~/common/stores/chat/chat.conversation';
|
||||
import { createDMessageFromFragments, createDMessageTextContent, DMessage, messageFragmentsReduceText, messageWasInterruptedAtStart } from '~/common/stores/chat/chat.message';
|
||||
import { createErrorContentFragment } from '~/common/stores/chat/chat.fragments';
|
||||
import { launchAppChat, navigateToIndex } from '~/common/app.routes';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { useGlobalShortcuts } from '~/common/components/shortcuts/useGlobalShortcuts';
|
||||
import { usePlayUrl } from '~/common/util/audio/usePlayUrl';
|
||||
import { usePlayUrlInterval } from './state/usePlayUrlInterval';
|
||||
|
||||
import type { AppCallIntent } from './AppCall';
|
||||
import { CallAvatar } from './components/CallAvatar';
|
||||
@@ -43,18 +43,13 @@ import { useAppCallStore } from './state/store-app-call';
|
||||
function CallMenu(props: {
|
||||
pushToTalk: boolean,
|
||||
setPushToTalk: (pushToTalk: boolean) => void,
|
||||
override: boolean,
|
||||
setOverride: (overridePersonaVoice: boolean) => void,
|
||||
}) {
|
||||
|
||||
// external state
|
||||
const { grayUI, toggleGrayUI } = useAppCallStore();
|
||||
const { voicesDropdown } = useElevenLabsVoiceDropdown(false, !props.override);
|
||||
|
||||
const handlePushToTalkToggle = () => props.setPushToTalk(!props.pushToTalk);
|
||||
|
||||
const handleChangeVoiceToggle = () => props.setOverride(!props.override);
|
||||
|
||||
return <OptimaPanelGroupedList title='Call'>
|
||||
|
||||
<MenuItem onClick={handlePushToTalkToggle}>
|
||||
@@ -63,17 +58,6 @@ function CallMenu(props: {
|
||||
<Switch checked={props.pushToTalk} onChange={handlePushToTalkToggle} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem onClick={handleChangeVoiceToggle}>
|
||||
<ListItemDecorator><RecordVoiceOverTwoToneIcon /></ListItemDecorator>
|
||||
Change Voice
|
||||
<Switch checked={props.override} onChange={handleChangeVoiceToggle} sx={{ ml: 'auto' }} />
|
||||
</MenuItem>
|
||||
|
||||
<MenuItem>
|
||||
<ListItemDecorator>{' '}</ListItemDecorator>
|
||||
{voicesDropdown}
|
||||
</MenuItem>
|
||||
|
||||
<ListDivider />
|
||||
|
||||
<MenuItem onClick={toggleGrayUI}>
|
||||
@@ -98,7 +82,6 @@ export function Telephone(props: {
|
||||
const [avatarClickCount, setAvatarClickCount] = React.useState<number>(0);// const [micMuted, setMicMuted] = React.useState(false);
|
||||
const [callElapsedTime, setCallElapsedTime] = React.useState<string>('00:00');
|
||||
const [callMessages, setCallMessages] = React.useState<DMessage[]>([]);
|
||||
const [overridePersonaVoice, setOverridePersonaVoice] = React.useState<boolean>(false);
|
||||
const [personaTextInterim, setPersonaTextInterim] = React.useState<string | null>(null);
|
||||
const [pushToTalk, setPushToTalk] = React.useState(true);
|
||||
const [stage, setStage] = React.useState<'ring' | 'declined' | 'connected' | 'ended'>('ring');
|
||||
@@ -118,7 +101,7 @@ export function Telephone(props: {
|
||||
}));
|
||||
const persona = SystemPurposes[props.callIntent.personaId as SystemPurposeId] ?? undefined;
|
||||
const personaCallStarters = persona?.call?.starters ?? undefined;
|
||||
const personaVoiceId = overridePersonaVoice ? undefined : (persona?.voices?.elevenLabs?.voiceId ?? undefined);
|
||||
// const personaVoiceSelector = React.useMemo(() => personaGetVoiceSelector(persona), [persona]);
|
||||
const personaSystemMessage = persona?.systemMessage ?? undefined;
|
||||
|
||||
// hooks and speech
|
||||
@@ -144,11 +127,11 @@ export function Telephone(props: {
|
||||
|
||||
// pickup / hangup
|
||||
React.useEffect(() => {
|
||||
!isRinging && AudioPlayer.playUrl(isConnected ? '/sounds/chat-begin.mp3' : '/sounds/chat-end.mp3');
|
||||
!isRinging && void AudioPlayer.playUrl(isConnected ? '/sounds/chat-begin.mp3' : '/sounds/chat-end.mp3').catch(() => {/* autoplay may be blocked */});
|
||||
}, [isRinging, isConnected]);
|
||||
|
||||
// ringtone
|
||||
usePlayUrl(isRinging ? '/sounds/chat-ringtone.mp3' : null, 300, 2800 * 2);
|
||||
usePlayUrlInterval(isRinging ? '/sounds/chat-ringtone.mp3' : null, 300, 2800 * 2);
|
||||
|
||||
|
||||
/// Shortcuts
|
||||
@@ -165,7 +148,6 @@ export function Telephone(props: {
|
||||
};
|
||||
|
||||
// [E] pickup -> seed message and call timer
|
||||
// FIXME: Overriding the voice will reset the call - not a desired behavior
|
||||
React.useEffect(() => {
|
||||
if (!isConnected) return;
|
||||
|
||||
@@ -185,11 +167,14 @@ export function Telephone(props: {
|
||||
|
||||
setCallMessages([createDMessageTextContent('assistant', firstMessage)]); // [state] set assistant:hello message
|
||||
|
||||
// fire/forget
|
||||
void elevenLabsSpeakText(firstMessage, personaVoiceId, true, true);
|
||||
// fire/forget - use 'fast' priority for real-time conversation
|
||||
void speakText(firstMessage,
|
||||
undefined,
|
||||
{ label: 'Call', priority: 'fast' },
|
||||
);
|
||||
|
||||
return () => clearInterval(interval);
|
||||
}, [isConnected, personaCallStarters, personaVoiceId]);
|
||||
}, [isConnected, personaCallStarters]);
|
||||
|
||||
// [E] persona streaming response - upon new user message
|
||||
React.useEffect(() => {
|
||||
@@ -265,14 +250,17 @@ export function Telephone(props: {
|
||||
if (messageWasInterruptedAtStart(status.lastDMessage))
|
||||
return;
|
||||
|
||||
// whether status.outcome === 'success' or not, we get a valid DMessage, eventually with Error Fragments inside
|
||||
// whether status.outcome === 'completed' or not, we get a valid DMessage, eventually with Error Fragments inside
|
||||
const fullMessage = createDMessageFromFragments('assistant', status.lastDMessage.fragments);
|
||||
fullMessage.generator = status.lastDMessage.generator;
|
||||
setCallMessages(messages => [...messages, fullMessage]); // [state] append assistant:call_response
|
||||
|
||||
// fire/forget
|
||||
if (status.outcome === 'success' && finalText?.length >= 1)
|
||||
void elevenLabsSpeakText(finalText, personaVoiceId, true, true);
|
||||
// fire/forget - use 'fast' priority for real-time conversation
|
||||
if (status.outcome === 'completed' && finalText?.length >= 1)
|
||||
void speakText(finalText,
|
||||
undefined,
|
||||
{ label: 'Call', priority: 'fast' },
|
||||
);
|
||||
|
||||
}).catch((err: DOMException) => {
|
||||
if (err?.name !== 'AbortError') {
|
||||
@@ -288,7 +276,7 @@ export function Telephone(props: {
|
||||
responseAbortController.current?.abort();
|
||||
responseAbortController.current = null;
|
||||
};
|
||||
}, [isConnected, callMessages, modelId, personaVoiceId, personaSystemMessage, reMessages]);
|
||||
}, [callMessages, isConnected, modelId, personaSystemMessage, reMessages]);
|
||||
|
||||
// [E] Message interrupter
|
||||
const abortTrigger = isConnected && recognitionState.hasSpeech;
|
||||
@@ -325,7 +313,6 @@ export function Telephone(props: {
|
||||
<OptimaPanelIn>
|
||||
<CallMenu
|
||||
pushToTalk={pushToTalk} setPushToTalk={setPushToTalk}
|
||||
override={overridePersonaVoice} setOverride={setOverridePersonaVoice}
|
||||
/>
|
||||
</OptimaPanelIn>
|
||||
|
||||
@@ -373,7 +360,7 @@ export function Telephone(props: {
|
||||
|
||||
<ScrollToBottom stickToBottomInitial>
|
||||
|
||||
<Box sx={{ minHeight: '100%', p: 1, display: 'flex', flexDirection: 'column', gap: 1 }}>
|
||||
<Box onCopy={clipboardInterceptCtrlCForCleanup} sx={{ minHeight: '100%', p: 1, display: 'flex', flexDirection: 'column', gap: 1 }}>
|
||||
|
||||
{/* Call Messages [] */}
|
||||
{callMessages.map((message) =>
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { AudioPlayer } from '~/common/util/audio/AudioPlayer';
|
||||
|
||||
|
||||
@@ -8,15 +9,16 @@ import { AudioPlayer } from '~/common/util/audio/AudioPlayer';
|
||||
* @param firstDelay The delay before the first play, in milliseconds.
|
||||
* @param repeatMs The delay between each repeat, in milliseconds. If 0, the sound will only play once.
|
||||
*/
|
||||
export function usePlayUrl(url: string | null, firstDelay: number = 0, repeatMs: number = 0) {
|
||||
export function usePlayUrlInterval(url: string | null, firstDelay: number = 0, repeatMs: number = 0) {
|
||||
React.useEffect(() => {
|
||||
if (!url) return;
|
||||
|
||||
const abortController = new AbortController();
|
||||
let timer2: any = null;
|
||||
|
||||
const playFirstTime = () => {
|
||||
const playAudio = () => AudioPlayer.playUrl(url);
|
||||
void playAudio();
|
||||
const playAudio = () => void AudioPlayer.playUrl(url, abortController.signal).catch(() => {/* autoplay may be blocked */});
|
||||
playAudio();
|
||||
timer2 = repeatMs > 0 ? setInterval(playAudio, repeatMs) : null;
|
||||
};
|
||||
|
||||
@@ -24,8 +26,8 @@ export function usePlayUrl(url: string | null, firstDelay: number = 0, repeatMs:
|
||||
|
||||
return () => {
|
||||
clearTimeout(timer1);
|
||||
if (timer2)
|
||||
clearInterval(timer2);
|
||||
timer2 && clearInterval(timer2);
|
||||
abortController?.abort();
|
||||
};
|
||||
}, [firstDelay, repeatMs, url]);
|
||||
}
|
||||
+12
-21
@@ -4,13 +4,10 @@ import { Panel, PanelGroup, PanelResizeHandle } from 'react-resizable-panels';
|
||||
import type { SxProps } from '@mui/joy/styles/types';
|
||||
import { Box, useTheme } from '@mui/joy';
|
||||
|
||||
import { DEV_MODE_SETTINGS } from '../settings-modal/UxLabsSettings';
|
||||
|
||||
import type { DiagramConfig } from '~/modules/aifn/digrams/DiagramsModal';
|
||||
import type { TradeConfig } from '~/modules/trade/TradeModal';
|
||||
import { downloadSingleChat, importConversationsFromFilesAtRest, openConversationsAtRestPicker } from '~/modules/trade/trade.client';
|
||||
import { imaginePromptFromTextOrThrow } from '~/modules/aifn/imagine/imaginePromptFromText';
|
||||
import { elevenLabsSpeakText } from '~/modules/elevenlabs/elevenlabs.client';
|
||||
import { useAreBeamsOpen } from '~/modules/beam/store-beam.hooks';
|
||||
import { useCapabilityTextToImage } from '~/modules/t2i/t2i.client';
|
||||
|
||||
@@ -33,7 +30,7 @@ import { createErrorContentFragment, createTextContentFragment, DMessageAttachme
|
||||
import { gcChatImageAssets } from '~/common/stores/chat/chat.gc';
|
||||
import { getChatLLMId } from '~/common/stores/llms/store-llms';
|
||||
import { getConversation, getConversationSystemPurposeId, useConversation } from '~/common/stores/chat/store-chats';
|
||||
import { optimaActions, optimaOpenModels, optimaOpenPreferences } from '~/common/layout/optima/useOptima';
|
||||
import { optimaActions, optimaOpenModels, optimaOpenPreferences, useOptimaChromeless } from '~/common/layout/optima/useOptima';
|
||||
import { useFolderStore } from '~/common/stores/folders/store-chat-folders';
|
||||
import { useIsMobile, useIsTallScreen } from '~/common/components/useMatchMedia';
|
||||
import { useLLM } from '~/common/stores/llms/llms.hooks';
|
||||
@@ -41,8 +38,6 @@ import { useModelDomain } from '~/common/stores/llms/hooks/useModelDomain';
|
||||
import { useOverlayComponents } from '~/common/layout/overlays/useOverlayComponents';
|
||||
import { useRouterQuery } from '~/common/app.routes';
|
||||
import { useUIComplexityIsMinimal } from '~/common/stores/store-ui';
|
||||
import { useUXLabsStore } from '~/common/stores/store-ux-labs';
|
||||
|
||||
import { ChatPane } from './components/layout-pane/ChatPane';
|
||||
import { ChatBarBeam } from './components/layout-bar/ChatBarBeam';
|
||||
import { ChatBarAltTitle } from './components/layout-bar/ChatBarAltTitle';
|
||||
@@ -152,8 +147,6 @@ export function AppChat() {
|
||||
|
||||
const intent = useRouterQuery<Partial<AppChatIntent>>();
|
||||
|
||||
const showAltTitleBar = useUXLabsStore(state => DEV_MODE_SETTINGS && state.labsChatBarAlt === 'title');
|
||||
|
||||
const { domainModelId: chatLLMId } = useModelDomain('primaryChat');
|
||||
const chatLLM = useLLM(chatLLMId) ?? null;
|
||||
|
||||
@@ -216,7 +209,8 @@ export function AppChat() {
|
||||
});
|
||||
|
||||
// Composer Auto-hiding
|
||||
const forceComposerHide = !!beamOpenStoreInFocusedPane /* || !focusedPaneConversationId */; // auto-hide when no chat (the 'please select a conversation...' state) doesn't feel good
|
||||
const isChromeless = useOptimaChromeless() && isMobile; // auto-hide on Chromeless too
|
||||
const forceComposerHide = isChromeless || !!beamOpenStoreInFocusedPane /* || !focusedPaneConversationId */; // auto-hide when no chat (the 'please select a conversation...' state) doesn't feel good
|
||||
const composerAutoHide = useComposerAutoHide(forceComposerHide, composerHasContent);
|
||||
|
||||
// Window actions
|
||||
@@ -346,11 +340,6 @@ export function AppChat() {
|
||||
});
|
||||
}, [handleExecuteAndOutcome]);
|
||||
|
||||
const handleTextSpeak = React.useCallback(async (text: string): Promise<void> => {
|
||||
await elevenLabsSpeakText(text, undefined, true, true);
|
||||
}, []);
|
||||
|
||||
|
||||
// Chat actions
|
||||
|
||||
const handleConversationNewInFocusedPane = React.useCallback((forceNoRecycle: boolean, isIncognito: boolean) => {
|
||||
@@ -469,7 +458,7 @@ export function AppChat() {
|
||||
|
||||
// Pluggable Optima components
|
||||
|
||||
const barAltTitle = showAltTitleBar ? focusedChatTitle ?? 'No Chat' : null;
|
||||
const barAltTitle = null;
|
||||
|
||||
const focusedBarContent = React.useMemo(() => beamOpenStoreInFocusedPane
|
||||
? <ChatBarBeam conversationTitle={focusedChatTitle ?? 'No Chat'} beamStore={beamOpenStoreInFocusedPane} isMobile={isMobile} />
|
||||
@@ -504,6 +493,7 @@ export function AppChat() {
|
||||
|
||||
const focusedChatPanelContent = React.useMemo(() => !focusedPaneConversationId ? null :
|
||||
<ChatPane
|
||||
isMobile={isMobile}
|
||||
conversationId={focusedPaneConversationId}
|
||||
disableItems={!focusedPaneConversationId || isFocusedChatEmpty}
|
||||
hasConversations={hasConversations}
|
||||
@@ -593,9 +583,11 @@ export function AppChat() {
|
||||
}, []);
|
||||
|
||||
useGlobalShortcuts('AppChat', React.useMemo(() => [
|
||||
// focused conversation
|
||||
{ key: 'z', ctrl: true, shift: true, disabled: isFocusedChatEmpty, action: handleMessageRegenerateLastInFocusedPane, description: 'Retry' },
|
||||
{ key: 'b', ctrl: true, shift: true, disabled: isFocusedChatEmpty, action: handleMessageBeamLastInFocusedPane, description: 'Beam Edit' },
|
||||
// focused conversation (excluded when Beam is open so the keystroke passes through to the browser)
|
||||
...(beamOpenStoreInFocusedPane ? [] : [
|
||||
{ key: 'z', ctrl: true, shift: true, disabled: isFocusedChatEmpty, action: handleMessageRegenerateLastInFocusedPane, description: 'Retry' },
|
||||
{ key: 'b', ctrl: true, shift: true, disabled: isFocusedChatEmpty, action: handleMessageBeamLastInFocusedPane, description: 'Beam Edit' },
|
||||
]),
|
||||
{ key: 'o', ctrl: true, action: handleConversationsImportFormFilePicker },
|
||||
{ key: 's', ctrl: true, action: () => handleFileSaveConversation(focusedPaneConversationId) },
|
||||
{ key: 'n', ctrl: true, shift: true, action: () => handleConversationNewInFocusedPane(false, false) },
|
||||
@@ -613,7 +605,7 @@ export function AppChat() {
|
||||
{ key: 'p', ctrl: true, action: () => personaDropdownRef.current?.openListbox() /*, description: 'Open Persona Dropdown'*/ },
|
||||
// focused conversation llm
|
||||
{ key: 'o', ctrl: true, shift: true, action: handleOpenChatLlmOptions },
|
||||
], [focusedPaneConversationId, handleConversationNewInFocusedPane, handleConversationReset, handleConversationsImportFormFilePicker, handleDeleteConversations, handleFileSaveConversation, handleMessageBeamLastInFocusedPane, handleMessageRegenerateLastInFocusedPane, handleMoveFocus, handleNavigateHistoryInFocusedPane, handleOpenChatLlmOptions, isFocusedChatEmpty]));
|
||||
], [beamOpenStoreInFocusedPane, focusedPaneConversationId, handleConversationNewInFocusedPane, handleConversationReset, handleConversationsImportFormFilePicker, handleDeleteConversations, handleFileSaveConversation, handleMessageBeamLastInFocusedPane, handleMessageRegenerateLastInFocusedPane, handleMoveFocus, handleNavigateHistoryInFocusedPane, handleOpenChatLlmOptions, isFocusedChatEmpty]));
|
||||
|
||||
|
||||
return <>
|
||||
@@ -725,7 +717,6 @@ export function AppChat() {
|
||||
onConversationNew={handleConversationNewInFocusedPane}
|
||||
onTextDiagram={handleTextDiagram}
|
||||
onTextImagine={handleImagineFromText}
|
||||
onTextSpeak={handleTextSpeak}
|
||||
sx={chatMessageListSx}
|
||||
/>
|
||||
)}
|
||||
@@ -781,7 +772,7 @@ export function AppChat() {
|
||||
</Box>
|
||||
|
||||
{/* Hover zone for auto-hide */}
|
||||
{!forceComposerHide && composerAutoHide.isHidden && <Box {...composerAutoHide.detectorProps} />}
|
||||
{!isChromeless && !forceComposerHide && composerAutoHide.isHidden && <Box {...composerAutoHide.detectorProps} />}
|
||||
|
||||
{/* Diagrams */}
|
||||
{!!diagramConfig && (
|
||||
|
||||
@@ -6,19 +6,20 @@ import { Box, List } from '@mui/joy';
|
||||
|
||||
import type { SystemPurposeExample } from '../../../data';
|
||||
|
||||
import type { AixReattachMode } from '~/modules/aix/client/aix.client';
|
||||
import type { DiagramConfig } from '~/modules/aifn/digrams/DiagramsModal';
|
||||
import { speakText } from '~/modules/speex/speex.client';
|
||||
|
||||
import type { ConversationHandler } from '~/common/chat-overlay/ConversationHandler';
|
||||
import type { DLLMContextTokens } from '~/common/stores/llms/llms.types';
|
||||
import { DConversationId, excludeSystemMessages } from '~/common/stores/chat/chat.conversation';
|
||||
import { ShortcutKey, useGlobalShortcuts } from '~/common/components/shortcuts/useGlobalShortcuts';
|
||||
import { clipboardInterceptCtrlCForCleanup } from '~/common/util/clipboardUtils';
|
||||
import { convertFilesToDAttachmentFragments } from '~/common/attachment-drafts/attachment.pipeline';
|
||||
import { createDMessageFromFragments, createDMessageTextContent, DMessage, DMessageId, DMessageUserFlag, DMetaReferenceItem, MESSAGE_FLAG_AIX_SKIP, messageHasUserFlag } from '~/common/stores/chat/chat.message';
|
||||
import { createDMessageFromFragments, createDMessageTextContent, DMessage, DMessageGenerator, DMessageId, DMessageUserFlag, DMetaReferenceItem, MESSAGE_FLAG_AIX_SKIP, messageHasUserFlag } from '~/common/stores/chat/chat.message';
|
||||
import { createTextContentFragment, DMessageFragment, DMessageFragmentId } from '~/common/stores/chat/chat.fragments';
|
||||
import { openFileForAttaching } from '~/common/components/ButtonAttachFiles';
|
||||
import { optimaOpenPreferences } from '~/common/layout/optima/useOptima';
|
||||
import { useBrowserTranslationWarning } from '~/common/components/useIsBrowserTranslating';
|
||||
import { useCapabilityElevenLabs } from '~/common/components/useCapabilities';
|
||||
import { useChatOverlayStore } from '~/common/chat-overlay/store-perchat_vanilla';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { useScrollToBottom } from '~/common/scroll-to-bottom/useScrollToBottom';
|
||||
@@ -51,7 +52,6 @@ export function ChatMessageList(props: {
|
||||
onConversationNew: (forceNoRecycle: boolean, isIncognito: boolean) => void,
|
||||
onTextDiagram: (diagramConfig: DiagramConfig | null) => void,
|
||||
onTextImagine: (conversationId: DConversationId, selectedText: string) => Promise<void>,
|
||||
onTextSpeak: (selectedText: string) => Promise<void>,
|
||||
setIsMessageSelectionMode: (isMessageSelectionMode: boolean) => void,
|
||||
sx?: SxProps,
|
||||
}) {
|
||||
@@ -65,7 +65,6 @@ export function ChatMessageList(props: {
|
||||
const { notifyBooting } = useScrollToBottom();
|
||||
const danger_experimentalHtmlWebUi = useChatAutoSuggestHTMLUI();
|
||||
const [showSystemMessages] = useChatShowSystemMessages();
|
||||
const optionalTranslationWarning = useBrowserTranslationWarning();
|
||||
const { conversationMessages, historyTokenCount } = useChatStore(useShallow(({ conversations }) => {
|
||||
const conversation = conversations.find(conversation => conversation.id === props.conversationId);
|
||||
return {
|
||||
@@ -77,10 +76,9 @@ export function ChatMessageList(props: {
|
||||
_composerInReferenceToCount: state.inReferenceTo?.length ?? 0,
|
||||
ephemerals: state.ephemerals?.length ? state.ephemerals : null,
|
||||
})));
|
||||
const { mayWork: isSpeakable } = useCapabilityElevenLabs();
|
||||
|
||||
// derived state
|
||||
const { conversationHandler, conversationId, capabilityHasT2I, onConversationBranch, onConversationExecuteHistory, onTextDiagram, onTextImagine, onTextSpeak } = props;
|
||||
const { conversationHandler, conversationId, capabilityHasT2I, onConversationBranch, onConversationExecuteHistory, onTextDiagram, onTextImagine } = props;
|
||||
const composerCanAddInReferenceTo = _composerInReferenceToCount < 5;
|
||||
const composerHasInReferenceto = _composerInReferenceToCount > 0;
|
||||
|
||||
@@ -127,6 +125,91 @@ export function ChatMessageList(props: {
|
||||
}, [conversationHandler, conversationId, onConversationExecuteHistory]);
|
||||
|
||||
|
||||
// Resume in-flight tracking - lives at this level (NOT inside BlockOpUpstreamResume) so it
|
||||
// survives any remount of the message bubble during a long-running stream (e.g. Deep Research).
|
||||
// - `resumeInFlight` (state) drives the loading/Detach UI on BlockOpUpstreamResume via props.
|
||||
// - `resumeAbortersRef` (ref) holds the AbortController so Detach can abort even after a remount.
|
||||
// Map keyed by messageId so multiple messages could in principle resume concurrently.
|
||||
const [resumeInFlight, setResumeInFlight] = React.useState<Record<DMessageId, AixReattachMode>>({});
|
||||
const resumeAbortersRef = React.useRef<Map<DMessageId, AbortController>>(new Map());
|
||||
|
||||
const handleMessageUpstreamResume = React.useCallback(async (generator: DMessageGenerator, messageId: DMessageId, mode: AixReattachMode) => {
|
||||
if (!conversationId || !conversationHandler) return;
|
||||
if (!generator.upstreamHandle) throw new Error('No upstream handle on generator');
|
||||
|
||||
// For AIX generators the DLLMId is at .aix.mId
|
||||
const llmId = generator.mgt === 'aix' ? generator.aix.mId : undefined;
|
||||
if (!llmId) throw new Error('No model id on generator');
|
||||
|
||||
const controller = new AbortController();
|
||||
resumeAbortersRef.current.set(messageId, controller);
|
||||
setResumeInFlight(prev => ({ ...prev, [messageId]: mode }));
|
||||
|
||||
const { aixCreateChatGenerateContext, aixReattachContent_DMessage_orThrow } = await import('~/modules/aix/client/aix.client');
|
||||
try {
|
||||
await aixReattachContent_DMessage_orThrow(
|
||||
llmId,
|
||||
generator,
|
||||
aixCreateChatGenerateContext('conversation', conversationId),
|
||||
mode,
|
||||
{ abortSignal: controller.signal, throttleParallelThreads: 0 }, // Detach: aborting kills the local fetch; upstream run keeps going.
|
||||
async (update, isDone) => {
|
||||
conversationHandler.messageEdit(messageId, {
|
||||
fragments: update.fragments,
|
||||
generator: update.generator,
|
||||
pendingIncomplete: update.pendingIncomplete,
|
||||
}, isDone, isDone); // remove the pending state and update only when done
|
||||
},
|
||||
);
|
||||
} finally {
|
||||
// Clear local tracking only if this attempt is still the current one (avoid races on rapid retry)
|
||||
if (resumeAbortersRef.current.get(messageId) === controller)
|
||||
resumeAbortersRef.current.delete(messageId);
|
||||
setResumeInFlight(prev => {
|
||||
if (prev[messageId] !== mode) return prev;
|
||||
const { [messageId]: _, ...rest } = prev;
|
||||
return rest;
|
||||
});
|
||||
}
|
||||
|
||||
// Manual reattach is one-shot: on failure (e.g. upstream 404 from expired or already-consumed handle),
|
||||
// drop the upstreamHandle so the Resume button doesn't keep luring the user into the same error.
|
||||
// On 'aborted' we keep it so the user can try again later; on 'completed' the reassembler already cleared it.
|
||||
// 2026-04-22: disabled; it was removing the connect button on a connection error (e.g. wifi drop)
|
||||
// if (result.outcome === 'failed' && result.generator?.upstreamHandle)
|
||||
// conversationHandler.messageEdit(messageId, {
|
||||
// generator: { ...result.generator, upstreamHandle: undefined },
|
||||
// }, false /* messageComplete */, true /* touch */);
|
||||
}, [conversationHandler, conversationId]);
|
||||
|
||||
const handleMessageUpstreamDetach = React.useCallback((messageId: DMessageId) => {
|
||||
resumeAbortersRef.current.get(messageId)?.abort();
|
||||
}, []);
|
||||
|
||||
|
||||
const handleMessageUpstreamDelete = React.useCallback(async (generator: DMessageGenerator, messageId: DMessageId) => {
|
||||
if (!conversationId || !conversationHandler) return;
|
||||
if (!generator.upstreamHandle) throw new Error('No upstream handle on generator');
|
||||
|
||||
// For AIX generators the DLLMId is at .aix.mId
|
||||
const llmId = generator.mgt === 'aix' ? generator.aix.mId : undefined;
|
||||
if (!llmId) throw new Error('No model id on generator');
|
||||
|
||||
const { aixDeleteUpstreamContent_orThrow } = await import('~/modules/aix/client/aix.client');
|
||||
const result = await aixDeleteUpstreamContent_orThrow(llmId, generator);
|
||||
|
||||
// On success (or 404 already-gone), clear the handle locally so the buttons disappear
|
||||
if (result.ok) {
|
||||
conversationHandler.messageEdit(messageId, {
|
||||
generator: { ...generator, upstreamHandle: undefined },
|
||||
}, false /* messageComplete */, true /* touch */);
|
||||
return;
|
||||
}
|
||||
// On failure: surface to the button's error UI
|
||||
throw new Error(result.message || `Delete failed${result.httpStatus ? ` (HTTP ${result.httpStatus})` : ''}`);
|
||||
}, [conversationHandler, conversationId]);
|
||||
|
||||
|
||||
// message menu methods proxy
|
||||
|
||||
const handleMessageAssistantFrom = React.useCallback(async (messageId: DMessageId, offset: number) => {
|
||||
@@ -214,12 +297,15 @@ export function ChatMessageList(props: {
|
||||
}, [capabilityHasT2I, conversationId, onTextImagine]);
|
||||
|
||||
const handleTextSpeak = React.useCallback(async (text: string) => {
|
||||
if (!isSpeakable)
|
||||
return optimaOpenPreferences('voice');
|
||||
// sandwich the speaking with the indicator
|
||||
setIsSpeaking(true);
|
||||
await onTextSpeak(text);
|
||||
const result = await speakText(text, undefined, { label: 'Chat speak' });
|
||||
setIsSpeaking(false);
|
||||
}, [isSpeakable, onTextSpeak]);
|
||||
|
||||
// open voice preferences
|
||||
if (!result.success && (result.errorType === 'tts-no-engine' || result.errorType === 'tts-unconfigured'))
|
||||
optimaOpenPreferences('voice');
|
||||
}, []);
|
||||
|
||||
|
||||
// operate on the local selection set
|
||||
@@ -324,9 +410,7 @@ export function ChatMessageList(props: {
|
||||
);
|
||||
|
||||
return (
|
||||
<List role='chat-messages-list' sx={listSx}>
|
||||
|
||||
{optionalTranslationWarning}
|
||||
<List role='chat-messages-list' sx={listSx} onCopy={clipboardInterceptCtrlCForCleanup}>
|
||||
|
||||
{props.isMessageSelectionMode && (
|
||||
<MessagesSelectionHeader
|
||||
@@ -342,7 +426,11 @@ export function ChatMessageList(props: {
|
||||
|
||||
{filteredMessages.map((message, idx) => {
|
||||
|
||||
// Optimization: only memo complete components, or we'd be memoizing garbage
|
||||
// Optimization: only memo complete components, or we'd be memoizing garbage (fragments
|
||||
// change every chunk during streaming, so the equality check would always fail).
|
||||
// CAVEAT: switching between memo and non-memo at the same position causes React to
|
||||
// remount the subtree (different component types). Any state that must survive that
|
||||
// boundary lives on this component (e.g. resumeInFlight, resumeAbortersRef).
|
||||
const ChatMessageMemoOrNot = !message.pendingIncomplete ? ChatMessageMemo : ChatMessage;
|
||||
|
||||
return props.isMessageSelectionMode ? (
|
||||
@@ -373,6 +461,10 @@ export function ChatMessageList(props: {
|
||||
onMessageBeam={handleMessageBeam}
|
||||
onMessageBranch={handleMessageBranch}
|
||||
onMessageContinue={handleMessageContinue}
|
||||
onMessageUpstreamResume={handleMessageUpstreamResume}
|
||||
onMessageUpstreamDetach={handleMessageUpstreamDetach}
|
||||
onMessageUpstreamDelete={handleMessageUpstreamDelete}
|
||||
upstreamResumeMode={resumeInFlight[message.id]}
|
||||
onMessageDelete={handleMessageDelete}
|
||||
onMessageFragmentAppend={handleMessageAppendFragment}
|
||||
onMessageFragmentDelete={handleMessageDeleteFragment}
|
||||
@@ -381,7 +473,7 @@ export function ChatMessageList(props: {
|
||||
onMessageTruncate={handleMessageTruncate}
|
||||
onTextDiagram={handleTextDiagram}
|
||||
onTextImagine={capabilityHasT2I ? handleTextImagine : undefined}
|
||||
onTextSpeak={isSpeakable ? handleTextSpeak : undefined}
|
||||
onTextSpeak={handleTextSpeak}
|
||||
/>
|
||||
|
||||
);
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
import * as React from 'react';
|
||||
import { useShallow } from 'zustand/react/shallow';
|
||||
import type { FileWithHandle } from 'browser-fs-access';
|
||||
|
||||
import { Box, Button, ButtonGroup, Card, Dropdown, Grid, IconButton, Menu, MenuButton, MenuItem, Textarea, Typography } from '@mui/joy';
|
||||
import { ColorPaletteProp, SxProps, VariantProp } from '@mui/joy/styles/types';
|
||||
import AddCircleOutlineIcon from '@mui/icons-material/AddCircleOutline';
|
||||
import type { ColorPaletteProp, SxProps, VariantProp } from '@mui/joy/styles/types';
|
||||
import { Box, Button, ButtonGroup, Card, Grid, IconButton, Textarea, Typography } from '@mui/joy';
|
||||
import ExpandLessIcon from '@mui/icons-material/ExpandLess';
|
||||
import PsychologyIcon from '@mui/icons-material/Psychology';
|
||||
import SendIcon from '@mui/icons-material/Send';
|
||||
@@ -17,7 +15,8 @@ import { useChatAutoSuggestAttachmentPrompts, useChatMicTimeoutMsValue } from '.
|
||||
import { useAgiAttachmentPrompts } from '~/modules/aifn/agiattachmentprompts/useAgiAttachmentPrompts';
|
||||
import { useBrowseCapability } from '~/modules/browse/store-module-browsing';
|
||||
|
||||
import { DLLM, getLLMContextTokens, getLLMPricing, LLM_IF_OAI_Vision } from '~/common/stores/llms/llms.types';
|
||||
import { DLLM, getLLMContextTokens, LLM_IF_OAI_Vision } from '~/common/stores/llms/llms.types';
|
||||
import { llmChatPricing_adjusted } from '~/common/stores/llms/llms.pricing';
|
||||
import { AudioGenerator } from '~/common/util/audio/AudioGenerator';
|
||||
import { AudioPlayer } from '~/common/util/audio/AudioPlayer';
|
||||
import { ButtonAttachFilesMemo, openFileForAttaching } from '~/common/components/ButtonAttachFiles';
|
||||
@@ -25,6 +24,7 @@ import { ChatBeamIcon } from '~/common/components/icons/ChatBeamIcon';
|
||||
import { ConfirmationModal } from '~/common/components/modals/ConfirmationModal';
|
||||
import { ConversationsManager } from '~/common/chat-overlay/ConversationsManager';
|
||||
import { DMessageId, DMessageMetadata, DMetaReferenceItem, messageFragmentsReduceText } from '~/common/stores/chat/chat.message';
|
||||
import { PhPaintBrush } from '~/common/components/icons/phosphor/PhPaintBrush';
|
||||
import { ShortcutKey, ShortcutObject, useGlobalShortcuts } from '~/common/components/shortcuts/useGlobalShortcuts';
|
||||
import { addSnackbar } from '~/common/components/snackbar/useSnackbarsStore';
|
||||
import { animationEnterBelow } from '~/common/util/animUtils';
|
||||
@@ -34,12 +34,13 @@ import { copyToClipboard, supportsClipboardRead } from '~/common/util/clipboardU
|
||||
import { createTextContentFragment, DMessageAttachmentFragment, DMessageContentFragment, duplicateDMessageFragments } from '~/common/stores/chat/chat.fragments';
|
||||
import { glueForMessageTokens, marshallWrapDocFragments } from '~/common/stores/chat/chat.tokens';
|
||||
import { isValidConversation, useChatStore } from '~/common/stores/chat/store-chats';
|
||||
import { getModelParameterValueOrThrow } from '~/common/stores/llms/llms.parameters';
|
||||
import { getModelParameterValueWithFallback } from '~/common/stores/llms/llms.parameters';
|
||||
import { launchAppCall, removeQueryParam, useRouterQuery } from '~/common/app.routes';
|
||||
import { lineHeightTextareaMd, themeBgAppChatComposer } from '~/common/app.theme';
|
||||
import { optimaOpenPreferences } from '~/common/layout/optima/useOptima';
|
||||
import { platformAwareKeystrokes } from '~/common/components/KeyStroke';
|
||||
import { supportsCameraCapture } from '~/common/components/camera/useCameraCapture';
|
||||
import { supportsScreenCapture } from '~/common/util/screenCaptureUtils';
|
||||
import { useAttachHandler_CameraOpen, useAttachHandler_Files, useAttachHandler_PasteIntercept, useAttachHandler_ScreenCapture, useAttachHandler_UrlWebLinks } from '~/common/attachment-drafts/attachment-sources/useAttachmentSourceHandlers';
|
||||
import { useChatComposerOverlayStore } from '~/common/chat-overlay/store-perchat_vanilla';
|
||||
import { useComposerStartupText, useLogicSherpaStore } from '~/common/logic/store-logic-sherpa';
|
||||
import { useOverlayComponents } from '~/common/layout/overlays/useOverlayComponents';
|
||||
@@ -52,19 +53,15 @@ import { providerCommands } from './actile/providerCommands';
|
||||
import { providerStarredMessages, StarredMessageItem } from './actile/providerStarredMessage';
|
||||
import { useActileManager } from './actile/useActileManager';
|
||||
|
||||
import type { AttachmentDraftId } from '~/common/attachment-drafts/attachment.types';
|
||||
import { LLMAttachmentDraftsAction, LLMAttachmentsList } from './llmattachments/LLMAttachmentsList';
|
||||
import { PhPaintBrush } from '~/common/components/icons/phosphor/PhPaintBrush';
|
||||
import type { AttachmentDraftId, AttachmentDraftsAction } from '~/common/attachment-drafts/attachment.types';
|
||||
import { AttachmentSourcesMemo } from '~/common/attachment-drafts/attachment-sources/AttachmentSources';
|
||||
import { useAttachmentDrafts } from '~/common/attachment-drafts/useAttachmentDrafts';
|
||||
import { useLLMAttachmentDrafts } from './llmattachments/useLLMAttachmentDrafts';
|
||||
import { useAttachmentDraftsEnrichment } from '~/common/attachment-drafts/llm-enrichment/useAttachmentDraftsEnrichment';
|
||||
import { useGoogleDrivePicker } from '~/common/attachment-drafts/attachment-sources/useGoogleDrivePicker';
|
||||
|
||||
import type { ChatExecuteMode } from '../../execute-mode/execute-mode.types';
|
||||
import { chatExecuteModeCanAttach, useChatExecuteMode } from '../../execute-mode/useChatExecuteMode';
|
||||
|
||||
import { ButtonAttachCameraMemo, useCameraCaptureModalDialog } from './buttons/ButtonAttachCamera';
|
||||
import { ButtonAttachClipboardMemo } from './buttons/ButtonAttachClipboard';
|
||||
import { ButtonAttachScreenCaptureMemo } from './buttons/ButtonAttachScreenCapture';
|
||||
import { ButtonAttachWebMemo } from './buttons/ButtonAttachWeb';
|
||||
import { ButtonBeamMemo } from './buttons/ButtonBeam';
|
||||
import { ButtonCallMemo } from './buttons/ButtonCall';
|
||||
import { ButtonGroupDrawRepeat } from './buttons/ButtonGroupDrawRepeat';
|
||||
@@ -72,6 +69,7 @@ import { ButtonMicContinuationMemo } from './buttons/ButtonMicContinuation';
|
||||
import { ButtonMicMemo } from './buttons/ButtonMic';
|
||||
import { ButtonMultiChatMemo } from './buttons/ButtonMultiChat';
|
||||
import { ButtonOptionsDraw } from './buttons/ButtonOptionsDraw';
|
||||
import { ComposerAttachmentDraftsList } from './llmattachments/ComposerAttachmentDraftsList';
|
||||
import { ComposerTextAreaActions } from './textarea/ComposerTextAreaActions';
|
||||
import { ComposerTextAreaDrawActions } from './textarea/ComposerTextAreaDrawActions';
|
||||
import { StatusBarMemo } from '../StatusBar';
|
||||
@@ -79,7 +77,6 @@ import { TokenBadgeMemo } from './tokens/TokenBadge';
|
||||
import { TokenProgressbarMemo } from './tokens/TokenProgressbar';
|
||||
import { useComposerDragDrop } from './useComposerDragDrop';
|
||||
import { useTextTokenCount } from './tokens/useTextTokenCounter';
|
||||
import { useWebInputModal } from './WebInputModal';
|
||||
|
||||
|
||||
// configuration
|
||||
@@ -136,16 +133,13 @@ export function Composer(props: {
|
||||
// external state
|
||||
const { showPromisedOverlay } = useOverlayComponents();
|
||||
const { newChat: appChatNewChatIntent } = useRouterQuery<Partial<AppChatIntent>>();
|
||||
const { labsAttachScreenCapture, labsCameraDesktop, labsShowCost, labsShowShortcutBar } = useUXLabsStore(useShallow(state => ({
|
||||
labsAttachScreenCapture: state.labsAttachScreenCapture,
|
||||
labsCameraDesktop: state.labsCameraDesktop,
|
||||
labsShowCost: state.labsShowCost,
|
||||
const { labsComposerAttachmentsInline, labsShowShortcutBar } = useUXLabsStore(useShallow(state => ({
|
||||
labsComposerAttachmentsInline: state.labsComposerAttachmentsInline,
|
||||
labsShowShortcutBar: state.labsShowShortcutBar,
|
||||
})));
|
||||
const timeToShowTips = useLogicSherpaStore(state => state.usageCount >= SHOW_TIPS_AFTER_RELOADS);
|
||||
const { novel: explainShiftEnter, touch: touchShiftEnter } = useUICounter('composer-shift-enter');
|
||||
const { novel: explainAltEnter, touch: touchAltEnter } = useUICounter('composer-alt-enter');
|
||||
const { novel: explainCtrlEnter, touch: touchCtrlEnter } = useUICounter('composer-ctrl-enter');
|
||||
|
||||
const [startupText, setStartupText] = useComposerStartupText();
|
||||
const enterIsNewline = useUIPreferencesStore(state => state.enterIsNewline);
|
||||
const composerQuickButton = useUIPreferencesStore(state => state.composerQuickButton);
|
||||
@@ -174,8 +168,8 @@ export function Composer(props: {
|
||||
const chatLLMSupportsImages = !!props.chatLLM?.interfaces?.includes(LLM_IF_OAI_Vision);
|
||||
|
||||
// don't load URLs if the user is typing a command or there's no capability
|
||||
const hasComposerBrowseCapability = useBrowseCapability().inComposer;
|
||||
const enableLoadURLsInComposer = hasComposerBrowseCapability && !composeText.startsWith('/');
|
||||
const browseCapability = useBrowseCapability();
|
||||
const enableLoadURLsInComposer = browseCapability.inComposer && !composeText.startsWith('/');
|
||||
|
||||
// user message for attachments
|
||||
const { onConversationBeamEdit, onConversationsImportFromFiles } = props;
|
||||
@@ -197,12 +191,12 @@ export function Composer(props: {
|
||||
const showChatAttachments = chatExecuteModeCanAttach(chatExecuteMode, props.capabilityHasT2IEdit);
|
||||
const {
|
||||
/* items */ attachmentDrafts,
|
||||
/* append */ attachAppendClipboardItems, attachAppendDataTransfer, attachAppendEgoFragments, attachAppendFile, attachAppendUrl,
|
||||
/* append */ attachAppendClipboardItems, attachAppendCloudFile, attachAppendDataTransfer, attachAppendEgoFragments, attachAppendFile, attachAppendUrl,
|
||||
/* take */ attachmentsRemoveAll, attachmentsTakeAllFragments, attachmentsTakeFragmentsByType,
|
||||
} = useAttachmentDrafts(conversationOverlayStore, enableLoadURLsInComposer, chatLLMSupportsImages, handleFilterAGIFile, showChatAttachments === 'only-images');
|
||||
|
||||
// attachments derived state
|
||||
const llmAttachmentDraftsCollection = useLLMAttachmentDrafts(attachmentDrafts, props.chatLLM, chatLLMSupportsImages);
|
||||
const { enrichment: attEnrichment, summary: attEnrichSummary } = useAttachmentDraftsEnrichment(attachmentDrafts, props.chatLLM, chatLLMSupportsImages);
|
||||
|
||||
// drag/drop
|
||||
const { dragContainerSx, dropComponent, handleContainerDragEnter, handleContainerDragStart } = useComposerDragDrop(!props.isMobile, attachAppendDataTransfer);
|
||||
@@ -227,13 +221,13 @@ export function Composer(props: {
|
||||
// tokens derived state
|
||||
|
||||
const tokensComposerTextDebounced = useTextTokenCount(composeText, props.chatLLM, 800, 1600);
|
||||
let tokensComposer = (tokensComposerTextDebounced ?? 0) + (llmAttachmentDraftsCollection.llmTokenCountApprox || 0);
|
||||
let tokensComposer = (tokensComposerTextDebounced ?? 0) + (attEnrichSummary.totalTokensApprox || 0);
|
||||
if (props.chatLLM && tokensComposer > 0)
|
||||
tokensComposer += glueForMessageTokens(props.chatLLM);
|
||||
const tokensHistory = _historyTokenCount;
|
||||
const tokensResponseMax = getModelParameterValueOrThrow('llmResponseTokens', props.chatLLM?.initialParameters, props.chatLLM?.userParameters, 0) ?? 0;
|
||||
const tokensResponseMax = getModelParameterValueWithFallback('llmResponseTokens', props.chatLLM?.initialParameters, props.chatLLM?.userParameters, 0) ?? 0 /* if null, assume 0*/;
|
||||
const tokenLimit = getLLMContextTokens(props.chatLLM) ?? 0;
|
||||
const tokenChatPricing = getLLMPricing(props.chatLLM)?.chat;
|
||||
const tokenChatPricing = React.useMemo(() => llmChatPricing_adjusted(props.chatLLM), [props.chatLLM]);
|
||||
|
||||
|
||||
// Effect: load initial text if queued up (e.g. by /link/share_targetF)
|
||||
@@ -271,7 +265,7 @@ export function Composer(props: {
|
||||
// Confirmation Modals
|
||||
|
||||
const confirmProceedIfAttachmentsNotSupported = React.useCallback(async (): Promise<boolean> => {
|
||||
if (llmAttachmentDraftsCollection.canAttachAllFragments) return true;
|
||||
if (attEnrichSummary.allCompatible) return true;
|
||||
return await showPromisedOverlay('composer-unsupported-attachments', { rejectWithValue: false }, ({ onResolve, onUserReject }) => (
|
||||
<ConfirmationModal
|
||||
open
|
||||
@@ -283,7 +277,7 @@ export function Composer(props: {
|
||||
title='Attachment Compatibility Notice'
|
||||
/>
|
||||
));
|
||||
}, [llmAttachmentDraftsCollection.canAttachAllFragments, showPromisedOverlay]);
|
||||
}, [attEnrichSummary.allCompatible, showPromisedOverlay]);
|
||||
|
||||
|
||||
// Primary button
|
||||
@@ -545,20 +539,21 @@ export function Composer(props: {
|
||||
|
||||
// Enter: primary action
|
||||
if (e.key === 'Enter') {
|
||||
// Skip if composing (e.g., CJK input methods) - issue #784
|
||||
if (e.nativeEvent.isComposing)
|
||||
return;
|
||||
|
||||
// Alt (Windows) or Option (Mac) + Enter: append the message instead of sending it
|
||||
if (e.altKey && !e.metaKey && !e.ctrlKey) {
|
||||
if (await handleSendAction('append-user', composeText)) // 'alt+enter' -> write
|
||||
touchAltEnter();
|
||||
e.stopPropagation();
|
||||
return e.preventDefault();
|
||||
}
|
||||
|
||||
// Ctrl (Windows) or Command (Mac) + Enter: send for beaming
|
||||
if (e.ctrlKey && !e.metaKey && !e.altKey) {
|
||||
if (await handleSendAction('beam-content', composeText)) { // 'ctrl+enter' -> beam
|
||||
touchCtrlEnter();
|
||||
if (await handleSendAction('beam-content', composeText)) // 'ctrl+enter' -> beam
|
||||
e.stopPropagation();
|
||||
}
|
||||
return e.preventDefault();
|
||||
}
|
||||
|
||||
@@ -572,7 +567,7 @@ export function Composer(props: {
|
||||
}
|
||||
}
|
||||
|
||||
}, [actileInterceptKeydown, assistantAbortible, chatExecuteMode, composeText, enterIsNewline, handleSendAction, touchAltEnter, touchCtrlEnter, touchShiftEnter]);
|
||||
}, [actileInterceptKeydown, assistantAbortible, chatExecuteMode, composeText, enterIsNewline, handleSendAction, touchShiftEnter]);
|
||||
|
||||
|
||||
// Focus mode
|
||||
@@ -589,41 +584,19 @@ export function Composer(props: {
|
||||
const handleToggleMinimized = React.useCallback(() => setIsMinimized(hide => !hide), []);
|
||||
|
||||
|
||||
// Attachment Up
|
||||
// Attachments Up
|
||||
|
||||
const handleAttachCtrlV = React.useCallback(async (event: React.ClipboardEvent) => {
|
||||
if (await attachAppendDataTransfer(event.clipboardData, 'paste', false) === 'as_files')
|
||||
event.preventDefault();
|
||||
}, [attachAppendDataTransfer]);
|
||||
|
||||
const handleAttachCameraImage = React.useCallback((file: FileWithHandle) => {
|
||||
void attachAppendFile('camera', file);
|
||||
}, [attachAppendFile]);
|
||||
|
||||
const { openCamera, cameraCaptureComponent } = useCameraCaptureModalDialog(handleAttachCameraImage);
|
||||
|
||||
const handleAttachScreenCapture = React.useCallback((file: File) => {
|
||||
void attachAppendFile('screencapture', file);
|
||||
}, [attachAppendFile]);
|
||||
|
||||
const handleAttachFiles = React.useCallback(async (files: FileWithHandle[], errorMessage: string | null) => {
|
||||
if (errorMessage)
|
||||
addSnackbar({ key: 'attach-files-open-fail', message: `Unable to open files: ${errorMessage}`, type: 'issue' });
|
||||
for (let file of files)
|
||||
await attachAppendFile('file-open', file)
|
||||
.catch((error: any) => addSnackbar({ key: 'attach-file-open-fail', message: `Unable to attach the file "${file.name}" (${error?.message || error?.toString() || 'unknown error'})`, type: 'issue' }));
|
||||
}, [attachAppendFile]);
|
||||
|
||||
const handleAttachWebLinks = React.useCallback(async (links: { url: string }[]) => {
|
||||
links.forEach(link => void attachAppendUrl('input-link', link.url));
|
||||
}, [attachAppendUrl]);
|
||||
|
||||
const { openWebInputDialog, webInputDialogComponent } = useWebInputModal(handleAttachWebLinks, composeText);
|
||||
const handleAttachCtrlV = useAttachHandler_PasteIntercept(attachAppendDataTransfer);
|
||||
const handleAttachFiles = useAttachHandler_Files(attachAppendFile);
|
||||
const handleOpenCamera = useAttachHandler_CameraOpen(attachAppendFile);
|
||||
const handleAttachScreenCapture = useAttachHandler_ScreenCapture(attachAppendFile);
|
||||
const { openWebInputDialog, webInputDialogComponent } = useAttachHandler_UrlWebLinks(attachAppendUrl, composeText);
|
||||
const { openGoogleDrivePicker, googleDrivePickerComponent } = useGoogleDrivePicker(attachAppendCloudFile, isMobile);
|
||||
|
||||
|
||||
// Attachments Down
|
||||
|
||||
const handleAttachmentDraftsAction = React.useCallback((attachmentDraftIdOrAll: AttachmentDraftId | null, action: LLMAttachmentDraftsAction) => {
|
||||
const handleAttachmentDraftsAction = React.useCallback((attachmentDraftIdOrAll: AttachmentDraftId | null, action: AttachmentDraftsAction) => {
|
||||
switch (action) {
|
||||
case 'copy-text':
|
||||
const copyFragments = attachmentsTakeFragmentsByType('doc', attachmentDraftIdOrAll, false);
|
||||
@@ -652,7 +625,7 @@ export function Composer(props: {
|
||||
if (supportsClipboardRead())
|
||||
composerShortcuts.push({ key: 'v', ctrl: true, shift: true, action: attachAppendClipboardItems, description: 'Attach Clipboard' });
|
||||
// Future: keep reactive state here to support Live Screen Capture and more
|
||||
// if (labsAttachScreenCapture && supportsScreenCapture)
|
||||
// if (supportsScreenCapture)
|
||||
// composerShortcuts.push({ key: 's', ctrl: true, shift: true, action: openScreenCaptureDialog, description: 'Attach Screen Capture' });
|
||||
}
|
||||
if (recognitionState.isActive) {
|
||||
@@ -685,12 +658,13 @@ export function Composer(props: {
|
||||
|
||||
const showChatInReferenceTo = !!inReferenceTo?.length;
|
||||
const showChatExtras = isText && !showChatInReferenceTo && !assistantAbortible && composerQuickButton !== 'off';
|
||||
const speechMayWork = browserSpeechRecognitionCapability().mayWork;
|
||||
|
||||
const sendButtonVariant: VariantProp = (isAppend || (isMobile && isTextBeam)) ? 'outlined' : 'solid';
|
||||
|
||||
const sendButtonColor: ColorPaletteProp =
|
||||
assistantAbortible ? 'warning'
|
||||
: !llmAttachmentDraftsCollection.canAttachAllFragments ? 'warning'
|
||||
: !attEnrichSummary.allCompatible ? 'warning'
|
||||
: chatExecuteModeSendColor;
|
||||
|
||||
const sendButtonLabel = chatExecuteModeSendLabel;
|
||||
@@ -704,7 +678,7 @@ export function Composer(props: {
|
||||
: <TelegramIcon />;
|
||||
|
||||
const beamButtonColor: ColorPaletteProp | undefined =
|
||||
!llmAttachmentDraftsCollection.canAttachAllFragments ? 'warning'
|
||||
!attEnrichSummary.allCompatible ? 'warning'
|
||||
: undefined;
|
||||
|
||||
const showTint: ColorPaletteProp | undefined = isDraw ? 'warning' : isReAct ? 'success' : undefined;
|
||||
@@ -731,10 +705,6 @@ export function Composer(props: {
|
||||
if (isDesktop && timeToShowTips && !isDraw) {
|
||||
if (explainShiftEnter)
|
||||
textPlaceholder += !enterIsNewline ? '\n\n⏎ Shift + Enter to add a new line' : '\n\n➤ Shift + Enter to send';
|
||||
// else if (explainAltEnter)
|
||||
// textPlaceholder += platformAwareKeystrokes('\n\n⭳ Tip: Alt + Enter to just append the message');
|
||||
else if (explainCtrlEnter)
|
||||
textPlaceholder += platformAwareKeystrokes('\n\n⫷ Tip: Ctrl + Enter to beam');
|
||||
}
|
||||
|
||||
const stableGridSx: SxProps = React.useMemo(() => ({
|
||||
@@ -775,37 +745,24 @@ export function Composer(props: {
|
||||
{/* [mobile] Mic button */}
|
||||
{recognitionState.isAvailable && <ButtonMicMemo variant={micVariant} color={micColor === 'danger' ? 'danger' : showTint || micColor} errorMessage={recognitionState.errorMessage} onClick={handleToggleMic} />}
|
||||
|
||||
{/* Responsive Camera OCR button */}
|
||||
{showChatAttachments && <ButtonAttachCameraMemo color={showTint} isMobile onOpenCamera={openCamera} />}
|
||||
|
||||
{/* [mobile] Attach file button (in draw with image mode) */}
|
||||
{showChatAttachments === 'only-images' && <ButtonAttachFilesMemo color={showTint} isMobile onAttachFiles={handleAttachFiles} fullWidth multiple />}
|
||||
{showChatAttachments === 'only-images' && <ButtonAttachFilesMemo color={showTint} isMobile onAttachFiles={handleAttachFiles} multiple />}
|
||||
|
||||
{/* [mobile] [+] button */}
|
||||
{/* [mobile] [+] attachment sources menu */}
|
||||
{showChatAttachments === true && (
|
||||
<Dropdown>
|
||||
<MenuButton slots={{ root: IconButton }}>
|
||||
<AddCircleOutlineIcon />
|
||||
</MenuButton>
|
||||
<Menu>
|
||||
|
||||
{/* Responsive Open Files button */}
|
||||
<MenuItem>
|
||||
<ButtonAttachFilesMemo onAttachFiles={handleAttachFiles} fullWidth multiple />
|
||||
</MenuItem>
|
||||
|
||||
{/* Responsive Web button */}
|
||||
<MenuItem>
|
||||
<ButtonAttachWebMemo disabled={!hasComposerBrowseCapability} onOpenWebInput={openWebInputDialog} />
|
||||
</MenuItem>
|
||||
|
||||
{/* Responsive Paste button */}
|
||||
{supportsClipboardRead() && <MenuItem>
|
||||
<ButtonAttachClipboardMemo onAttachClipboard={attachAppendClipboardItems} />
|
||||
</MenuItem>}
|
||||
|
||||
</Menu>
|
||||
</Dropdown>
|
||||
<AttachmentSourcesMemo
|
||||
mode='menu-compact'
|
||||
canBrowse={browseCapability.mayWork}
|
||||
hasScreenCapture={supportsScreenCapture}
|
||||
hasCamera={supportsCameraCapture()}
|
||||
onlyImages={false /* because if yes, we only show the attach files above */}
|
||||
onAttachClipboard={attachAppendClipboardItems}
|
||||
onAttachFiles={handleAttachFiles}
|
||||
onAttachScreenCapture={handleAttachScreenCapture}
|
||||
onOpenCamera={handleOpenCamera}
|
||||
onOpenGoogleDrivePicker={openGoogleDrivePicker}
|
||||
onOpenWebInput={openWebInputDialog}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* [Mobile] MultiChat button */}
|
||||
@@ -816,28 +773,27 @@ export function Composer(props: {
|
||||
|
||||
{/* [Desktop, Col1] Insert Multi-modal content buttons */}
|
||||
{isDesktop && showChatAttachments && (
|
||||
<Box sx={{ flexGrow: 0, display: 'grid', gap: (labsAttachScreenCapture && labsCameraDesktop) ? 0.5 : 1, alignSelf: 'flex-start' }}>
|
||||
<Box sx={{ flexGrow: 0, display: 'grid', gap: 0.5, alignSelf: 'flex-start' }}>
|
||||
|
||||
{/*<FormHelperText sx={{ mx: 'auto' }}>*/}
|
||||
{/* Attach*/}
|
||||
{/*</FormHelperText>*/}
|
||||
{/* [desktop] Attachment Sources: dropdown menu or inline buttons */}
|
||||
<AttachmentSourcesMemo
|
||||
mode={!labsComposerAttachmentsInline ? 'menu-rich' : 'inline-buttons'}
|
||||
color={!labsComposerAttachmentsInline ? (showTint || 'neutral') : showTint}
|
||||
richButtonStandOut={!isText && !isAppend}
|
||||
canBrowse={browseCapability.mayWork}
|
||||
hasScreenCapture={supportsScreenCapture}
|
||||
hasCamera={supportsCameraCapture()}
|
||||
onlyImages={showChatAttachments === 'only-images'}
|
||||
onAttachClipboard={attachAppendClipboardItems}
|
||||
onAttachFiles={handleAttachFiles}
|
||||
onAttachScreenCapture={handleAttachScreenCapture}
|
||||
onOpenCamera={handleOpenCamera}
|
||||
onOpenGoogleDrivePicker={openGoogleDrivePicker}
|
||||
onOpenWebInput={openWebInputDialog}
|
||||
/>
|
||||
|
||||
{/* Responsive Open Files button */}
|
||||
<ButtonAttachFilesMemo color={showTint} onAttachFiles={handleAttachFiles} fullWidth multiple />
|
||||
|
||||
{/* Responsive Web button */}
|
||||
{showChatAttachments !== 'only-images' && <ButtonAttachWebMemo color={showTint} disabled={!hasComposerBrowseCapability} onOpenWebInput={openWebInputDialog} />}
|
||||
|
||||
{/* Responsive Paste button */}
|
||||
{supportsClipboardRead() && showChatAttachments !== 'only-images' && <ButtonAttachClipboardMemo color={showTint} onAttachClipboard={attachAppendClipboardItems} />}
|
||||
|
||||
{/* Responsive Screen Capture button */}
|
||||
{labsAttachScreenCapture && supportsScreenCapture && <ButtonAttachScreenCaptureMemo color={showTint} onAttachScreenCapture={handleAttachScreenCapture} />}
|
||||
|
||||
{/* Responsive Camera OCR button */}
|
||||
{labsCameraDesktop && <ButtonAttachCameraMemo color={showTint} onOpenCamera={openCamera} />}
|
||||
|
||||
</Box>)}
|
||||
</Box>
|
||||
)}
|
||||
|
||||
|
||||
{/* Top: Textarea & Mic & Overlays, Bottom, Attachment Drafts */}
|
||||
@@ -905,7 +861,7 @@ export function Composer(props: {
|
||||
)}
|
||||
|
||||
{!showChatInReferenceTo && !isDraw && tokenLimit > 0 && (
|
||||
<TokenBadgeMemo hideBelowDollars={0.005} chatPricing={tokenChatPricing} direct={tokensComposer} history={tokensHistory} responseMax={tokensResponseMax} limit={tokenLimit} showCost={labsShowCost} enableHover={!isMobile} showExcess absoluteBottomRight />
|
||||
<TokenBadgeMemo showCost hideBelowDollars={0.01} chatPricing={tokenChatPricing} direct={tokensComposer} history={tokensHistory} responseMax={tokensResponseMax} limit={tokenLimit} enableHover={!isMobile} showExcess absoluteBottomRight />
|
||||
)}
|
||||
|
||||
</Box>
|
||||
@@ -984,11 +940,12 @@ export function Composer(props: {
|
||||
|
||||
{/* Render any Attachments & menu items */}
|
||||
{!!conversationOverlayStore && showChatAttachments && (
|
||||
<LLMAttachmentsList
|
||||
agiAttachmentPrompts={agiAttachmentPrompts}
|
||||
<ComposerAttachmentDraftsList
|
||||
attachmentDraftsStoreApi={conversationOverlayStore}
|
||||
canInlineSomeFragments={llmAttachmentDraftsCollection.canInlineSomeFragments}
|
||||
llmAttachmentDrafts={llmAttachmentDraftsCollection.llmAttachmentDrafts}
|
||||
attachmentDrafts={attachmentDrafts}
|
||||
enrichment={attEnrichment}
|
||||
enrichmentSummary={attEnrichSummary}
|
||||
agiAttachmentPrompts={agiAttachmentPrompts}
|
||||
onAttachmentDraftsAction={handleAttachmentDraftsAction}
|
||||
/>
|
||||
)}
|
||||
@@ -1008,7 +965,7 @@ export function Composer(props: {
|
||||
|
||||
{/* [mobile] bottom-corner secondary button */}
|
||||
{isMobile && (showChatExtras
|
||||
? (composerQuickButton === 'call'
|
||||
? (composerQuickButton === 'call' && speechMayWork
|
||||
? <ButtonCallMemo isMobile disabled={noConversation || noLLM} onClick={handleCallClicked} />
|
||||
: <ButtonBeamMemo isMobile disabled={noConversation /*|| noLLM*/} color={beamButtonColor} hasContent={!!composeText} onClick={handleSendTextBeamClicked} />)
|
||||
: isDraw
|
||||
@@ -1099,8 +1056,8 @@ export function Composer(props: {
|
||||
{/* [desktop] secondary bottom-buttons (aligned to bottom for now, and mutually exclusive) */}
|
||||
{isDesktop && <Box sx={{ mt: 'auto', display: 'grid', gap: 1 }}>
|
||||
|
||||
{/* [desktop] Call secondary button */}
|
||||
{showChatExtras && <ButtonCallMemo disabled={noConversation || noLLM || assistantAbortible} onClick={handleCallClicked} />}
|
||||
{/* [desktop] Call secondary button - hidden when speech recognition is not available */}
|
||||
{showChatExtras && speechMayWork && <ButtonCallMemo disabled={noConversation || noLLM || assistantAbortible} onClick={handleCallClicked} />}
|
||||
|
||||
{/* [desktop] Draw Options secondary button */}
|
||||
{isDraw && <ButtonOptionsDraw onClick={handleDrawOptionsClicked} />}
|
||||
@@ -1120,8 +1077,8 @@ export function Composer(props: {
|
||||
{/* Execution Mode Menu */}
|
||||
{chatExecuteMenuComponent}
|
||||
|
||||
{/* Camera (when open) */}
|
||||
{cameraCaptureComponent}
|
||||
{/* Google Drive Picker (when open) */}
|
||||
{googleDrivePickerComponent}
|
||||
|
||||
{/* Web Input Dialog (when open) */}
|
||||
{webInputDialogComponent}
|
||||
|
||||
@@ -0,0 +1,76 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { CircularProgress, ListDivider, ListItemDecorator, MenuItem } from '@mui/joy';
|
||||
import AutoFixHighIcon from '@mui/icons-material/AutoFixHigh';
|
||||
|
||||
import type { AgiAttachmentPromptsData } from '~/modules/aifn/agiattachmentprompts/useAgiAttachmentPrompts';
|
||||
|
||||
import type { AttachmentDraft, AttachmentDraftId, AttachmentDraftsAction } from '~/common/attachment-drafts/attachment.types';
|
||||
import type { AttachmentDraftsStoreApi } from '~/common/attachment-drafts/store-attachment-drafts_slice';
|
||||
import type { AttachmentEnrichmentSummary, IAttachmentEnrichment } from '~/common/attachment-drafts/llm-enrichment/attachment.enrichment';
|
||||
import { AttachmentDraftsList } from '~/common/attachment-drafts/attachment-drafts-ui/AttachmentDraftsList';
|
||||
|
||||
import { LLMAttachmentsPromptsButtonMemo } from './LLMAttachmentsPromptsButton';
|
||||
import { ViewDocPartModal } from '../../message/fragments-content/ViewDocPartModal';
|
||||
import { ViewImageRefPartModal } from '../../message/fragments-content/ViewImageRefPartModal';
|
||||
|
||||
|
||||
/**
|
||||
* Composer-specific wrapper around the generic AttachmentDraftsList.
|
||||
* Provides: viewer modals, AI prompts button, "What can I do?" menu item.
|
||||
*/
|
||||
export function ComposerAttachmentDraftsList(props: {
|
||||
attachmentDrafts: AttachmentDraft[],
|
||||
attachmentDraftsStoreApi: AttachmentDraftsStoreApi,
|
||||
enrichment: IAttachmentEnrichment,
|
||||
enrichmentSummary: AttachmentEnrichmentSummary,
|
||||
agiAttachmentPrompts: AgiAttachmentPromptsData,
|
||||
onAttachmentDraftsAction: (attachmentDraftId: AttachmentDraftId | null, actionId: AttachmentDraftsAction) => void,
|
||||
}) {
|
||||
|
||||
const { agiAttachmentPrompts, attachmentDrafts } = props;
|
||||
|
||||
|
||||
// memo components
|
||||
|
||||
const startDecorator = React.useMemo(() =>
|
||||
!agiAttachmentPrompts.isVisible && !agiAttachmentPrompts.hasData ? undefined
|
||||
: <LLMAttachmentsPromptsButtonMemo data={agiAttachmentPrompts} />
|
||||
, [agiAttachmentPrompts]);
|
||||
|
||||
|
||||
// memo rendering functions
|
||||
|
||||
const renderDocViewer = React.useCallback(
|
||||
(part: React.ComponentProps<typeof ViewDocPartModal>['docPart'], onClose: () => void) =>
|
||||
<ViewDocPartModal docPart={part} onClose={onClose} />
|
||||
, []);
|
||||
|
||||
const renderImageViewer = React.useCallback(
|
||||
(part: React.ComponentProps<typeof ViewImageRefPartModal>['imageRefPart'], onClose: () => void) =>
|
||||
<ViewImageRefPartModal imageRefPart={part} onClose={onClose} />
|
||||
, []);
|
||||
|
||||
const renderOverallMenuExtra = React.useCallback(() => <>
|
||||
<MenuItem color='primary' variant='soft' onClick={agiAttachmentPrompts.refetch} disabled={!attachmentDrafts.length || agiAttachmentPrompts.isFetching}>
|
||||
<ListItemDecorator>{agiAttachmentPrompts.isFetching ? <CircularProgress size='sm' /> : <AutoFixHighIcon />}</ListItemDecorator>
|
||||
What can I do?
|
||||
</MenuItem>
|
||||
<ListDivider />
|
||||
</>, [agiAttachmentPrompts.isFetching, agiAttachmentPrompts.refetch, attachmentDrafts.length]);
|
||||
|
||||
|
||||
return (
|
||||
<AttachmentDraftsList
|
||||
attachmentDraftsStoreApi={props.attachmentDraftsStoreApi}
|
||||
attachmentDrafts={attachmentDrafts}
|
||||
enrichment={props.enrichment}
|
||||
enrichmentSummary={props.enrichmentSummary}
|
||||
onAttachmentDraftsAction={props.onAttachmentDraftsAction}
|
||||
startDecorator={startDecorator}
|
||||
renderDocViewer={renderDocViewer}
|
||||
renderImageViewer={renderImageViewer}
|
||||
renderOverallMenuExtra={renderOverallMenuExtra}
|
||||
/>
|
||||
);
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import type { AttachmentDraft } from '~/common/attachment-drafts/attachment.types';
|
||||
import type { DLLM } from '~/common/stores/llms/llms.types';
|
||||
import type { DMessageAttachmentFragment } from '~/common/stores/chat/chat.fragments';
|
||||
import { estimateTokensForFragments } from '~/common/stores/chat/chat.tokens';
|
||||
|
||||
|
||||
export interface LLMAttachmentDraftsCollection {
|
||||
llmAttachmentDrafts: LLMAttachmentDraft[];
|
||||
canAttachAllFragments: boolean;
|
||||
canInlineSomeFragments: boolean;
|
||||
llmTokenCountApprox: number | null;
|
||||
hasImageFragments: boolean;
|
||||
}
|
||||
|
||||
|
||||
export interface LLMAttachmentDraft {
|
||||
attachmentDraft: AttachmentDraft;
|
||||
llmSupportsAllFragments: boolean;
|
||||
llmSupportsTextFragments: boolean;
|
||||
llmTokenCountApprox: number | null;
|
||||
hasImageFragments: boolean;
|
||||
}
|
||||
|
||||
|
||||
export function useLLMAttachmentDrafts(attachmentDrafts: AttachmentDraft[], chatLLM: DLLM | null, chatLLMSupportsImages: boolean): LLMAttachmentDraftsCollection {
|
||||
|
||||
/* [Optimization] Use a Ref to store the previous state of llmAttachmentDrafts and chatLLM
|
||||
*
|
||||
* Note that this works on 2 levels:
|
||||
* - 1. avoids recomputation, but more importantly,
|
||||
* - 2. avoids re-rendering by keeping those llmAttachmentDrafts objects stable.
|
||||
*
|
||||
* Important to notice that the attachmentDraft objects[] are stable to start with, so we can
|
||||
* safely use reference equality to check if internal properties (or order) have changed.
|
||||
*/
|
||||
const prevStateRef = React.useRef<{
|
||||
chatLLM: DLLM | null;
|
||||
llmAttachmentDrafts: LLMAttachmentDraft[];
|
||||
}>({ llmAttachmentDrafts: [], chatLLM: null });
|
||||
|
||||
return React.useMemo(() => {
|
||||
|
||||
// [Optimization]
|
||||
const equalChatLLM = chatLLM === prevStateRef.current.chatLLM;
|
||||
|
||||
// LLM-dependent multi-modal enablement
|
||||
// TODO: consider also Audio inputs, maybe PDF binary inputs
|
||||
// FIXME: reference fragments could refer to non-image as well
|
||||
const imageTypes: DMessageAttachmentFragment['part']['pt'][] = ['reference', 'image_ref'];
|
||||
const supportedTypes: DMessageAttachmentFragment['part']['pt'][] = chatLLMSupportsImages ? [...imageTypes, 'doc'] : ['doc'];
|
||||
const supportedTextTypes: DMessageAttachmentFragment['part']['pt'][] = supportedTypes.filter(pt => pt === 'doc');
|
||||
|
||||
// Add LLM-specific properties to each attachment draft
|
||||
const llmAttachmentDrafts = attachmentDrafts.map((a, index) => {
|
||||
|
||||
// [Optimization] If not change in LLM and the attachmentDraft is the same object reference, reuse the previous LLMAttachmentDraft
|
||||
let prevDraft: LLMAttachmentDraft | undefined = prevStateRef.current.llmAttachmentDrafts[index];
|
||||
// if not found, search by id
|
||||
if (!prevDraft)
|
||||
prevDraft = prevStateRef.current.llmAttachmentDrafts.find(_pd => _pd.attachmentDraft.id === a.id);
|
||||
if (equalChatLLM && prevDraft && prevDraft.attachmentDraft === a)
|
||||
return prevDraft;
|
||||
|
||||
// Otherwise, create a new LLMAttachmentDraft
|
||||
return {
|
||||
attachmentDraft: a,
|
||||
llmSupportsAllFragments: !a.outputFragments ? false : a.outputFragments.every(op => supportedTypes.includes(op.part.pt)),
|
||||
llmSupportsTextFragments: !a.outputFragments ? false : a.outputFragments.some(op => supportedTextTypes.includes(op.part.pt)),
|
||||
llmTokenCountApprox: chatLLM
|
||||
? estimateTokensForFragments(chatLLM, 'user', a.outputFragments, true, 'useLLMAttachmentDrafts')
|
||||
: null,
|
||||
hasImageFragments: !a.outputFragments ? false : a.outputFragments.some(op => imageTypes.includes(op.part.pt)),
|
||||
};
|
||||
});
|
||||
|
||||
// Calculate the overall properties
|
||||
const canAttachAllFragments = llmAttachmentDrafts.every(a => a.llmSupportsAllFragments);
|
||||
const canInlineSomeFragments = llmAttachmentDrafts.some(a => a.llmSupportsTextFragments);
|
||||
const llmTokenCountApprox = chatLLM
|
||||
? llmAttachmentDrafts.reduce((acc, a) => acc + (a.llmTokenCountApprox || 0), 0)
|
||||
: null;
|
||||
const hasImageFragments = llmAttachmentDrafts.some(a => a.hasImageFragments);
|
||||
|
||||
// [Optimization] Update the ref with the new state
|
||||
prevStateRef.current = { llmAttachmentDrafts, chatLLM };
|
||||
|
||||
return {
|
||||
llmAttachmentDrafts,
|
||||
canAttachAllFragments,
|
||||
canInlineSomeFragments,
|
||||
llmTokenCountApprox,
|
||||
hasImageFragments,
|
||||
};
|
||||
|
||||
}, [attachmentDrafts, chatLLM, chatLLMSupportsImages]); // Dependencies for the outer useMemo
|
||||
}
|
||||
@@ -47,9 +47,9 @@ function TokenBadge(props: {
|
||||
const showAltCosts = !!props.showCost && !!costMax && costMin !== undefined;
|
||||
if (showAltCosts) {
|
||||
// Note: switched to 'min cost (>= ...)' on mobile as well, to restore the former behavior, just uncomment the !props.enableHover (a proxy for isMobile)
|
||||
badgeValue = (/*!props.enableHover ||*/ isHovering)
|
||||
? '< ' + formatModelsCost(costMax)
|
||||
: '> ' + formatModelsCost(costMin);
|
||||
badgeValue =
|
||||
// (/*!props.enableHover ||*/ isHovering) ? '< ' + formatModelsCost(costMax) :
|
||||
'> ' + formatModelsCost(costMin);
|
||||
} else {
|
||||
|
||||
// show the direct tokens, unless we exceed the limit and 'showExcess' is enabled
|
||||
@@ -77,7 +77,7 @@ function TokenBadge(props: {
|
||||
slotProps={{
|
||||
root: {
|
||||
sx: {
|
||||
...((props.absoluteBottomRight) && { position: 'absolute', bottom: 8, right: 8 }),
|
||||
...((props.absoluteBottomRight) && { position: 'absolute', bottom: 8, right: '1rem' }),
|
||||
cursor: 'help',
|
||||
...(shallInvisible && {
|
||||
opacity: 0,
|
||||
@@ -92,6 +92,13 @@ function TokenBadge(props: {
|
||||
fontFamily: 'code',
|
||||
fontSize: 'xs',
|
||||
...((props.absoluteBottomRight || props.inline) && { position: 'static', transform: 'none' }),
|
||||
// make it transparent over text
|
||||
// backgroundColor: `rgb(var(--joy-palette-${color}-lightChannel) / 15%)`, // similar to success.50
|
||||
background: 'transparent',
|
||||
boxShadow: 'none', // outline
|
||||
'&:hover': {
|
||||
backgroundColor: `${color}.softHoverBg`,
|
||||
},
|
||||
},
|
||||
},
|
||||
}}
|
||||
|
||||
@@ -33,7 +33,10 @@ const _styles = {
|
||||
} as const,
|
||||
'& nav > ol > li:first-of-type': {
|
||||
overflow: 'hidden',
|
||||
maxWidth: { xs: '110px', md: '140px' },
|
||||
// allow the chat title to use available space, shrinking gracefully when the bar is narrow
|
||||
// NOTE: already performed by virtue of the breadcrumb having agi-ellipsize on the crumbs
|
||||
// flexShrink: 1,
|
||||
// minWidth: '60px',
|
||||
} as const,
|
||||
|
||||
} as const,
|
||||
|
||||
@@ -8,13 +8,14 @@ import SettingsIcon from '@mui/icons-material/Settings';
|
||||
import { findModelVendor } from '~/modules/llms/vendors/vendors.registry';
|
||||
|
||||
import type { DModelsServiceId } from '~/common/stores/llms/llms.service.types';
|
||||
import { DLLM, DLLMId, isLLMVisible } from '~/common/stores/llms/llms.types';
|
||||
import { DLLM, DLLMId, getLLMLabel, isLLMVisible } from '~/common/stores/llms/llms.types';
|
||||
import { DebouncedInputMemo } from '~/common/components/DebouncedInput';
|
||||
import { GoodTooltip } from '~/common/components/GoodTooltip';
|
||||
import { KeyStroke } from '~/common/components/KeyStroke';
|
||||
import { OptimaBarControlMethods, OptimaBarDropdownMemo, OptimaDropdownItems } from '~/common/layout/optima/bar/OptimaBarDropdown';
|
||||
import { findModelsServiceOrNull } from '~/common/stores/llms/store-llms';
|
||||
import { isDeepEqual } from '~/common/util/hooks/useDeep';
|
||||
import { sortLLMsByServiceLabel } from '~/common/stores/llms/components/llms.dropdown.utils';
|
||||
import { optimaActions, optimaOpenModels } from '~/common/layout/optima/useOptima';
|
||||
import { useAllLLMs } from '~/common/stores/llms/hooks/useAllLLMs';
|
||||
import { useModelDomain } from '~/common/stores/llms/hooks/useModelDomain';
|
||||
@@ -65,14 +66,17 @@ function LLMDropdown(props: {
|
||||
return true;
|
||||
|
||||
// filter-out models that don't contain the search string
|
||||
if (lcFilterString && !llm.label.toLowerCase().includes(lcFilterString))
|
||||
if (lcFilterString && !getLLMLabel(llm).toLowerCase().includes(lcFilterString))
|
||||
return false;
|
||||
|
||||
// filter-out hidden models from the dropdown
|
||||
return lcFilterString ? true : isLLMVisible(llm);
|
||||
});
|
||||
|
||||
for (const llm of filteredLLMs) {
|
||||
// sort by service label so vendor groups appear alphabetically (groups remain contiguous because sort is stable on equal keys)
|
||||
const sortedLLMs = sortLLMsByServiceLabel(filteredLLMs);
|
||||
|
||||
for (const llm of sortedLLMs) {
|
||||
// add separators when changing services
|
||||
if (!prevServiceId || llm.sId !== prevServiceId) {
|
||||
const vendor = findModelVendor(llm.vId);
|
||||
@@ -89,7 +93,7 @@ function LLMDropdown(props: {
|
||||
|
||||
// add the model item
|
||||
llmItems[llm.id] = {
|
||||
title: llm.label,
|
||||
title: getLLMLabel(llm),
|
||||
...(llm.userStarred ? { symbol: '⭐' } : {}),
|
||||
// icon: llm.id.startsWith('some vendor') ? <VendorIcon /> : undefined,
|
||||
};
|
||||
|
||||
@@ -16,6 +16,7 @@ import MoreVertIcon from '@mui/icons-material/MoreVert';
|
||||
import StarOutlineRoundedIcon from '@mui/icons-material/StarOutlineRounded';
|
||||
|
||||
import type { DConversationId } from '~/common/stores/chat/chat.conversation';
|
||||
import { ChatBeamIcon } from '~/common/components/icons/ChatBeamIcon';
|
||||
import { CloseablePopup } from '~/common/components/CloseablePopup';
|
||||
import { DFolder, useFolderStore } from '~/common/stores/folders/store-chat-folders';
|
||||
import { DebouncedInputMemo } from '~/common/components/DebouncedInput';
|
||||
@@ -89,6 +90,7 @@ function ChatDrawer(props: {
|
||||
// external state
|
||||
const {
|
||||
clearFilters,
|
||||
filterHasBeamOpen, toggleFilterHasBeamOpen,
|
||||
filterHasDocFragments, toggleFilterHasDocFragments,
|
||||
filterHasImageAssets, toggleFilterHasImageAssets,
|
||||
filterHasStars, toggleFilterHasStars,
|
||||
@@ -98,7 +100,7 @@ function ChatDrawer(props: {
|
||||
} = useChatDrawerFilters();
|
||||
const { activeFolder, allFolders, enableFolders, toggleEnableFolders } = useFolders(props.activeFolderId);
|
||||
const { filteredChatsCount, filteredChatIDs, filteredChatsAreEmpty, filteredChatsBarBasis, filteredChatsIncludeActive, renderNavItems } = useChatDrawerRenderItems(
|
||||
props.activeConversationId, props.chatPanesConversationIds, debouncedSearchQuery, activeFolder, allFolders, filterHasStars, filterHasImageAssets, filterHasDocFragments, filterIsArchived, navGrouping, searchSorting, showRelativeSize, searchDepth,
|
||||
props.activeConversationId, props.chatPanesConversationIds, debouncedSearchQuery, activeFolder, allFolders, filterHasBeamOpen, filterHasStars, filterHasImageAssets, filterHasDocFragments, filterIsArchived, navGrouping, searchSorting, showRelativeSize, searchDepth,
|
||||
);
|
||||
const [uiComplexityMode, contentScaling] = useUIPreferencesStore(useShallow((state) => [state.complexityMode, state.contentScaling]));
|
||||
const zenMode = uiComplexityMode === 'minimal';
|
||||
@@ -240,6 +242,10 @@ function ChatDrawer(props: {
|
||||
<ListItemDecorator>{filterHasDocFragments && <CheckRoundedIcon />}</ListItemDecorator>
|
||||
Has Attachments <AttachFileRoundedIcon />
|
||||
</MenuItem>
|
||||
<MenuItem onClick={toggleFilterHasBeamOpen}>
|
||||
<ListItemDecorator>{filterHasBeamOpen && <CheckRoundedIcon />}</ListItemDecorator>
|
||||
Beam Open <ChatBeamIcon />
|
||||
</MenuItem>
|
||||
|
||||
<ListDivider />
|
||||
<ListItem>
|
||||
@@ -288,10 +294,33 @@ function ChatDrawer(props: {
|
||||
)}
|
||||
</Dropdown>
|
||||
), [
|
||||
filterHasDocFragments, filterHasImageAssets, filterHasStars, isSearching, navGrouping, searchSorting, searchDepth, filterIsArchived, showPersonaIcons, showRelativeSize,
|
||||
toggleFilterHasDocFragments, toggleFilterHasImageAssets, toggleFilterHasStars, toggleFilterIsArchived, toggleShowPersonaIcons, toggleShowRelativeSize,
|
||||
filterHasBeamOpen, filterHasDocFragments, filterHasImageAssets, filterHasStars, isSearching, navGrouping, searchSorting, searchDepth, filterIsArchived, showPersonaIcons, showRelativeSize,
|
||||
toggleFilterHasBeamOpen, toggleFilterHasDocFragments, toggleFilterHasImageAssets, toggleFilterHasStars, toggleFilterIsArchived, toggleShowPersonaIcons, toggleShowRelativeSize,
|
||||
]);
|
||||
|
||||
const displayNavItems = React.useMemo(() => {
|
||||
if (renderLimit === Infinity || renderLimit >= renderNavItems.length) return renderNavItems;
|
||||
|
||||
// return sliced if it contains the active conversation
|
||||
const sliced = renderNavItems.slice(0, renderLimit);
|
||||
if (!props.activeConversationId || sliced.some(i => i.type === 'nav-item-chat-data' && i.conversationId === props.activeConversationId)) return sliced;
|
||||
|
||||
// include the active conversation if it's beyond the fold
|
||||
const activeItem = renderNavItems.find((i, idx) => idx >= renderLimit && i.type === 'nav-item-chat-data' && i.conversationId === props.activeConversationId);
|
||||
return activeItem ? [...sliced, activeItem] : sliced;
|
||||
}, [renderNavItems, renderLimit, props.activeConversationId]);
|
||||
|
||||
|
||||
// when filters/search transition from active to inactive, the active chat may end up
|
||||
// submerged below the fold of a much longer list - scroll it back into view
|
||||
const chatsListRef = React.useRef<HTMLDivElement>(null);
|
||||
const isFiltering = isSearching || filterHasBeamOpen || filterHasDocFragments || filterHasImageAssets || filterHasStars || filterIsArchived;
|
||||
React.useLayoutEffect(() => {
|
||||
if (isFiltering) return;
|
||||
const activeEl = chatsListRef.current?.querySelector('[aria-current="true"]') as HTMLElement | null;
|
||||
activeEl?.scrollIntoView({ block: 'nearest' });
|
||||
}, [isFiltering]);
|
||||
|
||||
|
||||
return <>
|
||||
|
||||
@@ -379,8 +408,8 @@ function ChatDrawer(props: {
|
||||
</Box>
|
||||
|
||||
{/* Chat Titles List (shrink as half the rate as the Folders List) */}
|
||||
<Box sx={{ flexGrow: 1, flexShrink: 1, flexBasis: '20rem', overflowY: 'auto', ...themeScalingMap[contentScaling].chatDrawerItemSx }}>
|
||||
{renderNavItems.slice(0, renderLimit).map((item, idx) => item.type === 'nav-item-chat-data' ? (
|
||||
<Box key='chatlist' ref={chatsListRef} sx={{ flexGrow: 1, flexShrink: 1, flexBasis: '20rem', overflowY: 'auto', ...themeScalingMap[contentScaling].chatDrawerItemSx }}>
|
||||
{displayNavItems.map((item, idx) => item.type === 'nav-item-chat-data' ? (
|
||||
<ChatDrawerItemMemo
|
||||
key={'nav-chat-' + item.conversationId}
|
||||
item={item}
|
||||
@@ -411,7 +440,7 @@ function ChatDrawer(props: {
|
||||
{filterHasStars && <StarOutlineRoundedIcon sx={{ color: 'primary.softColor', fontSize: 'xl', mb: -0.5, mr: 1 }} />}
|
||||
{item.message}
|
||||
</Typography>
|
||||
{(filterHasStars || filterHasImageAssets || filterHasDocFragments || filterIsArchived) && (
|
||||
{(filterHasBeamOpen || filterHasStars || filterHasImageAssets || filterHasDocFragments || filterIsArchived) && (
|
||||
<Tooltip title='Clear Filters'>
|
||||
<IconButton size='sm' color='primary' onClick={clearFilters}>
|
||||
<ClearIcon />
|
||||
|
||||
@@ -282,7 +282,7 @@ function ChatDrawerItem(props: {
|
||||
{searchFrequency > 0 ? (
|
||||
// Display search frequency if it exists and is greater than 0
|
||||
<Typography level='body-sm'>
|
||||
{searchFrequency}
|
||||
{Math.round(searchFrequency * 10) / 10}
|
||||
</Typography>
|
||||
) : (props.showSymbols && (userFlagsSummary || containsDocAttachments || containsImageAssets)) ? (
|
||||
<Box sx={{
|
||||
@@ -308,6 +308,7 @@ function ChatDrawerItem(props: {
|
||||
|
||||
// Active or Also Open
|
||||
<Sheet
|
||||
aria-current={isActive ? 'true' : undefined}
|
||||
variant={isActive ? 'solid' : 'outlined'}
|
||||
invertedColors={isActive}
|
||||
onClick={!isActive ? handleConversationActivate : undefined}
|
||||
|
||||
@@ -5,7 +5,7 @@ import { useModuleBeamStore } from '~/modules/beam/store-module-beam';
|
||||
import type { DFolder } from '~/common/stores/folders/store-chat-folders';
|
||||
import { DMessage, DMessageUserFlag, MESSAGE_FLAG_STARRED, messageFragmentsReduceText, messageHasUserFlag, messageUserFlagToEmoji } from '~/common/stores/chat/chat.message';
|
||||
import { conversationTitle, DConversationId } from '~/common/stores/chat/chat.conversation';
|
||||
import { getLocalMidnightInUTCTimestamp, getTimeBucketEn } from '~/common/util/timeUtils';
|
||||
import { createTimeBucketClassifierEn } from '~/common/util/timeUtils';
|
||||
import { isAttachmentFragment, isContentOrAttachmentFragment, isDocPart, isImageRefPart, isZyncAssetImageReferencePart } from '~/common/stores/chat/chat.fragments';
|
||||
import { shallowEquals } from '~/common/util/hooks/useShallowObject';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats';
|
||||
@@ -86,6 +86,7 @@ export function useChatDrawerRenderItems(
|
||||
filterByQuery: string,
|
||||
activeFolder: DFolder | null,
|
||||
allFolders: DFolder[],
|
||||
filterHasBeamOpen: boolean,
|
||||
filterHasStars: boolean,
|
||||
filterHasImageAssets: boolean,
|
||||
filterHasDocFragments: boolean,
|
||||
@@ -146,7 +147,8 @@ export function useChatDrawerRenderItems(
|
||||
}
|
||||
|
||||
// filter for required attributes
|
||||
if ((filterHasStars && !hasStars) || (filterHasImageAssets && !hasImages) || (filterHasDocFragments && !hasDocs))
|
||||
const hasBeamOpen = openBeamConversationIds[_c.id];
|
||||
if ((filterHasBeamOpen && !hasBeamOpen) || (filterHasStars && !hasStars) || (filterHasImageAssets && !hasImages) || (filterHasDocFragments && !hasDocs))
|
||||
return null;
|
||||
|
||||
// rich properties
|
||||
@@ -186,7 +188,7 @@ export function useChatDrawerRenderItems(
|
||||
? allFolders.find(folder => folder.conversationIds.includes(_c.id)) ?? null
|
||||
: null,
|
||||
updatedAt: _c.updated || _c.created || 0,
|
||||
hasBeamOpen: !!openBeamConversationIds?.[_c.id],
|
||||
hasBeamOpen,
|
||||
messageCount,
|
||||
beingGenerated: !!_c._abortController, // FIXME: when the AbortController is moved at the message level, derive the state in the conv
|
||||
systemPurposeId: _c.systemPurposeId,
|
||||
@@ -235,14 +237,14 @@ export function useChatDrawerRenderItems(
|
||||
break;
|
||||
}
|
||||
|
||||
const midnightTime = getLocalMidnightInUTCTimestamp();
|
||||
const getTimeBucket = createTimeBucketClassifierEn();
|
||||
const grouped = chatNavItems.reduce((acc, item) => {
|
||||
|
||||
// derive the bucket name
|
||||
let bucket: string;
|
||||
switch (grouping) {
|
||||
case 'date':
|
||||
bucket = getTimeBucketEn(item.updatedAt || midnightTime, midnightTime);
|
||||
bucket = getTimeBucket(item.updatedAt || Date.now());
|
||||
break;
|
||||
case 'persona':
|
||||
bucket = item.systemPurposeId;
|
||||
@@ -287,19 +289,21 @@ export function useChatDrawerRenderItems(
|
||||
renderNavItems.push({
|
||||
type: 'nav-item-info-message',
|
||||
message: (filterHasStars && (filterHasImageAssets || filterHasDocFragments)) ? 'No results'
|
||||
: filterHasDocFragments ? 'No attachment results'
|
||||
: filterHasImageAssets ? 'No image results'
|
||||
: filterHasStars ? 'No starred results'
|
||||
: filterIsArchived ? 'No archived conversations'
|
||||
: isSearching ? 'Text not found'
|
||||
: 'No conversations in folder',
|
||||
: filterHasBeamOpen ? 'No beam conversations'
|
||||
: filterHasDocFragments ? 'No attachment results'
|
||||
: filterHasImageAssets ? 'No image results'
|
||||
: filterHasStars ? 'No starred results'
|
||||
: filterIsArchived ? 'No archived conversations'
|
||||
: isSearching ? 'Text not found'
|
||||
: 'No conversations in folder',
|
||||
});
|
||||
} else {
|
||||
// filtering reminder (will be rendered with a clear button too)
|
||||
if (filterHasStars || filterHasImageAssets || filterHasDocFragments || filterIsArchived) {
|
||||
if (filterHasBeamOpen || filterHasStars || filterHasImageAssets || filterHasDocFragments || filterIsArchived) {
|
||||
renderNavItems.unshift({
|
||||
type: 'nav-item-info-message',
|
||||
message: `${filterIsArchived ? 'Showing' : 'Filtering by'} ${[
|
||||
filterHasBeamOpen && 'beam',
|
||||
filterHasStars && 'stars',
|
||||
filterHasImageAssets && 'images',
|
||||
filterHasDocFragments && 'attachments',
|
||||
|
||||
@@ -6,7 +6,6 @@ import AddIcon from '@mui/icons-material/Add';
|
||||
import ArchiveOutlinedIcon from '@mui/icons-material/ArchiveOutlined';
|
||||
import CleaningServicesOutlinedIcon from '@mui/icons-material/CleaningServicesOutlined';
|
||||
import CompressIcon from '@mui/icons-material/Compress';
|
||||
import EngineeringIcon from '@mui/icons-material/Engineering';
|
||||
import ForkRightIcon from '@mui/icons-material/ForkRight';
|
||||
import KeyboardArrowDownIcon from '@mui/icons-material/KeyboardArrowDown';
|
||||
import RestartAltIcon from '@mui/icons-material/RestartAlt';
|
||||
@@ -14,15 +13,14 @@ import SettingsSuggestOutlinedIcon from '@mui/icons-material/SettingsSuggestOutl
|
||||
import UnarchiveOutlinedIcon from '@mui/icons-material/UnarchiveOutlined';
|
||||
|
||||
import type { DConversationId } from '~/common/stores/chat/chat.conversation';
|
||||
import { ChromelessItemButton } from '~/common/layout/optima/ChromelessItemButton';
|
||||
import { CodiconSplitHorizontal } from '~/common/components/icons/CodiconSplitHorizontal';
|
||||
import { CodiconSplitHorizontalRemove } from '~/common/components/icons/CodiconSplitHorizontalRemove';
|
||||
import { CodiconSplitVertical } from '~/common/components/icons/CodiconSplitVertical';
|
||||
import { CodiconSplitVerticalRemove } from '~/common/components/icons/CodiconSplitVerticalRemove';
|
||||
import { FormLabelStart } from '~/common/components/forms/FormLabelStart';
|
||||
import { OptimaPanelGroupedList, OptimaPanelGroupGutter } from '~/common/layout/optima/panel/OptimaPanelGroupedList';
|
||||
import { optimaActions } from '~/common/layout/optima/useOptima';
|
||||
import { useChatStore } from '~/common/stores/chat/store-chats'; // may be replaced with a dedicated hook for the chat pane
|
||||
import { useLabsDevMode } from '~/common/stores/store-ux-labs';
|
||||
|
||||
import { useChatShowSystemMessages } from '../../store-app-chat';
|
||||
import { panesManagerActions, usePaneDuplicateOrClose } from '../panes/store-panes-manager';
|
||||
@@ -40,6 +38,7 @@ function VariformPaneFrame() {
|
||||
|
||||
|
||||
export function ChatPane(props: {
|
||||
isMobile: boolean,
|
||||
conversationId: DConversationId | null,
|
||||
disableItems: boolean,
|
||||
hasConversations: boolean,
|
||||
@@ -55,7 +54,6 @@ export function ChatPane(props: {
|
||||
// external state
|
||||
const { canAddPane, isMultiPane } = usePaneDuplicateOrClose();
|
||||
const [showSystemMessages, setShowSystemMessages] = useChatShowSystemMessages();
|
||||
const labsDevMode = useLabsDevMode();
|
||||
|
||||
const { isArchived, setArchived } = useChatStore(useShallow((state) => {
|
||||
const conversation = state.conversations.find(_c => _c.id === props.conversationId);
|
||||
@@ -147,6 +145,8 @@ export function ChatPane(props: {
|
||||
</ListItemButton>
|
||||
</ListItem>
|
||||
|
||||
{props.isMobile && <ChromelessItemButton />}
|
||||
|
||||
</OptimaPanelGroupedList>
|
||||
|
||||
{/* Chat Actions group */}
|
||||
@@ -213,15 +213,5 @@ export function ChatPane(props: {
|
||||
</ListItemButton>
|
||||
</OptimaPanelGroupedList>
|
||||
|
||||
{/* [DEV] Development */}
|
||||
{labsDevMode && (
|
||||
<OptimaPanelGroupedList title='[Developers]'>
|
||||
<MenuItem onClick={optimaActions().openAIXDebugger}>
|
||||
<ListItemDecorator><EngineeringIcon /></ListItemDecorator>
|
||||
AIX: Show Last Request...
|
||||
</MenuItem>
|
||||
</OptimaPanelGroupedList>
|
||||
)}
|
||||
|
||||
</>;
|
||||
}
|
||||
@@ -36,7 +36,7 @@ const optionGroupSx: SxProps = {
|
||||
flexDirection: 'column',
|
||||
alignItems: 'flex-start',
|
||||
gap: 0,
|
||||
};
|
||||
} as const;
|
||||
|
||||
const optionSx: SxProps = {
|
||||
// style
|
||||
@@ -52,7 +52,19 @@ const optionSx: SxProps = {
|
||||
|
||||
// layout
|
||||
justifyContent: 'flex-start',
|
||||
};
|
||||
} as const;
|
||||
|
||||
const optionBoldSx: SxProps = {
|
||||
...optionSx,
|
||||
fontWeight: 'lg',
|
||||
} as const;
|
||||
|
||||
|
||||
// '1. **text**' -> '1. text', or: **text** -> text
|
||||
function _stripMarkdownBold(text: string): { text: string; isBold: boolean } {
|
||||
const stripped = text.replace(/(\*{2,})(.+)\1\s*$/, '$2').trimEnd();
|
||||
return { text: stripped, isBold: stripped !== text };
|
||||
}
|
||||
|
||||
|
||||
export function optionsExtractFromFragments_dangerModifyFragment(enabled: boolean, fragments: InterleavedFragment[]): { fragments: InterleavedFragment[], options: string[] } {
|
||||
@@ -164,21 +176,25 @@ export function BlockOpOptions(props: {
|
||||
options: string[],
|
||||
onContinue: (continueText: null | string) => void,
|
||||
}) {
|
||||
const buttonSx = React.useMemo(() => ({ ...optionSx, fontSize: props.contentScaling }), [props.contentScaling]);
|
||||
const normalSx = React.useMemo(() => ({ ...optionSx, fontSize: props.contentScaling }), [props.contentScaling]);
|
||||
const boldSx = React.useMemo(() => ({ ...optionBoldSx, fontSize: props.contentScaling }), [props.contentScaling]);
|
||||
return (
|
||||
<Box sx={optionGroupSx}>
|
||||
{props.options.map((option, index) => (
|
||||
<Button
|
||||
key={index}
|
||||
color={OPTION_ACTIVE_COLOR}
|
||||
variant='soft'
|
||||
size={props.contentScaling === 'md' ? 'md' : 'sm'}
|
||||
onClick={() => props.onContinue(option.endsWith('?') ? option.slice(0, -1) : option)}
|
||||
sx={buttonSx}
|
||||
>
|
||||
{option}
|
||||
</Button>
|
||||
))}
|
||||
{props.options.map((option, index) => {
|
||||
const { text, isBold } = _stripMarkdownBold(option);
|
||||
return (
|
||||
<Button
|
||||
key={index}
|
||||
color={OPTION_ACTIVE_COLOR}
|
||||
variant='soft'
|
||||
size={props.contentScaling === 'md' ? 'md' : 'sm'}
|
||||
onClick={() => props.onContinue(text.endsWith('?') ? text.slice(0, -1) : text)}
|
||||
sx={isBold ? boldSx : normalSx}
|
||||
>
|
||||
{text}
|
||||
</Button>
|
||||
);
|
||||
})}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,57 +1,90 @@
|
||||
import * as React from 'react';
|
||||
import TimeAgo from 'react-timeago';
|
||||
|
||||
import { Box, Button, ButtonGroup, Tooltip, Typography } from '@mui/joy';
|
||||
import DownloadIcon from '@mui/icons-material/Download';
|
||||
import LinkOffRoundedIcon from '@mui/icons-material/LinkOffRounded';
|
||||
import PlayArrowRoundedIcon from '@mui/icons-material/PlayArrowRounded';
|
||||
import StopRoundedIcon from '@mui/icons-material/StopRounded';
|
||||
|
||||
import type { AixReattachMode } from '~/modules/aix/client/aix.client';
|
||||
|
||||
import type { DMessageGenerator } from '~/common/stores/chat/chat.message';
|
||||
|
||||
|
||||
const ARM_TIMEOUT_MS = 4000;
|
||||
|
||||
|
||||
/**
|
||||
* FIXME: COMPLETE THIS
|
||||
* Resume controls for an upstream-stored run.
|
||||
* - Resume: SSE replay (live deltas) - canonical path. Always offered when onResume exists.
|
||||
* - Recover: one-shot JSON GET - shown only for vendors that benefit from it (Gemini Interactions).
|
||||
* - Detach: abort the local fetch but leave the upstream run alive. Visible only when a resume
|
||||
* is in-flight (`inFlightMode != null`). Resume/Recover stay available afterwards.
|
||||
* - Stop: terminate the upstream run + delete the resource.
|
||||
*
|
||||
* IMPORTANT: in-flight state is owned by the parent (`inFlightMode` + `onDetach`) so it survives
|
||||
* remounts that happen while a long-running stream is active (e.g. Deep Research).
|
||||
*/
|
||||
export function BlockOpUpstreamResume(props: {
|
||||
upstreamHandle: Exclude<DMessageGenerator['upstreamHandle'], undefined>,
|
||||
onResume?: () => void | Promise<void>;
|
||||
onCancel?: () => void | Promise<void>;
|
||||
pending?: boolean; // true iff a local in-flight op (initial POST or resume); drives the state machine + hides the expiry footer
|
||||
inFlightMode?: AixReattachMode; // set by the parent while a resume is in flight; drives the loading/Detach UI
|
||||
onResume?: (mode: AixReattachMode) => void | Promise<void>;
|
||||
onDetach?: () => void;
|
||||
onDelete?: () => void | Promise<void>;
|
||||
}) {
|
||||
|
||||
// state
|
||||
const [isResuming, setIsResuming] = React.useState(false);
|
||||
const [isCancelling, setIsCancelling] = React.useState(false);
|
||||
// local state - only for short-lived ops the parent doesn't own
|
||||
const [isDeleting, setIsDeleting] = React.useState(false);
|
||||
const [deleteArmed, setDeleteArmed] = React.useState(false);
|
||||
const [error, setError] = React.useState<string | null>(null);
|
||||
|
||||
// expiration: boolean is evaluated at render (may lag briefly if nothing re-renders past expiry).
|
||||
const { expiresAt /*, runId = ''*/ } = props.upstreamHandle;
|
||||
|
||||
// State machine - mutually exclusive triplet (idle | initial-POST | resume | recover):
|
||||
// - Idle : !pending - run not active locally (incl. post-reload, since
|
||||
// chats.converters.ts clears pendingIncomplete on hydrate).
|
||||
// - Initial POST : pending && !inFlightMode - first generation streaming.
|
||||
// - Resume replay : pending && mode='replay' - we own this resume cycle.
|
||||
// - Recover snap : pending && mode='snapshot' - we own this snapshot fetch.
|
||||
//
|
||||
// Visibility matrix (see BlockOpUpstreamResume props doc):
|
||||
// Resume Recover Detach Cancel
|
||||
// Idle ✅ ✅¹ — ✅
|
||||
// Initial POST — — — ✅
|
||||
// Resume in flight — — ✅ ✅
|
||||
// Recover in flight — ✅² — —
|
||||
// ¹ only for Gemini Interactions ² with loading spinner
|
||||
const isReplaying = props.inFlightMode === 'replay';
|
||||
const isSnapshotting = props.inFlightMode === 'snapshot';
|
||||
const isIdle = !props.pending;
|
||||
|
||||
const canRecoverVendor = props.upstreamHandle.uht === 'vnd.gem.interactions';
|
||||
const showResume = isIdle && !!props.onResume;
|
||||
const showRecover = (isIdle || isSnapshotting) && !!props.onResume && canRecoverVendor;
|
||||
const showDetach = isReplaying && !!props.onDetach;
|
||||
const showCancel = !isSnapshotting && !!props.onDelete;
|
||||
|
||||
// handlers
|
||||
|
||||
const handleResume = React.useCallback(async () => {
|
||||
const handleResume = React.useCallback((mode: AixReattachMode) => {
|
||||
if (!props.onResume) return;
|
||||
setError(null);
|
||||
setIsResuming(true);
|
||||
try {
|
||||
await props.onResume();
|
||||
} catch (err: any) {
|
||||
setError(err?.message || 'Resume failed');
|
||||
} finally {
|
||||
setIsResuming(false);
|
||||
}
|
||||
}, [props]);
|
||||
|
||||
const handleCancel = React.useCallback(async () => {
|
||||
if (!props.onCancel) return;
|
||||
setError(null);
|
||||
setIsCancelling(true);
|
||||
try {
|
||||
await props.onCancel();
|
||||
} catch (err: any) {
|
||||
setError(err?.message || 'Cancel failed');
|
||||
} finally {
|
||||
setIsCancelling(false);
|
||||
}
|
||||
// fire-and-forget: parent owns the promise lifecycle and the abort controller.
|
||||
// If it rejects, the parent surfaces the error via its own UI; we stay silent.
|
||||
Promise.resolve(props.onResume(mode)).catch(() => { /* parent handles */ });
|
||||
}, [props]);
|
||||
|
||||
// Two-click arm: first click arms (visible red "Confirm?"), second click (within ARM_TIMEOUT_MS) executes.
|
||||
const handleDelete = React.useCallback(async () => {
|
||||
if (!props.onDelete) return;
|
||||
if (!deleteArmed) {
|
||||
setDeleteArmed(true);
|
||||
return;
|
||||
}
|
||||
setDeleteArmed(false);
|
||||
setError(null);
|
||||
setIsDeleting(true);
|
||||
try {
|
||||
@@ -61,7 +94,14 @@ export function BlockOpUpstreamResume(props: {
|
||||
} finally {
|
||||
setIsDeleting(false);
|
||||
}
|
||||
}, [props]);
|
||||
}, [deleteArmed, props]);
|
||||
|
||||
// Auto-disarm after ARM_TIMEOUT_MS so the armed state can't leak into a later session
|
||||
React.useEffect(() => {
|
||||
if (!deleteArmed) return;
|
||||
const t = setTimeout(() => setDeleteArmed(false), ARM_TIMEOUT_MS);
|
||||
return () => clearTimeout(t);
|
||||
}, [deleteArmed]);
|
||||
|
||||
return (
|
||||
<Box
|
||||
@@ -74,41 +114,55 @@ export function BlockOpUpstreamResume(props: {
|
||||
}}
|
||||
>
|
||||
<ButtonGroup>
|
||||
{props.onResume && (
|
||||
<Tooltip title='Resume generation from last checkpoint'>
|
||||
{showResume && (
|
||||
<Tooltip title='Resume by re-streaming from the upstream run'>
|
||||
<Button
|
||||
disabled={isResuming || isCancelling || isDeleting}
|
||||
loading={isResuming}
|
||||
startDecorator={<PlayArrowRoundedIcon sx={{ color: 'success.solidBg' }} />}
|
||||
onClick={handleResume}
|
||||
disabled={isDeleting}
|
||||
startDecorator={<PlayArrowRoundedIcon color='success' />}
|
||||
onClick={() => handleResume('replay')}
|
||||
>
|
||||
Resume
|
||||
</Button>
|
||||
</Tooltip>
|
||||
)}
|
||||
|
||||
{props.onCancel && (
|
||||
<Tooltip title='Cancel the response generation'>
|
||||
{showRecover && (
|
||||
<Tooltip title='Fetch the result without streaming - recovers stuck or hung runs'>
|
||||
<Button
|
||||
disabled={isResuming || isCancelling || isDeleting}
|
||||
loading={isCancelling}
|
||||
// startDecorator={<CancelIcon />}
|
||||
onClick={handleCancel}
|
||||
disabled={isDeleting}
|
||||
loading={isSnapshotting}
|
||||
loadingPosition='start'
|
||||
startDecorator={<DownloadIcon />}
|
||||
onClick={() => handleResume('snapshot')}
|
||||
>
|
||||
Cancel
|
||||
Recover
|
||||
</Button>
|
||||
</Tooltip>
|
||||
)}
|
||||
|
||||
{props.onDelete && (
|
||||
<Tooltip title='Delete the stored response'>
|
||||
{showDetach && (
|
||||
<Tooltip title='Close this connection only - the upstream run keeps going. Click Resume or Recover later to fetch results.'>
|
||||
<Button
|
||||
disabled={isDeleting}
|
||||
startDecorator={<LinkOffRoundedIcon />}
|
||||
onClick={props.onDetach}
|
||||
>
|
||||
Detach
|
||||
</Button>
|
||||
</Tooltip>
|
||||
)}
|
||||
|
||||
{showCancel && (
|
||||
<Tooltip title={deleteArmed ? 'Click again to confirm - cancels the upstream run and clears the handle' : 'Cancel the upstream run'}>
|
||||
<Button
|
||||
loading={isDeleting}
|
||||
// startDecorator={<DeleteIcon />}
|
||||
color={deleteArmed ? 'danger' : 'neutral'}
|
||||
variant={deleteArmed ? 'solid' : 'outlined'}
|
||||
startDecorator={<StopRoundedIcon />}
|
||||
onClick={handleDelete}
|
||||
disabled={isResuming || isCancelling || isDeleting}
|
||||
disabled={isDeleting}
|
||||
>
|
||||
Delete
|
||||
{deleteArmed ? 'Confirm?' : 'Cancel'}
|
||||
</Button>
|
||||
</Tooltip>
|
||||
)}
|
||||
@@ -120,9 +174,11 @@ export function BlockOpUpstreamResume(props: {
|
||||
</Typography>
|
||||
)}
|
||||
|
||||
<Typography level='body-xs' sx={{ fontSize: '0.65rem', opacity: 0.6 }}>
|
||||
Response ID: {props.upstreamHandle.responseId.slice(0, 12)}...
|
||||
</Typography>
|
||||
{!props.pending && !!expiresAt && <Typography level='body-xs' sx={{ fontSize: '0.65rem', opacity: 0.6 }}>
|
||||
{/*Run ID: {runId.slice(0, 12)}...*/}
|
||||
{/*{!!expiresAt && <> · Expires <TimeAgo date={expiresAt} /></>}*/}
|
||||
Expires <TimeAgo date={expiresAt} />
|
||||
</Typography>}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -5,8 +5,6 @@ import TimeAgo from 'react-timeago';
|
||||
import type { SxProps } from '@mui/joy/styles/types';
|
||||
import { Box, ButtonGroup, CircularProgress, Divider, IconButton, ListDivider, ListItem, ListItemDecorator, MenuItem, Switch, Tooltip, Typography } from '@mui/joy';
|
||||
import { ClickAwayListener, Popper } from '@mui/base';
|
||||
import AccountTreeOutlinedIcon from '@mui/icons-material/AccountTreeOutlined';
|
||||
import AlternateEmailIcon from '@mui/icons-material/AlternateEmail';
|
||||
import CheckRoundedIcon from '@mui/icons-material/CheckRounded';
|
||||
import CloseRoundedIcon from '@mui/icons-material/CloseRounded';
|
||||
import ContentCopyIcon from '@mui/icons-material/ContentCopy';
|
||||
@@ -17,11 +15,10 @@ import EditRoundedIcon from '@mui/icons-material/EditRounded';
|
||||
import ForkRightIcon from '@mui/icons-material/ForkRight';
|
||||
import FormatBoldIcon from '@mui/icons-material/FormatBold';
|
||||
import FormatPaintOutlinedIcon from '@mui/icons-material/FormatPaintOutlined';
|
||||
import InsertLinkIcon from '@mui/icons-material/InsertLink';
|
||||
import InfoOutlinedIcon from '@mui/icons-material/InfoOutlined';
|
||||
import MoreVertIcon from '@mui/icons-material/MoreVert';
|
||||
import NotificationsActiveIcon from '@mui/icons-material/NotificationsActive';
|
||||
import NotificationsOutlinedIcon from '@mui/icons-material/NotificationsOutlined';
|
||||
import RecordVoiceOverOutlinedIcon from '@mui/icons-material/RecordVoiceOverOutlined';
|
||||
import ReplayIcon from '@mui/icons-material/Replay';
|
||||
import ReplyAllRoundedIcon from '@mui/icons-material/ReplyAllRounded';
|
||||
import ReplyRoundedIcon from '@mui/icons-material/ReplyRounded';
|
||||
@@ -32,27 +29,32 @@ import VerticalAlignBottomIcon from '@mui/icons-material/VerticalAlignBottom';
|
||||
import VisibilityIcon from '@mui/icons-material/Visibility';
|
||||
import VisibilityOffIcon from '@mui/icons-material/VisibilityOff';
|
||||
|
||||
import type { AixReattachMode } from '~/modules/aix/client/aix.client';
|
||||
import { ModelVendorAnthropic } from '~/modules/llms/vendors/anthropic/anthropic.vendor';
|
||||
|
||||
import { AnthropicIcon } from '~/common/components/icons/vendors/AnthropicIcon';
|
||||
import { ChatBeamIcon } from '~/common/components/icons/ChatBeamIcon';
|
||||
import { CloseablePopup } from '~/common/components/CloseablePopup';
|
||||
import { DMessage, DMessageId, DMessageUserFlag, DMetaReferenceItem, MESSAGE_FLAG_AIX_SKIP, MESSAGE_FLAG_NOTIFY_COMPLETE, MESSAGE_FLAG_STARRED, MESSAGE_FLAG_VND_ANT_CACHE_AUTO, MESSAGE_FLAG_VND_ANT_CACHE_USER, messageFragmentsReduceText, messageHasUserFlag } from '~/common/stores/chat/chat.message';
|
||||
import { DMessage, DMessageGenerator, DMessageId, DMessageUserFlag, DMetaReferenceItem, MESSAGE_FLAG_AIX_SKIP, MESSAGE_FLAG_NOTIFY_COMPLETE, MESSAGE_FLAG_STARRED, MESSAGE_FLAG_VND_ANT_CACHE_AUTO, MESSAGE_FLAG_VND_ANT_CACHE_USER, messageFragmentsReduceText, messageHasUserFlag } from '~/common/stores/chat/chat.message';
|
||||
import { KeyStroke } from '~/common/components/KeyStroke';
|
||||
import { MarkHighlightIcon } from '~/common/components/icons/MarkHighlightIcon';
|
||||
import { PhTreeStructure } from '~/common/components/icons/phosphor/PhTreeStructure';
|
||||
import { PhVoice } from '~/common/components/icons/phosphor/PhVoice';
|
||||
import { Release } from '~/common/app.release';
|
||||
import { StarredState } from '~/common/components/StarIcons';
|
||||
import { TooltipOutlined } from '~/common/components/TooltipOutlined';
|
||||
import { adjustContentScaling, themeScalingMap, themeZIndexChatBubble } from '~/common/app.theme';
|
||||
import { avatarIconSx, makeMessageAvatarIcon, messageBackground, useMessageAvatarLabel } from '~/common/util/dMessageUtils';
|
||||
import { copyToClipboard } from '~/common/util/clipboardUtils';
|
||||
import { clipboardCopyDOMSelectionOrFallback, copyToClipboard } from '~/common/util/clipboardUtils';
|
||||
import { createTextContentFragment, DMessageFragment, DMessageFragmentId, updateFragmentWithEditedText } from '~/common/stores/chat/chat.fragments';
|
||||
import { useFragmentBuckets } from '~/common/stores/chat/hooks/useFragmentBuckets';
|
||||
import { useUIPreferencesStore } from '~/common/stores/store-ui';
|
||||
import { useUXLabsStore } from '~/common/stores/store-ux-labs';
|
||||
|
||||
import { BlockOpContinue } from './BlockOpContinue';
|
||||
import { BlockOpOptions, optionsExtractFromFragments_dangerModifyFragment } from './BlockOpOptions';
|
||||
import { BlockOpUpstreamResume } from './BlockOpUpstreamResume';
|
||||
import { ChatMessageEditAttachments, type EditModeAttachmentsHandle } from './ChatMessageEditAttachments';
|
||||
import { ChatMessageInfoPopup } from './ChatMessageInfoPopup';
|
||||
import { ContentFragments } from './fragments-content/ContentFragments';
|
||||
import { DocumentAttachmentFragments } from './fragments-attachment-doc/DocumentAttachmentFragments';
|
||||
import { ImageAttachmentFragments } from './fragments-attachment-image/ImageAttachmentFragments';
|
||||
@@ -69,7 +71,7 @@ const ENABLE_BUBBLE = true;
|
||||
export const BUBBLE_MIN_TEXT_LENGTH = 3;
|
||||
|
||||
// Enable the hover button to copy the whole message. The Copy button is also available in Blocks, or in the Avatar Menu.
|
||||
const ENABLE_COPY_MESSAGE_OVERLAY: boolean = false;
|
||||
// const ENABLE_COPY_MESSAGE_OVERLAY: boolean = false;
|
||||
|
||||
|
||||
const messageBodySx: SxProps = {
|
||||
@@ -160,6 +162,10 @@ export function ChatMessage(props: {
|
||||
onMessageBeam?: (messageId: string) => Promise<void>,
|
||||
onMessageBranch?: (messageId: string) => void,
|
||||
onMessageContinue?: (messageId: string, continueText: null | string) => void,
|
||||
onMessageUpstreamResume?: (generator: DMessageGenerator, messageId: string, mode: AixReattachMode) => Promise<void>,
|
||||
onMessageUpstreamDetach?: (messageId: string) => void,
|
||||
onMessageUpstreamDelete?: (generator: DMessageGenerator, messageId: string) => Promise<void>,
|
||||
upstreamResumeMode?: AixReattachMode, // set by parent while a resume is in flight on this message
|
||||
onMessageDelete?: (messageId: string) => void,
|
||||
onMessageFragmentAppend?: (messageId: DMessageId, fragment: DMessageFragment) => void
|
||||
onMessageFragmentDelete?: (messageId: DMessageId, fragmentId: DMessageFragmentId) => void,
|
||||
@@ -180,6 +186,8 @@ export function ChatMessage(props: {
|
||||
const [contextMenuAnchor, setContextMenuAnchor] = React.useState<HTMLElement | null>(null);
|
||||
const [opsMenuAnchor, setOpsMenuAnchor] = React.useState<HTMLElement | null>(null);
|
||||
const [textContentEditState, setTextContentEditState] = React.useState<ChatMessageTextPartEditState | null>(null);
|
||||
const [showInfoModal, setShowInfoModal] = React.useState(false);
|
||||
const attachmentsEditRef = React.useRef<EditModeAttachmentsHandle>(null);
|
||||
|
||||
// external state
|
||||
const { adjContentScaling, disableMarkdown, doubleClickToEdit, uiComplexityMode } = useUIPreferencesStore(useShallow(state => ({
|
||||
@@ -188,7 +196,6 @@ export function ChatMessage(props: {
|
||||
doubleClickToEdit: state.doubleClickToEdit,
|
||||
uiComplexityMode: state.complexityMode,
|
||||
})));
|
||||
const labsEnhanceCodeBlocks = useUXLabsStore(state => state.labsEnhanceCodeBlocks);
|
||||
const [showDiff, setShowDiff] = useChatShowTextDiff();
|
||||
|
||||
|
||||
@@ -243,7 +250,7 @@ export function ChatMessage(props: {
|
||||
// const wordsDiff = useWordsDifference(textSubject, props.diffPreviousText, showDiff);
|
||||
|
||||
|
||||
const { onMessageAssistantFrom, onMessageDelete, onMessageFragmentAppend, onMessageFragmentDelete, onMessageFragmentReplace, onMessageContinue } = props;
|
||||
const { onMessageAssistantFrom, onMessageDelete, onMessageFragmentAppend, onMessageFragmentDelete, onMessageFragmentReplace, onMessageContinue, onMessageUpstreamResume, onMessageUpstreamDetach, onMessageUpstreamDelete } = props;
|
||||
|
||||
const handleFragmentNew = React.useCallback(() => {
|
||||
onMessageFragmentAppend?.(messageId, createTextContentFragment(''));
|
||||
@@ -261,6 +268,20 @@ export function ChatMessage(props: {
|
||||
onMessageContinue?.(messageId, continueText);
|
||||
}, [messageId, onMessageContinue]);
|
||||
|
||||
const handleUpstreamResume = React.useCallback((mode: AixReattachMode) => {
|
||||
if (!messageGenerator) return;
|
||||
return onMessageUpstreamResume?.(messageGenerator, messageId, mode);
|
||||
}, [messageGenerator, messageId, onMessageUpstreamResume]);
|
||||
|
||||
const handleUpstreamDetach = React.useCallback(() => {
|
||||
onMessageUpstreamDetach?.(messageId);
|
||||
}, [messageId, onMessageUpstreamDetach]);
|
||||
|
||||
const handleUpstreamDelete = React.useCallback(() => {
|
||||
if (!messageGenerator) return;
|
||||
return onMessageUpstreamDelete?.(messageGenerator, messageId);
|
||||
}, [messageGenerator, messageId, onMessageUpstreamDelete]);
|
||||
|
||||
|
||||
// Text Editing
|
||||
|
||||
@@ -280,14 +301,25 @@ export function ChatMessage(props: {
|
||||
}, [handleFragmentDelete, handleFragmentReplace, messageFragments]);
|
||||
|
||||
const handleApplyAllEdits = React.useCallback(async (withControl: boolean) => {
|
||||
const state = textContentEditState || {};
|
||||
// 0. take state, including new attachment drafts BEFORE clearing state
|
||||
const fragmentsEdits = textContentEditState || {};
|
||||
const newFragments = await attachmentsEditRef.current?.takeAllFragments() ?? [];
|
||||
|
||||
// 1. clear edit state (unmounts EditModeAttachments, triggers cleanup)
|
||||
setTextContentEditState(null);
|
||||
for (const [fragmentId, editedText] of Object.entries(state))
|
||||
|
||||
// 2A. apply text fragment edits
|
||||
for (const [fragmentId, editedText] of Object.entries(fragmentsEdits))
|
||||
handleApplyEdit(fragmentId, editedText);
|
||||
// if the user pressed Ctrl, we begin a regeneration from here
|
||||
|
||||
// 2B. append new attachment fragments
|
||||
for (const fragment of newFragments)
|
||||
onMessageFragmentAppend?.(messageId, fragment);
|
||||
|
||||
// 3. if the user pressed Ctrl, we begin a regeneration from here
|
||||
if (withControl && onMessageAssistantFrom)
|
||||
await onMessageAssistantFrom(messageId, 0);
|
||||
}, [handleApplyEdit, messageId, onMessageAssistantFrom, textContentEditState]);
|
||||
}, [handleApplyEdit, messageId, onMessageAssistantFrom, onMessageFragmentAppend, textContentEditState]);
|
||||
|
||||
const handleEditsApplyClicked = React.useCallback(() => handleApplyAllEdits(false), [handleApplyAllEdits]);
|
||||
|
||||
@@ -314,11 +346,17 @@ export function ChatMessage(props: {
|
||||
|
||||
const handleCloseOpsMenu = React.useCallback(() => setOpsMenuAnchor(null), []);
|
||||
|
||||
const handleOpsCopy = (e: React.MouseEvent) => {
|
||||
copyToClipboard(textSubject, 'Text');
|
||||
const handleOpsMessageCopySrc = React.useCallback((e: React.MouseEvent) => {
|
||||
e.preventDefault();
|
||||
// copy full source text (ops menu) - bypasses DOM, always gets pre-collapsed content
|
||||
copyToClipboard(fragmentFlattenedText, 'Message');
|
||||
handleCloseOpsMenu();
|
||||
closeContextMenu();
|
||||
}, [fragmentFlattenedText, handleCloseOpsMenu]);
|
||||
|
||||
const handleBubbleCopyDOM = (e: React.MouseEvent) => {
|
||||
e.preventDefault();
|
||||
// copy cleaned DOM selection (bubble) - rich text for pasting into Google Docs, etc.
|
||||
clipboardCopyDOMSelectionOrFallback(blocksRendererRef.current, textSubject, 'Selection');
|
||||
closeBubble();
|
||||
};
|
||||
|
||||
@@ -342,6 +380,13 @@ export function ChatMessage(props: {
|
||||
onMessageToggleUserFlag?.(messageId, MESSAGE_FLAG_STARRED);
|
||||
}, [messageId, onMessageToggleUserFlag]);
|
||||
|
||||
const handleOpsShowInfo = React.useCallback(() => {
|
||||
setOpsMenuAnchor(null);
|
||||
setShowInfoModal(true);
|
||||
}, []);
|
||||
|
||||
const handleInfoClose = React.useCallback(() => setShowInfoModal(false), []);
|
||||
|
||||
const handleOpsToggleNotifyComplete = React.useCallback(() => {
|
||||
// also remember the preference, for auto-setting flags by the persona
|
||||
setIsNotificationEnabledForModel(messageId, !isUserNotifyComplete);
|
||||
@@ -797,11 +842,11 @@ export function ChatMessage(props: {
|
||||
fitScreen={props.fitScreen}
|
||||
isMobile={props.isMobile}
|
||||
messageRole={messageRole}
|
||||
messageGeneratorLlmId={messageGenerator?.mgt === 'aix' ? messageGenerator.aix?.mId : undefined}
|
||||
messagePendingIncomplete={messagePendingIncomplete}
|
||||
optiAllowSubBlocksMemo={!!messagePendingIncomplete}
|
||||
disableMarkdownText={disableMarkdown || fromUser /* User messages are edited as text. Try to have them in plain text. NOTE: This may bite. */}
|
||||
showUnsafeHtmlCode={props.showUnsafeHtmlCode}
|
||||
enhanceCodeBlocks={labsEnhanceCodeBlocks}
|
||||
|
||||
textEditsState={textContentEditState}
|
||||
setEditedText={(!props.onMessageFragmentReplace || messagePendingIncomplete) ? undefined : handleEditSetText}
|
||||
@@ -832,6 +877,14 @@ export function ChatMessage(props: {
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* [Edit Mode] Add new attachments (right below the Document Fragments) */}
|
||||
{isEditingText && !fromAssistant && !!onMessageFragmentAppend && (
|
||||
<ChatMessageEditAttachments
|
||||
ref={attachmentsEditRef}
|
||||
isMobile={props.isMobile}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* [SYSTEM, REAL] Image Attachment Fragments - just for a realistic display below the system instruction text/docs */}
|
||||
{fromSystem && imageAttachments.length >= 1 && (
|
||||
<ImageAttachmentFragments
|
||||
@@ -852,13 +905,15 @@ export function ChatMessage(props: {
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Upstream Resume... */}
|
||||
{props.isBottom && fromAssistant && lastFragmentIsError && messageGenerator?.upstreamHandle?.responseId && (
|
||||
{/* Upstream Resume - shows whenever there's a stored handle (incl. post-reload, and while streaming so Stop can cancel the upstream run) */}
|
||||
{props.isBottom && fromAssistant && messageGenerator?.upstreamHandle && (!!onMessageUpstreamResume || !!onMessageUpstreamDelete) && (
|
||||
<BlockOpUpstreamResume
|
||||
upstreamHandle={messageGenerator.upstreamHandle}
|
||||
onResume={console.error}
|
||||
onCancel={console.error}
|
||||
onDelete={console.error}
|
||||
pending={messagePendingIncomplete}
|
||||
inFlightMode={props.upstreamResumeMode}
|
||||
onResume={onMessageUpstreamResume ? handleUpstreamResume : undefined}
|
||||
onDetach={onMessageUpstreamDetach ? handleUpstreamDetach : undefined}
|
||||
onDelete={onMessageUpstreamDelete ? handleUpstreamDelete : undefined}
|
||||
/>
|
||||
)}
|
||||
|
||||
@@ -871,6 +926,13 @@ export function ChatMessage(props: {
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Char & Word count */}
|
||||
{/*{!zenMode && !isEditingText && !messagePendingIncomplete && fragmentFlattenedText.length > 0 && (*/}
|
||||
{/* <Typography level='body-xs' sx={{ mx: 1.5, mt: 0.5, textAlign: fromAssistant ? 'left' : 'right', opacity: 0.5 }}>*/}
|
||||
{/* {fragmentFlattenedText.length.toLocaleString()} chars · {(fragmentFlattenedText.match(/\S+/g) || []).length.toLocaleString()} words*/}
|
||||
{/* </Typography>*/}
|
||||
{/*)}*/}
|
||||
|
||||
</Box>
|
||||
|
||||
|
||||
@@ -892,18 +954,18 @@ export function ChatMessage(props: {
|
||||
|
||||
|
||||
{/* Overlay copy icon */}
|
||||
{ENABLE_COPY_MESSAGE_OVERLAY && !fromSystem && !isEditingText && (
|
||||
<Tooltip title={messagePendingIncomplete ? null : (fromAssistant ? 'Copy message' : 'Copy input')} variant='solid'>
|
||||
<IconButton
|
||||
variant='outlined' onClick={handleOpsCopy}
|
||||
sx={{
|
||||
position: 'absolute', ...(fromAssistant ? { right: { xs: 12, md: 28 } } : { left: { xs: 12, md: 28 } }), zIndex: 10,
|
||||
opacity: 0, transition: 'opacity 0.16s cubic-bezier(.17,.84,.44,1)',
|
||||
}}>
|
||||
<ContentCopyIcon />
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
)}
|
||||
{/*{ENABLE_COPY_MESSAGE_OVERLAY && !fromSystem && !isEditingText && (*/}
|
||||
{/* <Tooltip title={messagePendingIncomplete ? null : (fromAssistant ? 'Copy message' : 'Copy input')} variant='solid'>*/}
|
||||
{/* <IconButton*/}
|
||||
{/* variant='outlined' onClick={handleOpsMessageCopySrc}*/}
|
||||
{/* sx={{*/}
|
||||
{/* position: 'absolute', ...(fromAssistant ? { right: { xs: 12, md: 28 } } : { left: { xs: 12, md: 28 } }), zIndex: 10,*/}
|
||||
{/* opacity: 0, transition: 'opacity 0.16s cubic-bezier(.17,.84,.44,1)',*/}
|
||||
{/* }}>*/}
|
||||
{/* <ContentCopyIcon />*/}
|
||||
{/* </IconButton>*/}
|
||||
{/* </Tooltip>*/}
|
||||
{/*)}*/}
|
||||
|
||||
|
||||
{/* Message Operations Menu (3 dots) */}
|
||||
@@ -933,25 +995,22 @@ export function ChatMessage(props: {
|
||||
</MenuItem>
|
||||
)}
|
||||
{/* Copy */}
|
||||
<MenuItem onClick={handleOpsCopy} sx={{ flex: 1 }}>
|
||||
<MenuItem onClick={handleOpsMessageCopySrc} sx={{ flex: 1 }}>
|
||||
<ListItemDecorator><ContentCopyIcon /></ListItemDecorator>
|
||||
Copy
|
||||
</MenuItem>
|
||||
{/* Starred */}
|
||||
{!!onMessageToggleUserFlag && (
|
||||
<MenuItem onClick={handleOpsToggleStarred} sx={{ flexGrow: 0, px: 1 }}>
|
||||
<Tooltip disableInteractive title={!isUserStarred ? 'Link message - use @ to refer to it from another chat' : 'Remove link'}>
|
||||
{isUserStarred
|
||||
? <AlternateEmailIcon color='primary' sx={{ fontSize: 'xl' }} />
|
||||
: <InsertLinkIcon sx={{ rotate: '45deg' }} />
|
||||
}
|
||||
{/*{isUserStarred*/}
|
||||
{/* ? <StarRoundedIcon color='primary' sx={{ fontSize: 'xl2' }} />*/}
|
||||
{/* : <StarOutlineRoundedIcon sx={{ fontSize: 'xl2' }} />*/}
|
||||
{/*}*/}
|
||||
<Tooltip disableInteractive title={!isUserStarred ? 'Star message - use @ to refer to it from another chat' : 'Remove star'}>
|
||||
<StarredState isStarred={isUserStarred} />
|
||||
</Tooltip>
|
||||
</MenuItem>
|
||||
)}
|
||||
{/* Info */}
|
||||
<MenuItem onClick={handleOpsShowInfo} sx={{ flexGrow: 0, px: 1 }}>
|
||||
<InfoOutlinedIcon sx={{ fontSize: 'xl' }} />
|
||||
</MenuItem>
|
||||
</Box>
|
||||
|
||||
{/* Notify Complete */}
|
||||
@@ -1014,7 +1073,7 @@ export function ChatMessage(props: {
|
||||
{!!props.onTextDiagram && <ListDivider />}
|
||||
{!!props.onTextDiagram && (
|
||||
<MenuItem onClick={handleOpsDiagram} disabled={!couldDiagram}>
|
||||
<ListItemDecorator><AccountTreeOutlinedIcon /></ListItemDecorator>
|
||||
<ListItemDecorator><PhTreeStructure /></ListItemDecorator>
|
||||
Auto-Diagram ...
|
||||
</MenuItem>
|
||||
)}
|
||||
@@ -1026,7 +1085,7 @@ export function ChatMessage(props: {
|
||||
)}
|
||||
{!!props.onTextSpeak && (
|
||||
<MenuItem onClick={handleOpsSpeak} disabled={!couldSpeak || props.isSpeaking}>
|
||||
<ListItemDecorator>{props.isSpeaking ? <CircularProgress size='sm' /> : <RecordVoiceOverOutlinedIcon />}</ListItemDecorator>
|
||||
<ListItemDecorator>{props.isSpeaking ? <CircularProgress size='sm' /> : <PhVoice />}</ListItemDecorator>
|
||||
Speak
|
||||
</MenuItem>
|
||||
)}
|
||||
@@ -1144,7 +1203,7 @@ export function ChatMessage(props: {
|
||||
{/* Intelligent functions */}
|
||||
{!!props.onTextDiagram && <Tooltip disableInteractive arrow placement='top' title={couldDiagram ? 'Auto-Diagram...' : 'Too short to Auto-Diagram'}>
|
||||
<IconButton color='success' onClick={couldDiagram ? handleOpsDiagram : undefined}>
|
||||
<AccountTreeOutlinedIcon sx={{ color: couldDiagram ? 'primary' : 'neutral.plainDisabledColor' }} />
|
||||
<PhTreeStructure sx={{ color: couldDiagram ? 'primary' : 'neutral.plainDisabledColor' }} />
|
||||
</IconButton>
|
||||
</Tooltip>}
|
||||
{!!props.onTextImagine && <Tooltip disableInteractive arrow placement='top' title='Auto-Draw'>
|
||||
@@ -1154,18 +1213,26 @@ export function ChatMessage(props: {
|
||||
</Tooltip>}
|
||||
{!!props.onTextSpeak && <Tooltip disableInteractive arrow placement='top' title='Speak'>
|
||||
<IconButton color='success' onClick={handleOpsSpeak} disabled={!couldSpeak || props.isSpeaking}>
|
||||
{!props.isSpeaking ? <RecordVoiceOverOutlinedIcon /> : <CircularProgress sx={{ '--CircularProgress-size': '16px' }} />}
|
||||
{!props.isSpeaking ? <PhVoice /> : <CircularProgress sx={{ '--CircularProgress-size': '16px' }} />}
|
||||
</IconButton>
|
||||
</Tooltip>}
|
||||
{(!!props.onTextDiagram || !!props.onTextImagine || !!props.onTextSpeak) && <Divider />}
|
||||
|
||||
{/* Bubble Copy */}
|
||||
<Tooltip disableInteractive arrow placement='top' title='Copy Selection'>
|
||||
<IconButton onClick={handleOpsCopy}>
|
||||
<IconButton onClick={handleBubbleCopyDOM}>
|
||||
<ContentCopyIcon />
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
|
||||
{/* Selection char & word count */}
|
||||
{!!selText && <Divider />}
|
||||
{!!selText && (
|
||||
<Typography level='body-xs' sx={{ px: 1, whiteSpace: 'nowrap' }}>
|
||||
{selText.length.toLocaleString()}c · {(selText.match(/\S+/g) || []).length.toLocaleString()}w
|
||||
</Typography>
|
||||
)}
|
||||
|
||||
</ButtonGroup>
|
||||
</ClickAwayListener>
|
||||
</Popper>
|
||||
@@ -1180,13 +1247,13 @@ export function ChatMessage(props: {
|
||||
minWidth={220}
|
||||
placement='bottom-start'
|
||||
>
|
||||
<MenuItem onClick={handleOpsCopy} sx={{ flex: 1, alignItems: 'center' }}>
|
||||
<MenuItem onClick={(e) => { handleOpsMessageCopySrc(e); closeContextMenu(); }} sx={{ flex: 1, alignItems: 'center' }}>
|
||||
<ListItemDecorator><ContentCopyIcon /></ListItemDecorator>
|
||||
Copy
|
||||
</MenuItem>
|
||||
{!!props.onTextDiagram && <ListDivider />}
|
||||
{!!props.onTextDiagram && <MenuItem onClick={handleOpsDiagram} disabled={!couldDiagram || props.isImagining}>
|
||||
<ListItemDecorator><AccountTreeOutlinedIcon /></ListItemDecorator>
|
||||
<ListItemDecorator><PhTreeStructure /></ListItemDecorator>
|
||||
Auto-Diagram ...
|
||||
</MenuItem>}
|
||||
{!!props.onTextImagine && <MenuItem onClick={handleOpsImagine} disabled={!couldImagine || props.isImagining}>
|
||||
@@ -1194,12 +1261,22 @@ export function ChatMessage(props: {
|
||||
Auto-Draw
|
||||
</MenuItem>}
|
||||
{!!props.onTextSpeak && <MenuItem onClick={handleOpsSpeak} disabled={!couldSpeak || props.isSpeaking}>
|
||||
<ListItemDecorator>{props.isSpeaking ? <CircularProgress size='sm' /> : <RecordVoiceOverOutlinedIcon />}</ListItemDecorator>
|
||||
<ListItemDecorator>{props.isSpeaking ? <CircularProgress size='sm' /> : <PhVoice />}</ListItemDecorator>
|
||||
Speak
|
||||
</MenuItem>}
|
||||
</CloseablePopup>
|
||||
)}
|
||||
|
||||
|
||||
{/* Message Info Modal */}
|
||||
{showInfoModal && (
|
||||
<ChatMessageInfoPopup
|
||||
open
|
||||
onClose={handleInfoClose}
|
||||
message={props.message}
|
||||
/>
|
||||
)}
|
||||
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -0,0 +1,155 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import type { SxProps } from '@mui/joy/styles/types';
|
||||
import { Sheet } from '@mui/joy';
|
||||
|
||||
import { useBrowseCapability } from '~/modules/browse/store-module-browsing';
|
||||
|
||||
import type { AttachmentDraftsStoreApi } from '~/common/attachment-drafts/store-attachment-drafts_slice';
|
||||
import type { DMessageAttachmentFragment } from '~/common/stores/chat/chat.fragments';
|
||||
import { AttachmentDraftsList } from '~/common/attachment-drafts/attachment-drafts-ui/AttachmentDraftsList';
|
||||
import { AttachmentSourcesMemo } from '~/common/attachment-drafts/attachment-sources/AttachmentSources';
|
||||
import { useAttachHandler_CameraOpen, useAttachHandler_Files, useAttachHandler_ScreenCapture, useAttachHandler_UrlWebLinks } from '~/common/attachment-drafts/attachment-sources/useAttachmentSourceHandlers';
|
||||
import { createAttachmentDraftsVanillaStore } from '~/common/attachment-drafts/store-attachment-drafts_vanilla';
|
||||
import { supportsCameraCapture } from '~/common/components/camera/useCameraCapture';
|
||||
import { supportsScreenCapture } from '~/common/util/screenCaptureUtils';
|
||||
import { useAttachmentDrafts } from '~/common/attachment-drafts/useAttachmentDrafts';
|
||||
import { useGoogleDrivePicker } from '~/common/attachment-drafts/attachment-sources/useGoogleDrivePicker';
|
||||
|
||||
import { ViewDocPartModal } from './fragments-content/ViewDocPartModal';
|
||||
import { ViewImageRefPartModal } from './fragments-content/ViewImageRefPartModal';
|
||||
|
||||
|
||||
/**
|
||||
* Imperative interface used outside
|
||||
*/
|
||||
export interface EditModeAttachmentsHandle {
|
||||
takeAllFragments: () => Promise<DMessageAttachmentFragment[]>;
|
||||
}
|
||||
|
||||
|
||||
const _styles = {
|
||||
box: {
|
||||
overflow: 'hidden',
|
||||
p: 0.5,
|
||||
|
||||
// looks - exactly from BoxTextArea - the Text editor
|
||||
boxShadow: 'inset 1px 0px 3px -2px var(--joy-palette-warning-softColor)',
|
||||
outline: '1px solid',
|
||||
outlineColor: 'var(--joy-palette-warning-solidBg)',
|
||||
borderRadius: 'sm',
|
||||
|
||||
// layout
|
||||
display: 'flex',
|
||||
flexWrap: 'wrap',
|
||||
alignItems: 'center',
|
||||
gap: 1,
|
||||
|
||||
// shade to the buttons inside this > div > div > button
|
||||
'& > div > div > button': {
|
||||
// backgroundColor: 'warning.softActiveBg',
|
||||
borderColor: 'warning.outlinedBorder',
|
||||
borderRadius: 'sm',
|
||||
boxShadow: 'sm',
|
||||
},
|
||||
},
|
||||
} as const satisfies Record<string, SxProps>;
|
||||
|
||||
|
||||
/**
|
||||
* Encapsulates all attachment wiring for ChatMessage edit mode.
|
||||
* Owns a standalone attachment drafts store (one per edit session).
|
||||
* Exposes an imperative handle for the parent to "take" fragments on save.
|
||||
*/
|
||||
export const ChatMessageEditAttachments = React.forwardRef<EditModeAttachmentsHandle, { isMobile: boolean }>(
|
||||
function EditModeAttachments(props, ref) {
|
||||
|
||||
// state
|
||||
const storeApiRef = React.useRef<AttachmentDraftsStoreApi | null>(null);
|
||||
if (!storeApiRef.current) storeApiRef.current = createAttachmentDraftsVanillaStore(); // created only on mount
|
||||
|
||||
// external state
|
||||
const {
|
||||
attachmentDrafts,
|
||||
attachAppendClipboardItems, attachAppendCloudFile, attachAppendFile, attachAppendUrl, // attachAppendDataTransfer
|
||||
attachmentsTakeAllFragments,
|
||||
} = useAttachmentDrafts(storeApiRef.current, false, false, undefined, false);
|
||||
const browseCapability = useBrowseCapability();
|
||||
|
||||
|
||||
// imperative handle for parent to take fragments on save
|
||||
React.useImperativeHandle(ref, () => ({
|
||||
takeAllFragments: () => attachmentsTakeAllFragments('global', 'app-chat'),
|
||||
}), [attachmentsTakeAllFragments]);
|
||||
|
||||
|
||||
// [effect] cleanup on unmount - remove all drafts (deleted their DBlob assets, except for 'taken' ones)
|
||||
React.useEffect(() => {
|
||||
const store = storeApiRef.current;
|
||||
return () => {
|
||||
store?.getState().removeAllAttachmentDrafts();
|
||||
};
|
||||
}, []);
|
||||
|
||||
|
||||
// handlers - composed from shared attachment source hooks
|
||||
|
||||
const handleAttachFiles = useAttachHandler_Files(attachAppendFile);
|
||||
const handleOpenCamera = useAttachHandler_CameraOpen(attachAppendFile);
|
||||
const handleAttachScreenCapture = useAttachHandler_ScreenCapture(attachAppendFile);
|
||||
const { openWebInputDialog, webInputDialogComponent } = useAttachHandler_UrlWebLinks(attachAppendUrl);
|
||||
const { openGoogleDrivePicker, googleDrivePickerComponent } = useGoogleDrivePicker(attachAppendCloudFile, props.isMobile);
|
||||
|
||||
// viewer render props - same pattern as ComposerAttachmentDraftsList.tsx:44-52
|
||||
const renderDocViewer = React.useCallback(
|
||||
(part: React.ComponentProps<typeof ViewDocPartModal>['docPart'], onClose: () => void) =>
|
||||
<ViewDocPartModal docPart={part} onClose={onClose} />,
|
||||
[],
|
||||
);
|
||||
|
||||
const renderImageViewer = React.useCallback(
|
||||
(part: React.ComponentProps<typeof ViewImageRefPartModal>['imageRefPart'], onClose: () => void) =>
|
||||
<ViewImageRefPartModal imageRefPart={part} onClose={onClose} />,
|
||||
[],
|
||||
);
|
||||
|
||||
|
||||
return <>
|
||||
|
||||
<Sheet color='warning' variant='soft' sx={_styles.box}>
|
||||
|
||||
{/* [+] Attachment Sources menu */}
|
||||
<AttachmentSourcesMemo
|
||||
mode='menu-message'
|
||||
canBrowse={browseCapability.mayWork}
|
||||
hasScreenCapture={supportsScreenCapture}
|
||||
hasCamera={supportsCameraCapture()}
|
||||
// onlyImages={showAttachOnlyImages}
|
||||
onAttachClipboard={attachAppendClipboardItems}
|
||||
onAttachFiles={handleAttachFiles}
|
||||
onAttachScreenCapture={handleAttachScreenCapture}
|
||||
onOpenCamera={handleOpenCamera}
|
||||
onOpenGoogleDrivePicker={openGoogleDrivePicker}
|
||||
onOpenWebInput={openWebInputDialog}
|
||||
/>
|
||||
|
||||
{/* Attachment Drafts list */}
|
||||
{attachmentDrafts.length > 0 ? (
|
||||
<AttachmentDraftsList
|
||||
attachmentDraftsStoreApi={storeApiRef.current!}
|
||||
attachmentDrafts={attachmentDrafts}
|
||||
buttonsCanWrap
|
||||
renderDocViewer={renderDocViewer}
|
||||
renderImageViewer={renderImageViewer}
|
||||
/>
|
||||
) : null}
|
||||
|
||||
</Sheet>
|
||||
|
||||
{/* Modal portals */}
|
||||
{webInputDialogComponent}
|
||||
{googleDrivePickerComponent}
|
||||
|
||||
</>;
|
||||
},
|
||||
);
|
||||
@@ -0,0 +1,104 @@
|
||||
import * as React from 'react';
|
||||
import TimeAgo from 'react-timeago';
|
||||
|
||||
import type { SxProps } from '@mui/joy/styles/types';
|
||||
import { Box } from '@mui/joy';
|
||||
|
||||
import { llmsGetVendorIcon } from '~/modules/llms/components/LLMVendorIcon';
|
||||
|
||||
import type { DMessage } from '~/common/stores/chat/chat.message';
|
||||
import type { Immutable } from '~/common/types/immutable.types';
|
||||
import { GoodModal } from '~/common/components/modals/GoodModal';
|
||||
import { tooltipMetricsGridSx, prettyMessageMetrics, prettyShortChatModelName, prettyTokenStopReason } from '~/common/util/dMessageUtils';
|
||||
|
||||
|
||||
const contentSx: SxProps = {
|
||||
fontSize: 'sm',
|
||||
display: 'grid',
|
||||
gap: 1.5,
|
||||
};
|
||||
|
||||
const vendorIconContainerSx: SxProps = {
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
gap: 1,
|
||||
};
|
||||
|
||||
const timestampSx: SxProps = {
|
||||
fontSize: 'xs',
|
||||
color: 'text.tertiary',
|
||||
};
|
||||
|
||||
|
||||
export function ChatMessageInfoPopup(props: {
|
||||
open: boolean,
|
||||
onClose: () => void,
|
||||
message: Immutable<DMessage>,
|
||||
}) {
|
||||
|
||||
const { message } = props;
|
||||
const { generator, created, updated, tokenCount, role } = message;
|
||||
|
||||
const isAix = generator?.mgt === 'aix';
|
||||
const vendorId = isAix ? generator.aix?.vId ?? null : null;
|
||||
const VendorIcon = vendorId ? llmsGetVendorIcon(vendorId) : null;
|
||||
const metrics = generator?.metrics ? prettyMessageMetrics(generator.metrics, 'extra') : null;
|
||||
const stopReason = generator?.tokenStopReason ? prettyTokenStopReason(generator.tokenStopReason, 'extra') : null;
|
||||
|
||||
return (
|
||||
<GoodModal
|
||||
open={props.open}
|
||||
onClose={props.onClose}
|
||||
title='Message Info'
|
||||
hideBottomClose
|
||||
sx={{ minWidth: { xs: 300, sm: 400 }, maxWidth: 480 }}
|
||||
>
|
||||
<Box sx={contentSx}>
|
||||
|
||||
{/* Model / Generator */}
|
||||
{generator && (
|
||||
<Box sx={tooltipMetricsGridSx}>
|
||||
<div>Model:</div>
|
||||
<div>
|
||||
{VendorIcon
|
||||
? <Box sx={vendorIconContainerSx}><VendorIcon />{prettyShortChatModelName(generator.name)}</Box>
|
||||
: prettyShortChatModelName(generator.name)}
|
||||
</div>
|
||||
{isAix && generator.aix?.mId && <>
|
||||
<div>ID:</div>
|
||||
<div style={{ opacity: 0.75 }}>{generator.aix.mId}</div>
|
||||
</>}
|
||||
{generator.providerInfraLabel && <>
|
||||
<div>Provider:</div>
|
||||
<div>{generator.providerInfraLabel}</div>
|
||||
</>}
|
||||
{stopReason && <>
|
||||
<div>Status:</div>
|
||||
<div>{stopReason}</div>
|
||||
</>}
|
||||
</Box>
|
||||
)}
|
||||
|
||||
{/* Metrics (tokens, speed, cost, time) */}
|
||||
{metrics}
|
||||
|
||||
{/* Message metadata */}
|
||||
<Box sx={tooltipMetricsGridSx}>
|
||||
<div>Role:</div>
|
||||
<div>{role}</div>
|
||||
{tokenCount > 0 && <>
|
||||
<div>Tokens:</div>
|
||||
<div>{tokenCount.toLocaleString()} (visible text ~approx)</div>
|
||||
</>}
|
||||
</Box>
|
||||
|
||||
{/* Timestamps */}
|
||||
<Box sx={timestampSx}>
|
||||
{!!created && <div>Created <TimeAgo date={created} /> - {new Date(created).toLocaleString()}</div>}
|
||||
{!!updated && <div>Updated <TimeAgo date={updated} /> - {new Date(updated).toLocaleString()}</div>}
|
||||
</Box>
|
||||
|
||||
</Box>
|
||||
</GoodModal>
|
||||
);
|
||||
}
|
||||
@@ -5,13 +5,13 @@ import AttachFileRoundedIcon from '@mui/icons-material/AttachFileRounded';
|
||||
import ClearIcon from '@mui/icons-material/Clear';
|
||||
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
|
||||
import ErrorIcon from '@mui/icons-material/ErrorRounded';
|
||||
import ImageIcon from '@mui/icons-material/ImageRounded';
|
||||
import TextFieldsIcon from '@mui/icons-material/TextFieldsRounded';
|
||||
import VisibilityIcon from '@mui/icons-material/Visibility';
|
||||
import VisibilityOffIcon from '@mui/icons-material/VisibilityOff';
|
||||
|
||||
import { DMessage, MESSAGE_FLAG_AIX_SKIP, messageFragmentsReduceText, messageHasUserFlag } from '~/common/stores/chat/chat.message';
|
||||
import { DMessageAttachmentFragment, DMessageFragment, isAttachmentFragment, isContentFragment, isImageRefPart, isZyncAssetImageReferencePart } from '~/common/stores/chat/chat.fragments';
|
||||
import { PhImageSquare } from '~/common/components/icons/phosphor/PhImageSquare';
|
||||
import { makeMessageAvatarIcon, messageBackground } from '~/common/util/dMessageUtils';
|
||||
|
||||
import { TokenBadgeMemo } from '../composer/tokens/TokenBadge';
|
||||
@@ -273,7 +273,7 @@ export function CleanerMessage(props: { message: DMessage, selected: boolean, re
|
||||
</Chip>
|
||||
)}
|
||||
{analysis.imageCount > 0 && (
|
||||
<Chip size='sm' variant='solid' color='success' startDecorator={<ImageIcon />} sx={{ px: 1 }}>
|
||||
<Chip size='sm' variant='solid' color='success' startDecorator={<PhImageSquare />} sx={{ px: 1 }}>
|
||||
{analysis.imageCount} image{analysis.imageCount > 1 ? 's' : ''}
|
||||
</Chip>
|
||||
)}
|
||||
|
||||
+24
-10
@@ -5,15 +5,15 @@ import { Box, Button, ColorPaletteProp } from '@mui/joy';
|
||||
import AbcIcon from '@mui/icons-material/Abc';
|
||||
import CodeIcon from '@mui/icons-material/Code';
|
||||
import EditRoundedIcon from '@mui/icons-material/EditRounded';
|
||||
import ImageOutlinedIcon from '@mui/icons-material/ImageOutlined';
|
||||
import PictureAsPdfIcon from '@mui/icons-material/PictureAsPdf';
|
||||
import RecordVoiceOverOutlinedIcon from '@mui/icons-material/RecordVoiceOverOutlined';
|
||||
import TextFieldsIcon from '@mui/icons-material/TextFields';
|
||||
import TextureIcon from '@mui/icons-material/Texture';
|
||||
|
||||
import { ContentScaling, themeScalingMap } from '~/common/app.theme';
|
||||
import { DMessageAttachmentFragment, DMessageFragmentId, DVMimeType, isDocPart } from '~/common/stores/chat/chat.fragments';
|
||||
import { LiveFileIcon } from '~/common/livefile/liveFile.icons';
|
||||
import { PhImageSquare } from '~/common/components/icons/phosphor/PhImageSquare';
|
||||
import { PhVoice } from '~/common/components/icons/phosphor/PhVoice';
|
||||
import { TooltipOutlined } from '~/common/components/TooltipOutlined';
|
||||
import { ellipsizeMiddle } from '~/common/util/textUtils';
|
||||
import { useLiveFileMetadata } from '~/common/livefile/useLiveFileMetadata';
|
||||
@@ -24,6 +24,15 @@ export const DocSelColor: ColorPaletteProp = 'primary';
|
||||
const DocUnselColor: ColorPaletteProp = 'primary';
|
||||
|
||||
|
||||
const _styles = {
|
||||
label: {
|
||||
whiteSpace: 'nowrap',
|
||||
fontWeight: 'md',
|
||||
minWidth: 48,
|
||||
},
|
||||
} as const;
|
||||
|
||||
|
||||
export function buttonIconForFragment(part: DMessageAttachmentFragment['part']): React.ComponentType<any> {
|
||||
const pt = part.pt;
|
||||
switch (pt) {
|
||||
@@ -39,9 +48,9 @@ export function buttonIconForFragment(part: DMessageAttachmentFragment['part']):
|
||||
const assetType = part.assetType;
|
||||
switch (assetType) {
|
||||
case 'image':
|
||||
return ImageOutlinedIcon;
|
||||
return PhImageSquare;
|
||||
case 'audio':
|
||||
return RecordVoiceOverOutlinedIcon;
|
||||
return PhVoice;
|
||||
default:
|
||||
const _exhaustiveCheck: never = assetType;
|
||||
return TextureIcon; // missing zync asset type
|
||||
@@ -84,7 +93,7 @@ export function buttonIconForFragment(part: DMessageAttachmentFragment['part']):
|
||||
|
||||
// [OLD-style] Image Attachment Fragment
|
||||
case 'image_ref':
|
||||
return ImageOutlinedIcon;
|
||||
return PhImageSquare;
|
||||
|
||||
case '_pt_sentinel':
|
||||
return TextureIcon; // nothing to do here - this is a sentinel type
|
||||
@@ -146,10 +155,14 @@ export function DocAttachmentFragmentButton(props: {
|
||||
if (!isDocPart(fragment.part))
|
||||
return 'Unexpected: ' + fragment.part.pt;
|
||||
|
||||
const buttonText = ellipsizeMiddle(fragment.part.l1Title || fragment.title || 'Document', 28 /* totally arbitrary length */);
|
||||
|
||||
const Icon = isSelected ? EditRoundedIcon : buttonIconForFragment(fragment.part);
|
||||
|
||||
const fullTitle = fragment.part.l1Title || fragment.title || 'Document';
|
||||
const buttonText = ellipsizeMiddle(fullTitle, 28 /* totally arbitrary length */);
|
||||
const showFilenameTooltip = fullTitle !== buttonText;
|
||||
|
||||
const labelContent = <Box sx={_styles.label}>{buttonText}</Box>;
|
||||
|
||||
return (
|
||||
<Button
|
||||
size={props.contentScaling === 'md' ? 'md' : 'sm'}
|
||||
@@ -171,9 +184,10 @@ export function DocAttachmentFragmentButton(props: {
|
||||
</Box>
|
||||
)}
|
||||
<Box sx={{ display: 'flex', flexDirection: 'column', alignItems: 'flex-start', paddingX: '0.5rem' }}>
|
||||
<Box sx={{ whiteSpace: 'nowrap', fontWeight: 'md', minWidth: 48 }}>
|
||||
{buttonText}
|
||||
</Box>
|
||||
{showFilenameTooltip
|
||||
? <TooltipOutlined title={<span style={{ wordBreak: 'break-all' }}>{fullTitle}</span>}>{labelContent}</TooltipOutlined>
|
||||
: labelContent
|
||||
}
|
||||
{/*<Box sx={{ fontSize: 'xs', fontWeight: 'sm' }}>*/}
|
||||
{/* {fragment.caption}*/}
|
||||
{/*</Box>*/}
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import type { SxProps } from '@mui/joy/styles/types';
|
||||
|
||||
import { BlocksTextarea } from '~/modules/blocks/BlocksContainers';
|
||||
|
||||
import type { ContentScaling } from '~/common/app.theme';
|
||||
@@ -96,6 +98,8 @@ export function BlockEdit_TextFragment(props: {
|
||||
|
||||
const handleEditKeyDown = React.useCallback((e: React.KeyboardEvent<HTMLTextAreaElement>) => {
|
||||
if (e.key === 'Enter') {
|
||||
if (e.nativeEvent.isComposing)
|
||||
return;
|
||||
const withControl = e.ctrlKey;
|
||||
if (enterIsNewline ? e.shiftKey : !e.shiftKey) {
|
||||
e.preventDefault();
|
||||
@@ -120,6 +124,32 @@ export function BlockEdit_TextFragment(props: {
|
||||
{ key: ShortcutKey.Esc, description: 'Cancel', level: 3, action: onEscapePressed },
|
||||
], [isControlled, isEdited, isFocused, onEscapePressed, onSubmit, props.enableRestart]));
|
||||
|
||||
|
||||
// memo style
|
||||
const sx = React.useMemo((): SxProps | undefined => {
|
||||
// check sources of custom, and early outs
|
||||
const isXS = props.contentScaling === 'xs';
|
||||
const isSquareTop = !!props.squareTopBorder;
|
||||
if (!isXS && !isSquareTop) return undefined;
|
||||
if (isSquareTop && !isXS) return _styles.squareTop;
|
||||
|
||||
return {
|
||||
// scaling note: in Chat, this can go xs/sm/md, while in Beam, this is xs/xs/sm
|
||||
...(isXS && {
|
||||
fontSize: 'xs',
|
||||
lineHeight: 'md', // was 1.75 on all
|
||||
// '--Textarea-paddingBlock': 'calc(0.25rem - 0.5px - var(--variant-borderWidth, 0px))', // not used, overridden in BlocksTextarea
|
||||
'--Textarea-paddingInline': '6px',
|
||||
'--Textarea-minHeight': '1.75rem', // was 2rem on 'sm'
|
||||
'--Icon-fontSize': 'lg', // was 'xl' on 'sm'
|
||||
'--Textarea-focusedThickness': '1px',
|
||||
boxShadow: 'none', // too small to show this
|
||||
}),
|
||||
...(isSquareTop && _styles.squareTop),
|
||||
};
|
||||
}, [props.contentScaling, props.squareTopBorder]);
|
||||
|
||||
|
||||
return (
|
||||
<BlocksTextarea
|
||||
variant={/*props.invertedColors ? 'plain' :*/ 'soft'}
|
||||
@@ -140,7 +170,7 @@ export function BlockEdit_TextFragment(props: {
|
||||
onKeyDown={handleEditKeyDown}
|
||||
slotProps={enterIsNewline ? _textAreaSlotPropsEnter : _textAreaSlotPropsDone}
|
||||
// endDecorator={props.endDecorator}
|
||||
sx={!props.squareTopBorder ? undefined : _styles.squareTop}
|
||||
sx={sx}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -3,15 +3,48 @@ import * as React from 'react';
|
||||
import { ScaledTextBlockRenderer } from '~/modules/blocks/ScaledTextBlockRenderer';
|
||||
|
||||
import type { ContentScaling } from '~/common/app.theme';
|
||||
import type { DMessageErrorPart } from '~/common/stores/chat/chat.fragments';
|
||||
import type { DMessageRole } from '~/common/stores/chat/chat.message';
|
||||
|
||||
import { BlockPartError_NetDisconnected } from './BlockPartError_NetDisconnected';
|
||||
import { BlockPartError_RequestExceeded } from './BlockPartError_RequestExceeded';
|
||||
|
||||
|
||||
export function BlockPartError(props: {
|
||||
errorText: string,
|
||||
errorHint?: DMessageErrorPart['hint'],
|
||||
messageRole: DMessageRole,
|
||||
messageGeneratorLlmId?: string | null,
|
||||
contentScaling: ContentScaling,
|
||||
}) {
|
||||
|
||||
// special error presentation, based on hints
|
||||
switch (props.errorHint) {
|
||||
case 'aix-net-disconnected':
|
||||
// determine the 'kinds' of disconnection errors in aix.client.ts
|
||||
// - 'network error' (browser) -> client side
|
||||
// - 'connection terminated' (tRPC 'Stream closed' wrapper) -> server/edge side (CSF recovery)
|
||||
// - 'upstream dropped' (undici TypeError 'terminated') -> upstream provider socket drop (CSF recovery applies)
|
||||
const kind =
|
||||
props.errorText.includes('**network error**') ? 'net-client-closed'
|
||||
: props.errorText.includes('**connection terminated**') ? 'net-server-closed'
|
||||
: props.errorText.includes('**upstream dropped**') ? 'net-server-closed'
|
||||
: 'net-unknown-closed';
|
||||
|
||||
// For client-side error, we don't show the _NetDisconnected component
|
||||
if (kind === 'net-client-closed')
|
||||
break;
|
||||
|
||||
return <BlockPartError_NetDisconnected disconnectionKind={kind} messageGeneratorLlmId={props.messageGeneratorLlmId} contentScaling={props.contentScaling} />;
|
||||
|
||||
case 'aix-request-exceeded':
|
||||
return <BlockPartError_RequestExceeded messageGeneratorLlmId={props.messageGeneratorLlmId} contentScaling={props.contentScaling} />;
|
||||
|
||||
default:
|
||||
// continue rendering generic error
|
||||
break;
|
||||
}
|
||||
|
||||
// Check if the errorText starts with '**' and has a closing '**' following Markdown rules
|
||||
let textToRender = props.errorText;
|
||||
let renderAsMarkdown = false;
|
||||
|
||||
+103
@@ -0,0 +1,103 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Alert, Box, FormHelperText, Switch } from '@mui/joy';
|
||||
import WifiOffRoundedIcon from '@mui/icons-material/WifiOffRounded';
|
||||
|
||||
import type { ContentScaling } from '~/common/app.theme';
|
||||
import { useLLM } from '~/common/stores/llms/llms.hooks';
|
||||
import { useModelServiceClientSideFetch } from '~/common/stores/llms/hooks/useModelServiceClientSideFetch';
|
||||
|
||||
|
||||
/**
|
||||
* Error recovery component for "Connection terminated" errors.
|
||||
*/
|
||||
export function BlockPartError_NetDisconnected(props: {
|
||||
disconnectionKind: 'net-client-closed' | 'net-server-closed' | 'net-unknown-closed';
|
||||
messageGeneratorLlmId?: string | null;
|
||||
contentScaling: ContentScaling;
|
||||
}) {
|
||||
|
||||
// external state
|
||||
const model = useLLM(props.messageGeneratorLlmId) ?? null;
|
||||
const isServerSideClosed = props.disconnectionKind === 'net-server-closed'; // do not show CSF option for non-server-side
|
||||
const { csfAvailable, csfActive, csfToggle, vendorName } = useModelServiceClientSideFetch(isServerSideClosed, model);
|
||||
|
||||
return (
|
||||
<Alert
|
||||
size={props.contentScaling === 'xs' ? 'sm' : 'md'}
|
||||
color='danger'
|
||||
variant='plain'
|
||||
sx={{ display: 'flex', alignItems: 'flex-start', gap: 1 }}
|
||||
>
|
||||
|
||||
|
||||
<Box sx={{ flex: 1, display: 'flex', flexDirection: 'column', gap: 0.5, alignItems: 'flex-start' }}>
|
||||
|
||||
{/* Header */}
|
||||
<Box sx={{ display: 'flex', gap: 2 }}>
|
||||
<WifiOffRoundedIcon sx={{ flexShrink: 0, mt: 0.5 }} />
|
||||
<div>
|
||||
<Box fontSize='larger'>
|
||||
Connection Terminated
|
||||
</Box>
|
||||
<div>
|
||||
The connection was unexpectedly closed before the response completed.
|
||||
</div>
|
||||
</div>
|
||||
</Box>
|
||||
|
||||
|
||||
{/* Recovery options */}
|
||||
{csfAvailable ? <>
|
||||
|
||||
{/* Explanation */}
|
||||
<Box color='text.tertiary' fontSize='sm' my={2}>
|
||||
<strong>Experimental:</strong> enable direct connection to {vendorName} to bypass server timeouts - then try again.
|
||||
</Box>
|
||||
|
||||
{/* Toggle */}
|
||||
<Box
|
||||
sx={{
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
gap: 2,
|
||||
p: 2,
|
||||
borderRadius: 'sm',
|
||||
bgcolor: 'background.popup',
|
||||
boxShadow: 'md',
|
||||
// border: '1px solid',
|
||||
// borderColor: 'divider',
|
||||
}}
|
||||
>
|
||||
|
||||
<Box sx={{ flex: 1 }}>
|
||||
<Box color={!csfActive ? undefined : 'primary.solidBg'} fontWeight='lg' mb={0.5}>
|
||||
Direct Connection {csfActive && '- Now Try Again'}
|
||||
</Box>
|
||||
<FormHelperText>
|
||||
Connect directly from this client -> {vendorName || 'AI service'}
|
||||
</FormHelperText>
|
||||
</Box>
|
||||
|
||||
<Switch
|
||||
checked={csfActive}
|
||||
onChange={(e) => csfToggle(e.target.checked)}
|
||||
/>
|
||||
</Box>
|
||||
|
||||
</> : (
|
||||
<div>
|
||||
<Box sx={{ color: 'text.secondary', my: 1 }}>
|
||||
Suggestions:
|
||||
</Box>
|
||||
<Box component='ul' sx={{ color: 'text.secondary' }}>
|
||||
<li>Check your internet connection and try again</li>
|
||||
<li>The AI service may be experiencing issues - wait a moment and retry</li>
|
||||
<li>If the issue persists, please let us know promptly on Discord or GitHib</li>
|
||||
</Box>
|
||||
</div>
|
||||
)}
|
||||
</Box>
|
||||
</Alert>
|
||||
);
|
||||
}
|
||||
+109
@@ -0,0 +1,109 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import { Alert, Box, FormHelperText, Switch } from '@mui/joy';
|
||||
import WarningRoundedIcon from '@mui/icons-material/WarningRounded';
|
||||
|
||||
import type { ContentScaling } from '~/common/app.theme';
|
||||
import { useLLM } from '~/common/stores/llms/llms.hooks';
|
||||
import { useModelServiceClientSideFetch } from '~/common/stores/llms/hooks/useModelServiceClientSideFetch';
|
||||
|
||||
|
||||
/**
 * Error recovery component for "Request too large" errors.
 *
 * Rendered for 'aix-request-exceeded' error hints. When the model's service
 * supports Client-Side Fetch (CSF), offers an experimental "Direct Connection"
 * toggle to work around intermediary payload-size limits; otherwise shows
 * suggestions to reduce the request size.
 */
export function BlockPartError_RequestExceeded(props: {
  messageGeneratorLlmId?: string | null;
  contentScaling: ContentScaling;
  onRegenerate?: () => void; // currently unused - regenerate button is commented out below
}) {

  // external state
  const model = useLLM(props.messageGeneratorLlmId) ?? null;
  // first arg 'true': always consider CSF for this error kind (unlike NetDisconnected)
  const { csfAvailable, csfActive, csfToggle, vendorName } = useModelServiceClientSideFetch(true, model);

  return (
    <Alert
      size={props.contentScaling === 'xs' ? 'sm' : 'md'}
      color='warning'
      sx={{ display: 'flex', alignItems: 'flex-start', gap: 1, border: '1px solid', borderColor: 'warning.outlinedBorder' }}
    >

      <WarningRoundedIcon sx={{ flexShrink: 0, mt: 0.25 }} />

      <Box sx={{ flex: 1, display: 'flex', flexDirection: 'column', gap: 0.5 }}>

        <Box fontSize='larger'>
          Request Too Large
        </Box>
        <div>
          Your message or attachments exceed the limit
          of the Vercel edge network
          {/* NOTE(review): assumption - any 413 is attributed to the Vercel edge here, but it could come from any intermediary network; confirm */}
        </div>

        {/* Recovery options: CSF toggle when available, size-reduction suggestions otherwise */}
        {csfAvailable ? <>

          {/* Explanation */}
          <Box color='text.secondary' fontSize='sm' my={2}>
            <strong>Experimental:</strong> enable Direct Connection to {vendorName} to work around size limitations.
          </Box>

          {/* Toggle */}
          <Box
            sx={{
              display: 'flex',
              alignItems: 'center',
              gap: 2,
              p: 2,
              borderRadius: 'sm',
              bgcolor: 'background.popup',
              boxShadow: 'md',
            }}
          >

            <Box sx={{ flex: 1 }}>
              <Box color={!csfActive ? undefined : 'primary.solidBg'} fontWeight='lg' mb={0.5}>
                Direct Connection {csfActive && '- Now Try Again'}
              </Box>
              <FormHelperText>
                Connect directly from this client -> {vendorName || 'AI service'}
              </FormHelperText>
            </Box>

            <Switch
              checked={csfActive}
              onChange={(e) => csfToggle(e.target.checked)}
            />
          </Box>

          {/* Regenerate button */}
          {/*{props.onRegenerate && (*/}
          {/*  <Button*/}
          {/*    size='sm'*/}
          {/*    variant={csfActive ? 'solid' : 'outlined'}*/}
          {/*    color={csfActive ? 'success' : 'neutral'}*/}
          {/*    startDecorator={<RefreshIcon />}*/}
          {/*    onClick={props.onRegenerate}*/}
          {/*    sx={{ alignSelf: 'flex-start' }}*/}
          {/*  >*/}
          {/*    {csfActive ? 'Regenerate with Direct Connection' : 'Regenerate'}*/}
          {/*  </Button>*/}
          {/*)}*/}

        </> : (
          <Box>
            <Box sx={{ color: 'text.secondary', my: 1 }}>
              Suggestions:
            </Box>
            <Box component='ul' sx={{ color: 'text.secondary' }}>
              <li>Use the cleanup button in the right pane to hide old messages</li>
              <li>Remove large attachments from the conversation</li>
              {/*<li>Reduce conversation length before sending</li>*/}
            </Box>
          </Box>
        )}
      </Box>
    </Alert>
  );
}
|
||||
@@ -0,0 +1,378 @@
|
||||
import * as React from 'react';
|
||||
import TimeAgo from 'react-timeago';
|
||||
|
||||
import { Box, Checkbox, CircularProgress, Dropdown, IconButton, ListDivider, ListItemDecorator, Menu, MenuButton, MenuItem, Sheet, Typography } from '@mui/joy';
|
||||
import AttachFileRoundedIcon from '@mui/icons-material/AttachFileRounded';
|
||||
import ContentCopyIcon from '@mui/icons-material/ContentCopy';
|
||||
import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline';
|
||||
import DownloadIcon from '@mui/icons-material/Download';
|
||||
import MoreVertIcon from '@mui/icons-material/MoreVert';
|
||||
import VerticalAlignBottomIcon from '@mui/icons-material/VerticalAlignBottom';
|
||||
|
||||
import type { AnthropicAccessSchema } from '~/modules/llms/server/anthropic/anthropic.access';
|
||||
|
||||
import type { ContentScaling } from '~/common/app.theme';
|
||||
import { ConfirmationModal } from '~/common/components/modals/ConfirmationModal';
|
||||
import { GoodTooltip } from '~/common/components/GoodTooltip';
|
||||
import { apiAsync, apiQuery } from '~/common/util/trpc.client';
|
||||
import { convert_Base64_To_UInt8Array } from '~/common/util/blobUtils';
|
||||
import { createTextContentFragment, DMessageContentFragment, DMessageFragmentId, DMessageHostedResourcePart } from '~/common/stores/chat/chat.fragments';
|
||||
import { copyBlobPromiseToClipboard, copyToClipboard } from '~/common/util/clipboardUtils';
|
||||
import { downloadBlob } from '~/common/util/downloadUtils';
|
||||
import { humanReadableBytes } from '~/common/util/textUtils';
|
||||
import { mimeTypeIsPlainText, mimeTypeIsSupportedImage } from '~/common/attachment-drafts/attachment.mimetypes';
|
||||
import { useAIPreferencesStore } from '~/common/stores/store-ai';
|
||||
import { useLlmServiceAccess } from '~/common/stores/llms/hooks/useLlmServiceAccess';
|
||||
import { useOverlayComponents } from '~/common/layout/overlays/useOverlayComponents';
|
||||
|
||||
|
||||
// -- react-query enrichers - stable select functions --
|
||||
|
||||
function _enrichMetadataWithMimeFlags<T extends { mime_type: string }>(meta: T) {
|
||||
return {
|
||||
...meta,
|
||||
mimeIsText: mimeTypeIsPlainText(meta.mime_type),
|
||||
mimeIsImage: mimeTypeIsSupportedImage(meta.mime_type),
|
||||
};
|
||||
}
|
||||
|
||||
function _base64ResponseToBlob({ base64Data, mimeType }: { base64Data: string; mimeType: string }) {
|
||||
const bytes = convert_Base64_To_UInt8Array(base64Data, 'hosted-resource-ant-file');
|
||||
return {
|
||||
blob: new Blob([bytes], { type: mimeType }),
|
||||
httpMimeType: mimeType,
|
||||
httpMimeIsText: mimeTypeIsPlainText(mimeType),
|
||||
httpMimeIsImage: mimeTypeIsSupportedImage(mimeType),
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
/**
 * Chip UI for a file hosted on the Anthropic Files API.
 *
 * Fetches the file's metadata eagerly (cached forever via staleTime: Infinity)
 * and its content lazily (enabled: false, refetched on demand by the handlers).
 * Supports download, copy-to-clipboard, inline-as-text-fragment ("embed"),
 * remote deletion, and a global "always embed" preference toggle.
 */
function AnthropicFileChip(props: {
  access: AnthropicAccessSchema,
  fileId: string,
  contentScaling: ContentScaling,
  onFragmentDelete?: () => void,
  onFragmentReplace?: (newFragment: DMessageContentFragment) => void,
}) {

  // state
  // busy: which action is in flight (false when idle) - drives spinners and disabled states
  const [busy, setBusy] = React.useState<false | 'download' | 'copy' | 'delete' | 'inline'>(false);
  const [actionError, setActionError] = React.useState<string | null>(null);
  const { showPromisedOverlay } = useOverlayComponents();

  // props
  const { access, fileId, onFragmentDelete, onFragmentReplace } = props;

  // external state
  const autoEmbedEnabled = useAIPreferencesStore(state => state.vndAntInlineFiles !== 'off');
  const { data: metadata, isLoading: metaLoading, error: metaError } = apiQuery.llmAnthropic.fileApiGetMetadata.useQuery({ access, fileId }, {
    staleTime: Infinity,
    select: _enrichMetadataWithMimeFlags,
  });
  const { data: fileContent, refetch: refetchFileContent } = apiQuery.llmAnthropic.fileApiDownload.useQuery({ access, fileId }, {
    enabled: false, // on-demand only
    select: _base64ResponseToBlob,
  });


  // derive display info from typed metadata
  const fileName = metadata?.filename || fileId;
  // middle-ellipsize long names: keep first 20 and last 15 characters
  const displayName = fileName.length > 40 ? fileName.slice(0, 20) + '...' + fileName.slice(-15) : fileName;


  // handlers

  // downloads the file content (cached or on-demand) as a browser download
  const handleDownload = React.useCallback(async () => {
    setBusy('download');
    setActionError(null);
    try {
      const data = fileContent || (await refetchFileContent({ cancelRefetch: false, throwOnError: true })).data;
      data && downloadBlob(data.blob, fileName);
    } catch (error: any) {
      setActionError(error?.message || 'Download failed');
    } finally {
      setBusy(false);
    }
  }, [fileContent, refetchFileContent, fileName]);

  // copies the file to the clipboard: as text when plain-text, as a blob otherwise
  const handleCopy = React.useCallback(async () => {
    setBusy('copy');
    setActionError(null);
    try {
      const data = fileContent || (await refetchFileContent({ cancelRefetch: false, throwOnError: true })).data;
      if (!data) return;
      if (data.httpMimeIsText)
        copyToClipboard(await data.blob.text(), fileName);
      else
        copyBlobPromiseToClipboard(data.httpMimeType, Promise.resolve(data.blob), fileName);
    } catch (error: any) {
      setActionError(error?.message || 'Copy failed');
    } finally {
      setBusy(false);
    }
  }, [fileContent, refetchFileContent, fileName]);

  // deletes the file from Anthropic servers, then removes the fragment;
  // shift-click skips the confirmation dialog
  const handleDelete = React.useCallback(async (event: React.MouseEvent) => {
    if (!onFragmentDelete) return;
    if (!event.shiftKey && !await showPromisedOverlay('chat-message-delete-hosted-resource', { rejectWithValue: false }, ({ onResolve, onUserReject }) =>
      <ConfirmationModal
        open onClose={onUserReject} onPositive={() => onResolve(true)}
        confirmationText={<>Delete "{fileName}" from Anthropic servers?<br />This action cannot be undone.</>}
        positiveActionText='Delete'
      />,
    )) return;
    setBusy('delete');
    setActionError(null);
    try {
      // remote deletion
      await apiAsync.llmAnthropic.fileApiDelete.mutate({ access, fileId });
      // fragment removal
      onFragmentDelete();
    } catch (error: any) {
      setActionError(error?.message || 'Delete failed');
    } finally {
      setBusy(false);
    }
  }, [access, fileId, fileName, onFragmentDelete, showPromisedOverlay]);


  // replaces the hosted-resource fragment with the file content inlined as a
  // fenced code block (text only), then best-effort deletes the remote file
  const handleInline = React.useCallback(async () => {
    if (!onFragmentReplace) return;
    setBusy('inline');
    setActionError(null);
    try {
      const data = fileContent || (await refetchFileContent({ cancelRefetch: false, throwOnError: true })).data;
      if (!data) return;

      // text: inline as fenced code block
      if (data.httpMimeIsText) {
        const text = await data.blob.text();

        // fence with adaptive depth (extra backticks if content contains ```)
        let fence = '```';
        while (text.includes(fence) && fence.length < 10)
          fence += '`';
        onFragmentReplace(createTextContentFragment(`${fence}${fileName}\n${text}\n${fence}\n`));
      }
      // image: get dimensions, store in DBlob, and create a Zync asset reference
      // else if (data.httpMimeIsImage) {
      //
      //   const { width, height } = await imageBlobGetDimensions(data.blob).catch(() => ({ width: 0, height: 0 }));
      //
      //   const dblobAssetId = await addDBImageAsset('app-chat', data.blob, {
      //     label: fileName,
      //     origin: { ot: 'generated', source: 'ai-text-to-image', generatorName: 'anthropic-code-execution', prompt: '', parameters: {}, generatedAt: new Date().toISOString() },
      //     metadata: { width, height },
      //   });
      //
      //   onFragmentReplace(createZyncAssetReferenceContentFragment(
      //     nanoidToUuidV4(dblobAssetId, 'convert-dblob-to-dasset'),
      //     fileName,
      //     'image',
      //     {
      //       pt: 'image_ref',
      //       dataRef: createDMessageDataRefDBlob(dblobAssetId, data.httpMimeType, data.blob.size),
      //       ...(fileName ? { altText: fileName } : {}),
      //       ...(width ? { width } : {}),
      //       ...(height ? { height } : {}),
      //     },
      //   ));
      // }
      else
        return setActionError('Cannot inline this file type');

      // fire-and-forget: delete from provider
      apiAsync.llmAnthropic.fileApiDelete.mutate({ access, fileId }).catch(console.error);
    } catch (error: any) {
      setActionError(error?.message || 'Inline failed');
    } finally {
      setBusy(false);
    }
  }, [fileContent, refetchFileContent, access, fileId, fileName, onFragmentReplace]);


  // toggles the global auto-embed preference; enabling requires confirmation
  // and immediately embeds this file
  const handleToggleAutoEmbed = React.useCallback(async () => {
    if (autoEmbedEnabled)
      return useAIPreferencesStore.getState().setVndAntInlineFiles('off');
    if (await showPromisedOverlay('chat-message-auto-embed-notice', { rejectWithValue: false }, ({ onResolve, onUserReject }) =>
      <ConfirmationModal
        open onClose={onUserReject} onPositive={() => onResolve(true)}
        noTitleBar
        lowStakes
        confirmationText={<>
          From now on, files generated by Claude tools (code execution, etc.) will be automatically downloaded and embedded into messages, then removed from Anthropic's File API.
          <br /><br />
          You can change this anytime in <b>Settings > Chat AI > Anthropic File Inlining</b>.
        </>}
        positiveActionText='Enable & Embed'
        negativeActionText='Cancel'
      />,
    )) {
      useAIPreferencesStore.getState().setVndAntInlineFiles('inline-file-and-delete');
      await handleInline();
    }
  }, [autoEmbedEnabled, handleInline, showPromisedOverlay]);


  // derived action availability
  const canCopy = !!metadata?.mimeIsText || !!metadata?.mimeIsImage;
  const canInline = !!onFragmentReplace && !!metadata?.mimeIsText; // for images, replace with ... && canCopy

  const isBusy = !!busy || metaLoading;
  const hasError = !!metaError || !!actionError;
  // 404 on metadata: the file was deleted from the provider
  const isFileGone = !!metaError && typeof metaError === 'object' && 'data' in metaError && (metaError.data?.httpStatus === 404 || metaError.data?.aixFHttpStatus === 404);


  return (
    <Sheet
      variant='soft'
      color='primary'
      sx={{
        display: 'flex',
        alignItems: 'center',
        gap: 1,
        mx: 1.5,
        px: 1.125,
        py: 0.5,
        borderRadius: 'sm',
        overflow: 'hidden',
        maxWidth: '100%',
        boxShadow: 'inset 1px 2px 2px -2px rgba(0, 0, 0, 0.2)',
      }}
    >
      <AttachFileRoundedIcon sx={{ fontSize: 'lg', opacity: 0.5 }} />

      {/* name + size/date/mime line */}
      <Box sx={{ minWidth: 0, flex: 1 }}>
        <Box className='agi-ellipsize' sx={{ fontSize: 'sm', fontWeight: 'md', color: hasError ? 'var(--joy-palette-danger-plainColor)' : undefined }}>
          {metaLoading ? 'Loading...' : isFileGone ? `${fileId} - file no longer available` : hasError ? `${displayName} - ${actionError || metaError?.message || 'Could not load file info'}` : displayName}
        </Box>
        {metadata && (
          <Box sx={{ fontSize: 'xs', opacity: 0.6 }}>
            {humanReadableBytes(metadata.size_bytes)} · <TimeAgo date={metadata.created_at} /> · {metadata.mime_type}
          </Box>
        )}
      </Box>

      {/* actions: full set while the file exists, just 'remove fragment' when gone */}
      {!isFileGone ? <>

        {canCopy && (
          <GoodTooltip title='Copy to clipboard'>
            <IconButton variant='soft' color='primary' disabled={isBusy} onClick={handleCopy} size='sm'>
              {busy === 'copy' ? <CircularProgress size='sm' /> : <ContentCopyIcon sx={{ fontSize: 'lg' }} />}
            </IconButton>
          </GoodTooltip>
        )}
        {/*{canInline && (*/}
        {/*  <GoodTooltip title='Embed in chat'>*/}
        {/*    <IconButton variant='soft' color='primary' disabled={isBusy} onClick={handleInline} size='sm'>*/}
        {/*      {busy === 'inline' ? <CircularProgress size='sm' /> : <VerticalAlignBottomIcon sx={{ fontSize: 'lg' }} />}*/}
        {/*    </IconButton>*/}
        {/*  </GoodTooltip>*/}
        {/*)}*/}
        <GoodTooltip title='Download file'>
          <IconButton variant='soft' color='primary' disabled={isBusy || isFileGone} onClick={handleDownload} size='sm'>
            {busy === 'download' ? <CircularProgress size='sm' /> : <DownloadIcon sx={{ fontSize: 'lg' }} />}
          </IconButton>
        </GoodTooltip>
        {(onFragmentDelete || onFragmentReplace) && (
          <Dropdown>
            <MenuButton slots={{ root: IconButton }} slotProps={{ root: { variant: 'soft', color: 'primary', size: 'sm', disabled: isBusy && busy !== 'inline' } }}>
              {(busy === 'delete' || busy === 'inline') ? <CircularProgress size='sm' /> : <MoreVertIcon sx={{ fontSize: 'lg' }} />}
            </MenuButton>
            <Menu placement='bottom-end' sx={{ minWidth: 220 }}>
              {/* Inline as doc attachment */}
              <MenuItem disabled={!canInline || isBusy} onClick={handleInline}>
                <ListItemDecorator><VerticalAlignBottomIcon /></ListItemDecorator>
                <div>
                  Embed
                  {!canInline && <Typography level='body-xs' sx={{ opacity: 0.6 }}>
                    File type not supported
                  </Typography>}
                </div>
              </MenuItem>
              {/* Auto-embed toggle - shared global preference */}
              {!autoEmbedEnabled && <>
                <MenuItem disabled={!canInline || isBusy} onClick={handleToggleAutoEmbed}>
                  <ListItemDecorator><Checkbox checked={autoEmbedEnabled} readOnly color='neutral' /></ListItemDecorator>
                  <div>
                    Always embed
                    <Typography level='body-xs' sx={{ opacity: 0.6 }}>
                      Change anytime in Settings
                    </Typography>
                  </div>
                </MenuItem>
              </>}
              {!!onFragmentDelete && <ListDivider />}
              {/* Delete from provider */}
              {!!onFragmentDelete && (
                <MenuItem color='danger' disabled={isBusy} onClick={handleDelete}>
                  <ListItemDecorator><DeleteOutlineIcon /></ListItemDecorator>
                  Delete
                </MenuItem>
              )}
            </Menu>
          </Dropdown>
        )}

      </> : onFragmentDelete && (
        <GoodTooltip title='Remove from message'>
          <IconButton variant='plain' color='danger' onClick={onFragmentDelete} size='sm'>
            <DeleteOutlineIcon sx={{ fontSize: 'lg' }} />
          </IconButton>
        </GoodTooltip>
      )}
    </Sheet>
  );
}
|
||||
|
||||
/** Fallback chip shown when no service credentials are available to resolve a hosted file. */
function NoAccessChip({ fileId }: { fileId: string }) {
  const chipSx = { display: 'inline-flex', alignItems: 'center', gap: 1, px: 1.5, py: 0.5, borderRadius: 'sm' };
  return (
    <Sheet variant='outlined' sx={chipSx}>
      <AttachFileRoundedIcon sx={{ fontSize: 'lg', opacity: 0.4 }} />
      <Typography level='body-sm' sx={{ opacity: 0.5 }}>
        {fileId} (no credentials)
      </Typography>
    </Sheet>
  );
}
|
||||
|
||||
|
||||
/**
 * Renders a hosted-resource content fragment (a file stored on a provider's
 * servers, referenced by id rather than embedded in the message).
 *
 * Currently only supports Anthropic Files API resources; anything else - or a
 * resource whose service credentials cannot be resolved - renders as an inert
 * NoAccessChip.
 */
export function BlockPartHostedResource(props: {
  hostedResourcePart: DMessageHostedResourcePart,
  fragmentId: DMessageFragmentId,
  messageGeneratorLlmId?: string | null,
  contentScaling: ContentScaling,
  onFragmentDelete?: (fragmentId: DMessageFragmentId) => void,
  onFragmentReplace?: (fragmentId: DMessageFragmentId, newFragment: DMessageContentFragment) => void,
}) {

  const { resource } = props.hostedResourcePart;
  const { fragmentId, onFragmentDelete, onFragmentReplace } = props;

  // bind the fragmentId so the child chip only deals with fragment-less callbacks
  const handleFragmentDelete = React.useCallback(() => {
    onFragmentDelete?.(fragmentId);
  }, [fragmentId, onFragmentDelete]);

  const handleFragmentReplace = React.useCallback((newFragment: DMessageContentFragment) => {
    onFragmentReplace?.(fragmentId, newFragment);
  }, [fragmentId, onFragmentReplace]);

  // TODO: OpenAI container_file_citation support (via: 'openai' with fileId + containerId)?

  // reactive service + access resolution
  // NOTE: hooks above and this one must run unconditionally, before the early return below
  const isAnthropic = resource.via === 'anthropic';
  const antAccess = useLlmServiceAccess(isAnthropic ? props.messageGeneratorLlmId : undefined, 'anthropic');

  // only support Anthropic files for now
  if (!isAnthropic || !antAccess)
    return <NoAccessChip fileId={resource?.fileId || 'unknown'} />;

  return (
    <AnthropicFileChip
      access={antAccess}
      fileId={resource.fileId}
      contentScaling={props.contentScaling}
      onFragmentDelete={onFragmentDelete ? handleFragmentDelete : undefined}
      onFragmentReplace={onFragmentReplace ? handleFragmentReplace : undefined}
    />
  );
}
|
||||
@@ -1,7 +1,6 @@
|
||||
import * as React from 'react';
|
||||
|
||||
import type { SxProps } from '@mui/joy/styles/types';
|
||||
import { Box } from '@mui/joy';
|
||||
|
||||
import { BlocksContainer } from '~/modules/blocks/BlocksContainers';
|
||||
import { RenderImageRefDBlob } from '~/modules/blocks/image/RenderImageRefDBlob';
|
||||
@@ -78,17 +77,15 @@ export function BlockPartImageRef(props: {
|
||||
scaledImageSx={scaledImageSx}
|
||||
variant='content-part'
|
||||
/>
|
||||
) : (
|
||||
<Box>
|
||||
ContentPartImageRef: unknown reftype
|
||||
</Box>
|
||||
)}
|
||||
) : 'BlockPartImageRef: unknown reftype'}
|
||||
|
||||
{/* Image viewer modal */}
|
||||
{!props.disableViewer && viewingImageRefPart && (
|
||||
<ViewImageRefPartModal
|
||||
imageRefPart={viewingImageRefPart}
|
||||
onClose={() => setViewingImageRefPart(null)}
|
||||
onDeleteFragment={onFragmentDelete ? handleDeleteFragment : undefined}
|
||||
onReplaceFragment={onFragmentReplace ? handleReplaceFragment : undefined}
|
||||
/>
|
||||
)}
|
||||
|
||||
|
||||
@@ -27,11 +27,11 @@ export function BlockPartText_AutoBlocks(props: {
|
||||
isMobile: boolean,
|
||||
fitScreen: boolean,
|
||||
disableMarkdownText: boolean,
|
||||
enhanceCodeBlocks: boolean,
|
||||
renderAsWordsDiff?: WordsDiff,
|
||||
|
||||
showUnsafeHtmlCode?: boolean,
|
||||
optiAllowSubBlocksMemo: boolean,
|
||||
optiStreamingLastFragment?: boolean,
|
||||
|
||||
onContextMenu?: (event: React.MouseEvent) => void;
|
||||
onDoubleClick?: (event: React.MouseEvent) => void;
|
||||
@@ -75,9 +75,10 @@ export function BlockPartText_AutoBlocks(props: {
|
||||
isMobile={props.isMobile}
|
||||
showUnsafeHtmlCode={props.showUnsafeHtmlCode}
|
||||
renderAsWordsDiff={props.renderAsWordsDiff}
|
||||
codeRenderVariant={props.enhanceCodeBlocks ? 'enhanced' : 'outlined'}
|
||||
codeRenderVariant='enhanced' // was: { props.enhanceCodeBlocks ? 'enhanced' : 'outlined' }
|
||||
textRenderVariant={props.disableMarkdownText ? 'text' : 'markdown'}
|
||||
optiAllowSubBlocksMemo={props.optiAllowSubBlocksMemo}
|
||||
optiStreamingLastFragment={props.optiStreamingLastFragment}
|
||||
onContextMenu={props.onContextMenu}
|
||||
onDoubleClick={props.onDoubleClick}
|
||||
setText={!props.setEditedText ? undefined : handleSetText}
|
||||
|
||||
@@ -14,8 +14,9 @@ import type { ChatMessageTextPartEditState } from '../ChatMessage';
|
||||
import { BlockEdit_TextFragment } from './BlockEdit_TextFragment';
|
||||
import { BlockOpEmpty } from './BlockOpEmpty';
|
||||
import { BlockPartError } from './BlockPartError';
|
||||
import { BlockPartHostedResource } from './BlockPartHostedResource';
|
||||
import { BlockPartImageRef } from './BlockPartImageRef';
|
||||
import { BlockPartModelAux } from '../fragments-void/BlockPartModelAux';
|
||||
import { BlockPartModelAux, BlockPartModelAuxMemo } from '../fragments-void/BlockPartModelAux';
|
||||
import { BlockPartPlaceholder } from '../fragments-void/BlockPartPlaceholder';
|
||||
import { BlockPartText_AutoBlocks } from './BlockPartText_AutoBlocks';
|
||||
import { BlockPartToolInvocation } from './BlockPartToolInvocation';
|
||||
@@ -56,9 +57,9 @@ export function ContentFragments(props: {
|
||||
isMobile: boolean,
|
||||
messageRole: DMessageRole,
|
||||
messagePendingIncomplete?: boolean,
|
||||
messageGeneratorLlmId?: string | null,
|
||||
optiAllowSubBlocksMemo?: boolean,
|
||||
disableMarkdownText: boolean,
|
||||
enhanceCodeBlocks: boolean,
|
||||
showUnsafeHtmlCode?: boolean,
|
||||
|
||||
textEditsState: ChatMessageTextPartEditState | null,
|
||||
@@ -86,6 +87,7 @@ export function ContentFragments(props: {
|
||||
// solo placeholder - dataStreamViz trigger
|
||||
const showDataStreamViz =
|
||||
!Release.Features.LIGHTER_ANIMATIONS
|
||||
&& !!props.messagePendingIncomplete // if generating
|
||||
&& props.uiComplexityMode !== 'minimal'
|
||||
&& props.contentFragments.length === 1
|
||||
// && props.noVoidFragments // not needed, we have all the interleaved fragments here
|
||||
@@ -95,7 +97,17 @@ export function ContentFragments(props: {
|
||||
// Content Fragments Edit Zero-State: button to create a new TextContentFragment
|
||||
if (isEditingText && !props.contentFragments.some(isTextContentFragment))
|
||||
return !props.onFragmentAddBlank ? null : (
|
||||
<Button aria-label='message body empty' variant='plain' color='neutral' onClick={props.onFragmentAddBlank} sx={{ justifyContent: 'flex-start' }}>
|
||||
<Button
|
||||
aria-label='message body empty'
|
||||
color={fromAssistant ? 'neutral' : 'primary'}
|
||||
variant='outlined'
|
||||
onClick={props.onFragmentAddBlank}
|
||||
sx={{
|
||||
justifyContent: 'flex-start',
|
||||
backgroundColor: fromAssistant ? 'neutral.softBg' : 'primary.softBg',
|
||||
'&:hover': { backgroundColor: fromAssistant ? 'neutral.softHoverBg' : 'primary.softHoverBg' },
|
||||
}}
|
||||
>
|
||||
add text ...
|
||||
</Button>
|
||||
);
|
||||
@@ -123,6 +135,8 @@ export function ContentFragments(props: {
|
||||
|
||||
// simplify
|
||||
const { fId, ft } = fragment;
|
||||
const isLastFragment = fragmentIndex === props.contentFragments.length - 1;
|
||||
const optimizeMemoBeforeLastBlock = props.optiAllowSubBlocksMemo === true && !isLastFragment;
|
||||
|
||||
// VOID FRAGMENTS (reasoning, placeholders - interleaved with content)
|
||||
if (ft === 'void') {
|
||||
@@ -135,8 +149,13 @@ export function ContentFragments(props: {
|
||||
// return null;
|
||||
|
||||
case 'ma':
|
||||
// skip rendering empty reasoning fragments (created as vehicles for vendor state / reasoning continuity)
|
||||
const isActivelyStreaming = isLastFragment && !!props.messagePendingIncomplete;
|
||||
if (!part.aText && !part.redactedData?.length && !isActivelyStreaming)
|
||||
return null;
|
||||
const BlockPartModelAuxMemoOrNot = optimizeMemoBeforeLastBlock ? BlockPartModelAuxMemo : BlockPartModelAux;
|
||||
return (
|
||||
<BlockPartModelAux
|
||||
<BlockPartModelAuxMemoOrNot
|
||||
key={fId}
|
||||
fragmentId={fId}
|
||||
auxType={part.aType}
|
||||
@@ -146,7 +165,7 @@ export function ContentFragments(props: {
|
||||
messagePendingIncomplete={!!props.messagePendingIncomplete}
|
||||
zenMode={props.uiComplexityMode === 'minimal'}
|
||||
contentScaling={props.contentScaling}
|
||||
isLastFragment={fragmentIndex === props.contentFragments.length - 1}
|
||||
isLastFragment={isLastFragment}
|
||||
onFragmentDelete={props.onFragmentDelete}
|
||||
onFragmentReplace={props.onFragmentReplace}
|
||||
/>
|
||||
@@ -156,14 +175,13 @@ export function ContentFragments(props: {
|
||||
return (
|
||||
<BlockPartPlaceholder
|
||||
key={fId}
|
||||
placeholderText={part.pText}
|
||||
placeholderType={part.pType}
|
||||
placeholderModelOp={part.modelOp}
|
||||
placeholderAixControl={part.aixControl}
|
||||
messageRole={props.messageRole}
|
||||
fragmentId={fId}
|
||||
placeholderPart={part}
|
||||
contentScaling={props.contentScaling}
|
||||
showAsItalic
|
||||
messagePendingIncomplete={!!props.messagePendingIncomplete}
|
||||
showAsDataStreamViz={showDataStreamViz}
|
||||
zenMode={props.uiComplexityMode === 'minimal'}
|
||||
onFragmentDelete={props.messagePendingIncomplete ? undefined : props.onFragmentDelete}
|
||||
/>
|
||||
);
|
||||
|
||||
@@ -172,7 +190,7 @@ export function ContentFragments(props: {
|
||||
|
||||
default:
|
||||
const _exhaustiveVoidCheck: never = part;
|
||||
// fallthrough - we don't handle these here anymore
|
||||
// fallthrough - we don't handle these here anymore
|
||||
case 'annotations':
|
||||
return (
|
||||
<ScaledTextBlockRenderer
|
||||
@@ -243,7 +261,9 @@ export function ContentFragments(props: {
|
||||
<BlockPartError
|
||||
key={fId}
|
||||
errorText={part.error}
|
||||
errorHint={part.hint}
|
||||
messageRole={props.messageRole}
|
||||
messageGeneratorLlmId={props.messageGeneratorLlmId}
|
||||
contentScaling={props.contentScaling}
|
||||
/>
|
||||
);
|
||||
@@ -320,10 +340,10 @@ export function ContentFragments(props: {
|
||||
fitScreen={props.fitScreen}
|
||||
isMobile={props.isMobile}
|
||||
disableMarkdownText={props.disableMarkdownText}
|
||||
enhanceCodeBlocks={props.enhanceCodeBlocks}
|
||||
// renderWordsDiff={wordsDiff || undefined}
|
||||
showUnsafeHtmlCode={props.showUnsafeHtmlCode}
|
||||
optiAllowSubBlocksMemo={!!props.optiAllowSubBlocksMemo}
|
||||
optiStreamingLastFragment={!!props.optiAllowSubBlocksMemo && isLastFragment && props.uiComplexityMode === 'minimal'}
|
||||
onContextMenu={props.onContextMenu}
|
||||
onDoubleClick={props.onDoubleClick}
|
||||
/>
|
||||
@@ -349,6 +369,19 @@ export function ContentFragments(props: {
|
||||
/>
|
||||
);
|
||||
|
||||
case 'hosted_resource':
|
||||
return (
|
||||
<BlockPartHostedResource
|
||||
key={fId}
|
||||
hostedResourcePart={part}
|
||||
fragmentId={fId}
|
||||
messageGeneratorLlmId={props.messageGeneratorLlmId}
|
||||
contentScaling={props.contentScaling}
|
||||
onFragmentDelete={props.onFragmentDelete}
|
||||
onFragmentReplace={props.onFragmentReplace}
|
||||
/>
|
||||
);
|
||||
|
||||
case '_pt_sentinel':
|
||||
return null;
|
||||
|
||||
|
||||
@@ -23,10 +23,20 @@ const propGridSx: SxProps = {
|
||||
alignItems: 'center',
|
||||
columnGap: 2,
|
||||
rowGap: 1,
|
||||
// labels
|
||||
'& > :nth-of-type(odd)': {
|
||||
color: 'text.secondary',
|
||||
fontSize: 'xs',
|
||||
},
|
||||
// values
|
||||
'& > :nth-of-type(even)': {
|
||||
// fontWeight: 'bold',
|
||||
color: 'text.primary',
|
||||
// agi-ellipsize
|
||||
whiteSpace: 'nowrap',
|
||||
overflow: 'hidden',
|
||||
textOverflow: 'ellipsis',
|
||||
},
|
||||
};
|
||||
|
||||
const textPageSx: SxProps = {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user